path: root/src/rocksdb/db_stress_tool
Diffstat (limited to 'src/rocksdb/db_stress_tool')
-rw-r--r--  src/rocksdb/db_stress_tool/CMakeLists.txt  17
-rw-r--r--  src/rocksdb/db_stress_tool/batched_ops_stress.cc  399
-rw-r--r--  src/rocksdb/db_stress_tool/cf_consistency_stress.cc  640
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress.cc  25
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_common.cc  460
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_common.h  650
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_compaction_filter.h  96
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_driver.cc  212
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_driver.h  17
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_env_wrapper.h  42
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_gflags.cc  1074
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_listener.cc  191
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_listener.h  271
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_shared_state.cc  17
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_shared_state.h  427
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_stat.cc  17
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_stat.h  219
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_table_properties_collector.h  65
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_test_base.cc  3383
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_test_base.h  337
-rw-r--r--  src/rocksdb/db_stress_tool/db_stress_tool.cc  365
-rw-r--r--  src/rocksdb/db_stress_tool/expected_state.cc  761
-rw-r--r--  src/rocksdb/db_stress_tool/expected_state.h  287
-rw-r--r--  src/rocksdb/db_stress_tool/multi_ops_txns_stress.cc  1808
-rw-r--r--  src/rocksdb/db_stress_tool/multi_ops_txns_stress.h  444
-rw-r--r--  src/rocksdb/db_stress_tool/no_batched_ops_stress.cc  1505
26 files changed, 13729 insertions, 0 deletions
diff --git a/src/rocksdb/db_stress_tool/CMakeLists.txt b/src/rocksdb/db_stress_tool/CMakeLists.txt
new file mode 100644
index 000000000..96d70dd0e
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/CMakeLists.txt
@@ -0,0 +1,17 @@
+add_executable(db_stress${ARTIFACT_SUFFIX}
+ batched_ops_stress.cc
+ cf_consistency_stress.cc
+ db_stress.cc
+ db_stress_common.cc
+ db_stress_driver.cc
+ db_stress_gflags.cc
+ db_stress_listener.cc
+ db_stress_shared_state.cc
+ db_stress_stat.cc
+ db_stress_test_base.cc
+ db_stress_tool.cc
+ expected_state.cc
+ multi_ops_txns_stress.cc
+ no_batched_ops_stress.cc)
+target_link_libraries(db_stress${ARTIFACT_SUFFIX} ${ROCKSDB_LIB} ${THIRDPARTY_LIBS})
+list(APPEND tool_deps db_stress)
diff --git a/src/rocksdb/db_stress_tool/batched_ops_stress.cc b/src/rocksdb/db_stress_tool/batched_ops_stress.cc
new file mode 100644
index 000000000..3f3446076
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/batched_ops_stress.cc
@@ -0,0 +1,399 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifdef GFLAGS
+#include "db_stress_tool/db_stress_common.h"
+
+namespace ROCKSDB_NAMESPACE {
+class BatchedOpsStressTest : public StressTest {
+ public:
+ BatchedOpsStressTest() {}
+ virtual ~BatchedOpsStressTest() {}
+
+ bool IsStateTracked() const override { return false; }
+
+ // Given a key K and value V, this puts ("0"+K, V+"0"), ("1"+K, V+"1"), ...,
+ // ("9"+K, V+"9") in DB atomically i.e in a single batch.
+ // Also refer BatchedOpsStressTest::TestGet
+ Status TestPut(ThreadState* thread, WriteOptions& write_opts,
+ const ReadOptions& /* read_opts */,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys,
+ char (&value)[100]) override {
+ assert(!rand_column_families.empty());
+ assert(!rand_keys.empty());
+
+ const std::string key_body = Key(rand_keys[0]);
+
+ const uint32_t value_base =
+ thread->rand.Next() % thread->shared->UNKNOWN_SENTINEL;
+ const size_t sz = GenerateValue(value_base, value, sizeof(value));
+ const std::string value_body = Slice(value, sz).ToString();
+
+ WriteBatch batch(0 /* reserved_bytes */, 0 /* max_bytes */,
+ FLAGS_batch_protection_bytes_per_key,
+ FLAGS_user_timestamp_size);
+
+ ColumnFamilyHandle* const cfh = column_families_[rand_column_families[0]];
+ assert(cfh);
+
+ for (int i = 9; i >= 0; --i) {
+ const std::string num = std::to_string(i);
+
+ // Note: the digit in num is prepended to the key; however, it is appended
+ // to the value because we want the "value base" to be encoded uniformly
+ // at the beginning of the value for all types of stress tests (e.g.
+ // batched, non-batched, CF consistency).
+ const std::string k = num + key_body;
+ const std::string v = value_body + num;
+
+ if (FLAGS_use_merge) {
+ batch.Merge(cfh, k, v);
+ } else if (FLAGS_use_put_entity_one_in > 0 &&
+ (value_base % FLAGS_use_put_entity_one_in) == 0) {
+ batch.PutEntity(cfh, k, GenerateWideColumns(value_base, v));
+ } else {
+ batch.Put(cfh, k, v);
+ }
+ }
+
+ const Status s = db_->Write(write_opts, &batch);
+
+ if (!s.ok()) {
+ fprintf(stderr, "multiput error: %s\n", s.ToString().c_str());
+ thread->stats.AddErrors(1);
+ } else {
+ // we did 10 writes each of size sz + 1
+ thread->stats.AddBytesForWrites(10, (sz + 1) * 10);
+ }
+
+ return s;
+ }
+
+ // Given a key K, this deletes ("0"+K), ("1"+K), ..., ("9"+K)
+ // from the DB atomically, i.e. in a single batch. Also see MultiGet.
+ Status TestDelete(ThreadState* thread, WriteOptions& writeoptions,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ std::string keys[10] = {"9", "7", "5", "3", "1", "8", "6", "4", "2", "0"};
+
+ WriteBatch batch(0 /* reserved_bytes */, 0 /* max_bytes */,
+ FLAGS_batch_protection_bytes_per_key,
+ FLAGS_user_timestamp_size);
+ Status s;
+ auto cfh = column_families_[rand_column_families[0]];
+ std::string key_str = Key(rand_keys[0]);
+ for (int i = 0; i < 10; i++) {
+ keys[i] += key_str;
+ batch.Delete(cfh, keys[i]);
+ }
+
+ s = db_->Write(writeoptions, &batch);
+ if (!s.ok()) {
+ fprintf(stderr, "multidelete error: %s\n", s.ToString().c_str());
+ thread->stats.AddErrors(1);
+ } else {
+ thread->stats.AddDeletes(10);
+ }
+
+ return s;
+ }
+
+ Status TestDeleteRange(ThreadState* /* thread */,
+ WriteOptions& /* write_opts */,
+ const std::vector<int>& /* rand_column_families */,
+ const std::vector<int64_t>& /* rand_keys */) override {
+ assert(false);
+ return Status::NotSupported(
+ "BatchedOpsStressTest does not support "
+ "TestDeleteRange");
+ }
+
+ void TestIngestExternalFile(
+ ThreadState* /* thread */,
+ const std::vector<int>& /* rand_column_families */,
+ const std::vector<int64_t>& /* rand_keys */) override {
+ assert(false);
+ fprintf(stderr,
+ "BatchedOpsStressTest does not support "
+ "TestIngestExternalFile\n");
+ std::terminate();
+ }
+
+ // Given a key K, this gets values for "0"+K, "1"+K, ..., "9"+K
+ // in the same snapshot, and verifies that all the values are of the form
+ // V+"0", V+"1", ..., V+"9".
+ // ASSUMES that BatchedOpsStressTest::TestPut was used to put (K, V) into
+ // the DB.
+ Status TestGet(ThreadState* thread, const ReadOptions& readoptions,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ std::string keys[10] = {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"};
+ Slice key_slices[10];
+ std::string values[10];
+ ReadOptions readoptionscopy = readoptions;
+ readoptionscopy.snapshot = db_->GetSnapshot();
+ std::string key_str = Key(rand_keys[0]);
+ Slice key = key_str;
+ auto cfh = column_families_[rand_column_families[0]];
+ std::string from_db;
+ Status s;
+ for (int i = 0; i < 10; i++) {
+ keys[i] += key.ToString();
+ key_slices[i] = keys[i];
+ s = db_->Get(readoptionscopy, cfh, key_slices[i], &from_db);
+ if (!s.ok() && !s.IsNotFound()) {
+ fprintf(stderr, "get error: %s\n", s.ToString().c_str());
+ values[i] = "";
+ thread->stats.AddErrors(1);
+ // we continue after error rather than exiting so that we can
+ // find more errors if any
+ } else if (s.IsNotFound()) {
+ values[i] = "";
+ thread->stats.AddGets(1, 0);
+ } else {
+ values[i] = from_db;
+
+ assert(!keys[i].empty());
+ assert(!values[i].empty());
+
+ const char expected = keys[i].front();
+ const char actual = values[i].back();
+
+ if (expected != actual) {
+ fprintf(stderr, "get error expected = %c actual = %c\n", expected,
+ actual);
+ }
+
+ values[i].pop_back(); // get rid of the differing character
+
+ thread->stats.AddGets(1, 1);
+ }
+ }
+ db_->ReleaseSnapshot(readoptionscopy.snapshot);
+
+ // Now that we retrieved all values, check that they all match
+ for (int i = 1; i < 10; i++) {
+ if (values[i] != values[0]) {
+ fprintf(stderr, "get error: inconsistent values for key %s: %s, %s\n",
+ key.ToString(true).c_str(), StringToHex(values[0]).c_str(),
+ StringToHex(values[i]).c_str());
+ // we continue after error rather than exiting so that we can
+ // find more errors if any
+ }
+ }
+
+ return s;
+ }
+
+ std::vector<Status> TestMultiGet(
+ ThreadState* thread, const ReadOptions& readoptions,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ size_t num_keys = rand_keys.size();
+ std::vector<Status> ret_status(num_keys);
+ std::array<std::string, 10> keys = {
+ {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}};
+ size_t num_prefixes = keys.size();
+ for (size_t rand_key = 0; rand_key < num_keys; ++rand_key) {
+ std::vector<Slice> key_slices;
+ std::vector<PinnableSlice> values(num_prefixes);
+ std::vector<Status> statuses(num_prefixes);
+ ReadOptions readoptionscopy = readoptions;
+ readoptionscopy.snapshot = db_->GetSnapshot();
+ readoptionscopy.rate_limiter_priority =
+ FLAGS_rate_limit_user_ops ? Env::IO_USER : Env::IO_TOTAL;
+ std::vector<std::string> key_str;
+ key_str.reserve(num_prefixes);
+ key_slices.reserve(num_prefixes);
+ std::string from_db;
+ ColumnFamilyHandle* cfh = column_families_[rand_column_families[0]];
+
+ for (size_t key = 0; key < num_prefixes; ++key) {
+ key_str.emplace_back(keys[key] + Key(rand_keys[rand_key]));
+ key_slices.emplace_back(key_str.back());
+ }
+ db_->MultiGet(readoptionscopy, cfh, num_prefixes, key_slices.data(),
+ values.data(), statuses.data());
+ for (size_t i = 0; i < num_prefixes; i++) {
+ Status s = statuses[i];
+ if (!s.ok() && !s.IsNotFound()) {
+ fprintf(stderr, "multiget error: %s\n", s.ToString().c_str());
+ thread->stats.AddErrors(1);
+ ret_status[rand_key] = s;
+ // we continue after error rather than exiting so that we can
+ // find more errors if any
+ } else if (s.IsNotFound()) {
+ thread->stats.AddGets(1, 0);
+ ret_status[rand_key] = s;
+ } else {
+ assert(!keys[i].empty());
+ assert(!values[i].empty());
+
+ const char expected = keys[i][0];
+ const char actual = values[i][values[i].size() - 1];
+
+ if (expected != actual) {
+ fprintf(stderr, "multiget error expected = %c actual = %c\n",
+ expected, actual);
+ }
+
+ values[i].remove_suffix(1); // get rid of the differing character
+
+ thread->stats.AddGets(1, 1);
+ }
+ }
+ db_->ReleaseSnapshot(readoptionscopy.snapshot);
+
+ // Now that we retrieved all values, check that they all match
+ for (size_t i = 1; i < num_prefixes; i++) {
+ if (values[i] != values[0]) {
+ fprintf(stderr,
+ "multiget error: inconsistent values for key %s: %s, %s\n",
+ StringToHex(key_str[i]).c_str(),
+ StringToHex(values[0].ToString()).c_str(),
+ StringToHex(values[i].ToString()).c_str());
+ // we continue after error rather than exiting so that we can
+ // find more errors if any
+ }
+ }
+ }
+
+ return ret_status;
+ }
+
+ // Given a key, this does prefix scans for "0"+P, "1"+P, ..., "9"+P
+ // in the same snapshot where P is the first FLAGS_prefix_size - 1 bytes
+ // of the key. Each of these 10 scans returns a series of values;
+ // each series should be the same length, and it is verified for each
+ // index i that all the i'th values are of the form V+"0", V+"1", ..., V+"9".
+ // ASSUMES that BatchedOpsStressTest::TestPut was used to put (K, V).
+ Status TestPrefixScan(ThreadState* thread, const ReadOptions& readoptions,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ assert(!rand_column_families.empty());
+ assert(!rand_keys.empty());
+
+ const std::string key = Key(rand_keys[0]);
+
+ assert(FLAGS_prefix_size > 0);
+ const size_t prefix_to_use = static_cast<size_t>(FLAGS_prefix_size);
+
+ constexpr size_t num_prefixes = 10;
+
+ std::array<std::string, num_prefixes> prefixes;
+ std::array<Slice, num_prefixes> prefix_slices;
+ std::array<ReadOptions, num_prefixes> ro_copies;
+ std::array<std::string, num_prefixes> upper_bounds;
+ std::array<Slice, num_prefixes> ub_slices;
+ std::array<std::unique_ptr<Iterator>, num_prefixes> iters;
+
+ const Snapshot* const snapshot = db_->GetSnapshot();
+
+ ColumnFamilyHandle* const cfh = column_families_[rand_column_families[0]];
+ assert(cfh);
+
+ for (size_t i = 0; i < num_prefixes; ++i) {
+ prefixes[i] = std::to_string(i) + key;
+ prefix_slices[i] = Slice(prefixes[i].data(), prefix_to_use);
+
+ ro_copies[i] = readoptions;
+ ro_copies[i].snapshot = snapshot;
+ if (thread->rand.OneIn(2) &&
+ GetNextPrefix(prefix_slices[i], &(upper_bounds[i]))) {
+ // For half of the time, set the upper bound to the next prefix
+ ub_slices[i] = upper_bounds[i];
+ ro_copies[i].iterate_upper_bound = &(ub_slices[i]);
+ }
+
+ iters[i].reset(db_->NewIterator(ro_copies[i], cfh));
+ iters[i]->Seek(prefix_slices[i]);
+ }
+
+ uint64_t count = 0;
+
+ while (iters[0]->Valid() && iters[0]->key().starts_with(prefix_slices[0])) {
+ ++count;
+
+ std::array<std::string, num_prefixes> values;
+
+ // get list of all values for this iteration
+ for (size_t i = 0; i < num_prefixes; ++i) {
+ // no iterator should finish before the first one
+ assert(iters[i]->Valid() &&
+ iters[i]->key().starts_with(prefix_slices[i]));
+ values[i] = iters[i]->value().ToString();
+
+ // make sure the last character of the value is the expected digit
+ assert(!prefixes[i].empty());
+ assert(!values[i].empty());
+
+ const char expected = prefixes[i].front();
+ const char actual = values[i].back();
+
+ if (expected != actual) {
+ fprintf(stderr, "prefix scan error expected = %c actual = %c\n",
+ expected, actual);
+ }
+
+ values[i].pop_back(); // get rid of the differing character
+
+ // make sure all values are equivalent
+ if (values[i] != values[0]) {
+ fprintf(stderr,
+ "prefix scan error : %" ROCKSDB_PRIszt
+ ", inconsistent values for prefix %s: %s, %s\n",
+ i, prefix_slices[i].ToString(/* hex */ true).c_str(),
+ StringToHex(values[0]).c_str(),
+ StringToHex(values[i]).c_str());
+ // we continue after error rather than exiting so that we can
+ // find more errors if any
+ }
+
+ // make sure value() and columns() are consistent
+ const WideColumns expected_columns = GenerateExpectedWideColumns(
+ GetValueBase(iters[i]->value()), iters[i]->value());
+ if (iters[i]->columns() != expected_columns) {
+ fprintf(stderr,
+ "prefix scan error : %" ROCKSDB_PRIszt
+ ", value and columns inconsistent for prefix %s: %s\n",
+ i, prefix_slices[i].ToString(/* hex */ true).c_str(),
+ DebugString(iters[i]->value(), iters[i]->columns(),
+ expected_columns)
+ .c_str());
+ }
+
+ iters[i]->Next();
+ }
+ }
+
+ // cleanup iterators and snapshot
+ for (size_t i = 0; i < num_prefixes; ++i) {
+ // if the first iterator finished, they should have all finished
+ assert(!iters[i]->Valid() ||
+ !iters[i]->key().starts_with(prefix_slices[i]));
+ assert(iters[i]->status().ok());
+ }
+
+ db_->ReleaseSnapshot(snapshot);
+
+ thread->stats.AddPrefixes(1, count);
+
+ return Status::OK();
+ }
+
+ void VerifyDb(ThreadState* /* thread */) const override {}
+
+ void ContinuouslyVerifyDb(ThreadState* /* thread */) const override {}
+};
+
+StressTest* CreateBatchedOpsStressTest() { return new BatchedOpsStressTest(); }
+
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
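
For reference, here is a minimal standalone sketch (not part of the patch) of the encoding convention that BatchedOpsStressTest::TestPut and TestGet above rely on: the digit is prepended to the key but appended to the value, so after stripping the last byte of each retrieved value, all ten values must be identical. The helper name BuildBatchedPairs is illustrative only.

#include <array>
#include <cassert>
#include <string>
#include <utility>

// Build the ten (key, value) pairs written for one logical (K, V) pair,
// mirroring the prepend/append convention used by TestPut.
std::array<std::pair<std::string, std::string>, 10> BuildBatchedPairs(
    const std::string& key_body, const std::string& value_body) {
  std::array<std::pair<std::string, std::string>, 10> pairs;
  for (int i = 0; i < 10; ++i) {
    const std::string num = std::to_string(i);
    pairs[i] = {num + key_body, value_body + num};  // e.g. "3"+K -> V+"3"
  }
  return pairs;
}

int main() {
  const auto pairs = BuildBatchedPairs("key0042", "somevalue");
  for (const auto& kv : pairs) {
    // The digit prefix of the key matches the digit suffix of the value.
    assert(kv.first.front() == kv.second.back());
    // Stripping the trailing digit recovers the same value body every time,
    // which is exactly the cross-check TestGet performs.
    assert(kv.second.substr(0, kv.second.size() - 1) == "somevalue");
  }
  return 0;
}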
diff --git a/src/rocksdb/db_stress_tool/cf_consistency_stress.cc b/src/rocksdb/db_stress_tool/cf_consistency_stress.cc
new file mode 100644
index 000000000..33f7b1f2e
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/cf_consistency_stress.cc
@@ -0,0 +1,640 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifdef GFLAGS
+#include "db_stress_tool/db_stress_common.h"
+#include "file/file_util.h"
+
+namespace ROCKSDB_NAMESPACE {
+class CfConsistencyStressTest : public StressTest {
+ public:
+ CfConsistencyStressTest() : batch_id_(0) {}
+
+ ~CfConsistencyStressTest() override {}
+
+ bool IsStateTracked() const override { return false; }
+
+ Status TestPut(ThreadState* thread, WriteOptions& write_opts,
+ const ReadOptions& /* read_opts */,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys,
+ char (&value)[100]) override {
+ assert(!rand_column_families.empty());
+ assert(!rand_keys.empty());
+
+ const std::string k = Key(rand_keys[0]);
+
+ const uint32_t value_base = batch_id_.fetch_add(1);
+ const size_t sz = GenerateValue(value_base, value, sizeof(value));
+ const Slice v(value, sz);
+
+ WriteBatch batch;
+
+ const bool use_put_entity = !FLAGS_use_merge &&
+ FLAGS_use_put_entity_one_in > 0 &&
+ (value_base % FLAGS_use_put_entity_one_in) == 0;
+
+ for (auto cf : rand_column_families) {
+ ColumnFamilyHandle* const cfh = column_families_[cf];
+ assert(cfh);
+
+ if (FLAGS_use_merge) {
+ batch.Merge(cfh, k, v);
+ } else if (use_put_entity) {
+ batch.PutEntity(cfh, k, GenerateWideColumns(value_base, v));
+ } else {
+ batch.Put(cfh, k, v);
+ }
+ }
+
+ Status s = db_->Write(write_opts, &batch);
+
+ if (!s.ok()) {
+ fprintf(stderr, "multi put or merge error: %s\n", s.ToString().c_str());
+ thread->stats.AddErrors(1);
+ } else {
+ auto num = static_cast<long>(rand_column_families.size());
+ thread->stats.AddBytesForWrites(num, (sz + 1) * num);
+ }
+
+ return s;
+ }
+
+ Status TestDelete(ThreadState* thread, WriteOptions& write_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ std::string key_str = Key(rand_keys[0]);
+ Slice key = key_str;
+ WriteBatch batch;
+ for (auto cf : rand_column_families) {
+ ColumnFamilyHandle* cfh = column_families_[cf];
+ batch.Delete(cfh, key);
+ }
+ Status s = db_->Write(write_opts, &batch);
+ if (!s.ok()) {
+ fprintf(stderr, "multidel error: %s\n", s.ToString().c_str());
+ thread->stats.AddErrors(1);
+ } else {
+ thread->stats.AddDeletes(static_cast<long>(rand_column_families.size()));
+ }
+ return s;
+ }
+
+ Status TestDeleteRange(ThreadState* thread, WriteOptions& write_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ int64_t rand_key = rand_keys[0];
+ auto shared = thread->shared;
+ int64_t max_key = shared->GetMaxKey();
+ if (rand_key > max_key - FLAGS_range_deletion_width) {
+ rand_key =
+ thread->rand.Next() % (max_key - FLAGS_range_deletion_width + 1);
+ }
+ std::string key_str = Key(rand_key);
+ Slice key = key_str;
+ std::string end_key_str = Key(rand_key + FLAGS_range_deletion_width);
+ Slice end_key = end_key_str;
+ WriteBatch batch;
+ for (auto cf : rand_column_families) {
+ ColumnFamilyHandle* cfh = column_families_[cf];
+ batch.DeleteRange(cfh, key, end_key);
+ }
+ Status s = db_->Write(write_opts, &batch);
+ if (!s.ok()) {
+ fprintf(stderr, "multi del range error: %s\n", s.ToString().c_str());
+ thread->stats.AddErrors(1);
+ } else {
+ thread->stats.AddRangeDeletions(
+ static_cast<long>(rand_column_families.size()));
+ }
+ return s;
+ }
+
+ void TestIngestExternalFile(
+ ThreadState* /* thread */,
+ const std::vector<int>& /* rand_column_families */,
+ const std::vector<int64_t>& /* rand_keys */) override {
+ assert(false);
+ fprintf(stderr,
+ "CfConsistencyStressTest does not support TestIngestExternalFile "
+ "because it's not possible to verify the result\n");
+ std::terminate();
+ }
+
+ Status TestGet(ThreadState* thread, const ReadOptions& readoptions,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ std::string key_str = Key(rand_keys[0]);
+ Slice key = key_str;
+ Status s;
+ bool is_consistent = true;
+
+ if (thread->rand.OneIn(2)) {
+ // 1/2 chance: do a random read from a random CF
+ auto cfh =
+ column_families_[rand_column_families[thread->rand.Next() %
+ rand_column_families.size()]];
+ std::string from_db;
+ s = db_->Get(readoptions, cfh, key, &from_db);
+ } else {
+ // 1/2 chance: check that one key has the same value across all CFs
+ const Snapshot* snapshot = db_->GetSnapshot();
+ ReadOptions readoptionscopy = readoptions;
+ readoptionscopy.snapshot = snapshot;
+
+ std::string value0;
+ s = db_->Get(readoptionscopy, column_families_[rand_column_families[0]],
+ key, &value0);
+ if (s.ok() || s.IsNotFound()) {
+ bool found = s.ok();
+ for (size_t i = 1; i < rand_column_families.size(); i++) {
+ std::string value1;
+ s = db_->Get(readoptionscopy,
+ column_families_[rand_column_families[i]], key, &value1);
+ if (!s.ok() && !s.IsNotFound()) {
+ break;
+ }
+ if (!found && s.ok()) {
+ fprintf(stderr, "Get() return different results with key %s\n",
+ Slice(key_str).ToString(true).c_str());
+ fprintf(stderr, "CF %s is not found\n",
+ column_family_names_[0].c_str());
+ fprintf(stderr, "CF %s returns value %s\n",
+ column_family_names_[i].c_str(),
+ Slice(value1).ToString(true).c_str());
+ is_consistent = false;
+ } else if (found && s.IsNotFound()) {
+ fprintf(stderr, "Get() return different results with key %s\n",
+ Slice(key_str).ToString(true).c_str());
+ fprintf(stderr, "CF %s returns value %s\n",
+ column_family_names_[0].c_str(),
+ Slice(value0).ToString(true).c_str());
+ fprintf(stderr, "CF %s is not found\n",
+ column_family_names_[i].c_str());
+ is_consistent = false;
+ } else if (s.ok() && value0 != value1) {
+ fprintf(stderr, "Get() return different results with key %s\n",
+ Slice(key_str).ToString(true).c_str());
+ fprintf(stderr, "CF %s returns value %s\n",
+ column_family_names_[0].c_str(),
+ Slice(value0).ToString(true).c_str());
+ fprintf(stderr, "CF %s returns value %s\n",
+ column_family_names_[i].c_str(),
+ Slice(value1).ToString(true).c_str());
+ is_consistent = false;
+ }
+ if (!is_consistent) {
+ break;
+ }
+ }
+ }
+
+ db_->ReleaseSnapshot(snapshot);
+ }
+ if (!is_consistent) {
+ fprintf(stderr, "TestGet error: is_consistent is false\n");
+ thread->stats.AddErrors(1);
+ // Fail fast to preserve the DB state.
+ thread->shared->SetVerificationFailure();
+ } else if (s.ok()) {
+ thread->stats.AddGets(1, 1);
+ } else if (s.IsNotFound()) {
+ thread->stats.AddGets(1, 0);
+ } else {
+ fprintf(stderr, "TestGet error: %s\n", s.ToString().c_str());
+ thread->stats.AddErrors(1);
+ }
+ return s;
+ }
+
+ std::vector<Status> TestMultiGet(
+ ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ size_t num_keys = rand_keys.size();
+ std::vector<std::string> key_str;
+ std::vector<Slice> keys;
+ keys.reserve(num_keys);
+ key_str.reserve(num_keys);
+ std::vector<PinnableSlice> values(num_keys);
+ std::vector<Status> statuses(num_keys);
+ ColumnFamilyHandle* cfh = column_families_[rand_column_families[0]];
+ ReadOptions readoptionscopy = read_opts;
+ readoptionscopy.rate_limiter_priority =
+ FLAGS_rate_limit_user_ops ? Env::IO_USER : Env::IO_TOTAL;
+
+ for (size_t i = 0; i < num_keys; ++i) {
+ key_str.emplace_back(Key(rand_keys[i]));
+ keys.emplace_back(key_str.back());
+ }
+ db_->MultiGet(readoptionscopy, cfh, num_keys, keys.data(), values.data(),
+ statuses.data());
+ for (auto s : statuses) {
+ if (s.ok()) {
+ // found case
+ thread->stats.AddGets(1, 1);
+ } else if (s.IsNotFound()) {
+ // not found case
+ thread->stats.AddGets(1, 0);
+ } else {
+ // errors case
+ fprintf(stderr, "MultiGet error: %s\n", s.ToString().c_str());
+ thread->stats.AddErrors(1);
+ }
+ }
+ return statuses;
+ }
+
+ Status TestPrefixScan(ThreadState* thread, const ReadOptions& readoptions,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ assert(!rand_column_families.empty());
+ assert(!rand_keys.empty());
+
+ const std::string key = Key(rand_keys[0]);
+
+ const size_t prefix_to_use =
+ (FLAGS_prefix_size < 0) ? 7 : static_cast<size_t>(FLAGS_prefix_size);
+
+ const Slice prefix(key.data(), prefix_to_use);
+
+ std::string upper_bound;
+ Slice ub_slice;
+
+ ReadOptions ro_copy = readoptions;
+
+ // Get the next prefix first and then see if we want to set upper bound.
+ // We'll use the next prefix in an assertion later on
+ if (GetNextPrefix(prefix, &upper_bound) && thread->rand.OneIn(2)) {
+ ub_slice = Slice(upper_bound);
+ ro_copy.iterate_upper_bound = &ub_slice;
+ }
+
+ ColumnFamilyHandle* const cfh =
+ column_families_[rand_column_families[thread->rand.Uniform(
+ static_cast<int>(rand_column_families.size()))]];
+ assert(cfh);
+
+ std::unique_ptr<Iterator> iter(db_->NewIterator(ro_copy, cfh));
+
+ uint64_t count = 0;
+ Status s;
+
+ for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix);
+ iter->Next()) {
+ ++count;
+
+ const WideColumns expected_columns = GenerateExpectedWideColumns(
+ GetValueBase(iter->value()), iter->value());
+ if (iter->columns() != expected_columns) {
+ s = Status::Corruption(
+ "Value and columns inconsistent",
+ DebugString(iter->value(), iter->columns(), expected_columns));
+ break;
+ }
+ }
+
+ assert(prefix_to_use == 0 ||
+ count <= GetPrefixKeyCount(prefix.ToString(), upper_bound));
+
+ if (s.ok()) {
+ s = iter->status();
+ }
+
+ if (!s.ok()) {
+ fprintf(stderr, "TestPrefixScan error: %s\n", s.ToString().c_str());
+ thread->stats.AddErrors(1);
+
+ return s;
+ }
+
+ thread->stats.AddPrefixes(1, count);
+
+ return Status::OK();
+ }
+
+ ColumnFamilyHandle* GetControlCfh(ThreadState* thread,
+ int /*column_family_id*/
+ ) override {
+ // All column families should contain the same data. Randomly pick one.
+ return column_families_[thread->rand.Next() % column_families_.size()];
+ }
+
+ void VerifyDb(ThreadState* thread) const override {
+ // This `ReadOptions` is for validation purposes. Ignore
+ // `FLAGS_rate_limit_user_ops` to avoid slowing any validation.
+ ReadOptions options(FLAGS_verify_checksum, true);
+
+ // We must set total_order_seek to true because we are doing a SeekToFirst
+ // on a column family whose memtables may support (by default) prefix-based
+ // iterators. In this case, NewIterator with options.total_order_seek being
+ // false returns a prefix-based iterator. Calling SeekToFirst using this
+ // iterator causes the iterator to become invalid. That means we cannot
+ // iterate the memtable using this iterator any more, although the memtable
+ // contains the most up-to-date key-values.
+ options.total_order_seek = true;
+
+ ManagedSnapshot snapshot_guard(db_);
+ options.snapshot = snapshot_guard.snapshot();
+
+ const size_t num = column_families_.size();
+
+ std::vector<std::unique_ptr<Iterator>> iters;
+ iters.reserve(num);
+
+ for (size_t i = 0; i < num; ++i) {
+ iters.emplace_back(db_->NewIterator(options, column_families_[i]));
+ iters.back()->SeekToFirst();
+ }
+
+ std::vector<Status> statuses(num, Status::OK());
+
+ assert(thread);
+
+ auto shared = thread->shared;
+ assert(shared);
+
+ do {
+ if (shared->HasVerificationFailedYet()) {
+ break;
+ }
+
+ size_t valid_cnt = 0;
+
+ for (size_t i = 0; i < num; ++i) {
+ const auto& iter = iters[i];
+ assert(iter);
+
+ if (iter->Valid()) {
+ const WideColumns expected_columns = GenerateExpectedWideColumns(
+ GetValueBase(iter->value()), iter->value());
+ if (iter->columns() != expected_columns) {
+ statuses[i] = Status::Corruption(
+ "Value and columns inconsistent",
+ DebugString(iter->value(), iter->columns(), expected_columns));
+ } else {
+ ++valid_cnt;
+ }
+ } else {
+ statuses[i] = iter->status();
+ }
+ }
+
+ if (valid_cnt == 0) {
+ for (size_t i = 0; i < num; ++i) {
+ const auto& s = statuses[i];
+ if (!s.ok()) {
+ fprintf(stderr, "Iterator on cf %s has error: %s\n",
+ column_families_[i]->GetName().c_str(),
+ s.ToString().c_str());
+ shared->SetVerificationFailure();
+ }
+ }
+
+ break;
+ }
+
+ if (valid_cnt < num) {
+ shared->SetVerificationFailure();
+
+ for (size_t i = 0; i < num; ++i) {
+ assert(iters[i]);
+
+ if (!iters[i]->Valid()) {
+ if (statuses[i].ok()) {
+ fprintf(stderr, "Finished scanning cf %s\n",
+ column_families_[i]->GetName().c_str());
+ } else {
+ fprintf(stderr, "Iterator on cf %s has error: %s\n",
+ column_families_[i]->GetName().c_str(),
+ statuses[i].ToString().c_str());
+ }
+ } else {
+ fprintf(stderr, "cf %s has remaining data to scan\n",
+ column_families_[i]->GetName().c_str());
+ }
+ }
+
+ break;
+ }
+
+ if (shared->HasVerificationFailedYet()) {
+ break;
+ }
+
+ // If the program reaches here, then all column families' iterators are
+ // still valid.
+ assert(valid_cnt == num);
+
+ if (shared->PrintingVerificationResults()) {
+ continue;
+ }
+
+ assert(iters[0]);
+
+ const Slice key = iters[0]->key();
+ const Slice value = iters[0]->value();
+
+ int num_mismatched_cfs = 0;
+
+ for (size_t i = 1; i < num; ++i) {
+ assert(iters[i]);
+
+ const int cmp = key.compare(iters[i]->key());
+
+ if (cmp != 0) {
+ ++num_mismatched_cfs;
+
+ if (1 == num_mismatched_cfs) {
+ fprintf(stderr, "Verification failed\n");
+ fprintf(stderr, "Latest Sequence Number: %" PRIu64 "\n",
+ db_->GetLatestSequenceNumber());
+ fprintf(stderr, "[%s] %s => %s\n",
+ column_families_[0]->GetName().c_str(),
+ key.ToString(true /* hex */).c_str(),
+ value.ToString(true /* hex */).c_str());
+ }
+
+ fprintf(stderr, "[%s] %s => %s\n",
+ column_families_[i]->GetName().c_str(),
+ iters[i]->key().ToString(true /* hex */).c_str(),
+ iters[i]->value().ToString(true /* hex */).c_str());
+
+#ifndef ROCKSDB_LITE
+ Slice begin_key;
+ Slice end_key;
+ if (cmp < 0) {
+ begin_key = key;
+ end_key = iters[i]->key();
+ } else {
+ begin_key = iters[i]->key();
+ end_key = key;
+ }
+
+ const auto print_key_versions = [&](ColumnFamilyHandle* cfh) {
+ constexpr size_t kMaxNumIKeys = 8;
+
+ std::vector<KeyVersion> versions;
+ const Status s = GetAllKeyVersions(db_, cfh, begin_key, end_key,
+ kMaxNumIKeys, &versions);
+ if (!s.ok()) {
+ fprintf(stderr, "%s\n", s.ToString().c_str());
+ return;
+ }
+
+ assert(cfh);
+
+ fprintf(stderr,
+ "Internal keys in CF '%s', [%s, %s] (max %" ROCKSDB_PRIszt
+ ")\n",
+ cfh->GetName().c_str(),
+ begin_key.ToString(true /* hex */).c_str(),
+ end_key.ToString(true /* hex */).c_str(), kMaxNumIKeys);
+
+ for (const KeyVersion& kv : versions) {
+ fprintf(stderr, " key %s seq %" PRIu64 " type %d\n",
+ Slice(kv.user_key).ToString(true).c_str(), kv.sequence,
+ kv.type);
+ }
+ };
+
+ if (1 == num_mismatched_cfs) {
+ print_key_versions(column_families_[0]);
+ }
+
+ print_key_versions(column_families_[i]);
+#endif // ROCKSDB_LITE
+
+ shared->SetVerificationFailure();
+ }
+ }
+
+ shared->FinishPrintingVerificationResults();
+
+ for (auto& iter : iters) {
+ assert(iter);
+ iter->Next();
+ }
+ } while (true);
+ }
+
+#ifndef ROCKSDB_LITE
+ void ContinuouslyVerifyDb(ThreadState* thread) const override {
+ assert(thread);
+ Status status;
+
+ DB* db_ptr = cmp_db_ ? cmp_db_ : db_;
+ const auto& cfhs = cmp_db_ ? cmp_cfhs_ : column_families_;
+
+ // Take a snapshot to preserve the state of primary db.
+ ManagedSnapshot snapshot_guard(db_);
+
+ SharedState* shared = thread->shared;
+ assert(shared);
+
+ if (cmp_db_) {
+ status = cmp_db_->TryCatchUpWithPrimary();
+ if (!status.ok()) {
+ fprintf(stderr, "TryCatchUpWithPrimary: %s\n",
+ status.ToString().c_str());
+ shared->SetShouldStopTest();
+ assert(false);
+ return;
+ }
+ }
+
+ const auto checksum_column_family = [](Iterator* iter,
+ uint32_t* checksum) -> Status {
+ assert(nullptr != checksum);
+
+ uint32_t ret = 0;
+ for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+ ret = crc32c::Extend(ret, iter->key().data(), iter->key().size());
+ ret = crc32c::Extend(ret, iter->value().data(), iter->value().size());
+
+ for (const auto& column : iter->columns()) {
+ ret = crc32c::Extend(ret, column.name().data(), column.name().size());
+ ret =
+ crc32c::Extend(ret, column.value().data(), column.value().size());
+ }
+ }
+
+ *checksum = ret;
+ return iter->status();
+ };
+ // This `ReadOptions` is for validation purposes. Ignore
+ // `FLAGS_rate_limit_user_ops` to avoid slowing any validation.
+ ReadOptions ropts(FLAGS_verify_checksum, true);
+ ropts.total_order_seek = true;
+ if (nullptr == cmp_db_) {
+ ropts.snapshot = snapshot_guard.snapshot();
+ }
+ uint32_t crc = 0;
+ {
+ // Compute crc for all key-values of default column family.
+ std::unique_ptr<Iterator> it(db_ptr->NewIterator(ropts));
+ status = checksum_column_family(it.get(), &crc);
+ if (!status.ok()) {
+ fprintf(stderr, "Computing checksum of default cf: %s\n",
+ status.ToString().c_str());
+ assert(false);
+ }
+ }
+ // Since we currently intentionally disallow reading from the secondary
+ // instance with a snapshot, we cannot achieve cross-CF consistency if WAL is
+ // enabled because there is no guarantee that the secondary instance replays
+ // the primary's WAL to a consistent point where all cfs have the same
+ // data.
+ if (status.ok() && FLAGS_disable_wal) {
+ uint32_t tmp_crc = 0;
+ for (ColumnFamilyHandle* cfh : cfhs) {
+ if (cfh == db_ptr->DefaultColumnFamily()) {
+ continue;
+ }
+ std::unique_ptr<Iterator> it(db_ptr->NewIterator(ropts, cfh));
+ status = checksum_column_family(it.get(), &tmp_crc);
+ if (!status.ok() || tmp_crc != crc) {
+ break;
+ }
+ }
+ if (!status.ok()) {
+ fprintf(stderr, "status: %s\n", status.ToString().c_str());
+ shared->SetShouldStopTest();
+ assert(false);
+ } else if (tmp_crc != crc) {
+ fprintf(stderr, "tmp_crc=%" PRIu32 " crc=%" PRIu32 "\n", tmp_crc, crc);
+ shared->SetShouldStopTest();
+ assert(false);
+ }
+ }
+ }
+#else // ROCKSDB_LITE
+ void ContinuouslyVerifyDb(ThreadState* /*thread*/) const override {}
+#endif // !ROCKSDB_LITE
+
+ std::vector<int> GenerateColumnFamilies(
+ const int /* num_column_families */,
+ int /* rand_column_family */) const override {
+ std::vector<int> ret;
+ int num = static_cast<int>(column_families_.size());
+ int k = 0;
+ std::generate_n(back_inserter(ret), num, [&k]() -> int { return k++; });
+ return ret;
+ }
+
+ private:
+ std::atomic<uint32_t> batch_id_;
+};
+
+StressTest* CreateCfConsistencyStressTest() {
+ return new CfConsistencyStressTest();
+}
+
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
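
As a reference for the verification logic above, here is a small self-contained sketch (std::map stands in for a column family, and the helper names are invented for illustration). It mirrors the idea behind CfConsistencyStressTest::VerifyDb: since every selected column family receives the same key and value in one atomic batch, walking all families in lockstep must yield identical (key, value) pairs at every step, and all scans must end together.

#include <cassert>
#include <map>
#include <string>
#include <vector>

using Cf = std::map<std::string, std::string>;

// Apply one "batch": the same (key, value) goes to every column family.
void PutToAllCfs(std::vector<Cf>& cfs, const std::string& k,
                 const std::string& v) {
  for (Cf& cf : cfs) {
    cf[k] = v;
  }
}

// Lockstep scan over all families, as VerifyDb does with one iterator per CF.
bool VerifyConsistent(const std::vector<Cf>& cfs) {
  std::vector<Cf::const_iterator> iters;
  for (const Cf& cf : cfs) {
    iters.push_back(cf.begin());
  }
  while (true) {
    size_t finished = 0;
    for (size_t i = 0; i < cfs.size(); ++i) {
      if (iters[i] == cfs[i].end()) {
        ++finished;
      }
    }
    if (finished == cfs.size()) {
      return true;  // all scans ended together
    }
    if (finished != 0) {
      return false;  // one family ran out of data before the others
    }
    for (size_t i = 1; i < cfs.size(); ++i) {
      if (*iters[i] != *iters[0]) {
        return false;  // key or value mismatch across families
      }
    }
    for (auto& it : iters) {
      ++it;
    }
  }
}

int main() {
  std::vector<Cf> cfs(3);
  PutToAllCfs(cfs, "key0001", "v1");
  PutToAllCfs(cfs, "key0002", "v2");
  assert(VerifyConsistent(cfs));
  cfs[2]["key0003"] = "extra";  // break consistency in one family
  assert(!VerifyConsistent(cfs));
  return 0;
}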
diff --git a/src/rocksdb/db_stress_tool/db_stress.cc b/src/rocksdb/db_stress_tool/db_stress.cc
new file mode 100644
index 000000000..2d03f5d26
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress.cc
@@ -0,0 +1,25 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef GFLAGS
+#include <cstdio>
+
+int main() {
+ fprintf(stderr, "Please install gflags to run rocksdb tools\n");
+ return 1;
+}
+#else
+#include "port/stack_trace.h"
+#include "rocksdb/db_stress_tool.h"
+
+int main(int argc, char** argv) {
+ ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
+ return ROCKSDB_NAMESPACE::db_stress_tool(argc, argv);
+}
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/db_stress_common.cc b/src/rocksdb/db_stress_tool/db_stress_common.cc
new file mode 100644
index 000000000..af8db9e2f
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_common.cc
@@ -0,0 +1,460 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+
+#ifdef GFLAGS
+#include "db_stress_tool/db_stress_common.h"
+
+#include <cmath>
+
+#include "util/file_checksum_helper.h"
+#include "util/xxhash.h"
+
+ROCKSDB_NAMESPACE::Env* db_stress_listener_env = nullptr;
+ROCKSDB_NAMESPACE::Env* db_stress_env = nullptr;
+// If non-null, injects read/write errors at a rate specified by the
+// read_fault_one_in or write_fault_one_in flags
+std::shared_ptr<ROCKSDB_NAMESPACE::FaultInjectionTestFS> fault_fs_guard;
+enum ROCKSDB_NAMESPACE::CompressionType compression_type_e =
+ ROCKSDB_NAMESPACE::kSnappyCompression;
+enum ROCKSDB_NAMESPACE::CompressionType bottommost_compression_type_e =
+ ROCKSDB_NAMESPACE::kSnappyCompression;
+enum ROCKSDB_NAMESPACE::ChecksumType checksum_type_e =
+ ROCKSDB_NAMESPACE::kCRC32c;
+enum RepFactory FLAGS_rep_factory = kSkipList;
+std::vector<double> sum_probs(100001);
+constexpr int64_t zipf_sum_size = 100000;
+
+namespace ROCKSDB_NAMESPACE {
+
+// The Zipfian distribution is generated based on a pre-calculated array.
+// It should be initialized before starting the stress test.
+// First, the probability density function (PDF) of this Zipfian follows a
+// power law: P(x) = 1/(x^alpha).
+// So we calculate the PDF for x from 1 to zipf_sum_size in the first loop
+// and accumulate the PDF values into c, which gives the total probability.
+// Next, we calculate the CDF of the Zipfian and store the value for each
+// rank in an array (sum_probs). The rank runs from 0 to zipf_sum_size; for
+// example, for integer k, its Zipfian CDF value is sum_probs[k].
+// Third, when we need an integer whose probability follows the Zipfian
+// distribution, we draw a uniformly distributed rand_seed in [0,1] and look
+// it up in sum_probs via binary search. When we find the sum_probs[i]
+// closest to rand_seed, i is an integer in [0, zipf_sum_size] that follows
+// the Zipfian distribution with parameter alpha.
+// Finally, we scale i to the [0, max_key] range.
+// To avoid hot keys being adjacent to each other and skewed towards 0,
+// we use Random64 to shuffle them.
+void InitializeHotKeyGenerator(double alpha) {
+ double c = 0;
+ for (int64_t i = 1; i <= zipf_sum_size; i++) {
+ c = c + (1.0 / std::pow(static_cast<double>(i), alpha));
+ }
+ c = 1.0 / c;
+
+ sum_probs[0] = 0;
+ for (int64_t i = 1; i <= zipf_sum_size; i++) {
+ sum_probs[i] =
+ sum_probs[i - 1] + c / std::pow(static_cast<double>(i), alpha);
+ }
+}
+
+// Generate one key that follows the Zipfian distribution. The skewness
+// is decided by the parameter alpha. Inputs are rand_seed in [0,1] and
+// the maximum key to be generated. If we directly returned tmp_zipf_seed,
+// keys closer to 0 would have higher probability. To randomly distribute
+// the hot keys over [0, max_key], we use Random64 to shuffle the result.
+int64_t GetOneHotKeyID(double rand_seed, int64_t max_key) {
+ int64_t low = 1, mid, high = zipf_sum_size, zipf = 0;
+ while (low <= high) {
+ mid = (low + high) / 2;
+ if (sum_probs[mid] >= rand_seed && sum_probs[mid - 1] < rand_seed) {
+ zipf = mid;
+ break;
+ } else if (sum_probs[mid] >= rand_seed) {
+ high = mid - 1;
+ } else {
+ low = mid + 1;
+ }
+ }
+ int64_t tmp_zipf_seed = zipf * max_key / zipf_sum_size;
+ Random64 rand_local(tmp_zipf_seed);
+ return rand_local.Next() % max_key;
+}
+
+void PoolSizeChangeThread(void* v) {
+ assert(FLAGS_compaction_thread_pool_adjust_interval > 0);
+ ThreadState* thread = reinterpret_cast<ThreadState*>(v);
+ SharedState* shared = thread->shared;
+
+ while (true) {
+ {
+ MutexLock l(shared->GetMutex());
+ if (shared->ShouldStopBgThread()) {
+ shared->IncBgThreadsFinished();
+ if (shared->BgThreadsFinished()) {
+ shared->GetCondVar()->SignalAll();
+ }
+ return;
+ }
+ }
+
+ auto thread_pool_size_base = FLAGS_max_background_compactions;
+ auto thread_pool_size_var = FLAGS_compaction_thread_pool_variations;
+ int new_thread_pool_size =
+ thread_pool_size_base - thread_pool_size_var +
+ thread->rand.Next() % (thread_pool_size_var * 2 + 1);
+ if (new_thread_pool_size < 1) {
+ new_thread_pool_size = 1;
+ }
+ db_stress_env->SetBackgroundThreads(new_thread_pool_size,
+ ROCKSDB_NAMESPACE::Env::Priority::LOW);
+ // Sleep up to 3 seconds
+ db_stress_env->SleepForMicroseconds(
+ thread->rand.Next() % FLAGS_compaction_thread_pool_adjust_interval *
+ 1000 +
+ 1);
+ }
+}
+
+void DbVerificationThread(void* v) {
+ assert(FLAGS_continuous_verification_interval > 0);
+ auto* thread = reinterpret_cast<ThreadState*>(v);
+ SharedState* shared = thread->shared;
+ StressTest* stress_test = shared->GetStressTest();
+ assert(stress_test != nullptr);
+ while (true) {
+ {
+ MutexLock l(shared->GetMutex());
+ if (shared->ShouldStopBgThread()) {
+ shared->IncBgThreadsFinished();
+ if (shared->BgThreadsFinished()) {
+ shared->GetCondVar()->SignalAll();
+ }
+ return;
+ }
+ }
+ if (!shared->HasVerificationFailedYet()) {
+ stress_test->ContinuouslyVerifyDb(thread);
+ }
+ db_stress_env->SleepForMicroseconds(
+ thread->rand.Next() % FLAGS_continuous_verification_interval * 1000 +
+ 1);
+ }
+}
+
+void PrintKeyValue(int cf, uint64_t key, const char* value, size_t sz) {
+ if (!FLAGS_verbose) {
+ return;
+ }
+ std::string tmp;
+ tmp.reserve(sz * 2 + 16);
+ char buf[4];
+ for (size_t i = 0; i < sz; i++) {
+ snprintf(buf, 4, "%X", value[i]);
+ tmp.append(buf);
+ }
+ auto key_str = Key(key);
+ Slice key_slice = key_str;
+ fprintf(stdout, "[CF %d] %s (%" PRIi64 ") == > (%" ROCKSDB_PRIszt ") %s\n",
+ cf, key_slice.ToString(true).c_str(), key, sz, tmp.c_str());
+}
+
+// Note that if hot_key_alpha != 0, the key is generated based on the Zipfian
+// distribution. Keys are randomly scattered over [0, FLAGS_max_key]. The
+// order of generated keys is not guaranteed, and the keys are not confined
+// to the active range that is derived from FLAGS_active_width.
+int64_t GenerateOneKey(ThreadState* thread, uint64_t iteration) {
+ const double completed_ratio =
+ static_cast<double>(iteration) / FLAGS_ops_per_thread;
+ const int64_t base_key = static_cast<int64_t>(
+ completed_ratio * (FLAGS_max_key - FLAGS_active_width));
+ int64_t rand_seed = base_key + thread->rand.Next() % FLAGS_active_width;
+ int64_t cur_key = rand_seed;
+ if (FLAGS_hot_key_alpha != 0) {
+ // If the Zipfian distribution alpha is set to non-zero, use Zipfian
+ double float_rand =
+ (static_cast<double>(thread->rand.Next() % FLAGS_max_key)) /
+ FLAGS_max_key;
+ cur_key = GetOneHotKeyID(float_rand, FLAGS_max_key);
+ }
+ return cur_key;
+}
+
+// Note that if hot_key_alpha != 0, the keys are generated based on the
+// Zipfian distribution and come out in random order.
+// If the user wants to generate keys based on a uniform distribution, they
+// need to set hot_key_alpha == 0. In that case the random keys are generated
+// in increasing order in the key array (ensuring key[i+1] >= key[i]) and are
+// constrained to a range derived from FLAGS_active_width.
+std::vector<int64_t> GenerateNKeys(ThreadState* thread, int num_keys,
+ uint64_t iteration) {
+ const double completed_ratio =
+ static_cast<double>(iteration) / FLAGS_ops_per_thread;
+ const int64_t base_key = static_cast<int64_t>(
+ completed_ratio * (FLAGS_max_key - FLAGS_active_width));
+ std::vector<int64_t> keys;
+ keys.reserve(num_keys);
+ int64_t next_key = base_key + thread->rand.Next() % FLAGS_active_width;
+ keys.push_back(next_key);
+ for (int i = 1; i < num_keys; ++i) {
+ // Generate a key following the Zipfian distribution
+ if (FLAGS_hot_key_alpha != 0) {
+ double float_rand =
+ (static_cast<double>(thread->rand.Next() % FLAGS_max_key)) /
+ FLAGS_max_key;
+ next_key = GetOneHotKeyID(float_rand, FLAGS_max_key);
+ } else {
+ // This may result in some duplicate keys
+ next_key = next_key + thread->rand.Next() %
+ (FLAGS_active_width - (next_key - base_key));
+ }
+ keys.push_back(next_key);
+ }
+ return keys;
+}
+
+size_t GenerateValue(uint32_t rand, char* v, size_t max_sz) {
+ size_t value_sz =
+ ((rand % kRandomValueMaxFactor) + 1) * FLAGS_value_size_mult;
+ assert(value_sz <= max_sz && value_sz >= sizeof(uint32_t));
+ (void)max_sz;
+ PutUnaligned(reinterpret_cast<uint32_t*>(v), rand);
+ for (size_t i = sizeof(uint32_t); i < value_sz; i++) {
+ v[i] = (char)(rand ^ i);
+ }
+ v[value_sz] = '\0';
+ return value_sz; // the size of the value set.
+}
+
+uint32_t GetValueBase(Slice s) {
+ assert(s.size() >= sizeof(uint32_t));
+ uint32_t res;
+ GetUnaligned(reinterpret_cast<const uint32_t*>(s.data()), &res);
+ return res;
+}
+
+WideColumns GenerateWideColumns(uint32_t value_base, const Slice& slice) {
+ WideColumns columns;
+
+ constexpr size_t max_columns = 4;
+ const size_t num_columns = (value_base % max_columns) + 1;
+
+ columns.reserve(num_columns);
+
+ assert(slice.size() >= num_columns);
+
+ columns.emplace_back(kDefaultWideColumnName, slice);
+
+ for (size_t i = 1; i < num_columns; ++i) {
+ const Slice name(slice.data(), i);
+ const Slice value(slice.data() + i, slice.size() - i);
+
+ columns.emplace_back(name, value);
+ }
+
+ return columns;
+}
+
+WideColumns GenerateExpectedWideColumns(uint32_t value_base,
+ const Slice& slice) {
+ if (FLAGS_use_put_entity_one_in == 0 ||
+ (value_base % FLAGS_use_put_entity_one_in) != 0) {
+ return WideColumns{{kDefaultWideColumnName, slice}};
+ }
+
+ WideColumns columns = GenerateWideColumns(value_base, slice);
+
+ std::sort(columns.begin(), columns.end(),
+ [](const WideColumn& lhs, const WideColumn& rhs) {
+ return lhs.name().compare(rhs.name()) < 0;
+ });
+
+ return columns;
+}
+
+std::string GetNowNanos() {
+ uint64_t t = db_stress_env->NowNanos();
+ std::string ret;
+ PutFixed64(&ret, t);
+ return ret;
+}
+
+namespace {
+
+class MyXXH64Checksum : public FileChecksumGenerator {
+ public:
+ explicit MyXXH64Checksum(bool big) : big_(big) {
+ state_ = XXH64_createState();
+ XXH64_reset(state_, 0);
+ }
+
+ virtual ~MyXXH64Checksum() override { XXH64_freeState(state_); }
+
+ void Update(const char* data, size_t n) override {
+ XXH64_update(state_, data, n);
+ }
+
+ void Finalize() override {
+ assert(str_.empty());
+ uint64_t digest = XXH64_digest(state_);
+ // Store as little endian raw bytes
+ PutFixed64(&str_, digest);
+ if (big_) {
+ // Throw in some more data for stress testing (448 bits total)
+ PutFixed64(&str_, GetSliceHash64(str_));
+ PutFixed64(&str_, GetSliceHash64(str_));
+ PutFixed64(&str_, GetSliceHash64(str_));
+ PutFixed64(&str_, GetSliceHash64(str_));
+ PutFixed64(&str_, GetSliceHash64(str_));
+ PutFixed64(&str_, GetSliceHash64(str_));
+ }
+ }
+
+ std::string GetChecksum() const override {
+ assert(!str_.empty());
+ return str_;
+ }
+
+ const char* Name() const override {
+ return big_ ? "MyBigChecksum" : "MyXXH64Checksum";
+ }
+
+ private:
+ bool big_;
+ XXH64_state_t* state_;
+ std::string str_;
+};
+
+class DbStressChecksumGenFactory : public FileChecksumGenFactory {
+ std::string default_func_name_;
+
+ std::unique_ptr<FileChecksumGenerator> CreateFromFuncName(
+ const std::string& func_name) {
+ std::unique_ptr<FileChecksumGenerator> rv;
+ if (func_name == "FileChecksumCrc32c") {
+ rv.reset(new FileChecksumGenCrc32c(FileChecksumGenContext()));
+ } else if (func_name == "MyXXH64Checksum") {
+ rv.reset(new MyXXH64Checksum(false /* big */));
+ } else if (func_name == "MyBigChecksum") {
+ rv.reset(new MyXXH64Checksum(true /* big */));
+ } else {
+ // Should be a recognized function when we get here
+ assert(false);
+ }
+ return rv;
+ }
+
+ public:
+ explicit DbStressChecksumGenFactory(const std::string& default_func_name)
+ : default_func_name_(default_func_name) {}
+
+ std::unique_ptr<FileChecksumGenerator> CreateFileChecksumGenerator(
+ const FileChecksumGenContext& context) override {
+ if (context.requested_checksum_func_name.empty()) {
+ return CreateFromFuncName(default_func_name_);
+ } else {
+ return CreateFromFuncName(context.requested_checksum_func_name);
+ }
+ }
+
+ const char* Name() const override { return "FileChecksumGenCrc32cFactory"; }
+};
+
+} // namespace
+
+std::shared_ptr<FileChecksumGenFactory> GetFileChecksumImpl(
+ const std::string& name) {
+ // Translate from friendly names to internal names
+ std::string internal_name;
+ if (name == "crc32c") {
+ internal_name = "FileChecksumCrc32c";
+ } else if (name == "xxh64") {
+ internal_name = "MyXXH64Checksum";
+ } else if (name == "big") {
+ internal_name = "MyBigChecksum";
+ } else {
+ assert(name.empty() || name == "none");
+ return nullptr;
+ }
+ return std::make_shared<DbStressChecksumGenFactory>(internal_name);
+}
+
+Status DeleteFilesInDirectory(const std::string& dirname) {
+ std::vector<std::string> filenames;
+ Status s = Env::Default()->GetChildren(dirname, &filenames);
+ for (size_t i = 0; s.ok() && i < filenames.size(); ++i) {
+ s = Env::Default()->DeleteFile(dirname + "/" + filenames[i]);
+ }
+ return s;
+}
+
+Status SaveFilesInDirectory(const std::string& src_dirname,
+ const std::string& dst_dirname) {
+ std::vector<std::string> filenames;
+ Status s = Env::Default()->GetChildren(src_dirname, &filenames);
+ for (size_t i = 0; s.ok() && i < filenames.size(); ++i) {
+ bool is_dir = false;
+ s = Env::Default()->IsDirectory(src_dirname + "/" + filenames[i], &is_dir);
+ if (s.ok()) {
+ if (is_dir) {
+ continue;
+ }
+ s = Env::Default()->LinkFile(src_dirname + "/" + filenames[i],
+ dst_dirname + "/" + filenames[i]);
+ }
+ }
+ return s;
+}
+
+Status InitUnverifiedSubdir(const std::string& dirname) {
+ Status s = Env::Default()->FileExists(dirname);
+ if (s.IsNotFound()) {
+ return Status::OK();
+ }
+
+ const std::string kUnverifiedDirname = dirname + "/unverified";
+ if (s.ok()) {
+ s = Env::Default()->CreateDirIfMissing(kUnverifiedDirname);
+ }
+ if (s.ok()) {
+ // It might already exist with some stale contents. Delete any such
+ // contents.
+ s = DeleteFilesInDirectory(kUnverifiedDirname);
+ }
+ if (s.ok()) {
+ s = SaveFilesInDirectory(dirname, kUnverifiedDirname);
+ }
+ return s;
+}
+
+Status DestroyUnverifiedSubdir(const std::string& dirname) {
+ Status s = Env::Default()->FileExists(dirname);
+ if (s.IsNotFound()) {
+ return Status::OK();
+ }
+
+ const std::string kUnverifiedDirname = dirname + "/unverified";
+ if (s.ok()) {
+ s = Env::Default()->FileExists(kUnverifiedDirname);
+ }
+ if (s.IsNotFound()) {
+ return Status::OK();
+ }
+
+ if (s.ok()) {
+ s = DeleteFilesInDirectory(kUnverifiedDirname);
+ }
+ if (s.ok()) {
+ s = Env::Default()->DeleteDir(kUnverifiedDirname);
+ }
+ return s;
+}
+
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
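
The hot-key machinery above can be hard to follow from the comments alone, so here is a standalone re-sketch (illustrative only; the function names below are not from the tool, and the final Random64 shuffle step is omitted): build the Zipfian CDF once, then map each uniform draw through the inverse CDF with a binary search.

#include <cassert>
#include <cmath>
#include <cstdint>
#include <random>
#include <vector>

// Precompute the Zipfian CDF for ranks 1..n with exponent alpha,
// mirroring InitializeHotKeyGenerator.
std::vector<double> BuildZipfCdf(int64_t n, double alpha) {
  double c = 0.0;
  for (int64_t i = 1; i <= n; ++i) {
    c += 1.0 / std::pow(static_cast<double>(i), alpha);
  }
  c = 1.0 / c;  // normalization constant so the CDF ends at ~1.0
  std::vector<double> cdf(n + 1, 0.0);
  for (int64_t i = 1; i <= n; ++i) {
    cdf[i] = cdf[i - 1] + c / std::pow(static_cast<double>(i), alpha);
  }
  return cdf;
}

// Inverse-CDF sampling: find the smallest rank whose CDF value reaches u,
// mirroring the binary search in GetOneHotKeyID.
int64_t SampleZipfRank(const std::vector<double>& cdf, double u) {
  int64_t low = 1;
  int64_t high = static_cast<int64_t>(cdf.size()) - 1;
  int64_t rank = high;
  while (low <= high) {
    const int64_t mid = (low + high) / 2;
    if (cdf[mid] >= u) {
      rank = mid;
      high = mid - 1;
    } else {
      low = mid + 1;
    }
  }
  return rank;  // small ranks are far more likely for alpha > 0
}

int main() {
  const std::vector<double> cdf = BuildZipfCdf(/* n */ 100000, /* alpha */ 1.0);
  std::mt19937_64 rng(42);
  std::uniform_real_distribution<double> uniform(0.0, 1.0);
  int64_t hits_for_rank_1 = 0;
  for (int i = 0; i < 10000; ++i) {
    if (SampleZipfRank(cdf, uniform(rng)) == 1) {
      ++hits_for_rank_1;
    }
  }
  // With alpha = 1.0, rank 1 is the hottest key, so it shows up often.
  assert(hits_for_rank_1 > 0);
  return 0;
}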
diff --git a/src/rocksdb/db_stress_tool/db_stress_common.h b/src/rocksdb/db_stress_tool/db_stress_common.h
new file mode 100644
index 000000000..45f3e9c19
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_common.h
@@ -0,0 +1,650 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// The test uses an array to compare against values written to the database.
+// Keys written to the array are in 1:1 correspondence to the actual values in
+// the database according to the formula in the function GenerateValue.
+
+// Space is reserved in the array from 0 to FLAGS_max_key and values are
+// randomly written/deleted/read from those positions. During verification we
+// compare all the positions in the array. To shorten/elongate the running
+// time, you could change the settings: FLAGS_max_key, FLAGS_ops_per_thread,
+// (sometimes also FLAGS_threads).
+//
+// NOTE that if FLAGS_test_batches_snapshots is set, the test will have
+// different behavior. See comment of the flag for details.
+
+#ifdef GFLAGS
+#pragma once
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#include <algorithm>
+#include <array>
+#include <chrono>
+#include <cinttypes>
+#include <exception>
+#include <queue>
+#include <thread>
+
+#include "db/db_impl/db_impl.h"
+#include "db/version_set.h"
+#include "db_stress_tool/db_stress_env_wrapper.h"
+#include "db_stress_tool/db_stress_listener.h"
+#include "db_stress_tool/db_stress_shared_state.h"
+#include "db_stress_tool/db_stress_test_base.h"
+#include "logging/logging.h"
+#include "monitoring/histogram.h"
+#include "options/options_helper.h"
+#include "port/port.h"
+#include "rocksdb/cache.h"
+#include "rocksdb/env.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/utilities/backup_engine.h"
+#include "rocksdb/utilities/checkpoint.h"
+#include "rocksdb/utilities/db_ttl.h"
+#include "rocksdb/utilities/debug.h"
+#include "rocksdb/utilities/options_util.h"
+#include "rocksdb/utilities/transaction.h"
+#include "rocksdb/utilities/transaction_db.h"
+#include "rocksdb/write_batch.h"
+#include "test_util/testutil.h"
+#include "util/coding.h"
+#include "util/compression.h"
+#include "util/crc32c.h"
+#include "util/gflags_compat.h"
+#include "util/mutexlock.h"
+#include "util/random.h"
+#include "util/string_util.h"
+#include "utilities/blob_db/blob_db.h"
+#include "utilities/fault_injection_fs.h"
+#include "utilities/merge_operators.h"
+
+using GFLAGS_NAMESPACE::ParseCommandLineFlags;
+using GFLAGS_NAMESPACE::RegisterFlagValidator;
+using GFLAGS_NAMESPACE::SetUsageMessage;
+
+DECLARE_uint64(seed);
+DECLARE_bool(read_only);
+DECLARE_int64(max_key);
+DECLARE_double(hot_key_alpha);
+DECLARE_int32(max_key_len);
+DECLARE_string(key_len_percent_dist);
+DECLARE_int32(key_window_scale_factor);
+DECLARE_int32(column_families);
+DECLARE_string(options_file);
+DECLARE_int64(active_width);
+DECLARE_bool(test_batches_snapshots);
+DECLARE_bool(atomic_flush);
+DECLARE_int32(manual_wal_flush_one_in);
+DECLARE_bool(test_cf_consistency);
+DECLARE_bool(test_multi_ops_txns);
+DECLARE_int32(threads);
+DECLARE_int32(ttl);
+DECLARE_int32(value_size_mult);
+DECLARE_int32(compaction_readahead_size);
+DECLARE_bool(enable_pipelined_write);
+DECLARE_bool(verify_before_write);
+DECLARE_bool(histogram);
+DECLARE_bool(destroy_db_initially);
+DECLARE_bool(verbose);
+DECLARE_bool(progress_reports);
+DECLARE_uint64(db_write_buffer_size);
+DECLARE_int32(write_buffer_size);
+DECLARE_int32(max_write_buffer_number);
+DECLARE_int32(min_write_buffer_number_to_merge);
+DECLARE_int32(max_write_buffer_number_to_maintain);
+DECLARE_int64(max_write_buffer_size_to_maintain);
+DECLARE_double(memtable_prefix_bloom_size_ratio);
+DECLARE_bool(memtable_whole_key_filtering);
+DECLARE_int32(open_files);
+DECLARE_int64(compressed_cache_size);
+DECLARE_int32(compressed_cache_numshardbits);
+DECLARE_int32(compaction_style);
+DECLARE_int32(compaction_pri);
+DECLARE_int32(num_levels);
+DECLARE_int32(level0_file_num_compaction_trigger);
+DECLARE_int32(level0_slowdown_writes_trigger);
+DECLARE_int32(level0_stop_writes_trigger);
+DECLARE_int32(block_size);
+DECLARE_int32(format_version);
+DECLARE_int32(index_block_restart_interval);
+DECLARE_bool(disable_auto_compactions);
+DECLARE_int32(max_background_compactions);
+DECLARE_int32(num_bottom_pri_threads);
+DECLARE_int32(compaction_thread_pool_adjust_interval);
+DECLARE_int32(compaction_thread_pool_variations);
+DECLARE_int32(max_background_flushes);
+DECLARE_int32(universal_size_ratio);
+DECLARE_int32(universal_min_merge_width);
+DECLARE_int32(universal_max_merge_width);
+DECLARE_int32(universal_max_size_amplification_percent);
+DECLARE_int32(clear_column_family_one_in);
+DECLARE_int32(get_live_files_one_in);
+DECLARE_int32(get_sorted_wal_files_one_in);
+DECLARE_int32(get_current_wal_file_one_in);
+DECLARE_int32(set_options_one_in);
+DECLARE_int32(set_in_place_one_in);
+DECLARE_int64(cache_size);
+DECLARE_int32(cache_numshardbits);
+DECLARE_bool(cache_index_and_filter_blocks);
+DECLARE_bool(charge_compression_dictionary_building_buffer);
+DECLARE_bool(charge_filter_construction);
+DECLARE_bool(charge_table_reader);
+DECLARE_bool(charge_file_metadata);
+DECLARE_bool(charge_blob_cache);
+DECLARE_int32(top_level_index_pinning);
+DECLARE_int32(partition_pinning);
+DECLARE_int32(unpartitioned_pinning);
+DECLARE_string(cache_type);
+DECLARE_uint64(subcompactions);
+DECLARE_uint64(periodic_compaction_seconds);
+DECLARE_uint64(compaction_ttl);
+DECLARE_bool(allow_concurrent_memtable_write);
+DECLARE_double(experimental_mempurge_threshold);
+DECLARE_bool(enable_write_thread_adaptive_yield);
+DECLARE_int32(reopen);
+DECLARE_double(bloom_bits);
+DECLARE_int32(ribbon_starting_level);
+DECLARE_bool(partition_filters);
+DECLARE_bool(optimize_filters_for_memory);
+DECLARE_bool(detect_filter_construct_corruption);
+DECLARE_int32(index_type);
+DECLARE_int32(data_block_index_type);
+DECLARE_string(db);
+DECLARE_string(secondaries_base);
+DECLARE_bool(test_secondary);
+DECLARE_string(expected_values_dir);
+DECLARE_bool(verify_checksum);
+DECLARE_bool(mmap_read);
+DECLARE_bool(mmap_write);
+DECLARE_bool(use_direct_reads);
+DECLARE_bool(use_direct_io_for_flush_and_compaction);
+DECLARE_bool(mock_direct_io);
+DECLARE_bool(statistics);
+DECLARE_bool(sync);
+DECLARE_bool(use_fsync);
+DECLARE_uint64(stats_dump_period_sec);
+DECLARE_uint64(bytes_per_sync);
+DECLARE_uint64(wal_bytes_per_sync);
+DECLARE_int32(kill_random_test);
+DECLARE_string(kill_exclude_prefixes);
+DECLARE_bool(disable_wal);
+DECLARE_uint64(recycle_log_file_num);
+DECLARE_int64(target_file_size_base);
+DECLARE_int32(target_file_size_multiplier);
+DECLARE_uint64(max_bytes_for_level_base);
+DECLARE_double(max_bytes_for_level_multiplier);
+DECLARE_int32(range_deletion_width);
+DECLARE_uint64(rate_limiter_bytes_per_sec);
+DECLARE_bool(rate_limit_bg_reads);
+DECLARE_bool(rate_limit_user_ops);
+DECLARE_bool(rate_limit_auto_wal_flush);
+DECLARE_uint64(sst_file_manager_bytes_per_sec);
+DECLARE_uint64(sst_file_manager_bytes_per_truncate);
+DECLARE_bool(use_txn);
+DECLARE_uint64(txn_write_policy);
+DECLARE_bool(unordered_write);
+DECLARE_int32(backup_one_in);
+DECLARE_uint64(backup_max_size);
+DECLARE_int32(checkpoint_one_in);
+DECLARE_int32(ingest_external_file_one_in);
+DECLARE_int32(ingest_external_file_width);
+DECLARE_int32(compact_files_one_in);
+DECLARE_int32(compact_range_one_in);
+DECLARE_int32(mark_for_compaction_one_file_in);
+DECLARE_int32(flush_one_in);
+DECLARE_int32(pause_background_one_in);
+DECLARE_int32(compact_range_width);
+DECLARE_int32(acquire_snapshot_one_in);
+DECLARE_bool(compare_full_db_state_snapshot);
+DECLARE_uint64(snapshot_hold_ops);
+DECLARE_bool(long_running_snapshots);
+DECLARE_bool(use_multiget);
+DECLARE_int32(readpercent);
+DECLARE_int32(prefixpercent);
+DECLARE_int32(writepercent);
+DECLARE_int32(delpercent);
+DECLARE_int32(delrangepercent);
+DECLARE_int32(nooverwritepercent);
+DECLARE_int32(iterpercent);
+DECLARE_uint64(num_iterations);
+DECLARE_int32(customopspercent);
+DECLARE_string(compression_type);
+DECLARE_string(bottommost_compression_type);
+DECLARE_int32(compression_max_dict_bytes);
+DECLARE_int32(compression_zstd_max_train_bytes);
+DECLARE_int32(compression_parallel_threads);
+DECLARE_uint64(compression_max_dict_buffer_bytes);
+DECLARE_bool(compression_use_zstd_dict_trainer);
+DECLARE_string(checksum_type);
+DECLARE_string(env_uri);
+DECLARE_string(fs_uri);
+DECLARE_uint64(ops_per_thread);
+DECLARE_uint64(log2_keys_per_lock);
+DECLARE_uint64(max_manifest_file_size);
+DECLARE_bool(in_place_update);
+DECLARE_string(memtablerep);
+DECLARE_int32(prefix_size);
+DECLARE_bool(use_merge);
+DECLARE_uint32(use_put_entity_one_in);
+DECLARE_bool(use_full_merge_v1);
+DECLARE_int32(sync_wal_one_in);
+DECLARE_bool(avoid_unnecessary_blocking_io);
+DECLARE_bool(write_dbid_to_manifest);
+DECLARE_bool(avoid_flush_during_recovery);
+DECLARE_uint64(max_write_batch_group_size_bytes);
+DECLARE_bool(level_compaction_dynamic_level_bytes);
+DECLARE_int32(verify_checksum_one_in);
+DECLARE_int32(verify_db_one_in);
+DECLARE_int32(continuous_verification_interval);
+DECLARE_int32(get_property_one_in);
+DECLARE_string(file_checksum_impl);
+
+#ifndef ROCKSDB_LITE
+// Options for StackableDB-based BlobDB
+DECLARE_bool(use_blob_db);
+DECLARE_uint64(blob_db_min_blob_size);
+DECLARE_uint64(blob_db_bytes_per_sync);
+DECLARE_uint64(blob_db_file_size);
+DECLARE_bool(blob_db_enable_gc);
+DECLARE_double(blob_db_gc_cutoff);
+#endif // !ROCKSDB_LITE
+
+// Options for integrated BlobDB
+DECLARE_bool(allow_setting_blob_options_dynamically);
+DECLARE_bool(enable_blob_files);
+DECLARE_uint64(min_blob_size);
+DECLARE_uint64(blob_file_size);
+DECLARE_string(blob_compression_type);
+DECLARE_bool(enable_blob_garbage_collection);
+DECLARE_double(blob_garbage_collection_age_cutoff);
+DECLARE_double(blob_garbage_collection_force_threshold);
+DECLARE_uint64(blob_compaction_readahead_size);
+DECLARE_int32(blob_file_starting_level);
+DECLARE_bool(use_blob_cache);
+DECLARE_bool(use_shared_block_and_blob_cache);
+DECLARE_uint64(blob_cache_size);
+DECLARE_int32(blob_cache_numshardbits);
+DECLARE_int32(prepopulate_blob_cache);
+
+DECLARE_int32(approximate_size_one_in);
+DECLARE_bool(sync_fault_injection);
+
+DECLARE_bool(best_efforts_recovery);
+DECLARE_bool(skip_verifydb);
+DECLARE_bool(enable_compaction_filter);
+DECLARE_bool(paranoid_file_checks);
+DECLARE_bool(fail_if_options_file_error);
+DECLARE_uint64(batch_protection_bytes_per_key);
+DECLARE_uint32(memtable_protection_bytes_per_key);
+
+DECLARE_uint64(user_timestamp_size);
+DECLARE_string(secondary_cache_uri);
+DECLARE_int32(secondary_cache_fault_one_in);
+
+DECLARE_int32(prepopulate_block_cache);
+
+DECLARE_bool(two_write_queues);
+#ifndef ROCKSDB_LITE
+DECLARE_bool(use_only_the_last_commit_time_batch_for_recovery);
+DECLARE_uint64(wp_snapshot_cache_bits);
+DECLARE_uint64(wp_commit_cache_bits);
+#endif // !ROCKSDB_LITE
+
+DECLARE_bool(adaptive_readahead);
+DECLARE_bool(async_io);
+DECLARE_string(wal_compression);
+DECLARE_bool(verify_sst_unique_id_in_manifest);
+
+DECLARE_int32(create_timestamped_snapshot_one_in);
+
+DECLARE_bool(allow_data_in_errors);
+
+// Tiered storage
+DECLARE_bool(enable_tiered_storage); // set last_level_temperature
+DECLARE_int64(preclude_last_level_data_seconds);
+DECLARE_int64(preserve_internal_time_seconds);
+
+DECLARE_int32(verify_iterator_with_expected_state_one_in);
+DECLARE_bool(preserve_unverified_changes);
+
+DECLARE_uint64(readahead_size);
+DECLARE_uint64(initial_auto_readahead_size);
+DECLARE_uint64(max_auto_readahead_size);
+DECLARE_uint64(num_file_reads_for_auto_readahead);
+
+constexpr long KB = 1024;
+constexpr int kRandomValueMaxFactor = 3;
+constexpr int kValueMaxLen = 100;
+
+// wrapped posix environment
+extern ROCKSDB_NAMESPACE::Env* db_stress_env;
+extern ROCKSDB_NAMESPACE::Env* db_stress_listener_env;
+extern std::shared_ptr<ROCKSDB_NAMESPACE::FaultInjectionTestFS> fault_fs_guard;
+
+extern enum ROCKSDB_NAMESPACE::CompressionType compression_type_e;
+extern enum ROCKSDB_NAMESPACE::CompressionType bottommost_compression_type_e;
+extern enum ROCKSDB_NAMESPACE::ChecksumType checksum_type_e;
+
+enum RepFactory { kSkipList, kHashSkipList, kVectorRep };
+
+inline enum RepFactory StringToRepFactory(const char* ctype) {
+ assert(ctype);
+
+ if (!strcasecmp(ctype, "skip_list"))
+ return kSkipList;
+ else if (!strcasecmp(ctype, "prefix_hash"))
+ return kHashSkipList;
+ else if (!strcasecmp(ctype, "vector"))
+ return kVectorRep;
+
+  fprintf(stdout, "Cannot parse memtablerep %s\n", ctype);
+ return kSkipList;
+}
+
+extern enum RepFactory FLAGS_rep_factory;
+
+namespace ROCKSDB_NAMESPACE {
+inline enum ROCKSDB_NAMESPACE::CompressionType StringToCompressionType(
+ const char* ctype) {
+ assert(ctype);
+
+ ROCKSDB_NAMESPACE::CompressionType ret_compression_type;
+
+ if (!strcasecmp(ctype, "disable")) {
+ ret_compression_type = ROCKSDB_NAMESPACE::kDisableCompressionOption;
+ } else if (!strcasecmp(ctype, "none")) {
+ ret_compression_type = ROCKSDB_NAMESPACE::kNoCompression;
+ } else if (!strcasecmp(ctype, "snappy")) {
+ ret_compression_type = ROCKSDB_NAMESPACE::kSnappyCompression;
+ } else if (!strcasecmp(ctype, "zlib")) {
+ ret_compression_type = ROCKSDB_NAMESPACE::kZlibCompression;
+ } else if (!strcasecmp(ctype, "bzip2")) {
+ ret_compression_type = ROCKSDB_NAMESPACE::kBZip2Compression;
+ } else if (!strcasecmp(ctype, "lz4")) {
+ ret_compression_type = ROCKSDB_NAMESPACE::kLZ4Compression;
+ } else if (!strcasecmp(ctype, "lz4hc")) {
+ ret_compression_type = ROCKSDB_NAMESPACE::kLZ4HCCompression;
+ } else if (!strcasecmp(ctype, "xpress")) {
+ ret_compression_type = ROCKSDB_NAMESPACE::kXpressCompression;
+ } else if (!strcasecmp(ctype, "zstd")) {
+ ret_compression_type = ROCKSDB_NAMESPACE::kZSTD;
+ } else {
+ fprintf(stderr, "Cannot parse compression type '%s'\n", ctype);
+ ret_compression_type =
+ ROCKSDB_NAMESPACE::kSnappyCompression; // default value
+ }
+ if (ret_compression_type != ROCKSDB_NAMESPACE::kDisableCompressionOption &&
+ !CompressionTypeSupported(ret_compression_type)) {
+    // Using no compression would be more portable, but since this is only a
+    // stress test and snappy is widely available, fall back to snappy here.
+ ret_compression_type = ROCKSDB_NAMESPACE::kSnappyCompression;
+ }
+ return ret_compression_type;
+}
+
+inline enum ROCKSDB_NAMESPACE::ChecksumType StringToChecksumType(
+ const char* ctype) {
+ assert(ctype);
+ auto iter = ROCKSDB_NAMESPACE::checksum_type_string_map.find(ctype);
+ if (iter != ROCKSDB_NAMESPACE::checksum_type_string_map.end()) {
+ return iter->second;
+ }
+ fprintf(stderr, "Cannot parse checksum type '%s'\n", ctype);
+ return ROCKSDB_NAMESPACE::kCRC32c;
+}
+
+inline std::string ChecksumTypeToString(ROCKSDB_NAMESPACE::ChecksumType ctype) {
+ auto iter = std::find_if(
+ ROCKSDB_NAMESPACE::checksum_type_string_map.begin(),
+ ROCKSDB_NAMESPACE::checksum_type_string_map.end(),
+ [&](const std::pair<std::string, ROCKSDB_NAMESPACE::ChecksumType>&
+ name_and_enum_val) { return name_and_enum_val.second == ctype; });
+ assert(iter != ROCKSDB_NAMESPACE::checksum_type_string_map.end());
+ return iter->first;
+}
+
+inline std::vector<std::string> SplitString(std::string src) {
+ std::vector<std::string> ret;
+ if (src.empty()) {
+ return ret;
+ }
+ size_t pos = 0;
+ size_t pos_comma;
+ while ((pos_comma = src.find(',', pos)) != std::string::npos) {
+ ret.push_back(src.substr(pos, pos_comma - pos));
+ pos = pos_comma + 1;
+ }
+ ret.push_back(src.substr(pos, src.length()));
+ return ret;
+}
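As a quick editorial illustration (not part of the diff), SplitString is what turns a comma-separated flag such as --key_len_percent_dist into its components:

    std::vector<std::string> parts = ROCKSDB_NAMESPACE::SplitString("1,30,69");
    // parts == {"1", "30", "69"}; an empty input string yields an empty vector.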
+
+#ifdef _MSC_VER
+#pragma warning(push)
+// truncation of constant value on static_cast
+#pragma warning(disable : 4309)
+#endif
+inline bool GetNextPrefix(const ROCKSDB_NAMESPACE::Slice& src, std::string* v) {
+ std::string ret = src.ToString();
+ for (int i = static_cast<int>(ret.size()) - 1; i >= 0; i--) {
+ if (ret[i] != static_cast<char>(255)) {
+ ret[i] = ret[i] + 1;
+ break;
+ } else if (i != 0) {
+ ret[i] = 0;
+ } else {
+ // all FF. No next prefix
+ return false;
+ }
+ }
+ *v = ret;
+ return true;
+}
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
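A brief illustrative call (editorial example, values hypothetical): incrementing the last byte that is not 0xFF produces the next prefix in sort order, and an all-0xFF prefix has no successor.

    std::string next;
    bool ok = ROCKSDB_NAMESPACE::GetNextPrefix(ROCKSDB_NAMESPACE::Slice("abc"), &next);
    // ok == true, next == "abd"; for a prefix of all 0xFF bytes, ok would be false.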
+// Append `val` to `*key` in fixed-width big-endian format
+extern inline void AppendIntToString(uint64_t val, std::string* key) {
+ // PutFixed64 uses little endian
+ PutFixed64(key, val);
+ // Reverse to get big endian
+ char* int_data = &((*key)[key->size() - sizeof(uint64_t)]);
+ for (size_t i = 0; i < sizeof(uint64_t) / 2; ++i) {
+ std::swap(int_data[i], int_data[sizeof(uint64_t) - 1 - i]);
+ }
+}
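For example (editorial illustration), encoding a 64-bit value this way appends its bytes most-significant first, which is what keeps string comparison order equal to numeric order:

    std::string key;
    ROCKSDB_NAMESPACE::AppendIntToString(0x0102030405060708ULL, &key);
    // key now ends with the bytes 01 02 03 04 05 06 07 08 (big endian).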
+
+// A struct for maintaining the parameters for generating variable length keys
+struct KeyGenContext {
+ // Number of adjacent keys in one cycle of key lengths
+ uint64_t window;
+ // Number of keys of each possible length in a given window
+ std::vector<uint64_t> weights;
+};
+extern KeyGenContext key_gen_ctx;
+
+// Generate a variable length key string from the given int64 val. The
+// order of the keys is preserved. The key could be anywhere from 8 to
+// max_key_len * 8 bytes.
+// The algorithm picks the key length based on the offset of val within a
+// configured window and the distribution of the number of keys of various
+// lengths in that window. For example, if x, y, z are the weights assigned to
+// each possible key length, the keys generated would be
+// {0}..{x-1},
+// {(x-1),0}..{(x-1),(y-1)}, {(x-1),(y-1),0}..{(x-1),(y-1),(z-1)}, and so on.
+// Additionally, a trailer of 0-7 'x' bytes may be appended.
+extern inline std::string Key(int64_t val) {
+ uint64_t window = key_gen_ctx.window;
+ size_t levels = key_gen_ctx.weights.size();
+ std::string key;
+ // Over-reserve and for now do not bother `shrink_to_fit()` since the key
+ // strings are transient.
+ key.reserve(FLAGS_max_key_len * 8);
+
+ uint64_t window_idx = static_cast<uint64_t>(val) / window;
+ uint64_t offset = static_cast<uint64_t>(val) % window;
+ for (size_t level = 0; level < levels; ++level) {
+ uint64_t weight = key_gen_ctx.weights[level];
+ uint64_t pfx;
+ if (level == 0) {
+ pfx = window_idx * weight;
+ } else {
+ pfx = 0;
+ }
+ pfx += offset >= weight ? weight - 1 : offset;
+ AppendIntToString(pfx, &key);
+ if (offset < weight) {
+ // Use the bottom 3 bits of offset as the number of trailing 'x's in the
+ // key. If the next key is going to be of the next level, then skip the
+ // trailer as it would break ordering. If the key length is already at
+ // max, skip the trailer.
+ if (offset < weight - 1 && level < levels - 1) {
+ size_t trailer_len = offset & 0x7;
+ key.append(trailer_len, 'x');
+ }
+ break;
+ }
+ offset -= weight;
+ }
+
+ return key;
+}
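To make the window/weight scheme concrete, here is an editorial walk-through under a hypothetical configuration with key_gen_ctx.window = 5 and weights = {2, 3} (the real values are derived from --max_key_len, --key_len_percent_dist and --key_window_scale_factor elsewhere in the tool); no 'x' trailer happens to be appended for these particular values:

    // val : 8-byte prefixes appended by Key(val)
    //  0  : {0}      first window, level-0 slot 0
    //  1  : {1}      level-0 slot 1 (offset < weight, so no deeper level)
    //  2  : {1, 0}   level 0 saturates at weight - 1 = 1; remainder indexes level 1
    //  3  : {1, 1}
    //  4  : {1, 2}
    //  5  : {2}      second window begins; level-0 prefix = window_idx * weight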
+
+// Given a string key, map it to an index into the expected values buffer
+extern inline bool GetIntVal(std::string big_endian_key, uint64_t* key_p) {
+ size_t size_key = big_endian_key.size();
+ std::vector<uint64_t> prefixes;
+
+ assert(size_key <= key_gen_ctx.weights.size() * sizeof(uint64_t));
+
+ std::string little_endian_key;
+ little_endian_key.resize(size_key);
+ for (size_t start = 0; start + sizeof(uint64_t) <= size_key;
+ start += sizeof(uint64_t)) {
+ size_t end = start + sizeof(uint64_t);
+ for (size_t i = 0; i < sizeof(uint64_t); ++i) {
+ little_endian_key[start + i] = big_endian_key[end - 1 - i];
+ }
+ Slice little_endian_slice =
+ Slice(&little_endian_key[start], sizeof(uint64_t));
+ uint64_t pfx;
+ if (!GetFixed64(&little_endian_slice, &pfx)) {
+ return false;
+ }
+ prefixes.emplace_back(pfx);
+ }
+
+ uint64_t key = 0;
+ for (size_t i = 0; i < prefixes.size(); ++i) {
+ uint64_t pfx = prefixes[i];
+ key += (pfx / key_gen_ctx.weights[i]) * key_gen_ctx.window +
+ pfx % key_gen_ctx.weights[i];
+ if (i < prefixes.size() - 1) {
+ // The encoding writes a `key_gen_ctx.weights[i] - 1` that counts for
+ // `key_gen_ctx.weights[i]` when there are more prefixes to come. So we
+ // need to add back the one here as we're at a non-last prefix.
+ ++key;
+ }
+ }
+ *key_p = key;
+ return true;
+}
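GetIntVal is intended to be the inverse of Key(): the expected-values machinery maps generated keys back to their index. A sanity check one might write (editorial sketch, arbitrary value):

    uint64_t recovered = 0;
    std::string k = ROCKSDB_NAMESPACE::Key(42);
    bool ok = ROCKSDB_NAMESPACE::GetIntVal(k, &recovered);
    // Expect ok == true and recovered == 42; trailing 'x' bytes are ignored.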
+
+// Given a string prefix, map it to the first corresponding index in the
+// expected values buffer.
+inline bool GetFirstIntValInPrefix(std::string big_endian_prefix,
+ uint64_t* key_p) {
+ size_t size_key = big_endian_prefix.size();
+  // Pad with zeros to make the key a multiple of 8 bytes long. This function
+  // may be called with a prefix, in which case we return the first index that
+  // falls inside or outside that prefix, depending on whether the prefix is
+  // the start or the upper bound of a scan.
+ unsigned int pad = sizeof(uint64_t) - (size_key % sizeof(uint64_t));
+ if (pad < sizeof(uint64_t)) {
+ big_endian_prefix.append(pad, '\0');
+ }
+ return GetIntVal(std::move(big_endian_prefix), key_p);
+}
+
+extern inline uint64_t GetPrefixKeyCount(const std::string& prefix,
+ const std::string& ub) {
+ uint64_t start = 0;
+ uint64_t end = 0;
+
+ if (!GetFirstIntValInPrefix(prefix, &start) ||
+ !GetFirstIntValInPrefix(ub, &end)) {
+ return 0;
+ }
+
+ return end - start;
+}
+
+extern inline std::string StringToHex(const std::string& str) {
+ std::string result = "0x";
+ result.append(Slice(str).ToString(true));
+ return result;
+}
+
+// Unified output format for double parameters
+extern inline std::string FormatDoubleParam(double param) {
+ return std::to_string(param);
+}
+
+// Make sure that double parameter is a value we can reproduce by
+// re-inputting the value printed.
+extern inline void SanitizeDoubleParam(double* param) {
+ *param = std::atof(FormatDoubleParam(*param).c_str());
+}
+
+extern void PoolSizeChangeThread(void* v);
+
+extern void DbVerificationThread(void* v);
+
+extern void TimestampedSnapshotsThread(void* v);
+
+extern void PrintKeyValue(int cf, uint64_t key, const char* value, size_t sz);
+
+extern int64_t GenerateOneKey(ThreadState* thread, uint64_t iteration);
+
+extern std::vector<int64_t> GenerateNKeys(ThreadState* thread, int num_keys,
+ uint64_t iteration);
+
+extern size_t GenerateValue(uint32_t rand, char* v, size_t max_sz);
+extern uint32_t GetValueBase(Slice s);
+
+extern WideColumns GenerateWideColumns(uint32_t value_base, const Slice& slice);
+extern WideColumns GenerateExpectedWideColumns(uint32_t value_base,
+ const Slice& slice);
+
+extern StressTest* CreateCfConsistencyStressTest();
+extern StressTest* CreateBatchedOpsStressTest();
+extern StressTest* CreateNonBatchedOpsStressTest();
+extern StressTest* CreateMultiOpsTxnsStressTest();
+extern void CheckAndSetOptionsForMultiOpsTxnStressTest();
+extern void InitializeHotKeyGenerator(double alpha);
+extern int64_t GetOneHotKeyID(double rand_seed, int64_t max_key);
+
+extern std::string GetNowNanos();
+
+std::shared_ptr<FileChecksumGenFactory> GetFileChecksumImpl(
+ const std::string& name);
+
+Status DeleteFilesInDirectory(const std::string& dirname);
+Status SaveFilesInDirectory(const std::string& src_dirname,
+ const std::string& dst_dirname);
+Status DestroyUnverifiedSubdir(const std::string& dirname);
+Status InitUnverifiedSubdir(const std::string& dirname);
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/db_stress_compaction_filter.h b/src/rocksdb/db_stress_tool/db_stress_compaction_filter.h
new file mode 100644
index 000000000..408bb48f3
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_compaction_filter.h
@@ -0,0 +1,96 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+
+#include "db_stress_tool/db_stress_common.h"
+#include "db_stress_tool/db_stress_shared_state.h"
+#include "rocksdb/compaction_filter.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+// DbStressCompactionFilter is safe to use with db_stress as it does not
+// perform any mutation. It only makes `kRemove`/`kPurge` decisions for keys
+// that are already non-existent according to the `SharedState`.
+class DbStressCompactionFilter : public CompactionFilter {
+ public:
+ DbStressCompactionFilter(SharedState* state, int cf_id)
+ : state_(state), cf_id_(cf_id) {}
+
+ Decision FilterV2(int /*level*/, const Slice& key, ValueType /*value_type*/,
+ const Slice& /*existing_value*/, std::string* /*new_value*/,
+ std::string* /*skip_until*/) const override {
+ if (state_ == nullptr) {
+ return Decision::kKeep;
+ }
+ if (key.empty() || ('0' <= key[0] && key[0] <= '9')) {
+ // It is likely leftover from a test_batches_snapshots run. Below this
+ // conditional, the test_batches_snapshots key format is not handled
+ // properly. Just keep it to be safe.
+ return Decision::kKeep;
+ }
+ uint64_t key_num = 0;
+ {
+ Slice ukey_without_ts = key;
+ assert(ukey_without_ts.size() >= FLAGS_user_timestamp_size);
+ ukey_without_ts.remove_suffix(FLAGS_user_timestamp_size);
+ [[maybe_unused]] bool ok =
+ GetIntVal(ukey_without_ts.ToString(), &key_num);
+ assert(ok);
+ }
+ port::Mutex* key_mutex = state_->GetMutexForKey(cf_id_, key_num);
+ if (!key_mutex->TryLock()) {
+ return Decision::kKeep;
+ }
+ // Reaching here means we acquired the lock.
+
+ bool key_exists = state_->Exists(cf_id_, key_num);
+ const bool allow_overwrite = state_->AllowsOverwrite(key_num);
+
+ key_mutex->Unlock();
+
+ if (!key_exists) {
+ return allow_overwrite ? Decision::kRemove : Decision::kPurge;
+ }
+ return Decision::kKeep;
+ }
+
+ const char* Name() const override { return "DbStressCompactionFilter"; }
+
+ private:
+ SharedState* const state_;
+ const int cf_id_;
+};
+
+class DbStressCompactionFilterFactory : public CompactionFilterFactory {
+ public:
+ DbStressCompactionFilterFactory() : state_(nullptr) {}
+
+ void SetSharedState(SharedState* state) {
+ MutexLock state_mutex_guard(&state_mutex_);
+ state_ = state;
+ }
+
+ std::unique_ptr<CompactionFilter> CreateCompactionFilter(
+ const CompactionFilter::Context& context) override {
+ MutexLock state_mutex_guard(&state_mutex_);
+ return std::unique_ptr<CompactionFilter>(
+ new DbStressCompactionFilter(state_, context.column_family_id));
+ }
+
+ const char* Name() const override {
+ return "DbStressCompactionFilterFactory";
+ }
+
+ private:
+ port::Mutex state_mutex_;
+ SharedState* state_;
+};
+
+} // namespace ROCKSDB_NAMESPACE
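How the factory is wired up is not shown in this header; an editorial sketch of the assumed usage (hypothetical variable names) is:

    auto factory = std::make_shared<DbStressCompactionFilterFactory>();
    Options options;
    options.compaction_filter_factory = factory;  // standard ColumnFamilyOptions field
    // ... open the DB and construct the SharedState `shared` ...
    factory->SetSharedState(shared);  // filters created from now on consult it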
diff --git a/src/rocksdb/db_stress_tool/db_stress_driver.cc b/src/rocksdb/db_stress_tool/db_stress_driver.cc
new file mode 100644
index 000000000..ed1240e00
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_driver.cc
@@ -0,0 +1,212 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+
+#ifdef GFLAGS
+#include "db_stress_tool/db_stress_common.h"
+#include "utilities/fault_injection_fs.h"
+
+namespace ROCKSDB_NAMESPACE {
+void ThreadBody(void* v) {
+ ThreadState* thread = reinterpret_cast<ThreadState*>(v);
+ SharedState* shared = thread->shared;
+
+ if (!FLAGS_skip_verifydb && shared->ShouldVerifyAtBeginning()) {
+ thread->shared->GetStressTest()->VerifyDb(thread);
+ }
+ {
+ MutexLock l(shared->GetMutex());
+ shared->IncInitialized();
+ if (shared->AllInitialized()) {
+ shared->GetCondVar()->SignalAll();
+ }
+ while (!shared->Started()) {
+ shared->GetCondVar()->Wait();
+ }
+ }
+ thread->shared->GetStressTest()->OperateDb(thread);
+
+ {
+ MutexLock l(shared->GetMutex());
+ shared->IncOperated();
+ if (shared->AllOperated()) {
+ shared->GetCondVar()->SignalAll();
+ }
+ while (!shared->VerifyStarted()) {
+ shared->GetCondVar()->Wait();
+ }
+ }
+
+ if (!FLAGS_skip_verifydb) {
+ thread->shared->GetStressTest()->VerifyDb(thread);
+ }
+
+ {
+ MutexLock l(shared->GetMutex());
+ shared->IncDone();
+ if (shared->AllDone()) {
+ shared->GetCondVar()->SignalAll();
+ }
+ }
+}
+
+bool RunStressTest(StressTest* stress) {
+ SystemClock* clock = db_stress_env->GetSystemClock().get();
+
+ SharedState shared(db_stress_env, stress);
+
+ if (shared.ShouldVerifyAtBeginning() && FLAGS_preserve_unverified_changes) {
+ Status s = InitUnverifiedSubdir(FLAGS_db);
+ if (s.ok() && !FLAGS_expected_values_dir.empty()) {
+ s = InitUnverifiedSubdir(FLAGS_expected_values_dir);
+ }
+ if (!s.ok()) {
+ fprintf(stderr, "Failed to setup unverified state dir: %s\n",
+ s.ToString().c_str());
+ exit(1);
+ }
+ }
+
+ stress->InitDb(&shared);
+ stress->FinishInitDb(&shared);
+
+ if (FLAGS_sync_fault_injection) {
+ fault_fs_guard->SetFilesystemDirectWritable(false);
+ }
+ if (FLAGS_write_fault_one_in) {
+ fault_fs_guard->EnableWriteErrorInjection();
+ }
+
+ uint32_t n = FLAGS_threads;
+ uint64_t now = clock->NowMicros();
+ fprintf(stdout, "%s Initializing worker threads\n",
+ clock->TimeToString(now / 1000000).c_str());
+
+ shared.SetThreads(n);
+
+ if (FLAGS_compaction_thread_pool_adjust_interval > 0) {
+ shared.IncBgThreads();
+ }
+
+ if (FLAGS_continuous_verification_interval > 0) {
+ shared.IncBgThreads();
+ }
+
+ std::vector<ThreadState*> threads(n);
+ for (uint32_t i = 0; i < n; i++) {
+ threads[i] = new ThreadState(i, &shared);
+ db_stress_env->StartThread(ThreadBody, threads[i]);
+ }
+
+ ThreadState bg_thread(0, &shared);
+ if (FLAGS_compaction_thread_pool_adjust_interval > 0) {
+ db_stress_env->StartThread(PoolSizeChangeThread, &bg_thread);
+ }
+
+ ThreadState continuous_verification_thread(0, &shared);
+ if (FLAGS_continuous_verification_interval > 0) {
+ db_stress_env->StartThread(DbVerificationThread,
+ &continuous_verification_thread);
+ }
+
+  // Each thread goes through the following states:
+  // initializing -> wait for others to init -> read/populate/depopulate ->
+  // wait for others to operate -> verify -> done
+
+ {
+ MutexLock l(shared.GetMutex());
+ while (!shared.AllInitialized()) {
+ shared.GetCondVar()->Wait();
+ }
+ if (shared.ShouldVerifyAtBeginning()) {
+ if (shared.HasVerificationFailedYet()) {
+ fprintf(stderr, "Crash-recovery verification failed :(\n");
+ } else {
+ fprintf(stdout, "Crash-recovery verification passed :)\n");
+ Status s = DestroyUnverifiedSubdir(FLAGS_db);
+ if (s.ok() && !FLAGS_expected_values_dir.empty()) {
+ s = DestroyUnverifiedSubdir(FLAGS_expected_values_dir);
+ }
+ if (!s.ok()) {
+ fprintf(stderr, "Failed to cleanup unverified state dir: %s\n",
+ s.ToString().c_str());
+ exit(1);
+ }
+ }
+ }
+
+ // This is after the verification step to avoid making all those `Get()`s
+ // and `MultiGet()`s contend on the DB-wide trace mutex.
+ if (!FLAGS_expected_values_dir.empty()) {
+ stress->TrackExpectedState(&shared);
+ }
+
+ now = clock->NowMicros();
+ fprintf(stdout, "%s Starting database operations\n",
+ clock->TimeToString(now / 1000000).c_str());
+
+ shared.SetStart();
+ shared.GetCondVar()->SignalAll();
+ while (!shared.AllOperated()) {
+ shared.GetCondVar()->Wait();
+ }
+
+ now = clock->NowMicros();
+ if (FLAGS_test_batches_snapshots) {
+ fprintf(stdout, "%s Limited verification already done during gets\n",
+ clock->TimeToString((uint64_t)now / 1000000).c_str());
+ } else if (FLAGS_skip_verifydb) {
+ fprintf(stdout, "%s Verification skipped\n",
+ clock->TimeToString((uint64_t)now / 1000000).c_str());
+ } else {
+ fprintf(stdout, "%s Starting verification\n",
+ clock->TimeToString((uint64_t)now / 1000000).c_str());
+ }
+
+ shared.SetStartVerify();
+ shared.GetCondVar()->SignalAll();
+ while (!shared.AllDone()) {
+ shared.GetCondVar()->Wait();
+ }
+ }
+
+ for (unsigned int i = 1; i < n; i++) {
+ threads[0]->stats.Merge(threads[i]->stats);
+ }
+ threads[0]->stats.Report("Stress Test");
+
+ for (unsigned int i = 0; i < n; i++) {
+ delete threads[i];
+ threads[i] = nullptr;
+ }
+ now = clock->NowMicros();
+ if (!FLAGS_skip_verifydb && !FLAGS_test_batches_snapshots &&
+ !shared.HasVerificationFailedYet()) {
+ fprintf(stdout, "%s Verification successful\n",
+ clock->TimeToString(now / 1000000).c_str());
+ }
+ stress->PrintStatistics();
+
+ if (FLAGS_compaction_thread_pool_adjust_interval > 0 ||
+ FLAGS_continuous_verification_interval > 0) {
+ MutexLock l(shared.GetMutex());
+ shared.SetShouldStopBgThread();
+ while (!shared.BgThreadsFinished()) {
+ shared.GetCondVar()->Wait();
+ }
+ }
+
+ if (shared.HasVerificationFailedYet()) {
+ fprintf(stderr, "Verification failed :(\n");
+ return false;
+ }
+ return true;
+}
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/db_stress_driver.h b/src/rocksdb/db_stress_tool/db_stress_driver.h
new file mode 100644
index 000000000..ff701fcb2
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_driver.h
@@ -0,0 +1,17 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifdef GFLAGS
+#pragma once
+#include "db_stress_tool/db_stress_test_base.h"
+namespace ROCKSDB_NAMESPACE {
+extern void ThreadBody(void* /*thread_state*/);
+extern bool RunStressTest(StressTest*);
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/db_stress_env_wrapper.h b/src/rocksdb/db_stress_tool/db_stress_env_wrapper.h
new file mode 100644
index 000000000..21f6db2ab
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_env_wrapper.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifdef GFLAGS
+#pragma once
+#include "db_stress_tool/db_stress_common.h"
+
+namespace ROCKSDB_NAMESPACE {
+class DbStressEnvWrapper : public EnvWrapper {
+ public:
+ explicit DbStressEnvWrapper(Env* t) : EnvWrapper(t) {}
+ static const char* kClassName() { return "DbStressEnv"; }
+ const char* Name() const override { return kClassName(); }
+
+ Status DeleteFile(const std::string& f) override {
+    // We determine whether a file is a manifest by searching for a substring,
+    // so there can be false positives if the directory path happens to contain
+    // the keyword, but that is unlikely.
+    // Checkpoint, backup, and restore directories need to be exempted.
+ if (!if_preserve_all_manifests ||
+ f.find("MANIFEST-") == std::string::npos ||
+ f.find("checkpoint") != std::string::npos ||
+ f.find(".backup") != std::string::npos ||
+ f.find(".restore") != std::string::npos) {
+ return target()->DeleteFile(f);
+ }
+    // Rename the file instead of deleting it, so the history is preserved
+    // while the file is no longer visible to RocksDB.
+ return target()->RenameFile(f, f + "_renamed_");
+ }
+
+  // If true, manifest files will not be deleted in DeleteFile().
+ bool if_preserve_all_manifests = true;
+};
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/db_stress_gflags.cc b/src/rocksdb/db_stress_tool/db_stress_gflags.cc
new file mode 100644
index 000000000..7adc66509
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_gflags.cc
@@ -0,0 +1,1074 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifdef GFLAGS
+#include "db_stress_tool/db_stress_common.h"
+
+static bool ValidateUint32Range(const char* flagname, uint64_t value) {
+ if (value > std::numeric_limits<uint32_t>::max()) {
+ fprintf(stderr, "Invalid value for --%s: %lu, overflow\n", flagname,
+ (unsigned long)value);
+ return false;
+ }
+ return true;
+}
+
+DEFINE_uint64(seed, 2341234,
+ "Seed for PRNG. When --nooverwritepercent is "
+ "nonzero and --expected_values_dir is nonempty, this value "
+ "must be fixed across invocations.");
+static const bool FLAGS_seed_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_seed, &ValidateUint32Range);
+
+DEFINE_bool(read_only, false, "True if open DB in read-only mode during tests");
+
+DEFINE_int64(max_key, 1 * KB * KB,
+ "Max number of key/values to place in database");
+
+DEFINE_int32(max_key_len, 3, "Maximum length of a key in 8-byte units");
+
+DEFINE_string(key_len_percent_dist, "",
+ "Percentages of keys of various lengths. For example, 1,30,69 "
+ "means 1% of keys are 8 bytes, 30% are 16 bytes, and 69% are "
+ "24 bytes. If not specified, it will be evenly distributed");
+
+DEFINE_int32(key_window_scale_factor, 10,
+ "This value will be multiplied by 100 to come up with a window "
+ "size for varying the key length");
+
+DEFINE_int32(column_families, 10, "Number of column families");
+
+DEFINE_double(
+ hot_key_alpha, 0,
+ "Use Zipfian distribution to generate the key "
+ "distribution. If it is not specified, write path will use random "
+ "distribution to generate the keys. The parameter is [0, double_max]). "
+ "However, the larger alpha is, the more shewed will be. If alpha is "
+ "larger than 2, it is likely that only 1 key will be accessed. The "
+ "Recommended value is [0.8-1.5]. The distribution is also related to "
+ "max_key and total iterations of generating the hot key. ");
+
+DEFINE_string(
+ options_file, "",
+ "The path to a RocksDB options file. If specified, then db_stress will "
+ "run with the RocksDB options in the default column family of the "
+ "specified options file. Note that, when an options file is provided, "
+ "db_stress will ignore the flag values for all options that may be passed "
+ "via options file.");
+
+DEFINE_int64(
+ active_width, 0,
+ "Number of keys in active span of the key-range at any given time. The "
+ "span begins with its left endpoint at key 0, gradually moves rightwards, "
+ "and ends with its right endpoint at max_key. If set to 0, active_width "
+ "will be sanitized to be equal to max_key.");
+
+// TODO(noetzli) Add support for single deletes
+DEFINE_bool(test_batches_snapshots, false,
+ "If set, the test uses MultiGet(), MultiPut() and MultiDelete()"
+ " which read/write/delete multiple keys in a batch. In this mode,"
+ " we do not verify db content by comparing the content with the "
+ "pre-allocated array. Instead, we do partial verification inside"
+ " MultiGet() by checking various values in a batch. Benefit of"
+ " this mode:\n"
+ "\t(a) No need to acquire mutexes during writes (less cache "
+ "flushes in multi-core leading to speed up)\n"
+ "\t(b) No long validation at the end (more speed up)\n"
+ "\t(c) Test snapshot and atomicity of batch writes");
+
+DEFINE_bool(atomic_flush, false,
+ "If set, enables atomic flush in the options.\n");
+
+DEFINE_int32(
+ manual_wal_flush_one_in, 0,
+ "If non-zero, then `FlushWAL(bool sync)`, where `bool sync` is randomly "
+ "decided, will be explictly called in db stress once for every N ops "
+ "on average. Setting `manual_wal_flush_one_in` to be greater than 0 "
+ "implies `Options::manual_wal_flush = true` is set.");
+
+DEFINE_bool(test_cf_consistency, false,
+ "If set, runs the stress test dedicated to verifying writes to "
+ "multiple column families are consistent. Setting this implies "
+ "`atomic_flush=true` is set true if `disable_wal=false`.\n");
+
+DEFINE_bool(test_multi_ops_txns, false,
+ "If set, runs stress test dedicated to verifying multi-ops "
+ "transactions on a simple relational table with primary and "
+ "secondary index.");
+
+DEFINE_int32(threads, 32, "Number of concurrent threads to run.");
+
+DEFINE_int32(ttl, -1,
+ "Opens the db with this ttl value if this is not -1. "
+ "Carefully specify a large value such that verifications on "
+ "deleted values don't fail");
+
+DEFINE_int32(value_size_mult, 8,
+ "Size of value will be this number times rand_int(1,3) bytes");
+
+DEFINE_int32(compaction_readahead_size, 0, "Compaction readahead size");
+
+DEFINE_bool(enable_pipelined_write, false, "Pipeline WAL/memtable writes");
+
+DEFINE_bool(verify_before_write, false, "Verify before write");
+
+DEFINE_bool(histogram, false, "Print histogram of operation timings");
+
+DEFINE_bool(destroy_db_initially, true,
+ "Destroys the database dir before start if this is true");
+
+DEFINE_bool(verbose, false, "Verbose");
+
+DEFINE_bool(progress_reports, true,
+ "If true, db_stress will report number of finished operations");
+
+DEFINE_uint64(db_write_buffer_size,
+ ROCKSDB_NAMESPACE::Options().db_write_buffer_size,
+ "Number of bytes to buffer in all memtables before compacting");
+
+DEFINE_int32(
+ write_buffer_size,
+ static_cast<int32_t>(ROCKSDB_NAMESPACE::Options().write_buffer_size),
+ "Number of bytes to buffer in memtable before compacting");
+
+DEFINE_int32(max_write_buffer_number,
+ ROCKSDB_NAMESPACE::Options().max_write_buffer_number,
+ "The number of in-memory memtables. "
+ "Each memtable is of size FLAGS_write_buffer_size.");
+
+DEFINE_int32(min_write_buffer_number_to_merge,
+ ROCKSDB_NAMESPACE::Options().min_write_buffer_number_to_merge,
+ "The minimum number of write buffers that will be merged together "
+ "before writing to storage. This is cheap because it is an "
+ "in-memory merge. If this feature is not enabled, then all these "
+ "write buffers are flushed to L0 as separate files and this "
+ "increases read amplification because a get request has to check "
+ "in all of these files. Also, an in-memory merge may result in "
+ "writing less data to storage if there are duplicate records in"
+ " each of these individual write buffers.");
+
+DEFINE_int32(max_write_buffer_number_to_maintain,
+ ROCKSDB_NAMESPACE::Options().max_write_buffer_number_to_maintain,
+ "The total maximum number of write buffers to maintain in memory "
+ "including copies of buffers that have already been flushed. "
+ "Unlike max_write_buffer_number, this parameter does not affect "
+ "flushing. This controls the minimum amount of write history "
+ "that will be available in memory for conflict checking when "
+ "Transactions are used. If this value is too low, some "
+ "transactions may fail at commit time due to not being able to "
+ "determine whether there were any write conflicts. Setting this "
+ "value to 0 will cause write buffers to be freed immediately "
+ "after they are flushed. If this value is set to -1, "
+ "'max_write_buffer_number' will be used.");
+
+DEFINE_int64(max_write_buffer_size_to_maintain,
+ ROCKSDB_NAMESPACE::Options().max_write_buffer_size_to_maintain,
+ "The total maximum size of write buffers to maintain in memory "
+ "including copies of buffers that have already been flushed. "
+ "Unlike max_write_buffer_number, this parameter does not affect "
+ "flushing. This controls the minimum amount of write history "
+ "that will be available in memory for conflict checking when "
+ "Transactions are used. If this value is too low, some "
+ "transactions may fail at commit time due to not being able to "
+ "determine whether there were any write conflicts. Setting this "
+ "value to 0 will cause write buffers to be freed immediately "
+ "after they are flushed. If this value is set to -1, "
+ "'max_write_buffer_number' will be used.");
+
+DEFINE_double(memtable_prefix_bloom_size_ratio,
+ ROCKSDB_NAMESPACE::Options().memtable_prefix_bloom_size_ratio,
+ "creates prefix blooms for memtables, each with size "
+ "`write_buffer_size * memtable_prefix_bloom_size_ratio`.");
+
+DEFINE_bool(memtable_whole_key_filtering,
+ ROCKSDB_NAMESPACE::Options().memtable_whole_key_filtering,
+ "Enable whole key filtering in memtables.");
+
+DEFINE_int32(open_files, ROCKSDB_NAMESPACE::Options().max_open_files,
+ "Maximum number of files to keep open at the same time "
+ "(use default if == 0)");
+
+DEFINE_int64(compressed_cache_size, 0,
+ "Number of bytes to use as a cache of compressed data."
+ " 0 means use default settings.");
+
+DEFINE_int32(
+ compressed_cache_numshardbits, -1,
+ "Number of shards for the compressed block cache is 2 ** "
+ "compressed_cache_numshardbits. Negative value means default settings. "
+ "This is applied only if compressed_cache_size is greater than 0.");
+
+DEFINE_int32(compaction_style, ROCKSDB_NAMESPACE::Options().compaction_style,
+ "");
+
+DEFINE_int32(compaction_pri, ROCKSDB_NAMESPACE::Options().compaction_pri,
+ "Which file from a level should be picked to merge to the next "
+ "level in level-based compaction");
+
+DEFINE_int32(num_levels, ROCKSDB_NAMESPACE::Options().num_levels,
+ "Number of levels in the DB");
+
+DEFINE_int32(level0_file_num_compaction_trigger,
+ ROCKSDB_NAMESPACE::Options().level0_file_num_compaction_trigger,
+ "Level0 compaction start trigger");
+
+DEFINE_int32(level0_slowdown_writes_trigger,
+ ROCKSDB_NAMESPACE::Options().level0_slowdown_writes_trigger,
+ "Number of files in level-0 that will slow down writes");
+
+DEFINE_int32(level0_stop_writes_trigger,
+ ROCKSDB_NAMESPACE::Options().level0_stop_writes_trigger,
+ "Number of files in level-0 that will trigger put stop.");
+
+DEFINE_int32(block_size,
+ static_cast<int32_t>(
+ ROCKSDB_NAMESPACE::BlockBasedTableOptions().block_size),
+ "Number of bytes in a block.");
+
+DEFINE_int32(format_version,
+ static_cast<int32_t>(
+ ROCKSDB_NAMESPACE::BlockBasedTableOptions().format_version),
+ "Format version of SST files.");
+
+DEFINE_int32(
+ index_block_restart_interval,
+ ROCKSDB_NAMESPACE::BlockBasedTableOptions().index_block_restart_interval,
+ "Number of keys between restart points "
+ "for delta encoding of keys in index block.");
+
+DEFINE_bool(disable_auto_compactions,
+ ROCKSDB_NAMESPACE::Options().disable_auto_compactions,
+ "If true, RocksDB internally will not trigger compactions.");
+
+DEFINE_int32(max_background_compactions,
+ ROCKSDB_NAMESPACE::Options().max_background_compactions,
+ "The maximum number of concurrent background compactions "
+ "that can occur in parallel.");
+
+DEFINE_int32(num_bottom_pri_threads, 0,
+ "The number of threads in the bottom-priority thread pool (used "
+ "by universal compaction only).");
+
+DEFINE_int32(compaction_thread_pool_adjust_interval, 0,
+ "The interval (in milliseconds) to adjust compaction thread pool "
+ "size. Don't change it periodically if the value is 0.");
+
+DEFINE_int32(compaction_thread_pool_variations, 2,
+ "Range of background thread pool size variations when adjusted "
+ "periodically.");
+
+DEFINE_int32(max_background_flushes,
+ ROCKSDB_NAMESPACE::Options().max_background_flushes,
+ "The maximum number of concurrent background flushes "
+ "that can occur in parallel.");
+
+DEFINE_int32(universal_size_ratio, 0,
+ "The ratio of file sizes that trigger"
+ " compaction in universal style");
+
+DEFINE_int32(universal_min_merge_width, 0,
+ "The minimum number of files to "
+ "compact in universal style compaction");
+
+DEFINE_int32(universal_max_merge_width, 0,
+ "The max number of files to compact"
+ " in universal style compaction");
+
+DEFINE_int32(universal_max_size_amplification_percent, 0,
+ "The max size amplification for universal style compaction");
+
+DEFINE_int32(clear_column_family_one_in, 1000000,
+ "With a chance of 1/N, delete a column family and then recreate "
+ "it again. If N == 0, never drop/create column families. "
+ "When test_batches_snapshots is true, this flag has no effect");
+
+DEFINE_int32(get_live_files_one_in, 1000000,
+ "With a chance of 1/N, call GetLiveFiles to verify if it returns "
+ "correctly. If N == 0, do not call the interface.");
+
+DEFINE_int32(
+ get_sorted_wal_files_one_in, 1000000,
+ "With a chance of 1/N, call GetSortedWalFiles to verify if it returns "
+ "correctly. (Note that this API may legitimately return an error.) If N == "
+ "0, do not call the interface.");
+
+DEFINE_int32(
+ get_current_wal_file_one_in, 1000000,
+ "With a chance of 1/N, call GetCurrentWalFile to verify if it returns "
+ "correctly. (Note that this API may legitimately return an error.) If N == "
+ "0, do not call the interface.");
+
+DEFINE_int32(set_options_one_in, 0,
+ "With a chance of 1/N, change some random options");
+
+DEFINE_int32(set_in_place_one_in, 0,
+ "With a chance of 1/N, toggle in place support option");
+
+DEFINE_int64(cache_size, 2LL * KB * KB * KB,
+ "Number of bytes to use as a cache of uncompressed data.");
+
+DEFINE_int32(cache_numshardbits, 6,
+ "Number of shards for the block cache"
+ " is 2 ** cache_numshardbits. Negative means use default settings."
+ " This is applied only if FLAGS_cache_size is greater than 0.");
+
+DEFINE_bool(cache_index_and_filter_blocks, false,
+ "True if indexes/filters should be cached in block cache.");
+
+DEFINE_bool(charge_compression_dictionary_building_buffer, false,
+ "Setting for "
+ "CacheEntryRoleOptions::charged of "
+ "CacheEntryRole::kCompressionDictionaryBuildingBuffer");
+
+DEFINE_bool(charge_filter_construction, false,
+ "Setting for "
+ "CacheEntryRoleOptions::charged of "
+ "CacheEntryRole::kFilterConstruction");
+
+DEFINE_bool(charge_table_reader, false,
+ "Setting for "
+ "CacheEntryRoleOptions::charged of "
+ "CacheEntryRole::kBlockBasedTableReader");
+
+DEFINE_bool(charge_file_metadata, false,
+ "Setting for "
+ "CacheEntryRoleOptions::charged of "
+ "kFileMetadata");
+
+DEFINE_bool(charge_blob_cache, false,
+ "Setting for "
+ "CacheEntryRoleOptions::charged of "
+ "kBlobCache");
+
+DEFINE_int32(
+ top_level_index_pinning,
+ static_cast<int32_t>(ROCKSDB_NAMESPACE::PinningTier::kFallback),
+ "Type of pinning for top-level indexes into metadata partitions (see "
+ "`enum PinningTier` in table.h)");
+
+DEFINE_int32(
+ partition_pinning,
+ static_cast<int32_t>(ROCKSDB_NAMESPACE::PinningTier::kFallback),
+ "Type of pinning for metadata partitions (see `enum PinningTier` in "
+ "table.h)");
+
+DEFINE_int32(
+ unpartitioned_pinning,
+ static_cast<int32_t>(ROCKSDB_NAMESPACE::PinningTier::kFallback),
+ "Type of pinning for unpartitioned metadata blocks (see `enum PinningTier` "
+ "in table.h)");
+
+DEFINE_string(cache_type, "lru_cache", "Type of block cache.");
+
+DEFINE_uint64(subcompactions, 1,
+ "Maximum number of subcompactions to divide L0-L1 compactions "
+ "into.");
+
+DEFINE_uint64(periodic_compaction_seconds, 1000,
+ "Files older than this value will be picked up for compaction.");
+
+DEFINE_uint64(compaction_ttl, 1000,
+ "Files older than TTL will be compacted to the next level.");
+
+DEFINE_bool(allow_concurrent_memtable_write, false,
+ "Allow multi-writers to update mem tables in parallel.");
+
+DEFINE_double(experimental_mempurge_threshold, 0.0,
+ "Maximum estimated useful payload that triggers a "
+ "mempurge process to collect memtable garbage bytes.");
+
+DEFINE_bool(enable_write_thread_adaptive_yield, true,
+ "Use a yielding spin loop for brief writer thread waits.");
+
+#ifndef ROCKSDB_LITE
+// Options for StackableDB-based BlobDB
+DEFINE_bool(use_blob_db, false, "[Stacked BlobDB] Use BlobDB.");
+
+DEFINE_uint64(
+ blob_db_min_blob_size,
+ ROCKSDB_NAMESPACE::blob_db::BlobDBOptions().min_blob_size,
+ "[Stacked BlobDB] Smallest blob to store in a file. Blobs "
+ "smaller than this will be inlined with the key in the LSM tree.");
+
+DEFINE_uint64(
+ blob_db_bytes_per_sync,
+ ROCKSDB_NAMESPACE::blob_db::BlobDBOptions().bytes_per_sync,
+ "[Stacked BlobDB] Sync blob files once per every N bytes written.");
+
+DEFINE_uint64(blob_db_file_size,
+ ROCKSDB_NAMESPACE::blob_db::BlobDBOptions().blob_file_size,
+ "[Stacked BlobDB] Target size of each blob file.");
+
+DEFINE_bool(
+ blob_db_enable_gc,
+ ROCKSDB_NAMESPACE::blob_db::BlobDBOptions().enable_garbage_collection,
+ "[Stacked BlobDB] Enable BlobDB garbage collection.");
+
+DEFINE_double(
+ blob_db_gc_cutoff,
+ ROCKSDB_NAMESPACE::blob_db::BlobDBOptions().garbage_collection_cutoff,
+ "[Stacked BlobDB] Cutoff ratio for BlobDB garbage collection.");
+#endif // !ROCKSDB_LITE
+
+// Options for integrated BlobDB
+DEFINE_bool(allow_setting_blob_options_dynamically, false,
+ "[Integrated BlobDB] Allow setting blob options dynamically.");
+
+DEFINE_bool(
+ enable_blob_files,
+ ROCKSDB_NAMESPACE::AdvancedColumnFamilyOptions().enable_blob_files,
+ "[Integrated BlobDB] Enable writing large values to separate blob files.");
+
+DEFINE_uint64(min_blob_size,
+ ROCKSDB_NAMESPACE::AdvancedColumnFamilyOptions().min_blob_size,
+ "[Integrated BlobDB] The size of the smallest value to be stored "
+ "separately in a blob file.");
+
+DEFINE_uint64(blob_file_size,
+ ROCKSDB_NAMESPACE::AdvancedColumnFamilyOptions().blob_file_size,
+ "[Integrated BlobDB] The size limit for blob files.");
+
+DEFINE_string(blob_compression_type, "none",
+ "[Integrated BlobDB] The compression algorithm to use for large "
+ "values stored in blob files.");
+
+DEFINE_bool(enable_blob_garbage_collection,
+ ROCKSDB_NAMESPACE::AdvancedColumnFamilyOptions()
+ .enable_blob_garbage_collection,
+ "[Integrated BlobDB] Enable blob garbage collection.");
+
+DEFINE_double(blob_garbage_collection_age_cutoff,
+ ROCKSDB_NAMESPACE::AdvancedColumnFamilyOptions()
+ .blob_garbage_collection_age_cutoff,
+ "[Integrated BlobDB] The cutoff in terms of blob file age for "
+ "garbage collection.");
+
+DEFINE_double(blob_garbage_collection_force_threshold,
+ ROCKSDB_NAMESPACE::AdvancedColumnFamilyOptions()
+ .blob_garbage_collection_force_threshold,
+ "[Integrated BlobDB] The threshold for the ratio of garbage in "
+ "the oldest blob files for forcing garbage collection.");
+
+DEFINE_uint64(blob_compaction_readahead_size,
+ ROCKSDB_NAMESPACE::AdvancedColumnFamilyOptions()
+ .blob_compaction_readahead_size,
+ "[Integrated BlobDB] Compaction readahead for blob files.");
+
+DEFINE_int32(
+ blob_file_starting_level,
+ ROCKSDB_NAMESPACE::AdvancedColumnFamilyOptions().blob_file_starting_level,
+ "[Integrated BlobDB] Enable writing blob files during flushes and "
+ "compactions starting from the specified level.");
+
+DEFINE_bool(use_blob_cache, false, "[Integrated BlobDB] Enable blob cache.");
+
+DEFINE_bool(
+ use_shared_block_and_blob_cache, true,
+ "[Integrated BlobDB] Use a shared backing cache for both block "
+ "cache and blob cache. It only takes effect if use_blob_cache is enabled.");
+
+DEFINE_uint64(
+ blob_cache_size, 2LL * KB * KB * KB,
+ "[Integrated BlobDB] Number of bytes to use as a cache of blobs. It only "
+ "takes effect if the block and blob caches are different "
+ "(use_shared_block_and_blob_cache = false).");
+
+DEFINE_int32(blob_cache_numshardbits, 6,
+ "[Integrated BlobDB] Number of shards for the blob cache is 2 ** "
+ "blob_cache_numshardbits. Negative means use default settings. "
+ "It only takes effect if blob_cache_size is greater than 0, and "
+ "the block and blob caches are different "
+ "(use_shared_block_and_blob_cache = false).");
+
+DEFINE_int32(prepopulate_blob_cache, 0,
+ "[Integrated BlobDB] Pre-populate hot/warm blobs in blob cache. 0 "
+ "to disable and 1 to insert during flush.");
+
+DEFINE_bool(enable_tiered_storage, false, "Set last_level_temperature");
+
+DEFINE_int64(preclude_last_level_data_seconds, 0,
+ "Preclude data from the last level. Used with tiered storage "
+ "feature to preclude new data from comacting to the last level.");
+
+DEFINE_int64(
+ preserve_internal_time_seconds, 0,
+ "Preserve internal time information which is attached to each SST.");
+
+static const bool FLAGS_subcompactions_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_subcompactions, &ValidateUint32Range);
+
+static bool ValidateInt32Positive(const char* flagname, int32_t value) {
+ if (value < 0) {
+ fprintf(stderr, "Invalid value for --%s: %d, must be >=0\n", flagname,
+ value);
+ return false;
+ }
+ return true;
+}
+DEFINE_int32(reopen, 10, "Number of times database reopens");
+static const bool FLAGS_reopen_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_reopen, &ValidateInt32Positive);
+
+DEFINE_double(bloom_bits, 10,
+ "Bloom filter bits per key. "
+ "Negative means use default settings.");
+
+DEFINE_int32(
+ ribbon_starting_level, 999,
+ "Use Bloom filter on levels below specified and Ribbon beginning on level "
+ "specified. Flush is considered level -1. 999 or more -> always Bloom. 0 "
+ "-> Ribbon except Bloom for flush. -1 -> always Ribbon.");
+
+DEFINE_bool(partition_filters, false,
+ "use partitioned filters "
+ "for block-based table");
+
+DEFINE_bool(
+ optimize_filters_for_memory,
+ ROCKSDB_NAMESPACE::BlockBasedTableOptions().optimize_filters_for_memory,
+ "Minimize memory footprint of filters");
+
+DEFINE_bool(
+ detect_filter_construct_corruption,
+ ROCKSDB_NAMESPACE::BlockBasedTableOptions()
+ .detect_filter_construct_corruption,
+ "Detect corruption during new Bloom Filter and Ribbon Filter construction");
+
+DEFINE_int32(
+ index_type,
+ static_cast<int32_t>(
+ ROCKSDB_NAMESPACE::BlockBasedTableOptions().index_type),
+ "Type of block-based table index (see `enum IndexType` in table.h)");
+
+DEFINE_int32(
+ data_block_index_type,
+ static_cast<int32_t>(
+ ROCKSDB_NAMESPACE::BlockBasedTableOptions().data_block_index_type),
+ "Index type for data blocks (see `enum DataBlockIndexType` in table.h)");
+
+DEFINE_string(db, "", "Use the db with the following name.");
+
+DEFINE_string(secondaries_base, "",
+ "Use this path as the base path for secondary instances.");
+
+DEFINE_bool(test_secondary, false,
+ "If true, start an additional secondary instance which can be used "
+ "for verification.");
+
+DEFINE_string(
+ expected_values_dir, "",
+ "Dir where files containing info about the latest/historical values will "
+ "be stored. If provided and non-empty, the DB state will be verified "
+ "against values from these files after recovery. --max_key and "
+ "--column_family must be kept the same across invocations of this program "
+ "that use the same --expected_values_dir. Currently historical values are "
+ "only tracked when --sync_fault_injection is set. See --seed and "
+ "--nooverwritepercent for further requirements.");
+
+DEFINE_bool(verify_checksum, false,
+ "Verify checksum for every block read from storage");
+
+DEFINE_bool(mmap_read, ROCKSDB_NAMESPACE::Options().allow_mmap_reads,
+ "Allow reads to occur via mmap-ing files");
+
+DEFINE_bool(mmap_write, ROCKSDB_NAMESPACE::Options().allow_mmap_writes,
+ "Allow writes to occur via mmap-ing files");
+
+DEFINE_bool(use_direct_reads, ROCKSDB_NAMESPACE::Options().use_direct_reads,
+ "Use O_DIRECT for reading data");
+
+DEFINE_bool(use_direct_io_for_flush_and_compaction,
+ ROCKSDB_NAMESPACE::Options().use_direct_io_for_flush_and_compaction,
+ "Use O_DIRECT for writing data");
+
+DEFINE_bool(mock_direct_io, false,
+ "Mock direct IO by not using O_DIRECT for direct IO read");
+
+DEFINE_bool(statistics, false, "Create database statistics");
+
+DEFINE_bool(sync, false, "Sync all writes to disk");
+
+DEFINE_bool(use_fsync, false, "If true, issue fsync instead of fdatasync");
+
+DEFINE_uint64(bytes_per_sync, ROCKSDB_NAMESPACE::Options().bytes_per_sync,
+ "If nonzero, sync SST file data incrementally after every "
+ "`bytes_per_sync` bytes are written");
+
+DEFINE_uint64(wal_bytes_per_sync,
+ ROCKSDB_NAMESPACE::Options().wal_bytes_per_sync,
+ "If nonzero, sync WAL file data incrementally after every "
+ "`bytes_per_sync` bytes are written");
+
+DEFINE_int32(kill_random_test, 0,
+ "If non-zero, kill at various points in source code with "
+ "probability 1/this");
+static const bool FLAGS_kill_random_test_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_kill_random_test, &ValidateInt32Positive);
+
+DEFINE_string(kill_exclude_prefixes, "",
+ "If non-empty, kill points with prefix in the list given will be"
+ " skipped. Items are comma-separated.");
+extern std::vector<std::string> rocksdb_kill_exclude_prefixes;
+
+DEFINE_bool(disable_wal, false, "If true, do not write WAL for write.");
+
+DEFINE_uint64(recycle_log_file_num,
+ ROCKSDB_NAMESPACE::Options().recycle_log_file_num,
+ "Number of old WAL files to keep around for later recycling");
+
+DEFINE_int64(target_file_size_base,
+ ROCKSDB_NAMESPACE::Options().target_file_size_base,
+ "Target level-1 file size for compaction");
+
+DEFINE_int32(target_file_size_multiplier, 1,
+ "A multiplier to compute target level-N file size (N >= 2)");
+
+DEFINE_uint64(max_bytes_for_level_base,
+ ROCKSDB_NAMESPACE::Options().max_bytes_for_level_base,
+ "Max bytes for level-1");
+
+DEFINE_double(max_bytes_for_level_multiplier, 2,
+ "A multiplier to compute max bytes for level-N (N >= 2)");
+
+DEFINE_int32(range_deletion_width, 10,
+ "The width of the range deletion intervals.");
+
+DEFINE_uint64(rate_limiter_bytes_per_sec, 0, "Set options.rate_limiter value.");
+
+DEFINE_bool(rate_limit_bg_reads, false,
+ "Use options.rate_limiter on compaction reads");
+
+DEFINE_bool(rate_limit_user_ops, false,
+ "When true use Env::IO_USER priority level to charge internal rate "
+ "limiter for reads associated with user operations.");
+
+DEFINE_bool(rate_limit_auto_wal_flush, false,
+ "When true use Env::IO_USER priority level to charge internal rate "
+ "limiter for automatic WAL flush (`Options::manual_wal_flush` == "
+ "false) after the user "
+ "write operation.");
+
+DEFINE_uint64(sst_file_manager_bytes_per_sec, 0,
+ "Set `Options::sst_file_manager` to delete at this rate. By "
+ "default the deletion rate is unbounded.");
+
+DEFINE_uint64(sst_file_manager_bytes_per_truncate, 0,
+ "Set `Options::sst_file_manager` to delete in chunks of this "
+ "many bytes. By default whole files will be deleted.");
+
+DEFINE_bool(use_txn, false,
+ "Use TransactionDB. Currently the default write policy is "
+ "TxnDBWritePolicy::WRITE_PREPARED");
+
+DEFINE_uint64(txn_write_policy, 0,
+ "The transaction write policy. Default is "
+ "TxnDBWritePolicy::WRITE_COMMITTED. Note that this should not be "
+ "changed accross crashes.");
+
+DEFINE_bool(unordered_write, false,
+ "Turn on the unordered_write feature. This options is currently "
+ "tested only in combination with use_txn=true and "
+ "txn_write_policy=TxnDBWritePolicy::WRITE_PREPARED.");
+
+DEFINE_int32(backup_one_in, 0,
+ "If non-zero, then CreateNewBackup() will be called once for "
+ "every N operations on average. 0 indicates CreateNewBackup() "
+ "is disabled.");
+
+DEFINE_uint64(backup_max_size, 100 * 1024 * 1024,
+ "If non-zero, skip checking backup/restore when DB size in "
+ "bytes exceeds this setting.");
+
+DEFINE_int32(checkpoint_one_in, 0,
+ "If non-zero, then CreateCheckpoint() will be called once for "
+ "every N operations on average. 0 indicates CreateCheckpoint() "
+ "is disabled.");
+
+DEFINE_int32(ingest_external_file_one_in, 0,
+ "If non-zero, then IngestExternalFile() will be called once for "
+ "every N operations on average. 0 indicates IngestExternalFile() "
+ "is disabled.");
+
+DEFINE_int32(ingest_external_file_width, 100,
+ "The width of the ingested external files.");
+
+DEFINE_int32(compact_files_one_in, 0,
+ "If non-zero, then CompactFiles() will be called once for every N "
+ "operations on average. 0 indicates CompactFiles() is disabled.");
+
+DEFINE_int32(compact_range_one_in, 0,
+ "If non-zero, then CompactRange() will be called once for every N "
+ "operations on average. 0 indicates CompactRange() is disabled.");
+
+DEFINE_int32(mark_for_compaction_one_file_in, 0,
+ "A `TablePropertiesCollectorFactory` will be registered, which "
+ "creates a `TablePropertiesCollector` with `NeedCompact()` "
+ "returning true once for every N files on average. 0 or negative "
+ "mean `NeedCompact()` always returns false.");
+
+DEFINE_int32(flush_one_in, 0,
+ "If non-zero, then Flush() will be called once for every N ops "
+ "on average. 0 indicates calls to Flush() are disabled.");
+
+DEFINE_int32(pause_background_one_in, 0,
+ "If non-zero, then PauseBackgroundWork()+Continue will be called "
+ "once for every N ops on average. 0 disables.");
+
+DEFINE_int32(compact_range_width, 10000,
+ "The width of the ranges passed to CompactRange().");
+
+DEFINE_int32(acquire_snapshot_one_in, 0,
+ "If non-zero, then acquires a snapshot once every N operations on "
+ "average.");
+
+DEFINE_bool(compare_full_db_state_snapshot, false,
+ "If set we compare state of entire db (in one of the threads) with"
+ "each snapshot.");
+
+DEFINE_uint64(snapshot_hold_ops, 0,
+ "If non-zero, then releases snapshots N operations after they're "
+ "acquired.");
+
+DEFINE_bool(long_running_snapshots, false,
+ "If set, hold on some some snapshots for much longer time.");
+
+DEFINE_bool(use_multiget, false,
+ "If set, use the batched MultiGet API for reads");
+
+static bool ValidateInt32Percent(const char* flagname, int32_t value) {
+ if (value < 0 || value > 100) {
+ fprintf(stderr, "Invalid value for --%s: %d, 0<= pct <=100 \n", flagname,
+ value);
+ return false;
+ }
+ return true;
+}
+
+DEFINE_int32(readpercent, 10,
+ "Ratio of reads to total workload (expressed as a percentage)");
+static const bool FLAGS_readpercent_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_readpercent, &ValidateInt32Percent);
+
+DEFINE_int32(prefixpercent, 20,
+ "Ratio of prefix iterators to total workload (expressed as a"
+ " percentage)");
+static const bool FLAGS_prefixpercent_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_prefixpercent, &ValidateInt32Percent);
+
+DEFINE_int32(writepercent, 45,
+ "Ratio of writes to total workload (expressed as a percentage)");
+static const bool FLAGS_writepercent_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_writepercent, &ValidateInt32Percent);
+
+DEFINE_int32(delpercent, 15,
+ "Ratio of deletes to total workload (expressed as a percentage)");
+static const bool FLAGS_delpercent_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_delpercent, &ValidateInt32Percent);
+
+DEFINE_int32(delrangepercent, 0,
+ "Ratio of range deletions to total workload (expressed as a "
+ "percentage). Cannot be used with test_batches_snapshots");
+static const bool FLAGS_delrangepercent_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_delrangepercent, &ValidateInt32Percent);
+
+DEFINE_int32(nooverwritepercent, 60,
+ "Ratio of keys without overwrite to total workload (expressed as "
+ "a percentage). When --expected_values_dir is nonempty, must "
+ "keep this value constant across invocations.");
+static const bool FLAGS_nooverwritepercent_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_nooverwritepercent, &ValidateInt32Percent);
+
+DEFINE_int32(iterpercent, 10,
+ "Ratio of iterations to total workload"
+ " (expressed as a percentage)");
+static const bool FLAGS_iterpercent_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_iterpercent, &ValidateInt32Percent);
+
+DEFINE_uint64(num_iterations, 10, "Number of iterations per MultiIterate run");
+static const bool FLAGS_num_iterations_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_num_iterations, &ValidateUint32Range);
+
+DEFINE_int32(
+ customopspercent, 0,
+ "Ratio of custom operations to total workload (expressed as a percentage)");
+
+DEFINE_string(compression_type, "snappy",
+ "Algorithm to use to compress the database");
+
+DEFINE_int32(compression_max_dict_bytes, 0,
+ "Maximum size of dictionary used to prime the compression "
+ "library.");
+
+DEFINE_int32(compression_zstd_max_train_bytes, 0,
+ "Maximum size of training data passed to zstd's dictionary "
+ "trainer.");
+
+DEFINE_int32(compression_parallel_threads, 1,
+ "Number of threads for parallel compression.");
+
+DEFINE_uint64(compression_max_dict_buffer_bytes, 0,
+ "Buffering limit for SST file data to sample for dictionary "
+ "compression.");
+
+DEFINE_bool(
+ compression_use_zstd_dict_trainer, true,
+ "Use zstd's trainer to generate dictionary. If the options is false, "
+ "zstd's finalizeDictionary() API is used to generate dictionary. "
+ "ZSTD 1.4.5+ is required. If ZSTD 1.4.5+ is not linked with the binary, "
+ "this flag will have the default value true.");
+
+DEFINE_string(bottommost_compression_type, "disable",
+ "Algorithm to use to compress bottommost level of the database. "
+ "\"disable\" means disabling the feature");
+
+DEFINE_string(checksum_type, "kCRC32c", "Algorithm to use to checksum blocks");
+
+DEFINE_string(env_uri, "",
+ "URI for env lookup. Mutually exclusive with --fs_uri");
+
+DEFINE_string(fs_uri, "",
+ "URI for registry Filesystem lookup. Mutually exclusive"
+ " with --env_uri."
+ " Creates a default environment with the specified filesystem.");
+
+DEFINE_uint64(ops_per_thread, 1200000, "Number of operations per thread.");
+static const bool FLAGS_ops_per_thread_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_ops_per_thread, &ValidateUint32Range);
+
+DEFINE_uint64(log2_keys_per_lock, 2, "Log2 of number of keys per lock");
+static const bool FLAGS_log2_keys_per_lock_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_log2_keys_per_lock, &ValidateUint32Range);
+
+DEFINE_uint64(max_manifest_file_size, 16384, "Maximum size of a MANIFEST file");
+
+DEFINE_bool(in_place_update, false, "On true, does inplace update in memtable");
+
+DEFINE_string(memtablerep, "skip_list", "");
+
+inline static bool ValidatePrefixSize(const char* flagname, int32_t value) {
+ if (value < -1 || value > 8) {
+ fprintf(stderr, "Invalid value for --%s: %d. -1 <= PrefixSize <= 8\n",
+ flagname, value);
+ return false;
+ }
+ return true;
+}
+DEFINE_int32(prefix_size, 7,
+ "Control the prefix size for HashSkipListRep. "
+ "-1 is disabled.");
+static const bool FLAGS_prefix_size_dummy __attribute__((__unused__)) =
+ RegisterFlagValidator(&FLAGS_prefix_size, &ValidatePrefixSize);
+
+DEFINE_bool(use_merge, false,
+ "On true, replaces all writes with a Merge "
+ "that behaves like a Put");
+
+DEFINE_uint32(use_put_entity_one_in, 0,
+ "If greater than zero, PutEntity will be used once per every N "
+ "write ops on average.");
+
+DEFINE_bool(use_full_merge_v1, false,
+ "On true, use a merge operator that implement the deprecated "
+ "version of FullMerge");
+
+DEFINE_int32(sync_wal_one_in, 0,
+ "If non-zero, then SyncWAL() will be called once for every N ops "
+ "on average. 0 indicates that calls to SyncWAL() are disabled.");
+
+DEFINE_bool(avoid_unnecessary_blocking_io,
+ ROCKSDB_NAMESPACE::Options().avoid_unnecessary_blocking_io,
+ "If true, some expensive cleaning up operations will be moved from "
+ "user reads to high-pri background threads.");
+
+DEFINE_bool(write_dbid_to_manifest,
+ ROCKSDB_NAMESPACE::Options().write_dbid_to_manifest,
+ "Write DB_ID to manifest");
+
+DEFINE_bool(avoid_flush_during_recovery,
+ ROCKSDB_NAMESPACE::Options().avoid_flush_during_recovery,
+ "Avoid flush during recovery");
+
+DEFINE_uint64(max_write_batch_group_size_bytes,
+ ROCKSDB_NAMESPACE::Options().max_write_batch_group_size_bytes,
+ "Max write batch group size");
+
+DEFINE_bool(level_compaction_dynamic_level_bytes,
+ ROCKSDB_NAMESPACE::Options().level_compaction_dynamic_level_bytes,
+ "Use dynamic level");
+
+DEFINE_int32(verify_checksum_one_in, 0,
+ "If non-zero, then DB::VerifyChecksum() will be called to do"
+ " checksum verification of all the files in the database once for"
+ " every N ops on average. 0 indicates that calls to"
+ " VerifyChecksum() are disabled.");
+DEFINE_int32(verify_db_one_in, 0,
+ "If non-zero, call VerifyDb() once for every N ops. 0 indicates "
+ "that VerifyDb() will not be called in OperateDb(). Note that "
+ "enabling this can slow down tests.");
+
+DEFINE_int32(continuous_verification_interval, 1000,
+ "While test is running, verify db every N milliseconds. 0 "
+ "disables continuous verification.");
+
+DEFINE_int32(approximate_size_one_in, 64,
+ "If non-zero, DB::GetApproximateSizes() will be called against"
+ " random key ranges.");
+
+DEFINE_int32(read_fault_one_in, 1000,
+ "On non-zero, enables fault injection on read");
+
+DEFINE_int32(get_property_one_in, 1000,
+ "If non-zero, then DB::GetProperty() will be called to get various"
+ " properties for every N ops on average. 0 indicates that"
+ " GetProperty() will be not be called.");
+
+DEFINE_bool(sync_fault_injection, false,
+ "If true, FaultInjectionTestFS will be used for write operations, "
+ "and unsynced data in DB will lost after crash. In such a case we "
+ "track DB changes in a trace file (\"*.trace\") in "
+ "--expected_values_dir for verifying there are no holes in the "
+ "recovered data.");
+
+DEFINE_bool(best_efforts_recovery, false,
+ "If true, use best efforts recovery.");
+DEFINE_bool(skip_verifydb, false,
+ "If true, skip VerifyDb() calls and Get()/Iterator verifications"
+ "against expected state.");
+
+DEFINE_bool(enable_compaction_filter, false,
+ "If true, configures a compaction filter that returns a kRemove "
+ "decision for deleted keys.");
+
+DEFINE_bool(paranoid_file_checks, true,
+ "After writing every SST file, reopen it and read all the keys "
+ "and validate checksums");
+
+DEFINE_bool(fail_if_options_file_error, false,
+ "Fail operations that fail to detect or properly persist options "
+ "file.");
+
+DEFINE_uint64(batch_protection_bytes_per_key, 0,
+ "If nonzero, enables integrity protection in `WriteBatch` at the "
+ "specified number of bytes per key. Currently the only supported "
+ "nonzero value is eight.");
+
+DEFINE_uint32(
+ memtable_protection_bytes_per_key, 0,
+ "If nonzero, enables integrity protection in memtable entries at the "
+ "specified number of bytes per key. Currently the supported "
+ "nonzero values are 1, 2, 4 and 8.");
+
+DEFINE_string(file_checksum_impl, "none",
+ "Name of an implementation for file_checksum_gen_factory, or "
+ "\"none\" for null.");
+
+DEFINE_int32(write_fault_one_in, 0,
+ "On non-zero, enables fault injection on write");
+
+DEFINE_uint64(user_timestamp_size, 0,
+ "Number of bytes for a user-defined timestamp. Currently, only "
+ "8-byte is supported");
+
+DEFINE_int32(open_metadata_write_fault_one_in, 0,
+ "On non-zero, enables fault injection on file metadata write "
+ "during DB reopen.");
+
+#ifndef ROCKSDB_LITE
+DEFINE_string(secondary_cache_uri, "",
+ "Full URI for creating a customized secondary cache object");
+DEFINE_int32(secondary_cache_fault_one_in, 0,
+ "On non-zero, enables fault injection in secondary cache inserts"
+ " and lookups");
+#endif // ROCKSDB_LITE
+DEFINE_int32(open_write_fault_one_in, 0,
+ "On non-zero, enables fault injection on file writes "
+ "during DB reopen.");
+DEFINE_int32(open_read_fault_one_in, 0,
+ "On non-zero, enables fault injection on file reads "
+ "during DB reopen.");
+DEFINE_int32(injest_error_severity, 1,
+ "The severity of the injested IO Error. 1 is soft error (e.g. "
+ "retryable error), 2 is fatal error, and the default is "
+ "retryable error.");
+DEFINE_int32(prepopulate_block_cache,
+ static_cast<int32_t>(ROCKSDB_NAMESPACE::BlockBasedTableOptions::
+ PrepopulateBlockCache::kDisable),
+ "Options related to cache warming (see `enum "
+ "PrepopulateBlockCache` in table.h)");
+
+DEFINE_bool(two_write_queues, false,
+ "Set to true to enable two write queues. Default: false");
+#ifndef ROCKSDB_LITE
+
+DEFINE_bool(use_only_the_last_commit_time_batch_for_recovery, false,
+ "If true, the commit-time write batch will not be immediately "
+ "inserted into the memtables. Default: false");
+
+DEFINE_uint64(
+ wp_snapshot_cache_bits, 7ull,
+ "Number of bits to represent write-prepared transaction db's snapshot "
+ "cache. Default: 7 (128 entries)");
+
+DEFINE_uint64(wp_commit_cache_bits, 23ull,
+ "Number of bits to represent write-prepared transaction db's "
+ "commit cache. Default: 23 (8M entries)");
+#endif // !ROCKSDB_LITE
+
+DEFINE_bool(adaptive_readahead, false,
+ "Carry forward internal auto readahead size from one file to next "
+ "file at each level during iteration");
+DEFINE_bool(
+ async_io, false,
+ "Does asynchronous prefetching when internal auto readahead is enabled");
+
+DEFINE_string(wal_compression, "none",
+ "Algorithm to use for WAL compression. none to disable.");
+
+DEFINE_bool(
+ verify_sst_unique_id_in_manifest, false,
+ "Enable DB options `verify_sst_unique_id_in_manifest`, if true, during "
+ "DB-open try verifying the SST unique id between MANIFEST and SST "
+ "properties.");
+
+DEFINE_int32(
+ create_timestamped_snapshot_one_in, 0,
+ "On non-zero, create timestamped snapshots upon transaction commits.");
+
+DEFINE_bool(allow_data_in_errors,
+ ROCKSDB_NAMESPACE::Options().allow_data_in_errors,
+ "If true, allow logging data, e.g. key, value in LOG files.");
+
+DEFINE_int32(verify_iterator_with_expected_state_one_in, 0,
+ "If non-zero, when TestIterate() is to be called, there is a "
+ "1/verify_iterator_with_expected_state_one_in "
+ "chance that the iterator is verified against the expected state "
+ "file, instead of comparing keys between two iterators.");
+
+DEFINE_uint64(readahead_size, 0, "Iterator readahead size");
+DEFINE_uint64(initial_auto_readahead_size, 0,
+ "Initial auto readahead size for prefetching during Iteration");
+DEFINE_uint64(max_auto_readahead_size, 0,
+ "Max auto readahead size for prefetching during Iteration");
+DEFINE_uint64(
+ num_file_reads_for_auto_readahead, 0,
+ "Num of sequential reads to enable auto prefetching during Iteration");
+
+DEFINE_bool(
+ preserve_unverified_changes, false,
+ "DB files of the current run will all be preserved in `FLAGS_db`. DB files "
+ "from the last run will be preserved in `FLAGS_db/unverified` until the "
+ "first verification succeeds. Expected state files from the last run will "
+ "be preserved similarly under `FLAGS_expected_values_dir/unverified` when "
+ "`--expected_values_dir` is nonempty.");
+
+DEFINE_uint64(stats_dump_period_sec,
+ ROCKSDB_NAMESPACE::Options().stats_dump_period_sec,
+ "Gap between printing stats to log in seconds");
+
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/db_stress_listener.cc b/src/rocksdb/db_stress_tool/db_stress_listener.cc
new file mode 100644
index 000000000..578f21c41
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_listener.cc
@@ -0,0 +1,191 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#include "db_stress_tool/db_stress_listener.h"
+
+#include <cstdint>
+
+#include "file/file_util.h"
+#include "rocksdb/file_system.h"
+#include "util/coding_lean.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+#ifdef GFLAGS
+#ifndef ROCKSDB_LITE
+
+// TODO: consider using expected_values_dir instead, but this is more
+// convenient for now.
+UniqueIdVerifier::UniqueIdVerifier(const std::string& db_name, Env* env)
+ : path_(db_name + "/.unique_ids") {
+ // We expect such a small number of files generated during this test
+ // (thousands?), checking full 192-bit IDs for uniqueness is a very
+ // weak check. For a stronger check, we pick a specific 64-bit
+ // subsequence from the ID to check for uniqueness. All bits of the
+ // ID should be high quality, and 64 bits should be unique with
+ // very good probability for the quantities in this test.
+ offset_ = Random::GetTLSInstance()->Uniform(17); // 0 to 16
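+  // With 24-byte IDs, any offset in [0, 16] leaves room for the 8 bytes that
+  // VerifyNoWrite() decodes via DecodeFixed64(&id[offset_]).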
+
+ const std::shared_ptr<FileSystem> fs = env->GetFileSystem();
+ IOOptions opts;
+
+ Status st = fs->CreateDirIfMissing(db_name, opts, nullptr);
+ if (!st.ok()) {
+ fprintf(stderr, "Failed to create directory %s: %s\n", db_name.c_str(),
+ st.ToString().c_str());
+ exit(1);
+ }
+
+ // Avoid relying on ReopenWritableFile which is not supported by all
+ // file systems. Create a new file and copy the old file contents to it.
+ std::string tmp_path = path_ + ".tmp";
+ st = fs->FileExists(tmp_path, opts, /*dbg*/ nullptr);
+ if (st.IsNotFound()) {
+ st = fs->RenameFile(path_, tmp_path, opts, /*dbg*/ nullptr);
+ // Either it should succeed or fail because src path doesn't exist
+ assert(st.ok() || st.IsPathNotFound());
+ } else {
+    // If path_ and tmp_path both exist, retain tmp_path as it's
+    // guaranteed to be more complete. The order of operations is:
+ // 1. Rename path_ to tmp_path
+ // 2. Parse tmp_path contents
+ // 3. Create path_
+ // 4. Copy tmp_path contents to path_
+ // 5. Delete tmp_path
+ st = fs->DeleteFile(path_, opts, /*dbg*/ nullptr);
+ assert(st.ok() || st.IsPathNotFound());
+ }
+
+ uint64_t size = 0;
+ {
+ std::unique_ptr<FSSequentialFile> reader;
+ Status s = fs->NewSequentialFile(tmp_path, FileOptions(), &reader,
+ /*dbg*/ nullptr);
+ if (s.ok()) {
+ // Load from file
+ std::string id(24U, '\0');
+ Slice result;
+ for (;;) {
+ s = reader->Read(id.size(), opts, &result, &id[0], /*dbg*/ nullptr);
+ if (!s.ok()) {
+ fprintf(stderr, "Error reading unique id file: %s\n",
+ s.ToString().c_str());
+ assert(false);
+ }
+ if (result.size() < id.size()) {
+ // EOF
+ if (result.size() != 0) {
+ // Corrupt file. Not a DB bug but could happen if OS doesn't provide
+ // good guarantees on process crash.
+ fprintf(stdout, "Warning: clearing corrupt unique id file\n");
+ id_set_.clear();
+ reader.reset();
+ s = fs->DeleteFile(tmp_path, opts, /*dbg*/ nullptr);
+ assert(s.ok());
+ size = 0;
+ }
+ break;
+ }
+ size += 24U;
+ VerifyNoWrite(id);
+ }
+ } else {
+ // Newly created is ok.
+ // But FileSystem doesn't tell us whether non-existence was the cause of
+ // the failure. (Issue #9021)
+ Status s2 = fs->FileExists(tmp_path, opts, /*dbg*/ nullptr);
+ if (!s2.IsNotFound()) {
+ fprintf(stderr, "Error opening unique id file: %s\n",
+ s.ToString().c_str());
+ assert(false);
+ }
+ size = 0;
+ }
+ }
+ fprintf(stdout, "(Re-)verified %zu unique IDs\n", id_set_.size());
+
+ std::unique_ptr<FSWritableFile> file_writer;
+ st = fs->NewWritableFile(path_, FileOptions(), &file_writer, /*dbg*/ nullptr);
+ if (!st.ok()) {
+ fprintf(stderr, "Error creating the unique ids file: %s\n",
+ st.ToString().c_str());
+ assert(false);
+ }
+ data_file_writer_.reset(
+ new WritableFileWriter(std::move(file_writer), path_, FileOptions()));
+
+ if (size > 0) {
+ st = CopyFile(fs.get(), tmp_path, data_file_writer_, size,
+ /*use_fsync*/ true, /*io_tracer*/ nullptr,
+                  /*temperature*/ Temperature::kHot);
+ if (!st.ok()) {
+ fprintf(stderr, "Error copying contents of old unique id file: %s\n",
+ st.ToString().c_str());
+ assert(false);
+ }
+ }
+ st = fs->DeleteFile(tmp_path, opts, /*dbg*/ nullptr);
+ assert(st.ok() || st.IsPathNotFound());
+}
+
+UniqueIdVerifier::~UniqueIdVerifier() {
+ IOStatus s = data_file_writer_->Close();
+ assert(s.ok());
+}
+
+void UniqueIdVerifier::VerifyNoWrite(const std::string& id) {
+ assert(id.size() == 24);
+ bool is_new = id_set_.insert(DecodeFixed64(&id[offset_])).second;
+ if (!is_new) {
+ fprintf(stderr,
+ "Duplicate partial unique ID found (offset=%zu, count=%zu)\n",
+ offset_, id_set_.size());
+ assert(false);
+ }
+}
+
+void UniqueIdVerifier::Verify(const std::string& id) {
+ assert(id.size() == 24);
+ std::lock_guard<std::mutex> lock(mutex_);
+  // Once we accumulate more than ~4 million IDs, the natural chance of a
+  // collision exceeds roughly 1 in 1M. Thus, simply stop checking at that point.
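+  // (Birthday bound: n random 64-bit values collide with probability roughly
+  // n^2 / 2^65, which is on the order of 1e-6 for n around 4.3 million.)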
+ if (id_set_.size() >= 4294967) {
+ return;
+ }
+ IOStatus s = data_file_writer_->Append(Slice(id));
+ if (!s.ok()) {
+ fprintf(stderr, "Error writing to unique id file: %s\n",
+ s.ToString().c_str());
+ assert(false);
+ }
+ s = data_file_writer_->Flush();
+ if (!s.ok()) {
+ fprintf(stderr, "Error flushing unique id file: %s\n",
+ s.ToString().c_str());
+ assert(false);
+ }
+ VerifyNoWrite(id);
+}
+
+void DbStressListener::VerifyTableFileUniqueId(
+ const TableProperties& new_file_properties, const std::string& file_path) {
+ // Verify unique ID
+ std::string id;
+ // Unit tests verify that GetUniqueIdFromTableProperties returns just a
+ // substring of this, and we're only going to pull out 64 bits, so using
+ // GetExtendedUniqueIdFromTableProperties is arguably stronger testing here.
+ Status s = GetExtendedUniqueIdFromTableProperties(new_file_properties, &id);
+ if (!s.ok()) {
+ fprintf(stderr, "Error getting SST unique id for %s: %s\n",
+ file_path.c_str(), s.ToString().c_str());
+ assert(false);
+ }
+ unique_ids_.Verify(id);
+}
+
+#endif // !ROCKSDB_LITE
+#endif // GFLAGS
+
+} // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/db_stress_tool/db_stress_listener.h b/src/rocksdb/db_stress_tool/db_stress_listener.h
new file mode 100644
index 000000000..faced3172
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_listener.h
@@ -0,0 +1,271 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#ifdef GFLAGS
+#pragma once
+
+#include <mutex>
+#include <unordered_set>
+
+#include "file/filename.h"
+#include "file/writable_file_writer.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/file_system.h"
+#include "rocksdb/listener.h"
+#include "rocksdb/table_properties.h"
+#include "rocksdb/unique_id.h"
+#include "util/gflags_compat.h"
+#include "util/random.h"
+
+DECLARE_int32(compact_files_one_in);
+
+namespace ROCKSDB_NAMESPACE {
+
+#ifndef ROCKSDB_LITE
+// Verify across process executions that all seen IDs are unique
+class UniqueIdVerifier {
+ public:
+ explicit UniqueIdVerifier(const std::string& db_name, Env* env);
+ ~UniqueIdVerifier();
+
+ void Verify(const std::string& id);
+
+ private:
+ void VerifyNoWrite(const std::string& id);
+
+ private:
+ std::mutex mutex_;
+ // IDs persisted to a hidden file inside DB dir
+ std::string path_;
+ std::unique_ptr<WritableFileWriter> data_file_writer_;
+ // Starting byte for which 8 bytes to check in memory within 24 byte ID
+ size_t offset_;
+ // Working copy of the set of 8 byte pieces
+ std::unordered_set<uint64_t> id_set_;
+};
+
+class DbStressListener : public EventListener {
+ public:
+ DbStressListener(const std::string& db_name,
+ const std::vector<DbPath>& db_paths,
+ const std::vector<ColumnFamilyDescriptor>& column_families,
+ Env* env)
+ : db_name_(db_name),
+ db_paths_(db_paths),
+ column_families_(column_families),
+ num_pending_file_creations_(0),
+ unique_ids_(db_name, env) {}
+
+ const char* Name() const override { return kClassName(); }
+ static const char* kClassName() { return "DBStressListener"; }
+
+ ~DbStressListener() override { assert(num_pending_file_creations_ == 0); }
+ void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
+ assert(IsValidColumnFamilyName(info.cf_name));
+ VerifyFilePath(info.file_path);
+    // pretend to be doing some work here
+ RandomSleep();
+ }
+
+ void OnFlushBegin(DB* /*db*/,
+ const FlushJobInfo& /*flush_job_info*/) override {
+ RandomSleep();
+ }
+
+ void OnTableFileDeleted(const TableFileDeletionInfo& /*info*/) override {
+ RandomSleep();
+ }
+
+ void OnCompactionBegin(DB* /*db*/, const CompactionJobInfo& /*ci*/) override {
+ RandomSleep();
+ }
+
+ void OnCompactionCompleted(DB* /*db*/, const CompactionJobInfo& ci) override {
+ assert(IsValidColumnFamilyName(ci.cf_name));
+ assert(ci.input_files.size() + ci.output_files.size() > 0U);
+ for (const auto& file_path : ci.input_files) {
+ VerifyFilePath(file_path);
+ }
+ for (const auto& file_path : ci.output_files) {
+ VerifyFilePath(file_path);
+ }
+    // pretend to be doing some work here
+ RandomSleep();
+ }
+
+ void OnTableFileCreationStarted(
+ const TableFileCreationBriefInfo& /*info*/) override {
+ ++num_pending_file_creations_;
+ }
+
+ void OnTableFileCreated(const TableFileCreationInfo& info) override {
+ assert(info.db_name == db_name_);
+ assert(IsValidColumnFamilyName(info.cf_name));
+ assert(info.job_id > 0 || FLAGS_compact_files_one_in > 0);
+ if (info.status.ok()) {
+ assert(info.file_size > 0);
+ VerifyFilePath(info.file_path);
+ assert(info.table_properties.data_size > 0 ||
+ info.table_properties.num_range_deletions > 0);
+ assert(info.table_properties.raw_key_size > 0);
+ assert(info.table_properties.num_entries > 0);
+ VerifyTableFileUniqueId(info.table_properties, info.file_path);
+ }
+ --num_pending_file_creations_;
+ }
+
+ void OnMemTableSealed(const MemTableInfo& /*info*/) override {
+ RandomSleep();
+ }
+
+ void OnColumnFamilyHandleDeletionStarted(
+ ColumnFamilyHandle* /*handle*/) override {
+ RandomSleep();
+ }
+
+ void OnExternalFileIngested(DB* /*db*/,
+ const ExternalFileIngestionInfo& info) override {
+ RandomSleep();
+ // Here we assume that each generated external file is ingested
+ // exactly once (or thrown away in case of crash)
+ VerifyTableFileUniqueId(info.table_properties, info.internal_file_path);
+ }
+
+ void OnBackgroundError(BackgroundErrorReason /* reason */,
+ Status* /* bg_error */) override {
+ RandomSleep();
+ }
+
+ void OnStallConditionsChanged(const WriteStallInfo& /*info*/) override {
+ RandomSleep();
+ }
+
+ void OnFileReadFinish(const FileOperationInfo& info) override {
+ // Even empty callback is valuable because sometimes some locks are
+ // released in order to make the callback.
+
+ // Sleep carefully here as it is a frequent operation and we don't want
+ // to slow down the tests. We always sleep when the read is large.
+    // When the read is small, sleep only with a small probability.
+ size_t length_read = info.length;
+ if (length_read >= 1000000 || Random::GetTLSInstance()->OneIn(1000)) {
+ RandomSleep();
+ }
+ }
+
+ void OnFileWriteFinish(const FileOperationInfo& info) override {
+ // Even empty callback is valuable because sometimes some locks are
+ // released in order to make the callback.
+
+ // Sleep carefully here as it is a frequent operation and we don't want
+ // to slow down the tests. When the write is large, always sleep.
+    // Otherwise, sleep only with a relatively small probability.
+ size_t length_write = info.length;
+ if (length_write >= 1000000 || Random::GetTLSInstance()->OneIn(64)) {
+ RandomSleep();
+ }
+ }
+
+ bool ShouldBeNotifiedOnFileIO() override {
+ RandomSleep();
+ return static_cast<bool>(Random::GetTLSInstance()->OneIn(1));
+ }
+
+ void OnErrorRecoveryBegin(BackgroundErrorReason /* reason */,
+ Status /* bg_error */,
+ bool* /* auto_recovery */) override {
+ RandomSleep();
+ }
+
+ void OnErrorRecoveryCompleted(Status /* old_bg_error */) override {
+ RandomSleep();
+ }
+
+ protected:
+ bool IsValidColumnFamilyName(const std::string& cf_name) const {
+ if (cf_name == kDefaultColumnFamilyName) {
+ return true;
+ }
+ // The column family names in the stress tests are numbers.
+ for (size_t i = 0; i < cf_name.size(); ++i) {
+ if (cf_name[i] < '0' || cf_name[i] > '9') {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ void VerifyFileDir(const std::string& file_dir) {
+#ifndef NDEBUG
+ if (db_name_ == file_dir) {
+ return;
+ }
+ for (const auto& db_path : db_paths_) {
+ if (db_path.path == file_dir) {
+ return;
+ }
+ }
+ for (auto& cf : column_families_) {
+ for (const auto& cf_path : cf.options.cf_paths) {
+ if (cf_path.path == file_dir) {
+ return;
+ }
+ }
+ }
+ assert(false);
+#else
+ (void)file_dir;
+#endif // !NDEBUG
+ }
+
+ void VerifyFileName(const std::string& file_name) {
+#ifndef NDEBUG
+ uint64_t file_number;
+ FileType file_type;
+ bool result = ParseFileName(file_name, &file_number, &file_type);
+ assert(result);
+ assert(file_type == kTableFile);
+#else
+ (void)file_name;
+#endif // !NDEBUG
+ }
+
+ void VerifyFilePath(const std::string& file_path) {
+#ifndef NDEBUG
+ size_t pos = file_path.find_last_of("/");
+ if (pos == std::string::npos) {
+ VerifyFileName(file_path);
+ } else {
+ if (pos > 0) {
+ VerifyFileDir(file_path.substr(0, pos));
+ }
+ VerifyFileName(file_path.substr(pos));
+ }
+#else
+ (void)file_path;
+#endif // !NDEBUG
+ }
+
+ // Unique id is verified using the TableProperties. file_path is only used
+ // for reporting.
+ void VerifyTableFileUniqueId(const TableProperties& new_file_properties,
+ const std::string& file_path);
+
+ void RandomSleep() {
+ std::this_thread::sleep_for(
+ std::chrono::microseconds(Random::GetTLSInstance()->Uniform(5000)));
+ }
+
+ private:
+ std::string db_name_;
+ std::vector<DbPath> db_paths_;
+ std::vector<ColumnFamilyDescriptor> column_families_;
+ std::atomic<int> num_pending_file_creations_;
+ UniqueIdVerifier unique_ids_;
+};
+#endif // !ROCKSDB_LITE
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/db_stress_shared_state.cc b/src/rocksdb/db_stress_tool/db_stress_shared_state.cc
new file mode 100644
index 000000000..a27f6ac73
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_shared_state.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+
+#ifdef GFLAGS
+#include "db_stress_tool/db_stress_shared_state.h"
+
+namespace ROCKSDB_NAMESPACE {
+thread_local bool SharedState::ignore_read_error;
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/db_stress_shared_state.h b/src/rocksdb/db_stress_tool/db_stress_shared_state.h
new file mode 100644
index 000000000..5565c6221
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_shared_state.h
@@ -0,0 +1,427 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors
+
+#ifdef GFLAGS
+#pragma once
+
+#include "db_stress_tool/db_stress_stat.h"
+#include "db_stress_tool/expected_state.h"
+// SyncPoint is not supported in Released Windows Mode.
+#if !(defined NDEBUG) || !defined(OS_WIN)
+#include "test_util/sync_point.h"
+#endif // !(defined NDEBUG) || !defined(OS_WIN)
+#include "util/gflags_compat.h"
+
+DECLARE_uint64(seed);
+DECLARE_int64(max_key);
+DECLARE_uint64(log2_keys_per_lock);
+DECLARE_int32(threads);
+DECLARE_int32(column_families);
+DECLARE_int32(nooverwritepercent);
+DECLARE_string(expected_values_dir);
+DECLARE_int32(clear_column_family_one_in);
+DECLARE_bool(test_batches_snapshots);
+DECLARE_int32(compaction_thread_pool_adjust_interval);
+DECLARE_int32(continuous_verification_interval);
+DECLARE_int32(read_fault_one_in);
+DECLARE_int32(write_fault_one_in);
+DECLARE_int32(open_metadata_write_fault_one_in);
+DECLARE_int32(open_write_fault_one_in);
+DECLARE_int32(open_read_fault_one_in);
+
+DECLARE_int32(injest_error_severity);
+
+namespace ROCKSDB_NAMESPACE {
+class StressTest;
+
+// State shared by all concurrent executions of the same benchmark.
+class SharedState {
+ public:
+ // indicates a key may have any value (or not be present) as an operation on
+ // it is incomplete.
+ static constexpr uint32_t UNKNOWN_SENTINEL = 0xfffffffe;
+ // indicates a key should definitely be deleted
+ static constexpr uint32_t DELETION_SENTINEL = 0xffffffff;
+
+ // Errors when reading filter blocks are ignored, so we use a thread
+ // local variable updated via sync points to keep track of errors injected
+ // while reading filter blocks in order to ignore the Get/MultiGet result
+ // for those calls
+ static thread_local bool ignore_read_error;
+
+ SharedState(Env* /*env*/, StressTest* stress_test)
+ : cv_(&mu_),
+ seed_(static_cast<uint32_t>(FLAGS_seed)),
+ max_key_(FLAGS_max_key),
+ log2_keys_per_lock_(static_cast<uint32_t>(FLAGS_log2_keys_per_lock)),
+ num_threads_(0),
+ num_initialized_(0),
+ num_populated_(0),
+ vote_reopen_(0),
+ num_done_(0),
+ start_(false),
+ start_verify_(false),
+ num_bg_threads_(0),
+ should_stop_bg_thread_(false),
+ bg_thread_finished_(0),
+ stress_test_(stress_test),
+ verification_failure_(false),
+ should_stop_test_(false),
+ no_overwrite_ids_(GenerateNoOverwriteIds()),
+ expected_state_manager_(nullptr),
+ printing_verification_results_(false),
+ start_timestamp_(Env::Default()->NowNanos()) {
+ Status status;
+ // TODO: We should introduce a way to explicitly disable verification
+ // during shutdown. When that is disabled and FLAGS_expected_values_dir
+ // is empty (disabling verification at startup), we can skip tracking
+ // expected state. Only then should we permit bypassing the below feature
+ // compatibility checks.
+ if (!FLAGS_expected_values_dir.empty()) {
+ if (!std::atomic<uint32_t>{}.is_lock_free()) {
+ status = Status::InvalidArgument(
+ "Cannot use --expected_values_dir on platforms without lock-free "
+ "std::atomic<uint32_t>");
+ }
+ if (status.ok() && FLAGS_clear_column_family_one_in > 0) {
+ status = Status::InvalidArgument(
+ "Cannot use --expected_values_dir on when "
+ "--clear_column_family_one_in is greater than zero.");
+ }
+ }
+ if (status.ok()) {
+ if (FLAGS_expected_values_dir.empty()) {
+ expected_state_manager_.reset(
+ new AnonExpectedStateManager(FLAGS_max_key, FLAGS_column_families));
+ } else {
+ expected_state_manager_.reset(new FileExpectedStateManager(
+ FLAGS_max_key, FLAGS_column_families, FLAGS_expected_values_dir));
+ }
+ status = expected_state_manager_->Open();
+ }
+ if (!status.ok()) {
+ fprintf(stderr, "Failed setting up expected state with error: %s\n",
+ status.ToString().c_str());
+ exit(1);
+ }
+
+ if (FLAGS_test_batches_snapshots) {
+ fprintf(stdout, "No lock creation because test_batches_snapshots set\n");
+ return;
+ }
+
+ long num_locks = static_cast<long>(max_key_ >> log2_keys_per_lock_);
+ if (max_key_ & ((1 << log2_keys_per_lock_) - 1)) {
+ num_locks++;
+ }
+ fprintf(stdout, "Creating %ld locks\n", num_locks * FLAGS_column_families);
+ key_locks_.resize(FLAGS_column_families);
+
+ for (int i = 0; i < FLAGS_column_families; ++i) {
+ key_locks_[i].reset(new port::Mutex[num_locks]);
+ }
+ if (FLAGS_read_fault_one_in) {
+#ifdef NDEBUG
+ // Unsupported in release mode because it relies on
+ // `IGNORE_STATUS_IF_ERROR` to distinguish faults not expected to lead to
+ // failure.
+ fprintf(stderr,
+ "Cannot set nonzero value for --read_fault_one_in in "
+ "release mode.");
+ exit(1);
+#else // NDEBUG
+ SyncPoint::GetInstance()->SetCallBack("FaultInjectionIgnoreError",
+ IgnoreReadErrorCallback);
+ SyncPoint::GetInstance()->EnableProcessing();
+#endif // NDEBUG
+ }
+ }
+
+ ~SharedState() {
+#ifndef NDEBUG
+ if (FLAGS_read_fault_one_in) {
+ SyncPoint::GetInstance()->ClearAllCallBacks();
+ SyncPoint::GetInstance()->DisableProcessing();
+ }
+#endif
+ }
+
+ port::Mutex* GetMutex() { return &mu_; }
+
+ port::CondVar* GetCondVar() { return &cv_; }
+
+ StressTest* GetStressTest() const { return stress_test_; }
+
+ int64_t GetMaxKey() const { return max_key_; }
+
+ uint32_t GetNumThreads() const { return num_threads_; }
+
+ void SetThreads(int num_threads) { num_threads_ = num_threads; }
+
+ void IncInitialized() { num_initialized_++; }
+
+ void IncOperated() { num_populated_++; }
+
+ void IncDone() { num_done_++; }
+
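+  // Each call to IncVotedReopen() below is one vote; the modulo wraps the
+  // counter back to zero exactly after num_threads_ votes, which is the
+  // condition AllVotedReopen() checks.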
+ void IncVotedReopen() { vote_reopen_ = (vote_reopen_ + 1) % num_threads_; }
+
+ bool AllInitialized() const { return num_initialized_ >= num_threads_; }
+
+ bool AllOperated() const { return num_populated_ >= num_threads_; }
+
+ bool AllDone() const { return num_done_ >= num_threads_; }
+
+ bool AllVotedReopen() { return (vote_reopen_ == 0); }
+
+ void SetStart() { start_ = true; }
+
+ void SetStartVerify() { start_verify_ = true; }
+
+ bool Started() const { return start_; }
+
+ bool VerifyStarted() const { return start_verify_; }
+
+ void SetVerificationFailure() { verification_failure_.store(true); }
+
+ bool HasVerificationFailedYet() const { return verification_failure_.load(); }
+
+ void SetShouldStopTest() { should_stop_test_.store(true); }
+
+ bool ShouldStopTest() const { return should_stop_test_.load(); }
+
+ // Returns a lock covering `key` in `cf`.
+ port::Mutex* GetMutexForKey(int cf, int64_t key) {
+ return &key_locks_[cf][key >> log2_keys_per_lock_];
+ }
+
+ // Acquires locks for all keys in `cf`.
+ void LockColumnFamily(int cf) {
+ for (int i = 0; i < max_key_ >> log2_keys_per_lock_; ++i) {
+ key_locks_[cf][i].Lock();
+ }
+ }
+
+ // Releases locks for all keys in `cf`.
+ void UnlockColumnFamily(int cf) {
+ for (int i = 0; i < max_key_ >> log2_keys_per_lock_; ++i) {
+ key_locks_[cf][i].Unlock();
+ }
+ }
+
+ // Returns a collection of mutex locks covering the key range [start, end) in
+ // `cf`.
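+  // For example, with log2_keys_per_lock == 2 (4 keys per lock), the range
+  // [5, 9) maps to lock indices 1 and 2, so those two locks are returned.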
+ std::vector<std::unique_ptr<MutexLock>> GetLocksForKeyRange(int cf,
+ int64_t start,
+ int64_t end) {
+ std::vector<std::unique_ptr<MutexLock>> range_locks;
+
+ if (start >= end) {
+ return range_locks;
+ }
+
+ const int64_t start_idx = start >> log2_keys_per_lock_;
+
+ int64_t end_idx = end >> log2_keys_per_lock_;
+ if ((end & ((1 << log2_keys_per_lock_) - 1)) == 0) {
+ --end_idx;
+ }
+
+ for (int64_t idx = start_idx; idx <= end_idx; ++idx) {
+ range_locks.emplace_back(
+ std::make_unique<MutexLock>(&key_locks_[cf][idx]));
+ }
+
+ return range_locks;
+ }
+
+ Status SaveAtAndAfter(DB* db) {
+ return expected_state_manager_->SaveAtAndAfter(db);
+ }
+
+ bool HasHistory() { return expected_state_manager_->HasHistory(); }
+
+ Status Restore(DB* db) { return expected_state_manager_->Restore(db); }
+
+ // Requires external locking covering all keys in `cf`.
+ void ClearColumnFamily(int cf) {
+ return expected_state_manager_->ClearColumnFamily(cf);
+ }
+
+ // @param pending True if the update may have started but is not yet
+ // guaranteed finished. This is useful for crash-recovery testing when the
+ // process may crash before updating the expected values array.
+ //
+ // Requires external locking covering `key` in `cf`.
+ void Put(int cf, int64_t key, uint32_t value_base, bool pending) {
+ return expected_state_manager_->Put(cf, key, value_base, pending);
+ }
+
+ // Requires external locking covering `key` in `cf`.
+ uint32_t Get(int cf, int64_t key) const {
+ return expected_state_manager_->Get(cf, key);
+ }
+
+ // @param pending See comment above Put()
+ // Returns true if the key was not yet deleted.
+ //
+ // Requires external locking covering `key` in `cf`.
+ bool Delete(int cf, int64_t key, bool pending) {
+ return expected_state_manager_->Delete(cf, key, pending);
+ }
+
+ // @param pending See comment above Put()
+ // Returns true if the key was not yet deleted.
+ //
+ // Requires external locking covering `key` in `cf`.
+ bool SingleDelete(int cf, int64_t key, bool pending) {
+ return expected_state_manager_->Delete(cf, key, pending);
+ }
+
+ // @param pending See comment above Put()
+ // Returns number of keys deleted by the call.
+ //
+ // Requires external locking covering keys in `[begin_key, end_key)` in `cf`.
+ int DeleteRange(int cf, int64_t begin_key, int64_t end_key, bool pending) {
+ return expected_state_manager_->DeleteRange(cf, begin_key, end_key,
+ pending);
+ }
+
+ bool AllowsOverwrite(int64_t key) const {
+ return no_overwrite_ids_.find(key) == no_overwrite_ids_.end();
+ }
+
+ // Requires external locking covering `key` in `cf`.
+ bool Exists(int cf, int64_t key) {
+ return expected_state_manager_->Exists(cf, key);
+ }
+
+ uint32_t GetSeed() const { return seed_; }
+
+ void SetShouldStopBgThread() { should_stop_bg_thread_ = true; }
+
+ bool ShouldStopBgThread() { return should_stop_bg_thread_; }
+
+ void IncBgThreads() { ++num_bg_threads_; }
+
+ void IncBgThreadsFinished() { ++bg_thread_finished_; }
+
+ bool BgThreadsFinished() const {
+ return bg_thread_finished_ == num_bg_threads_;
+ }
+
+ bool ShouldVerifyAtBeginning() const {
+ return !FLAGS_expected_values_dir.empty();
+ }
+
+ bool PrintingVerificationResults() {
+ bool tmp = false;
+ return !printing_verification_results_.compare_exchange_strong(
+ tmp, true, std::memory_order_relaxed);
+ }
+
+ void FinishPrintingVerificationResults() {
+ printing_verification_results_.store(false, std::memory_order_relaxed);
+ }
+
+ uint64_t GetStartTimestamp() const { return start_timestamp_; }
+
+ private:
+ static void IgnoreReadErrorCallback(void*) { ignore_read_error = true; }
+
+ // Pick random keys in each column family that will not experience overwrite.
+ std::unordered_set<int64_t> GenerateNoOverwriteIds() const {
+ fprintf(stdout, "Choosing random keys with no overwrite\n");
+    // Start with the identity permutation. Each iteration of the loop below
+    // continues from the permutation produced by the previous iteration.
+ std::vector<int64_t> permutation(max_key_);
+ for (int64_t i = 0; i < max_key_; ++i) {
+ permutation[i] = i;
+ }
+ // Now do the Knuth shuffle
+ const int64_t num_no_overwrite_keys =
+ (max_key_ * FLAGS_nooverwritepercent) / 100;
+ // Only need to figure out first num_no_overwrite_keys of permutation
+ std::unordered_set<int64_t> ret;
+ ret.reserve(num_no_overwrite_keys);
+ Random64 rnd(seed_);
+ for (int64_t i = 0; i < num_no_overwrite_keys; i++) {
+ assert(i < max_key_);
+ int64_t rand_index = i + rnd.Next() % (max_key_ - i);
+ // Swap i and rand_index;
+ int64_t temp = permutation[i];
+ permutation[i] = permutation[rand_index];
+ permutation[rand_index] = temp;
+ // Fill no_overwrite_ids_ with the first num_no_overwrite_keys of
+ // permutation
+ ret.insert(permutation[i]);
+ }
+ return ret;
+ }
+
+ port::Mutex mu_;
+ port::CondVar cv_;
+ const uint32_t seed_;
+ const int64_t max_key_;
+ const uint32_t log2_keys_per_lock_;
+ int num_threads_;
+ long num_initialized_;
+ long num_populated_;
+ long vote_reopen_;
+ long num_done_;
+ bool start_;
+ bool start_verify_;
+ int num_bg_threads_;
+ bool should_stop_bg_thread_;
+ int bg_thread_finished_;
+ StressTest* stress_test_;
+ std::atomic<bool> verification_failure_;
+ std::atomic<bool> should_stop_test_;
+
+ // Keys that should not be overwritten
+ const std::unordered_set<int64_t> no_overwrite_ids_;
+
+ std::unique_ptr<ExpectedStateManager> expected_state_manager_;
+ // Cannot store `port::Mutex` directly in vector since it is not copyable
+ // and storing it in the container may require copying depending on the impl.
+ std::vector<std::unique_ptr<port::Mutex[]>> key_locks_;
+ std::atomic<bool> printing_verification_results_;
+ const uint64_t start_timestamp_;
+};
+
+// Per-thread state for concurrent executions of the same benchmark.
+struct ThreadState {
+ uint32_t tid; // 0..n-1
+ Random rand; // Has different seeds for different threads
+ SharedState* shared;
+ Stats stats;
+ struct SnapshotState {
+ const Snapshot* snapshot;
+ // The cf from which we did a Get at this snapshot
+ int cf_at;
+ // The name of the cf at the time that we did a read
+ std::string cf_at_name;
+ // The key with which we did a Get at this snapshot
+ std::string key;
+ // The status of the Get
+ Status status;
+ // The value of the Get
+ std::string value;
+ // optional state of all keys in the db
+ std::vector<bool>* key_vec;
+
+ std::string timestamp;
+ };
+ std::queue<std::pair<uint64_t, SnapshotState>> snapshot_queue;
+
+ ThreadState(uint32_t index, SharedState* _shared)
+ : tid(index), rand(1000 + index + _shared->GetSeed()), shared(_shared) {}
+};
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/db_stress_stat.cc b/src/rocksdb/db_stress_tool/db_stress_stat.cc
new file mode 100644
index 000000000..6a7883a52
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_stat.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#ifdef GFLAGS
+
+#include "db_stress_tool/db_stress_stat.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+std::shared_ptr<ROCKSDB_NAMESPACE::Statistics> dbstats;
+std::shared_ptr<ROCKSDB_NAMESPACE::Statistics> dbstats_secondaries;
+
+} // namespace ROCKSDB_NAMESPACE
+
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/db_stress_stat.h b/src/rocksdb/db_stress_tool/db_stress_stat.h
new file mode 100644
index 000000000..5b38c6e2b
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_stat.h
@@ -0,0 +1,219 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#pragma once
+#include <cinttypes>
+#include <memory>
+#include <queue>
+#include <unordered_set>
+
+#include "monitoring/histogram.h"
+#include "port/port.h"
+#include "rocksdb/snapshot.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/system_clock.h"
+#include "util/gflags_compat.h"
+#include "util/random.h"
+
+DECLARE_bool(histogram);
+DECLARE_bool(progress_reports);
+
+namespace ROCKSDB_NAMESPACE {
+
+// Database statistics
+extern std::shared_ptr<ROCKSDB_NAMESPACE::Statistics> dbstats;
+extern std::shared_ptr<ROCKSDB_NAMESPACE::Statistics> dbstats_secondaries;
+
+class Stats {
+ private:
+ uint64_t start_;
+ uint64_t finish_;
+ double seconds_;
+ long done_;
+ long gets_;
+ long prefixes_;
+ long writes_;
+ long deletes_;
+ size_t single_deletes_;
+ long iterator_size_sums_;
+ long founds_;
+ long iterations_;
+ long range_deletions_;
+ long covered_by_range_deletions_;
+ long errors_;
+ long verified_errors_;
+ long num_compact_files_succeed_;
+ long num_compact_files_failed_;
+ int next_report_;
+ size_t bytes_;
+ uint64_t last_op_finish_;
+ HistogramImpl hist_;
+
+ public:
+ Stats() {}
+
+ void Start() {
+ next_report_ = 100;
+ hist_.Clear();
+ done_ = 0;
+ gets_ = 0;
+ prefixes_ = 0;
+ writes_ = 0;
+ deletes_ = 0;
+ single_deletes_ = 0;
+ iterator_size_sums_ = 0;
+ founds_ = 0;
+ iterations_ = 0;
+ range_deletions_ = 0;
+ covered_by_range_deletions_ = 0;
+ errors_ = 0;
+ verified_errors_ = 0;
+ bytes_ = 0;
+ seconds_ = 0;
+ num_compact_files_succeed_ = 0;
+ num_compact_files_failed_ = 0;
+ start_ = SystemClock::Default()->NowMicros();
+ last_op_finish_ = start_;
+ finish_ = start_;
+ }
+
+ void Merge(const Stats& other) {
+ hist_.Merge(other.hist_);
+ done_ += other.done_;
+ gets_ += other.gets_;
+ prefixes_ += other.prefixes_;
+ writes_ += other.writes_;
+ deletes_ += other.deletes_;
+ single_deletes_ += other.single_deletes_;
+ iterator_size_sums_ += other.iterator_size_sums_;
+ founds_ += other.founds_;
+ iterations_ += other.iterations_;
+ range_deletions_ += other.range_deletions_;
+    covered_by_range_deletions_ += other.covered_by_range_deletions_;
+ errors_ += other.errors_;
+ verified_errors_ += other.verified_errors_;
+ bytes_ += other.bytes_;
+ seconds_ += other.seconds_;
+ num_compact_files_succeed_ += other.num_compact_files_succeed_;
+ num_compact_files_failed_ += other.num_compact_files_failed_;
+ if (other.start_ < start_) start_ = other.start_;
+ if (other.finish_ > finish_) finish_ = other.finish_;
+ }
+
+ void Stop() {
+ finish_ = SystemClock::Default()->NowMicros();
+ seconds_ = (finish_ - start_) * 1e-6;
+ }
+
+ void FinishedSingleOp() {
+ if (FLAGS_histogram) {
+ auto now = SystemClock::Default()->NowMicros();
+ auto micros = now - last_op_finish_;
+ hist_.Add(micros);
+ if (micros > 20000) {
+ fprintf(stdout, "long op: %" PRIu64 " micros%30s\r", micros, "");
+ }
+ last_op_finish_ = now;
+ }
+
+ done_++;
+ if (FLAGS_progress_reports) {
+ if (done_ >= next_report_) {
+ if (next_report_ < 1000)
+ next_report_ += 100;
+ else if (next_report_ < 5000)
+ next_report_ += 500;
+ else if (next_report_ < 10000)
+ next_report_ += 1000;
+ else if (next_report_ < 50000)
+ next_report_ += 5000;
+ else if (next_report_ < 100000)
+ next_report_ += 10000;
+ else if (next_report_ < 500000)
+ next_report_ += 50000;
+ else
+ next_report_ += 100000;
+ fprintf(stdout, "... finished %ld ops%30s\r", done_, "");
+ }
+ }
+ }
+
+ void AddBytesForWrites(long nwrites, size_t nbytes) {
+ writes_ += nwrites;
+ bytes_ += nbytes;
+ }
+
+ void AddGets(long ngets, long nfounds) {
+ founds_ += nfounds;
+ gets_ += ngets;
+ }
+
+ void AddPrefixes(long nprefixes, long count) {
+ prefixes_ += nprefixes;
+ iterator_size_sums_ += count;
+ }
+
+ void AddIterations(long n) { iterations_ += n; }
+
+ void AddDeletes(long n) { deletes_ += n; }
+
+ void AddSingleDeletes(size_t n) { single_deletes_ += n; }
+
+ void AddRangeDeletions(long n) { range_deletions_ += n; }
+
+ void AddCoveredByRangeDeletions(long n) { covered_by_range_deletions_ += n; }
+
+ void AddErrors(long n) { errors_ += n; }
+
+ void AddVerifiedErrors(long n) { verified_errors_ += n; }
+
+ void AddNumCompactFilesSucceed(long n) { num_compact_files_succeed_ += n; }
+
+ void AddNumCompactFilesFailed(long n) { num_compact_files_failed_ += n; }
+
+ void Report(const char* name) {
+ std::string extra;
+ if (bytes_ < 1 || done_ < 1) {
+ fprintf(stderr, "No writes or ops?\n");
+ return;
+ }
+
+ double elapsed = (finish_ - start_) * 1e-6;
+ double bytes_mb = bytes_ / 1048576.0;
+ double rate = bytes_mb / elapsed;
+ double throughput = (double)done_ / elapsed;
+
+ fprintf(stdout, "%-12s: ", name);
+ fprintf(stdout, "%.3f micros/op %ld ops/sec\n", seconds_ * 1e6 / done_,
+ (long)throughput);
+ fprintf(stdout, "%-12s: Wrote %.2f MB (%.2f MB/sec) (%ld%% of %ld ops)\n",
+ "", bytes_mb, rate, (100 * writes_) / done_, done_);
+ fprintf(stdout, "%-12s: Wrote %ld times\n", "", writes_);
+ fprintf(stdout, "%-12s: Deleted %ld times\n", "", deletes_);
+ fprintf(stdout, "%-12s: Single deleted %" ROCKSDB_PRIszt " times\n", "",
+ single_deletes_);
+ fprintf(stdout, "%-12s: %ld read and %ld found the key\n", "", gets_,
+ founds_);
+ fprintf(stdout, "%-12s: Prefix scanned %ld times\n", "", prefixes_);
+ fprintf(stdout, "%-12s: Iterator size sum is %ld\n", "",
+ iterator_size_sums_);
+ fprintf(stdout, "%-12s: Iterated %ld times\n", "", iterations_);
+ fprintf(stdout, "%-12s: Deleted %ld key-ranges\n", "", range_deletions_);
+ fprintf(stdout, "%-12s: Range deletions covered %ld keys\n", "",
+ covered_by_range_deletions_);
+
+ fprintf(stdout, "%-12s: Got errors %ld times\n", "", errors_);
+ fprintf(stdout, "%-12s: %ld CompactFiles() succeed\n", "",
+ num_compact_files_succeed_);
+ fprintf(stdout, "%-12s: %ld CompactFiles() did not succeed\n", "",
+ num_compact_files_failed_);
+
+ if (FLAGS_histogram) {
+ fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
+ }
+ fflush(stdout);
+ }
+};
+} // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/db_stress_tool/db_stress_table_properties_collector.h b/src/rocksdb/db_stress_tool/db_stress_table_properties_collector.h
new file mode 100644
index 000000000..d1758cbb4
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_table_properties_collector.h
@@ -0,0 +1,65 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#pragma once
+
+#include "rocksdb/table.h"
+#include "util/gflags_compat.h"
+#include "util/random.h"
+
+DECLARE_int32(mark_for_compaction_one_file_in);
+
+namespace ROCKSDB_NAMESPACE {
+
+// A `DbStressTablePropertiesCollector` ignores what keys/values were added to
+// the table, adds no properties to the table, and decides at random whether the
+// table will be marked for compaction according to
+// `FLAGS_mark_for_compaction_one_file_in`.
+class DbStressTablePropertiesCollector : public TablePropertiesCollector {
+ public:
+ DbStressTablePropertiesCollector()
+ : need_compact_(Random::GetTLSInstance()->OneInOpt(
+ FLAGS_mark_for_compaction_one_file_in)) {}
+
+ virtual Status AddUserKey(const Slice& /* key */, const Slice& /* value */,
+ EntryType /*type*/, SequenceNumber /*seq*/,
+ uint64_t /*file_size*/) override {
+ return Status::OK();
+ }
+
+ virtual Status Finish(UserCollectedProperties* /* properties */) override {
+ return Status::OK();
+ }
+
+ virtual UserCollectedProperties GetReadableProperties() const override {
+ return UserCollectedProperties{};
+ }
+
+ virtual const char* Name() const override {
+ return "DbStressTablePropertiesCollector";
+ }
+
+ virtual bool NeedCompact() const override { return need_compact_; }
+
+ private:
+ const bool need_compact_;
+};
+
+// A `DbStressTablePropertiesCollectorFactory` creates
+// `DbStressTablePropertiesCollector`s.
+class DbStressTablePropertiesCollectorFactory
+ : public TablePropertiesCollectorFactory {
+ public:
+ virtual TablePropertiesCollector* CreateTablePropertiesCollector(
+ TablePropertiesCollectorFactory::Context /* context */) override {
+ return new DbStressTablePropertiesCollector();
+ }
+
+ virtual const char* Name() const override {
+ return "DbStressTablePropertiesCollectorFactory";
+ }
+};
+
+} // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/db_stress_tool/db_stress_test_base.cc b/src/rocksdb/db_stress_tool/db_stress_test_base.cc
new file mode 100644
index 000000000..e51b43176
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_test_base.cc
@@ -0,0 +1,3383 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+
+#include <ios>
+
+#include "util/compression.h"
+#ifdef GFLAGS
+#include "db_stress_tool/db_stress_common.h"
+#include "db_stress_tool/db_stress_compaction_filter.h"
+#include "db_stress_tool/db_stress_driver.h"
+#include "db_stress_tool/db_stress_table_properties_collector.h"
+#include "rocksdb/convenience.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksdb/secondary_cache.h"
+#include "rocksdb/sst_file_manager.h"
+#include "rocksdb/types.h"
+#include "rocksdb/utilities/object_registry.h"
+#include "rocksdb/utilities/write_batch_with_index.h"
+#include "test_util/testutil.h"
+#include "util/cast_util.h"
+#include "utilities/backup/backup_engine_impl.h"
+#include "utilities/fault_injection_fs.h"
+#include "utilities/fault_injection_secondary_cache.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+namespace {
+
+std::shared_ptr<const FilterPolicy> CreateFilterPolicy() {
+ if (FLAGS_bloom_bits < 0) {
+ return BlockBasedTableOptions().filter_policy;
+ }
+ const FilterPolicy* new_policy;
+ if (FLAGS_ribbon_starting_level >= 999) {
+ // Use Bloom API
+ new_policy = NewBloomFilterPolicy(FLAGS_bloom_bits, false);
+ } else {
+ new_policy = NewRibbonFilterPolicy(
+ FLAGS_bloom_bits, /* bloom_before_level */ FLAGS_ribbon_starting_level);
+ }
+ return std::shared_ptr<const FilterPolicy>(new_policy);
+}
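+
+// A minimal sketch of how the policy created above is typically consumed
+// (assumes a block-based table; `bbto` and `options` are illustrative names):
+//
+//   BlockBasedTableOptions bbto;
+//   bbto.filter_policy = CreateFilterPolicy();
+//   options.table_factory.reset(NewBlockBasedTableFactory(bbto));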
+
+} // namespace
+
+StressTest::StressTest()
+ : cache_(NewCache(FLAGS_cache_size, FLAGS_cache_numshardbits)),
+ compressed_cache_(NewLRUCache(FLAGS_compressed_cache_size,
+ FLAGS_compressed_cache_numshardbits)),
+ filter_policy_(CreateFilterPolicy()),
+ db_(nullptr),
+#ifndef ROCKSDB_LITE
+ txn_db_(nullptr),
+#endif
+ db_aptr_(nullptr),
+ clock_(db_stress_env->GetSystemClock().get()),
+ new_column_family_name_(1),
+ num_times_reopened_(0),
+ db_preload_finished_(false),
+ cmp_db_(nullptr),
+ is_db_stopped_(false) {
+ if (FLAGS_destroy_db_initially) {
+ std::vector<std::string> files;
+ db_stress_env->GetChildren(FLAGS_db, &files);
+ for (unsigned int i = 0; i < files.size(); i++) {
+ if (Slice(files[i]).starts_with("heap-")) {
+ db_stress_env->DeleteFile(FLAGS_db + "/" + files[i]);
+ }
+ }
+
+ Options options;
+ options.env = db_stress_env;
+  // Remove files without preserving manifest files
+#ifndef ROCKSDB_LITE
+ const Status s = !FLAGS_use_blob_db
+ ? DestroyDB(FLAGS_db, options)
+ : blob_db::DestroyBlobDB(FLAGS_db, options,
+ blob_db::BlobDBOptions());
+#else
+ const Status s = DestroyDB(FLAGS_db, options);
+#endif // !ROCKSDB_LITE
+
+ if (!s.ok()) {
+ fprintf(stderr, "Cannot destroy original db: %s\n", s.ToString().c_str());
+ exit(1);
+ }
+ }
+}
+
+StressTest::~StressTest() {
+ for (auto cf : column_families_) {
+ delete cf;
+ }
+ column_families_.clear();
+ delete db_;
+
+ for (auto* cf : cmp_cfhs_) {
+ delete cf;
+ }
+ cmp_cfhs_.clear();
+ delete cmp_db_;
+}
+
+std::shared_ptr<Cache> StressTest::NewCache(size_t capacity,
+ int32_t num_shard_bits) {
+ ConfigOptions config_options;
+ if (capacity <= 0) {
+ return nullptr;
+ }
+
+ if (FLAGS_cache_type == "clock_cache") {
+ fprintf(stderr, "Old clock cache implementation has been removed.\n");
+ exit(1);
+ } else if (FLAGS_cache_type == "hyper_clock_cache") {
+ return HyperClockCacheOptions(static_cast<size_t>(capacity),
+ FLAGS_block_size /*estimated_entry_charge*/,
+ num_shard_bits)
+ .MakeSharedCache();
+ } else if (FLAGS_cache_type == "lru_cache") {
+ LRUCacheOptions opts;
+ opts.capacity = capacity;
+ opts.num_shard_bits = num_shard_bits;
+#ifndef ROCKSDB_LITE
+ std::shared_ptr<SecondaryCache> secondary_cache;
+ if (!FLAGS_secondary_cache_uri.empty()) {
+ Status s = SecondaryCache::CreateFromString(
+ config_options, FLAGS_secondary_cache_uri, &secondary_cache);
+ if (secondary_cache == nullptr) {
+ fprintf(stderr,
+ "No secondary cache registered matching string: %s status=%s\n",
+ FLAGS_secondary_cache_uri.c_str(), s.ToString().c_str());
+ exit(1);
+ }
+ if (FLAGS_secondary_cache_fault_one_in > 0) {
+ secondary_cache = std::make_shared<FaultInjectionSecondaryCache>(
+ secondary_cache, static_cast<uint32_t>(FLAGS_seed),
+ FLAGS_secondary_cache_fault_one_in);
+ }
+ opts.secondary_cache = secondary_cache;
+ }
+#endif
+ return NewLRUCache(opts);
+ } else {
+    fprintf(stderr, "Cache type not supported.\n");
+ exit(1);
+ }
+}
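+
+// Sketch of how the cache built here is typically wired up (assumes a
+// block-based table; `bbto` is an illustrative name):
+//
+//   BlockBasedTableOptions bbto;
+//   bbto.block_cache = cache_;  // the member initialized via NewCache()
+//   options.table_factory.reset(NewBlockBasedTableFactory(bbto));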
+
+std::vector<std::string> StressTest::GetBlobCompressionTags() {
+ std::vector<std::string> compression_tags{"kNoCompression"};
+
+ if (Snappy_Supported()) {
+ compression_tags.emplace_back("kSnappyCompression");
+ }
+ if (LZ4_Supported()) {
+ compression_tags.emplace_back("kLZ4Compression");
+ }
+ if (ZSTD_Supported()) {
+ compression_tags.emplace_back("kZSTD");
+ }
+
+ return compression_tags;
+}
+
+bool StressTest::BuildOptionsTable() {
+ if (FLAGS_set_options_one_in <= 0) {
+ return true;
+ }
+
+ std::unordered_map<std::string, std::vector<std::string>> options_tbl = {
+ {"write_buffer_size",
+ {std::to_string(options_.write_buffer_size),
+ std::to_string(options_.write_buffer_size * 2),
+ std::to_string(options_.write_buffer_size * 4)}},
+ {"max_write_buffer_number",
+ {std::to_string(options_.max_write_buffer_number),
+ std::to_string(options_.max_write_buffer_number * 2),
+ std::to_string(options_.max_write_buffer_number * 4)}},
+ {"arena_block_size",
+ {
+ std::to_string(options_.arena_block_size),
+ std::to_string(options_.write_buffer_size / 4),
+ std::to_string(options_.write_buffer_size / 8),
+ }},
+ {"memtable_huge_page_size", {"0", std::to_string(2 * 1024 * 1024)}},
+ {"max_successive_merges", {"0", "2", "4"}},
+ {"inplace_update_num_locks", {"100", "200", "300"}},
+ // TODO: re-enable once internal task T124324915 is fixed.
+ // {"experimental_mempurge_threshold", {"0.0", "1.0"}},
+ // TODO(ljin): enable test for this option
+ // {"disable_auto_compactions", {"100", "200", "300"}},
+ {"level0_file_num_compaction_trigger",
+ {
+ std::to_string(options_.level0_file_num_compaction_trigger),
+ std::to_string(options_.level0_file_num_compaction_trigger + 2),
+ std::to_string(options_.level0_file_num_compaction_trigger + 4),
+ }},
+ {"level0_slowdown_writes_trigger",
+ {
+ std::to_string(options_.level0_slowdown_writes_trigger),
+ std::to_string(options_.level0_slowdown_writes_trigger + 2),
+ std::to_string(options_.level0_slowdown_writes_trigger + 4),
+ }},
+ {"level0_stop_writes_trigger",
+ {
+ std::to_string(options_.level0_stop_writes_trigger),
+ std::to_string(options_.level0_stop_writes_trigger + 2),
+ std::to_string(options_.level0_stop_writes_trigger + 4),
+ }},
+ {"max_compaction_bytes",
+ {
+ std::to_string(options_.target_file_size_base * 5),
+ std::to_string(options_.target_file_size_base * 15),
+ std::to_string(options_.target_file_size_base * 100),
+ }},
+ {"target_file_size_base",
+ {
+ std::to_string(options_.target_file_size_base),
+ std::to_string(options_.target_file_size_base * 2),
+ std::to_string(options_.target_file_size_base * 4),
+ }},
+ {"target_file_size_multiplier",
+ {
+ std::to_string(options_.target_file_size_multiplier),
+ "1",
+ "2",
+ }},
+ {"max_bytes_for_level_base",
+ {
+ std::to_string(options_.max_bytes_for_level_base / 2),
+ std::to_string(options_.max_bytes_for_level_base),
+ std::to_string(options_.max_bytes_for_level_base * 2),
+ }},
+ {"max_bytes_for_level_multiplier",
+ {
+ std::to_string(options_.max_bytes_for_level_multiplier),
+ "1",
+ "2",
+ }},
+ {"max_sequential_skip_in_iterations", {"4", "8", "12"}},
+ };
+
+ if (FLAGS_allow_setting_blob_options_dynamically) {
+ options_tbl.emplace("enable_blob_files",
+ std::vector<std::string>{"false", "true"});
+ options_tbl.emplace("min_blob_size",
+ std::vector<std::string>{"0", "8", "16"});
+ options_tbl.emplace("blob_file_size",
+ std::vector<std::string>{"1M", "16M", "256M", "1G"});
+ options_tbl.emplace("blob_compression_type", GetBlobCompressionTags());
+ options_tbl.emplace("enable_blob_garbage_collection",
+ std::vector<std::string>{"false", "true"});
+ options_tbl.emplace(
+ "blob_garbage_collection_age_cutoff",
+ std::vector<std::string>{"0.0", "0.25", "0.5", "0.75", "1.0"});
+ options_tbl.emplace("blob_garbage_collection_force_threshold",
+ std::vector<std::string>{"0.5", "0.75", "1.0"});
+ options_tbl.emplace("blob_compaction_readahead_size",
+ std::vector<std::string>{"0", "1M", "4M"});
+ options_tbl.emplace("blob_file_starting_level",
+ std::vector<std::string>{"0", "1", "2"});
+ options_tbl.emplace("prepopulate_blob_cache",
+ std::vector<std::string>{"kDisable", "kFlushOnly"});
+ }
+
+ options_table_ = std::move(options_tbl);
+
+ for (const auto& iter : options_table_) {
+ options_index_.push_back(iter.first);
+ }
+ return true;
+}
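+
+// Each entry of `options_table_` maps a dynamically-changeable option name to
+// candidate values. Sketch of how a single entry is consumed (the literal
+// value is illustrative; see SetOptions() below for the real call site):
+//
+//   std::unordered_map<std::string, std::string> opts{
+//       {"write_buffer_size", "67108864"}};
+//   Status s = db_->SetOptions(column_families_[0], opts);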
+
+void StressTest::InitDb(SharedState* shared) {
+ uint64_t now = clock_->NowMicros();
+ fprintf(stdout, "%s Initializing db_stress\n",
+ clock_->TimeToString(now / 1000000).c_str());
+ PrintEnv();
+ Open(shared);
+ BuildOptionsTable();
+}
+
+void StressTest::FinishInitDb(SharedState* shared) {
+ if (FLAGS_read_only) {
+ uint64_t now = clock_->NowMicros();
+ fprintf(stdout, "%s Preloading db with %" PRIu64 " KVs\n",
+ clock_->TimeToString(now / 1000000).c_str(), FLAGS_max_key);
+ PreloadDbAndReopenAsReadOnly(FLAGS_max_key, shared);
+ }
+
+ if (shared->HasHistory()) {
+ // The way it works right now is, if there's any history, that means the
+ // previous run mutating the DB had all its operations traced, in which case
+ // we should always be able to `Restore()` the expected values to match the
+ // `db_`'s current seqno.
+ Status s = shared->Restore(db_);
+ if (!s.ok()) {
+ fprintf(stderr, "Error restoring historical expected values: %s\n",
+ s.ToString().c_str());
+ exit(1);
+ }
+ }
+#ifndef ROCKSDB_LITE
+ if (FLAGS_use_txn) {
+ // It's OK here without sync because unsynced data cannot be lost at this
+ // point
+ // - even with sync_fault_injection=1 as the
+ // file is still directly writable until after FinishInitDb()
+ ProcessRecoveredPreparedTxns(shared);
+ }
+#endif
+ if (FLAGS_enable_compaction_filter) {
+ auto* compaction_filter_factory =
+ reinterpret_cast<DbStressCompactionFilterFactory*>(
+ options_.compaction_filter_factory.get());
+ assert(compaction_filter_factory);
+ // This must be called only after any potential `SharedState::Restore()` has
+ // completed in order for the `compaction_filter_factory` to operate on the
+ // correct latest values file.
+ compaction_filter_factory->SetSharedState(shared);
+ fprintf(stdout, "Compaction filter factory: %s\n",
+ compaction_filter_factory->Name());
+ }
+}
+
+void StressTest::TrackExpectedState(SharedState* shared) {
+  // For `FLAGS_manual_wal_flush_one_in > 0`, WAL
+  // data can be lost when `manual_wal_flush_one_in > 0` and `FlushWAL()` is not
+  // explicitly called by users of RocksDB (in our case, db stress).
+  // Therefore recovery from such potential WAL data loss is a prefix recovery
+  // that requires tracing.
+ if ((FLAGS_sync_fault_injection || FLAGS_disable_wal ||
+ FLAGS_manual_wal_flush_one_in > 0) &&
+ IsStateTracked()) {
+ Status s = shared->SaveAtAndAfter(db_);
+ if (!s.ok()) {
+ fprintf(stderr, "Error enabling history tracing: %s\n",
+ s.ToString().c_str());
+ exit(1);
+ }
+ }
+}
+
+Status StressTest::AssertSame(DB* db, ColumnFamilyHandle* cf,
+ ThreadState::SnapshotState& snap_state) {
+ Status s;
+ if (cf->GetName() != snap_state.cf_at_name) {
+ return s;
+ }
+ // This `ReadOptions` is for validation purposes. Ignore
+ // `FLAGS_rate_limit_user_ops` to avoid slowing any validation.
+ ReadOptions ropt;
+ ropt.snapshot = snap_state.snapshot;
+ Slice ts;
+ if (!snap_state.timestamp.empty()) {
+ ts = snap_state.timestamp;
+ ropt.timestamp = &ts;
+ }
+ PinnableSlice exp_v(&snap_state.value);
+ exp_v.PinSelf();
+ PinnableSlice v;
+ s = db->Get(ropt, cf, snap_state.key, &v);
+ if (!s.ok() && !s.IsNotFound()) {
+ return s;
+ }
+ if (snap_state.status != s) {
+ return Status::Corruption(
+ "The snapshot gave inconsistent results for key " +
+ std::to_string(Hash(snap_state.key.c_str(), snap_state.key.size(), 0)) +
+ " in cf " + cf->GetName() + ": (" + snap_state.status.ToString() +
+ ") vs. (" + s.ToString() + ")");
+ }
+ if (s.ok()) {
+ if (exp_v != v) {
+ return Status::Corruption("The snapshot gave inconsistent values: (" +
+ exp_v.ToString() + ") vs. (" + v.ToString() +
+ ")");
+ }
+ }
+ if (snap_state.key_vec != nullptr) {
+ // When `prefix_extractor` is set, seeking to beginning and scanning
+ // across prefixes are only supported with `total_order_seek` set.
+ ropt.total_order_seek = true;
+ std::unique_ptr<Iterator> iterator(db->NewIterator(ropt));
+ std::unique_ptr<std::vector<bool>> tmp_bitvec(
+ new std::vector<bool>(FLAGS_max_key));
+ for (iterator->SeekToFirst(); iterator->Valid(); iterator->Next()) {
+ uint64_t key_val;
+ if (GetIntVal(iterator->key().ToString(), &key_val)) {
+ (*tmp_bitvec.get())[key_val] = true;
+ }
+ }
+ if (!std::equal(snap_state.key_vec->begin(), snap_state.key_vec->end(),
+ tmp_bitvec.get()->begin())) {
+ return Status::Corruption("Found inconsistent keys at this snapshot");
+ }
+ }
+ return Status::OK();
+}
+
+void StressTest::VerificationAbort(SharedState* shared, std::string msg,
+ Status s) const {
+ fprintf(stderr, "Verification failed: %s. Status is %s\n", msg.c_str(),
+ s.ToString().c_str());
+ shared->SetVerificationFailure();
+}
+
+void StressTest::VerificationAbort(SharedState* shared, std::string msg, int cf,
+ int64_t key) const {
+ auto key_str = Key(key);
+ Slice key_slice = key_str;
+ fprintf(stderr,
+ "Verification failed for column family %d key %s (%" PRIi64 "): %s\n",
+ cf, key_slice.ToString(true).c_str(), key, msg.c_str());
+ shared->SetVerificationFailure();
+}
+
+void StressTest::VerificationAbort(SharedState* shared, std::string msg, int cf,
+ int64_t key, Slice value_from_db,
+ Slice value_from_expected) const {
+ auto key_str = Key(key);
+ fprintf(stderr,
+ "Verification failed for column family %d key %s (%" PRIi64
+ "): value_from_db: %s, value_from_expected: %s, msg: %s\n",
+ cf, Slice(key_str).ToString(true).c_str(), key,
+ value_from_db.ToString(true).c_str(),
+ value_from_expected.ToString(true).c_str(), msg.c_str());
+ shared->SetVerificationFailure();
+}
+
+void StressTest::VerificationAbort(SharedState* shared, int cf, int64_t key,
+ const Slice& value,
+ const WideColumns& columns,
+ const WideColumns& expected_columns) const {
+ assert(shared);
+
+ auto key_str = Key(key);
+
+ fprintf(stderr,
+ "Verification failed for column family %d key %s (%" PRIi64
+ "): Value and columns inconsistent: %s\n",
+ cf, Slice(key_str).ToString(/* hex */ true).c_str(), key,
+ DebugString(value, columns, expected_columns).c_str());
+
+ shared->SetVerificationFailure();
+}
+
+std::string StressTest::DebugString(const Slice& value,
+ const WideColumns& columns,
+ const WideColumns& expected_columns) {
+ std::ostringstream oss;
+
+ oss << "value: " << value.ToString(/* hex */ true);
+
+ auto dump = [](const WideColumns& cols, std::ostream& os) {
+ if (cols.empty()) {
+ return;
+ }
+
+ os << std::hex;
+
+ auto it = cols.begin();
+ os << *it;
+ for (++it; it != cols.end(); ++it) {
+ os << ' ' << *it;
+ }
+ };
+
+ oss << ", columns: ";
+ dump(columns, oss);
+
+ oss << ", expected_columns: ";
+ dump(expected_columns, oss);
+
+ return oss.str();
+}
+
+void StressTest::PrintStatistics() {
+ if (dbstats) {
+ fprintf(stdout, "STATISTICS:\n%s\n", dbstats->ToString().c_str());
+ }
+ if (dbstats_secondaries) {
+ fprintf(stdout, "Secondary instances STATISTICS:\n%s\n",
+ dbstats_secondaries->ToString().c_str());
+ }
+}
+
+// Currently PreloadDb has to be single-threaded.
+void StressTest::PreloadDbAndReopenAsReadOnly(int64_t number_of_keys,
+ SharedState* shared) {
+ WriteOptions write_opts;
+ write_opts.disableWAL = FLAGS_disable_wal;
+ if (FLAGS_sync) {
+ write_opts.sync = true;
+ }
+ if (FLAGS_rate_limit_auto_wal_flush) {
+ write_opts.rate_limiter_priority = Env::IO_USER;
+ }
+ char value[100];
+ int cf_idx = 0;
+ Status s;
+ for (auto cfh : column_families_) {
+ for (int64_t k = 0; k != number_of_keys; ++k) {
+ const std::string key = Key(k);
+
+ constexpr uint32_t value_base = 0;
+ const size_t sz = GenerateValue(value_base, value, sizeof(value));
+
+ const Slice v(value, sz);
+
+ shared->Put(cf_idx, k, value_base, true /* pending */);
+
+ std::string ts;
+ if (FLAGS_user_timestamp_size > 0) {
+ ts = GetNowNanos();
+ }
+
+ if (FLAGS_use_merge) {
+ if (!FLAGS_use_txn) {
+ if (FLAGS_user_timestamp_size > 0) {
+ s = db_->Merge(write_opts, cfh, key, ts, v);
+ } else {
+ s = db_->Merge(write_opts, cfh, key, v);
+ }
+ } else {
+#ifndef ROCKSDB_LITE
+ Transaction* txn;
+ s = NewTxn(write_opts, &txn);
+ if (s.ok()) {
+ s = txn->Merge(cfh, key, v);
+ if (s.ok()) {
+ s = CommitTxn(txn);
+ }
+ }
+#endif
+ }
+ } else if (FLAGS_use_put_entity_one_in > 0) {
+ s = db_->PutEntity(write_opts, cfh, key,
+ GenerateWideColumns(value_base, v));
+ } else {
+ if (!FLAGS_use_txn) {
+ if (FLAGS_user_timestamp_size > 0) {
+ s = db_->Put(write_opts, cfh, key, ts, v);
+ } else {
+ s = db_->Put(write_opts, cfh, key, v);
+ }
+ } else {
+#ifndef ROCKSDB_LITE
+ Transaction* txn;
+ s = NewTxn(write_opts, &txn);
+ if (s.ok()) {
+ s = txn->Put(cfh, key, v);
+ if (s.ok()) {
+ s = CommitTxn(txn);
+ }
+ }
+#endif
+ }
+ }
+
+ shared->Put(cf_idx, k, value_base, false /* pending */);
+ if (!s.ok()) {
+ break;
+ }
+ }
+ if (!s.ok()) {
+ break;
+ }
+ ++cf_idx;
+ }
+ if (s.ok()) {
+ s = db_->Flush(FlushOptions(), column_families_);
+ }
+ if (s.ok()) {
+ for (auto cf : column_families_) {
+ delete cf;
+ }
+ column_families_.clear();
+ delete db_;
+ db_ = nullptr;
+#ifndef ROCKSDB_LITE
+ txn_db_ = nullptr;
+#endif
+
+ db_preload_finished_.store(true);
+ auto now = clock_->NowMicros();
+ fprintf(stdout, "%s Reopening database in read-only\n",
+ clock_->TimeToString(now / 1000000).c_str());
+ // Reopen as read-only, can ignore all options related to updates
+ Open(shared);
+ } else {
+    fprintf(stderr, "Failed to preload db\n");
+ exit(1);
+ }
+}
+
+Status StressTest::SetOptions(ThreadState* thread) {
+ assert(FLAGS_set_options_one_in > 0);
+ std::unordered_map<std::string, std::string> opts;
+ std::string name =
+ options_index_[thread->rand.Next() % options_index_.size()];
+ int value_idx = thread->rand.Next() % options_table_[name].size();
+ if (name == "level0_file_num_compaction_trigger" ||
+ name == "level0_slowdown_writes_trigger" ||
+ name == "level0_stop_writes_trigger") {
+ opts["level0_file_num_compaction_trigger"] =
+ options_table_["level0_file_num_compaction_trigger"][value_idx];
+ opts["level0_slowdown_writes_trigger"] =
+ options_table_["level0_slowdown_writes_trigger"][value_idx];
+ opts["level0_stop_writes_trigger"] =
+ options_table_["level0_stop_writes_trigger"][value_idx];
+ } else {
+ opts[name] = options_table_[name][value_idx];
+ }
+
+ int rand_cf_idx = thread->rand.Next() % FLAGS_column_families;
+ auto cfh = column_families_[rand_cf_idx];
+ return db_->SetOptions(cfh, opts);
+}
+
+#ifndef ROCKSDB_LITE
+void StressTest::ProcessRecoveredPreparedTxns(SharedState* shared) {
+ assert(txn_db_);
+ std::vector<Transaction*> recovered_prepared_trans;
+ txn_db_->GetAllPreparedTransactions(&recovered_prepared_trans);
+ for (Transaction* txn : recovered_prepared_trans) {
+ ProcessRecoveredPreparedTxnsHelper(txn, shared);
+ delete txn;
+ }
+ recovered_prepared_trans.clear();
+ txn_db_->GetAllPreparedTransactions(&recovered_prepared_trans);
+ assert(recovered_prepared_trans.size() == 0);
+}
+
+void StressTest::ProcessRecoveredPreparedTxnsHelper(Transaction* txn,
+ SharedState* shared) {
+ thread_local Random rand(static_cast<uint32_t>(FLAGS_seed));
+ for (size_t i = 0; i < column_families_.size(); ++i) {
+ std::unique_ptr<WBWIIterator> wbwi_iter(
+ txn->GetWriteBatch()->NewIterator(column_families_[i]));
+ for (wbwi_iter->SeekToFirst(); wbwi_iter->Valid(); wbwi_iter->Next()) {
+ uint64_t key_val;
+ if (GetIntVal(wbwi_iter->Entry().key.ToString(), &key_val)) {
+ shared->Put(static_cast<int>(i) /* cf_idx */, key_val,
+ 0 /* value_base */, true /* pending */);
+ }
+ }
+ }
+ if (rand.OneIn(2)) {
+ Status s = txn->Commit();
+ assert(s.ok());
+ } else {
+ Status s = txn->Rollback();
+ assert(s.ok());
+ }
+}
+
+Status StressTest::NewTxn(WriteOptions& write_opts, Transaction** txn) {
+ if (!FLAGS_use_txn) {
+ return Status::InvalidArgument("NewTxn when FLAGS_use_txn is not set");
+ }
+ write_opts.disableWAL = FLAGS_disable_wal;
+ static std::atomic<uint64_t> txn_id = {0};
+ TransactionOptions txn_options;
+ txn_options.use_only_the_last_commit_time_batch_for_recovery =
+ FLAGS_use_only_the_last_commit_time_batch_for_recovery;
+ txn_options.lock_timeout = 600000; // 10 min
+ txn_options.deadlock_detect = true;
+ *txn = txn_db_->BeginTransaction(write_opts, txn_options);
+ auto istr = std::to_string(txn_id.fetch_add(1));
+ Status s = (*txn)->SetName("xid" + istr);
+ return s;
+}
+
+Status StressTest::CommitTxn(Transaction* txn, ThreadState* thread) {
+ if (!FLAGS_use_txn) {
+ return Status::InvalidArgument("CommitTxn when FLAGS_use_txn is not set");
+ }
+ assert(txn_db_);
+ Status s = txn->Prepare();
+ std::shared_ptr<const Snapshot> timestamped_snapshot;
+ if (s.ok()) {
+ if (thread && FLAGS_create_timestamped_snapshot_one_in &&
+ thread->rand.OneIn(FLAGS_create_timestamped_snapshot_one_in)) {
+ uint64_t ts = db_stress_env->NowNanos();
+ s = txn->CommitAndTryCreateSnapshot(/*notifier=*/nullptr, ts,
+ &timestamped_snapshot);
+
+ std::pair<Status, std::shared_ptr<const Snapshot>> res;
+ if (thread->tid == 0) {
+ uint64_t now = db_stress_env->NowNanos();
+ res = txn_db_->CreateTimestampedSnapshot(now);
+ if (res.first.ok()) {
+ assert(res.second);
+ assert(res.second->GetTimestamp() == now);
+ if (timestamped_snapshot) {
+ assert(res.second->GetTimestamp() >
+ timestamped_snapshot->GetTimestamp());
+ }
+ } else {
+ assert(!res.second);
+ }
+ }
+ } else {
+ s = txn->Commit();
+ }
+ }
+ if (thread && FLAGS_create_timestamped_snapshot_one_in > 0 &&
+ thread->rand.OneInOpt(50000)) {
+ uint64_t now = db_stress_env->NowNanos();
+ constexpr uint64_t time_diff = static_cast<uint64_t>(1000) * 1000 * 1000;
+ txn_db_->ReleaseTimestampedSnapshotsOlderThan(now - time_diff);
+ }
+ delete txn;
+ return s;
+}
+
+Status StressTest::RollbackTxn(Transaction* txn) {
+ if (!FLAGS_use_txn) {
+ return Status::InvalidArgument(
+ "RollbackTxn when FLAGS_use_txn is not"
+ " set");
+ }
+ Status s = txn->Rollback();
+ delete txn;
+ return s;
+}
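+
+// Lifecycle sketch for the transaction helpers above (assumes FLAGS_use_txn,
+// i.e. `db_` is backed by a TransactionDB; `cfh`, `key` and `value` are
+// illustrative, error handling is elided):
+//
+//   WriteOptions wo;
+//   Transaction* txn = nullptr;
+//   Status s = NewTxn(wo, &txn);         // BeginTransaction + SetName
+//   if (s.ok()) s = txn->Put(cfh, key, value);
+//   if (s.ok()) s = CommitTxn(txn);      // Prepare + Commit, then delete
+//   else if (txn != nullptr) s = RollbackTxn(txn);  // Rollback, then delete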
+#endif
+
+void StressTest::OperateDb(ThreadState* thread) {
+ ReadOptions read_opts(FLAGS_verify_checksum, true);
+ read_opts.rate_limiter_priority =
+ FLAGS_rate_limit_user_ops ? Env::IO_USER : Env::IO_TOTAL;
+ read_opts.async_io = FLAGS_async_io;
+ read_opts.adaptive_readahead = FLAGS_adaptive_readahead;
+ read_opts.readahead_size = FLAGS_readahead_size;
+ WriteOptions write_opts;
+ if (FLAGS_rate_limit_auto_wal_flush) {
+ write_opts.rate_limiter_priority = Env::IO_USER;
+ }
+ auto shared = thread->shared;
+ char value[100];
+ std::string from_db;
+ if (FLAGS_sync) {
+ write_opts.sync = true;
+ }
+ write_opts.disableWAL = FLAGS_disable_wal;
+ write_opts.protection_bytes_per_key = FLAGS_batch_protection_bytes_per_key;
+ const int prefix_bound = static_cast<int>(FLAGS_readpercent) +
+ static_cast<int>(FLAGS_prefixpercent);
+ const int write_bound = prefix_bound + static_cast<int>(FLAGS_writepercent);
+ const int del_bound = write_bound + static_cast<int>(FLAGS_delpercent);
+ const int delrange_bound =
+ del_bound + static_cast<int>(FLAGS_delrangepercent);
+ const int iterate_bound =
+ delrange_bound + static_cast<int>(FLAGS_iterpercent);
+
+ const uint64_t ops_per_open = FLAGS_ops_per_thread / (FLAGS_reopen + 1);
+
+#ifndef NDEBUG
+ if (FLAGS_read_fault_one_in) {
+ fault_fs_guard->SetThreadLocalReadErrorContext(thread->shared->GetSeed(),
+ FLAGS_read_fault_one_in);
+ }
+#endif // NDEBUG
+ if (FLAGS_write_fault_one_in) {
+ IOStatus error_msg;
+ if (FLAGS_injest_error_severity <= 1 || FLAGS_injest_error_severity > 2) {
+ error_msg = IOStatus::IOError("Retryable IO Error");
+ error_msg.SetRetryable(true);
+ } else if (FLAGS_injest_error_severity == 2) {
+ // Ingest the fatal error
+ error_msg = IOStatus::IOError("Fatal IO Error");
+ error_msg.SetDataLoss(true);
+ }
+ std::vector<FileType> types = {FileType::kTableFile,
+ FileType::kDescriptorFile,
+ FileType::kCurrentFile};
+ fault_fs_guard->SetRandomWriteError(
+ thread->shared->GetSeed(), FLAGS_write_fault_one_in, error_msg,
+ /*inject_for_all_file_types=*/false, types);
+ }
+ thread->stats.Start();
+ for (int open_cnt = 0; open_cnt <= FLAGS_reopen; ++open_cnt) {
+ if (thread->shared->HasVerificationFailedYet() ||
+ thread->shared->ShouldStopTest()) {
+ break;
+ }
+ if (open_cnt != 0) {
+ thread->stats.FinishedSingleOp();
+ MutexLock l(thread->shared->GetMutex());
+ while (!thread->snapshot_queue.empty()) {
+ db_->ReleaseSnapshot(thread->snapshot_queue.front().second.snapshot);
+ delete thread->snapshot_queue.front().second.key_vec;
+ thread->snapshot_queue.pop();
+ }
+ thread->shared->IncVotedReopen();
+ if (thread->shared->AllVotedReopen()) {
+ thread->shared->GetStressTest()->Reopen(thread);
+ thread->shared->GetCondVar()->SignalAll();
+ } else {
+ thread->shared->GetCondVar()->Wait();
+ }
+ // Commenting this out as we don't want to reset stats on each open.
+ // thread->stats.Start();
+ }
+
+ for (uint64_t i = 0; i < ops_per_open; i++) {
+ if (thread->shared->HasVerificationFailedYet()) {
+ break;
+ }
+
+ // Change Options
+ if (thread->rand.OneInOpt(FLAGS_set_options_one_in)) {
+ SetOptions(thread);
+ }
+
+ if (thread->rand.OneInOpt(FLAGS_set_in_place_one_in)) {
+ options_.inplace_update_support ^= options_.inplace_update_support;
+ }
+
+ if (thread->tid == 0 && FLAGS_verify_db_one_in > 0 &&
+ thread->rand.OneIn(FLAGS_verify_db_one_in)) {
+ ContinuouslyVerifyDb(thread);
+ if (thread->shared->ShouldStopTest()) {
+ break;
+ }
+ }
+
+ MaybeClearOneColumnFamily(thread);
+
+ if (thread->rand.OneInOpt(FLAGS_manual_wal_flush_one_in)) {
+ bool sync = thread->rand.OneIn(2) ? true : false;
+ Status s = db_->FlushWAL(sync);
+ if (!s.ok() && !(sync && s.IsNotSupported())) {
+ fprintf(stderr, "FlushWAL(sync=%s) failed: %s\n",
+ (sync ? "true" : "false"), s.ToString().c_str());
+ }
+ }
+
+ if (thread->rand.OneInOpt(FLAGS_sync_wal_one_in)) {
+ Status s = db_->SyncWAL();
+ if (!s.ok() && !s.IsNotSupported()) {
+ fprintf(stderr, "SyncWAL() failed: %s\n", s.ToString().c_str());
+ }
+ }
+
+ int rand_column_family = thread->rand.Next() % FLAGS_column_families;
+ ColumnFamilyHandle* column_family = column_families_[rand_column_family];
+
+ if (thread->rand.OneInOpt(FLAGS_compact_files_one_in)) {
+ TestCompactFiles(thread, column_family);
+ }
+
+ int64_t rand_key = GenerateOneKey(thread, i);
+ std::string keystr = Key(rand_key);
+ Slice key = keystr;
+
+ if (thread->rand.OneInOpt(FLAGS_compact_range_one_in)) {
+ TestCompactRange(thread, rand_key, key, column_family);
+ if (thread->shared->HasVerificationFailedYet()) {
+ break;
+ }
+ }
+
+ std::vector<int> rand_column_families =
+ GenerateColumnFamilies(FLAGS_column_families, rand_column_family);
+
+ if (thread->rand.OneInOpt(FLAGS_flush_one_in)) {
+ Status status = TestFlush(rand_column_families);
+ if (!status.ok()) {
+ fprintf(stdout, "Unable to perform Flush(): %s\n",
+ status.ToString().c_str());
+ }
+ }
+
+#ifndef ROCKSDB_LITE
+ // Verify GetLiveFiles with a 1 in N chance.
+ if (thread->rand.OneInOpt(FLAGS_get_live_files_one_in) &&
+ !FLAGS_write_fault_one_in) {
+ Status status = VerifyGetLiveFiles();
+ if (!status.ok()) {
+ VerificationAbort(shared, "VerifyGetLiveFiles status not OK", status);
+ }
+ }
+
+ // Verify GetSortedWalFiles with a 1 in N chance.
+ if (thread->rand.OneInOpt(FLAGS_get_sorted_wal_files_one_in)) {
+ Status status = VerifyGetSortedWalFiles();
+ if (!status.ok()) {
+ VerificationAbort(shared, "VerifyGetSortedWalFiles status not OK",
+ status);
+ }
+ }
+
+ // Verify GetCurrentWalFile with a 1 in N chance.
+ if (thread->rand.OneInOpt(FLAGS_get_current_wal_file_one_in)) {
+ Status status = VerifyGetCurrentWalFile();
+ if (!status.ok()) {
+ VerificationAbort(shared, "VerifyGetCurrentWalFile status not OK",
+ status);
+ }
+ }
+#endif // !ROCKSDB_LITE
+
+ if (thread->rand.OneInOpt(FLAGS_pause_background_one_in)) {
+ Status status = TestPauseBackground(thread);
+ if (!status.ok()) {
+ VerificationAbort(
+ shared, "Pause/ContinueBackgroundWork status not OK", status);
+ }
+ }
+
+#ifndef ROCKSDB_LITE
+ if (thread->rand.OneInOpt(FLAGS_verify_checksum_one_in)) {
+ Status status = db_->VerifyChecksum();
+ if (!status.ok()) {
+ VerificationAbort(shared, "VerifyChecksum status not OK", status);
+ }
+ }
+
+ if (thread->rand.OneInOpt(FLAGS_get_property_one_in)) {
+ TestGetProperty(thread);
+ }
+#endif
+
+ std::vector<int64_t> rand_keys = GenerateKeys(rand_key);
+
+ if (thread->rand.OneInOpt(FLAGS_ingest_external_file_one_in)) {
+ TestIngestExternalFile(thread, rand_column_families, rand_keys);
+ }
+
+ if (thread->rand.OneInOpt(FLAGS_backup_one_in)) {
+ // Beyond a certain DB size threshold, this test becomes heavier than
+ // it's worth.
+ uint64_t total_size = 0;
+ if (FLAGS_backup_max_size > 0) {
+ std::vector<FileAttributes> files;
+ db_stress_env->GetChildrenFileAttributes(FLAGS_db, &files);
+ for (auto& file : files) {
+ total_size += file.size_bytes;
+ }
+ }
+
+ if (total_size <= FLAGS_backup_max_size) {
+ Status s = TestBackupRestore(thread, rand_column_families, rand_keys);
+ if (!s.ok()) {
+ VerificationAbort(shared, "Backup/restore gave inconsistent state",
+ s);
+ }
+ }
+ }
+
+ if (thread->rand.OneInOpt(FLAGS_checkpoint_one_in)) {
+ Status s = TestCheckpoint(thread, rand_column_families, rand_keys);
+ if (!s.ok()) {
+ VerificationAbort(shared, "Checkpoint gave inconsistent state", s);
+ }
+ }
+
+#ifndef ROCKSDB_LITE
+ if (thread->rand.OneInOpt(FLAGS_approximate_size_one_in)) {
+ Status s =
+ TestApproximateSize(thread, i, rand_column_families, rand_keys);
+ if (!s.ok()) {
+ VerificationAbort(shared, "ApproximateSize Failed", s);
+ }
+ }
+#endif // !ROCKSDB_LITE
+ if (thread->rand.OneInOpt(FLAGS_acquire_snapshot_one_in)) {
+ TestAcquireSnapshot(thread, rand_column_family, keystr, i);
+ }
+
+ /*always*/ {
+ Status s = MaybeReleaseSnapshots(thread, i);
+ if (!s.ok()) {
+ VerificationAbort(shared, "Snapshot gave inconsistent state", s);
+ }
+ }
+
+ // Assign timestamps if necessary.
+ std::string read_ts_str;
+ Slice read_ts;
+ if (FLAGS_user_timestamp_size > 0) {
+ read_ts_str = GetNowNanos();
+ read_ts = read_ts_str;
+ read_opts.timestamp = &read_ts;
+ }
+
+ int prob_op = thread->rand.Uniform(100);
+ // Reset this in case we pick something other than a read op. We don't
+ // want to use a stale value when deciding at the beginning of the loop
+ // whether to vote to reopen
+ if (prob_op >= 0 && prob_op < static_cast<int>(FLAGS_readpercent)) {
+ assert(0 <= prob_op);
+ // OPERATION read
+ if (FLAGS_use_multiget) {
+ // Leave room for one more iteration of the loop with a single key
+ // batch. This is to ensure that each thread does exactly the same
+ // number of ops
+ int multiget_batch_size = static_cast<int>(
+ std::min(static_cast<uint64_t>(thread->rand.Uniform(64)),
+ FLAGS_ops_per_thread - i - 1));
+          // If it's the last iteration, ensure that multiget_batch_size is at least 1
+ multiget_batch_size = std::max(multiget_batch_size, 1);
+ rand_keys = GenerateNKeys(thread, multiget_batch_size, i);
+ TestMultiGet(thread, read_opts, rand_column_families, rand_keys);
+ i += multiget_batch_size - 1;
+ } else {
+ TestGet(thread, read_opts, rand_column_families, rand_keys);
+ }
+ } else if (prob_op < prefix_bound) {
+ assert(static_cast<int>(FLAGS_readpercent) <= prob_op);
+ // OPERATION prefix scan
+ // keys are 8 bytes long, prefix size is FLAGS_prefix_size. There are
+ // (8 - FLAGS_prefix_size) bytes besides the prefix. So there will
+ // be 2 ^ ((8 - FLAGS_prefix_size) * 8) possible keys with the same
+ // prefix
+ TestPrefixScan(thread, read_opts, rand_column_families, rand_keys);
+ } else if (prob_op < write_bound) {
+ assert(prefix_bound <= prob_op);
+ // OPERATION write
+ TestPut(thread, write_opts, read_opts, rand_column_families, rand_keys,
+ value);
+ } else if (prob_op < del_bound) {
+ assert(write_bound <= prob_op);
+ // OPERATION delete
+ TestDelete(thread, write_opts, rand_column_families, rand_keys);
+ } else if (prob_op < delrange_bound) {
+ assert(del_bound <= prob_op);
+ // OPERATION delete range
+ TestDeleteRange(thread, write_opts, rand_column_families, rand_keys);
+ } else if (prob_op < iterate_bound) {
+ assert(delrange_bound <= prob_op);
+ // OPERATION iterate
+ if (!FLAGS_skip_verifydb &&
+ thread->rand.OneInOpt(
+ FLAGS_verify_iterator_with_expected_state_one_in)) {
+ TestIterateAgainstExpected(thread, read_opts, rand_column_families,
+ rand_keys);
+ } else {
+ int num_seeks = static_cast<int>(
+ std::min(static_cast<uint64_t>(thread->rand.Uniform(4)),
+ FLAGS_ops_per_thread - i - 1));
+ rand_keys = GenerateNKeys(thread, num_seeks, i);
+ i += num_seeks - 1;
+ TestIterate(thread, read_opts, rand_column_families, rand_keys);
+ }
+ } else {
+ assert(iterate_bound <= prob_op);
+ TestCustomOperations(thread, rand_column_families);
+ }
+ thread->stats.FinishedSingleOp();
+ }
+ }
+ while (!thread->snapshot_queue.empty()) {
+ db_->ReleaseSnapshot(thread->snapshot_queue.front().second.snapshot);
+ delete thread->snapshot_queue.front().second.key_vec;
+ thread->snapshot_queue.pop();
+ }
+
+ thread->stats.Stop();
+}
+
+#ifndef ROCKSDB_LITE
+// Generates a list of keys close to the boundaries of SST file keys.
+// If there isn't any SST file in the DB, returns an empty list.
+std::vector<std::string> StressTest::GetWhiteBoxKeys(ThreadState* thread,
+ DB* db,
+ ColumnFamilyHandle* cfh,
+ size_t num_keys) {
+ ColumnFamilyMetaData cfmd;
+ db->GetColumnFamilyMetaData(cfh, &cfmd);
+ std::vector<std::string> boundaries;
+ for (const LevelMetaData& lmd : cfmd.levels) {
+ for (const SstFileMetaData& sfmd : lmd.files) {
+ // If FLAGS_user_timestamp_size > 0, then both smallestkey and largestkey
+ // have timestamps.
+ const auto& skey = sfmd.smallestkey;
+ const auto& lkey = sfmd.largestkey;
+ assert(skey.size() >= FLAGS_user_timestamp_size);
+ assert(lkey.size() >= FLAGS_user_timestamp_size);
+ boundaries.push_back(
+ skey.substr(0, skey.size() - FLAGS_user_timestamp_size));
+ boundaries.push_back(
+ lkey.substr(0, lkey.size() - FLAGS_user_timestamp_size));
+ }
+ }
+ if (boundaries.empty()) {
+ return {};
+ }
+
+ std::vector<std::string> ret;
+ for (size_t j = 0; j < num_keys; j++) {
+ std::string k =
+ boundaries[thread->rand.Uniform(static_cast<int>(boundaries.size()))];
+ if (thread->rand.OneIn(3)) {
+ // Reduce one byte from the string
+ for (int i = static_cast<int>(k.length()) - 1; i >= 0; i--) {
+ uint8_t cur = k[i];
+ if (cur > 0) {
+ k[i] = static_cast<char>(cur - 1);
+ break;
+ } else if (i > 0) {
+ k[i] = 0xFFu;
+ }
+ }
+ } else if (thread->rand.OneIn(2)) {
+ // Add one byte to the string
+ for (int i = static_cast<int>(k.length()) - 1; i >= 0; i--) {
+ uint8_t cur = k[i];
+ if (cur < 255) {
+ k[i] = static_cast<char>(cur + 1);
+ break;
+ } else if (i > 0) {
+ k[i] = 0x00;
+ }
+ }
+ }
+ ret.push_back(k);
+ }
+ return ret;
+}
+#endif // !ROCKSDB_LITE
+
+// Given a key K, this creates an iterator which scans to K and then
+// does a random sequence of Next/Prev operations.
+Status StressTest::TestIterate(ThreadState* thread,
+ const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) {
+ assert(!rand_column_families.empty());
+ assert(!rand_keys.empty());
+
+ ManagedSnapshot snapshot_guard(db_);
+
+ ReadOptions ro = read_opts;
+ ro.snapshot = snapshot_guard.snapshot();
+
+ std::string read_ts_str;
+ Slice read_ts_slice;
+ MaybeUseOlderTimestampForRangeScan(thread, read_ts_str, read_ts_slice, ro);
+
+ bool expect_total_order = false;
+ if (thread->rand.OneIn(16)) {
+ // When prefix extractor is used, it's useful to cover total order seek.
+ ro.total_order_seek = true;
+ expect_total_order = true;
+ } else if (thread->rand.OneIn(4)) {
+ ro.total_order_seek = false;
+ ro.auto_prefix_mode = true;
+ expect_total_order = true;
+ } else if (options_.prefix_extractor.get() == nullptr) {
+ expect_total_order = true;
+ }
+
+ std::string upper_bound_str;
+ Slice upper_bound;
+ if (thread->rand.OneIn(16)) {
+ // With a 1/16 chance, set an iterator upper bound.
+ // Note: upper_bound can be smaller than the seek key.
+ const int64_t rand_upper_key = GenerateOneKey(thread, FLAGS_ops_per_thread);
+ upper_bound_str = Key(rand_upper_key);
+ upper_bound = Slice(upper_bound_str);
+ ro.iterate_upper_bound = &upper_bound;
+ }
+ std::string lower_bound_str;
+ Slice lower_bound;
+ if (thread->rand.OneIn(16)) {
+ // With a 1/16 chance, enable iterator lower bound.
+ // Note: lower_bound can be greater than the seek key.
+ const int64_t rand_lower_key = GenerateOneKey(thread, FLAGS_ops_per_thread);
+ lower_bound_str = Key(rand_lower_key);
+ lower_bound = Slice(lower_bound_str);
+ ro.iterate_lower_bound = &lower_bound;
+ }
+
+ ColumnFamilyHandle* const cfh = column_families_[rand_column_families[0]];
+ assert(cfh);
+
+ std::unique_ptr<Iterator> iter(db_->NewIterator(ro, cfh));
+
+ std::vector<std::string> key_strs;
+ if (thread->rand.OneIn(16)) {
+ // Generate keys close to lower or upper bound of SST files.
+ key_strs = GetWhiteBoxKeys(thread, db_, cfh, rand_keys.size());
+ }
+ if (key_strs.empty()) {
+ // Use the random keys passed in.
+ for (int64_t rkey : rand_keys) {
+ key_strs.push_back(Key(rkey));
+ }
+ }
+
+ std::string op_logs;
+ constexpr size_t kOpLogsLimit = 10000;
+
+ for (const std::string& key_str : key_strs) {
+ if (op_logs.size() > kOpLogsLimit) {
+ // Shouldn't take too much memory for the history log. Clear it.
+ op_logs = "(cleared...)\n";
+ }
+
+ if (ro.iterate_upper_bound != nullptr && thread->rand.OneIn(2)) {
+ // With a 1/2 chance, change the upper bound.
+ // It is possible that it is changed before first use, but there is no
+ // problem with that.
+ const int64_t rand_upper_key =
+ GenerateOneKey(thread, FLAGS_ops_per_thread);
+ upper_bound_str = Key(rand_upper_key);
+ upper_bound = Slice(upper_bound_str);
+ }
+ if (ro.iterate_lower_bound != nullptr && thread->rand.OneIn(4)) {
+ // With a 1/4 chance, change the lower bound.
+ // It is possible that it is changed before first use, but there is no
+ // problem with that.
+ const int64_t rand_lower_key =
+ GenerateOneKey(thread, FLAGS_ops_per_thread);
+ lower_bound_str = Key(rand_lower_key);
+ lower_bound = Slice(lower_bound_str);
+ }
+
+ // Record some options to op_logs
+ op_logs += "total_order_seek: ";
+ op_logs += (ro.total_order_seek ? "1 " : "0 ");
+ op_logs += "auto_prefix_mode: ";
+ op_logs += (ro.auto_prefix_mode ? "1 " : "0 ");
+ if (ro.iterate_upper_bound != nullptr) {
+ op_logs += "ub: " + upper_bound.ToString(true) + " ";
+ }
+ if (ro.iterate_lower_bound != nullptr) {
+ op_logs += "lb: " + lower_bound.ToString(true) + " ";
+ }
+
+ // Set up an iterator, perform the same operations without bounds and with
+ // total order seek, and compare the results. This is to identify bugs
+ // related to bounds, prefix extractor, or reseeking. Sometimes we are
+ // comparing iterators with the same set-up, and it doesn't hurt to check
+ // them to be equal.
+ //
+ // This `ReadOptions` is for validation purposes. Ignore
+ // `FLAGS_rate_limit_user_ops` to avoid slowing any validation.
+ ReadOptions cmp_ro;
+ cmp_ro.timestamp = ro.timestamp;
+ cmp_ro.iter_start_ts = ro.iter_start_ts;
+ cmp_ro.snapshot = snapshot_guard.snapshot();
+ cmp_ro.total_order_seek = true;
+
+ ColumnFamilyHandle* const cmp_cfh =
+ GetControlCfh(thread, rand_column_families[0]);
+ assert(cmp_cfh);
+
+ std::unique_ptr<Iterator> cmp_iter(db_->NewIterator(cmp_ro, cmp_cfh));
+
+ bool diverged = false;
+
+ Slice key(key_str);
+
+ const bool support_seek_first_or_last = expect_total_order;
+
+ LastIterateOp last_op;
+ if (support_seek_first_or_last && thread->rand.OneIn(100)) {
+ iter->SeekToFirst();
+ cmp_iter->SeekToFirst();
+ last_op = kLastOpSeekToFirst;
+ op_logs += "STF ";
+ } else if (support_seek_first_or_last && thread->rand.OneIn(100)) {
+ iter->SeekToLast();
+ cmp_iter->SeekToLast();
+ last_op = kLastOpSeekToLast;
+ op_logs += "STL ";
+ } else if (thread->rand.OneIn(8)) {
+ iter->SeekForPrev(key);
+ cmp_iter->SeekForPrev(key);
+ last_op = kLastOpSeekForPrev;
+ op_logs += "SFP " + key.ToString(true) + " ";
+ } else {
+ iter->Seek(key);
+ cmp_iter->Seek(key);
+ last_op = kLastOpSeek;
+ op_logs += "S " + key.ToString(true) + " ";
+ }
+
+ VerifyIterator(thread, cmp_cfh, ro, iter.get(), cmp_iter.get(), last_op,
+ key, op_logs, &diverged);
+
+ const bool no_reverse =
+ (FLAGS_memtablerep == "prefix_hash" && !expect_total_order);
+ for (uint64_t i = 0; i < FLAGS_num_iterations && iter->Valid(); ++i) {
+ if (no_reverse || thread->rand.OneIn(2)) {
+ iter->Next();
+ if (!diverged) {
+ assert(cmp_iter->Valid());
+ cmp_iter->Next();
+ }
+ op_logs += "N";
+ } else {
+ iter->Prev();
+ if (!diverged) {
+ assert(cmp_iter->Valid());
+ cmp_iter->Prev();
+ }
+ op_logs += "P";
+ }
+
+ last_op = kLastOpNextOrPrev;
+
+ VerifyIterator(thread, cmp_cfh, ro, iter.get(), cmp_iter.get(), last_op,
+ key, op_logs, &diverged);
+ }
+
+ thread->stats.AddIterations(1);
+
+ op_logs += "; ";
+ }
+
+ return Status::OK();
+}
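+
+// Bound-handling sketch for the iterator code above. The lifetime rule being
+// exercised: the Slices pointed to by `iterate_lower_bound` /
+// `iterate_upper_bound` must outlive the iterator (key values and `cfh` are
+// illustrative):
+//
+//   std::string lo = Key(100), hi = Key(200);
+//   Slice lo_slice(lo), hi_slice(hi);
+//   ReadOptions ro;
+//   ro.iterate_lower_bound = &lo_slice;
+//   ro.iterate_upper_bound = &hi_slice;
+//   std::unique_ptr<Iterator> it(db_->NewIterator(ro, cfh));
+//   for (it->Seek(lo_slice); it->Valid(); it->Next()) {
+//     // sees only keys in [lo, hi)
+//   }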
+
+#ifndef ROCKSDB_LITE
+// Test the return status of GetLiveFiles.
+Status StressTest::VerifyGetLiveFiles() const {
+ std::vector<std::string> live_file;
+ uint64_t manifest_size = 0;
+ return db_->GetLiveFiles(live_file, &manifest_size);
+}
+
+// Test the return status of GetSortedWalFiles.
+Status StressTest::VerifyGetSortedWalFiles() const {
+ VectorLogPtr log_ptr;
+ return db_->GetSortedWalFiles(log_ptr);
+}
+
+// Test the return status of GetCurrentWalFile.
+Status StressTest::VerifyGetCurrentWalFile() const {
+ std::unique_ptr<LogFile> cur_wal_file;
+ return db_->GetCurrentWalFile(&cur_wal_file);
+}
+#endif // !ROCKSDB_LITE
+
+// Compares the two iterators, which should be at the same position unless
+// iter may have been invalidated or left undefined because of upper or lower
+// bounds, or the prefix extractor.
+// Flags a verification failure (via the shared state) if the check fails.
+// *diverged is set to true once the two iterators have diverged.
+void StressTest::VerifyIterator(ThreadState* thread,
+ ColumnFamilyHandle* cmp_cfh,
+ const ReadOptions& ro, Iterator* iter,
+ Iterator* cmp_iter, LastIterateOp op,
+ const Slice& seek_key,
+ const std::string& op_logs, bool* diverged) {
+ assert(diverged);
+
+ if (*diverged) {
+ return;
+ }
+
+ if (ro.iter_start_ts != nullptr) {
+ assert(FLAGS_user_timestamp_size > 0);
+ // We currently do not verify iterator when dumping history of internal
+ // keys.
+ *diverged = true;
+ return;
+ }
+
+ if (op == kLastOpSeekToFirst && ro.iterate_lower_bound != nullptr) {
+ // SeekToFirst() with lower bound is not well defined.
+ *diverged = true;
+ return;
+ } else if (op == kLastOpSeekToLast && ro.iterate_upper_bound != nullptr) {
+    // SeekToLast() with an upper bound is not well defined.
+ *diverged = true;
+ return;
+ } else if (op == kLastOpSeek && ro.iterate_lower_bound != nullptr &&
+ (options_.comparator->CompareWithoutTimestamp(
+ *ro.iterate_lower_bound, /*a_has_ts=*/false, seek_key,
+ /*b_has_ts=*/false) >= 0 ||
+ (ro.iterate_upper_bound != nullptr &&
+ options_.comparator->CompareWithoutTimestamp(
+ *ro.iterate_lower_bound, /*a_has_ts=*/false,
+ *ro.iterate_upper_bound, /*b_has_ts*/ false) >= 0))) {
+ // Lower bound behavior is not well defined if it is larger than
+ // seek key or upper bound. Disable the check for now.
+ *diverged = true;
+ return;
+ } else if (op == kLastOpSeekForPrev && ro.iterate_upper_bound != nullptr &&
+ (options_.comparator->CompareWithoutTimestamp(
+ *ro.iterate_upper_bound, /*a_has_ts=*/false, seek_key,
+ /*b_has_ts=*/false) <= 0 ||
+ (ro.iterate_lower_bound != nullptr &&
+ options_.comparator->CompareWithoutTimestamp(
+ *ro.iterate_lower_bound, /*a_has_ts=*/false,
+ *ro.iterate_upper_bound, /*b_has_ts=*/false) >= 0))) {
+    // Upper bound behavior is not well defined if it is smaller than
+ // seek key or lower bound. Disable the check for now.
+ *diverged = true;
+ return;
+ }
+
+ const SliceTransform* pe = (ro.total_order_seek || ro.auto_prefix_mode)
+ ? nullptr
+ : options_.prefix_extractor.get();
+ const Comparator* cmp = options_.comparator;
+
+ if (iter->Valid() && !cmp_iter->Valid()) {
+ if (pe != nullptr) {
+ if (!pe->InDomain(seek_key)) {
+        // Prefix-seeking a key that is not in the extractor's domain is
+        // undefined. Skip checking for this scenario.
+ *diverged = true;
+ return;
+ } else if (!pe->InDomain(iter->key())) {
+        // Out of range: the iterator's key is no longer in the domain.
+ *diverged = true;
+ return;
+ } else if (pe->Transform(iter->key()) != pe->Transform(seek_key)) {
+ *diverged = true;
+ return;
+ }
+ }
+    fprintf(stderr,
+            "Control iterator is invalid but iterator has key %s "
+ "%s\n",
+ iter->key().ToString(true).c_str(), op_logs.c_str());
+
+ *diverged = true;
+ } else if (cmp_iter->Valid()) {
+    // `iter` is not valid. That can be legitimate if it has already moved
+    // past the upper or lower bound, or been filtered out by the prefix
+    // iterator.
+ const Slice& total_order_key = cmp_iter->key();
+
+ if (pe != nullptr) {
+ if (!pe->InDomain(seek_key)) {
+        // Prefix-seeking a key that is not in the extractor's domain is
+        // undefined. Skip checking for this scenario.
+ *diverged = true;
+ return;
+ }
+
+ if (!pe->InDomain(total_order_key) ||
+ pe->Transform(total_order_key) != pe->Transform(seek_key)) {
+        // If the prefix is exhausted, the only thing that needs to be checked
+        // is that the iterator doesn't return a position within the prefix.
+        // Either way, checking can stop here.
+ *diverged = true;
+ if (!iter->Valid() || !pe->InDomain(iter->key()) ||
+ pe->Transform(iter->key()) != pe->Transform(seek_key)) {
+ return;
+ }
+            fprintf(stderr,
+                "Iterator stays in prefix but control doesn't"
+ " iterator key %s control iterator key %s %s\n",
+ iter->key().ToString(true).c_str(),
+ cmp_iter->key().ToString(true).c_str(), op_logs.c_str());
+ }
+ }
+ // Check upper or lower bounds.
+ if (!*diverged) {
+ if ((iter->Valid() && iter->key() != cmp_iter->key()) ||
+ (!iter->Valid() &&
+ (ro.iterate_upper_bound == nullptr ||
+ cmp->CompareWithoutTimestamp(total_order_key, /*a_has_ts=*/false,
+ *ro.iterate_upper_bound,
+ /*b_has_ts=*/false) < 0) &&
+ (ro.iterate_lower_bound == nullptr ||
+ cmp->CompareWithoutTimestamp(total_order_key, /*a_has_ts=*/false,
+ *ro.iterate_lower_bound,
+ /*b_has_ts=*/false) > 0))) {
+ fprintf(stderr,
+ "Iterator diverged from control iterator which"
+ " has value %s %s\n",
+ total_order_key.ToString(true).c_str(), op_logs.c_str());
+ if (iter->Valid()) {
+ fprintf(stderr, "iterator has value %s\n",
+ iter->key().ToString(true).c_str());
+ } else {
+ fprintf(stderr, "iterator is not valid\n");
+ }
+ *diverged = true;
+ }
+ }
+ }
+
+ if (!*diverged && iter->Valid()) {
+ const WideColumns expected_columns =
+ GenerateExpectedWideColumns(GetValueBase(iter->value()), iter->value());
+ if (iter->columns() != expected_columns) {
+ fprintf(stderr, "Value and columns inconsistent for iterator: %s\n",
+ DebugString(iter->value(), iter->columns(), expected_columns)
+ .c_str());
+
+ *diverged = true;
+ }
+ }
+
+ if (*diverged) {
+ fprintf(stderr, "Control CF %s\n", cmp_cfh->GetName().c_str());
+ thread->stats.AddErrors(1);
+ // Fail fast to preserve the DB state.
+ thread->shared->SetVerificationFailure();
+ }
+}
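+
+// Prefix-vs-total-order sketch for the divergence rules above (assumes a
+// fixed-length prefix extractor; variable names are illustrative):
+//
+//   options.prefix_extractor.reset(NewFixedPrefixTransform(FLAGS_prefix_size));
+//   ReadOptions prefix_ro;               // Seek() may stop within the prefix
+//   ReadOptions total_ro;
+//   total_ro.total_order_seek = true;    // ignores the prefix extractor
+//
+// VerifyIterator() only tolerates divergence when the prefix-mode iterator has
+// legitimately left the seek key's prefix or crossed a user-supplied bound.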
+
+#ifdef ROCKSDB_LITE
+Status StressTest::TestBackupRestore(
+ ThreadState* /* thread */,
+ const std::vector<int>& /* rand_column_families */,
+ const std::vector<int64_t>& /* rand_keys */) {
+ assert(false);
+ fprintf(stderr,
+ "RocksDB lite does not support "
+ "TestBackupRestore\n");
+ std::terminate();
+}
+
+Status StressTest::TestCheckpoint(
+ ThreadState* /* thread */,
+ const std::vector<int>& /* rand_column_families */,
+ const std::vector<int64_t>& /* rand_keys */) {
+ assert(false);
+ fprintf(stderr,
+ "RocksDB lite does not support "
+ "TestCheckpoint\n");
+ std::terminate();
+}
+
+void StressTest::TestCompactFiles(ThreadState* /* thread */,
+ ColumnFamilyHandle* /* column_family */) {
+ assert(false);
+ fprintf(stderr,
+ "RocksDB lite does not support "
+ "CompactFiles\n");
+ std::terminate();
+}
+#else // ROCKSDB_LITE
+Status StressTest::TestBackupRestore(
+ ThreadState* thread, const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) {
+ std::vector<std::unique_ptr<MutexLock>> locks;
+ if (ShouldAcquireMutexOnKey()) {
+ for (int rand_column_family : rand_column_families) {
+ // `rand_keys[0]` on each chosen CF will be verified.
+ locks.emplace_back(new MutexLock(
+ thread->shared->GetMutexForKey(rand_column_family, rand_keys[0])));
+ }
+ }
+
+ const std::string backup_dir =
+ FLAGS_db + "/.backup" + std::to_string(thread->tid);
+ const std::string restore_dir =
+ FLAGS_db + "/.restore" + std::to_string(thread->tid);
+ BackupEngineOptions backup_opts(backup_dir);
+ // For debugging, get info_log from live options
+ backup_opts.info_log = db_->GetDBOptions().info_log.get();
+ if (thread->rand.OneIn(10)) {
+ backup_opts.share_table_files = false;
+ } else {
+ backup_opts.share_table_files = true;
+ if (thread->rand.OneIn(5)) {
+ backup_opts.share_files_with_checksum = false;
+ } else {
+ backup_opts.share_files_with_checksum = true;
+ if (thread->rand.OneIn(2)) {
+ // old
+ backup_opts.share_files_with_checksum_naming =
+ BackupEngineOptions::kLegacyCrc32cAndFileSize;
+ } else {
+ // new
+ backup_opts.share_files_with_checksum_naming =
+ BackupEngineOptions::kUseDbSessionId;
+ }
+ if (thread->rand.OneIn(2)) {
+ backup_opts.share_files_with_checksum_naming =
+ backup_opts.share_files_with_checksum_naming |
+ BackupEngineOptions::kFlagIncludeFileSize;
+ }
+ }
+ }
+ if (thread->rand.OneIn(2)) {
+ backup_opts.schema_version = 1;
+ } else {
+ backup_opts.schema_version = 2;
+ }
+ BackupEngine* backup_engine = nullptr;
+ std::string from = "a backup/restore operation";
+ Status s = BackupEngine::Open(db_stress_env, backup_opts, &backup_engine);
+ if (!s.ok()) {
+ from = "BackupEngine::Open";
+ }
+ if (s.ok()) {
+ if (backup_opts.schema_version >= 2 && thread->rand.OneIn(2)) {
+ TEST_BackupMetaSchemaOptions test_opts;
+ test_opts.crc32c_checksums = thread->rand.OneIn(2) == 0;
+ test_opts.file_sizes = thread->rand.OneIn(2) == 0;
+ TEST_SetBackupMetaSchemaOptions(backup_engine, test_opts);
+ }
+ CreateBackupOptions create_opts;
+ if (FLAGS_disable_wal) {
+      // The verification can only work when the latest value of `key` is
+      // backed up, which requires flushing when the WAL is disabled.
+ //
+ // Note this triggers a flush with a key lock held. Meanwhile, operations
+ // like flush/compaction may attempt to grab key locks like in
+ // `DbStressCompactionFilter`. The philosophy around preventing deadlock
+ // is the background operation key lock acquisition only tries but does
+ // not wait for the lock. So here in the foreground it is OK to hold the
+ // lock and wait on a background operation (flush).
+ create_opts.flush_before_backup = true;
+ }
+ s = backup_engine->CreateNewBackup(create_opts, db_);
+ if (!s.ok()) {
+ from = "BackupEngine::CreateNewBackup";
+ }
+ }
+ if (s.ok()) {
+ delete backup_engine;
+ backup_engine = nullptr;
+ s = BackupEngine::Open(db_stress_env, backup_opts, &backup_engine);
+ if (!s.ok()) {
+ from = "BackupEngine::Open (again)";
+ }
+ }
+ std::vector<BackupInfo> backup_info;
+ // If inplace_not_restore, we verify the backup by opening it as a
+ // read-only DB. If !inplace_not_restore, we restore it to a temporary
+ // directory for verification.
+ bool inplace_not_restore = thread->rand.OneIn(3);
+ if (s.ok()) {
+ backup_engine->GetBackupInfo(&backup_info,
+ /*include_file_details*/ inplace_not_restore);
+ if (backup_info.empty()) {
+ s = Status::NotFound("no backups found");
+ from = "BackupEngine::GetBackupInfo";
+ }
+ }
+ if (s.ok() && thread->rand.OneIn(2)) {
+ s = backup_engine->VerifyBackup(
+ backup_info.front().backup_id,
+ thread->rand.OneIn(2) /* verify_with_checksum */);
+ if (!s.ok()) {
+ from = "BackupEngine::VerifyBackup";
+ }
+ }
+ const bool allow_persistent = thread->tid == 0; // not too many
+ bool from_latest = false;
+ int count = static_cast<int>(backup_info.size());
+ if (s.ok() && !inplace_not_restore) {
+ if (count > 1) {
+ s = backup_engine->RestoreDBFromBackup(
+ RestoreOptions(), backup_info[thread->rand.Uniform(count)].backup_id,
+ restore_dir /* db_dir */, restore_dir /* wal_dir */);
+ if (!s.ok()) {
+ from = "BackupEngine::RestoreDBFromBackup";
+ }
+ } else {
+ from_latest = true;
+ s = backup_engine->RestoreDBFromLatestBackup(RestoreOptions(),
+ restore_dir /* db_dir */,
+ restore_dir /* wal_dir */);
+ if (!s.ok()) {
+ from = "BackupEngine::RestoreDBFromLatestBackup";
+ }
+ }
+ }
+ if (s.ok() && !inplace_not_restore) {
+ // Purge early if restoring, to ensure the restored directory doesn't
+ // have some secret dependency on the backup directory.
+ uint32_t to_keep = 0;
+ if (allow_persistent) {
+ // allow one thread to keep up to 2 backups
+ to_keep = thread->rand.Uniform(3);
+ }
+ s = backup_engine->PurgeOldBackups(to_keep);
+ if (!s.ok()) {
+ from = "BackupEngine::PurgeOldBackups";
+ }
+ }
+ DB* restored_db = nullptr;
+ std::vector<ColumnFamilyHandle*> restored_cf_handles;
+ // Not yet implemented: opening restored BlobDB or TransactionDB
+ if (s.ok() && !FLAGS_use_txn && !FLAGS_use_blob_db) {
+ Options restore_options(options_);
+ restore_options.best_efforts_recovery = false;
+ restore_options.listeners.clear();
+ // Avoid dangling/shared file descriptors, for reliable destroy
+ restore_options.sst_file_manager = nullptr;
+ std::vector<ColumnFamilyDescriptor> cf_descriptors;
+ // TODO(ajkr): `column_family_names_` is not safe to access here when
+ // `clear_column_family_one_in != 0`. But we can't easily switch to
+ // `ListColumnFamilies` to get names because it won't necessarily give
+ // the same order as `column_family_names_`.
+ assert(FLAGS_clear_column_family_one_in == 0);
+ for (auto name : column_family_names_) {
+ cf_descriptors.emplace_back(name, ColumnFamilyOptions(restore_options));
+ }
+ if (inplace_not_restore) {
+ BackupInfo& info = backup_info[thread->rand.Uniform(count)];
+ restore_options.env = info.env_for_open.get();
+ s = DB::OpenForReadOnly(DBOptions(restore_options), info.name_for_open,
+ cf_descriptors, &restored_cf_handles,
+ &restored_db);
+ if (!s.ok()) {
+ from = "DB::OpenForReadOnly in backup/restore";
+ }
+ } else {
+ s = DB::Open(DBOptions(restore_options), restore_dir, cf_descriptors,
+ &restored_cf_handles, &restored_db);
+ if (!s.ok()) {
+ from = "DB::Open in backup/restore";
+ }
+ }
+ }
+ // Note the column families chosen by `rand_column_families` cannot be
+ // dropped while the locks for `rand_keys` are held. So we should not have
+ // to worry about accessing those column families throughout this function.
+ //
+ // For simplicity, currently only verifies existence/non-existence of a
+ // single key
+ for (size_t i = 0; restored_db && s.ok() && i < rand_column_families.size();
+ ++i) {
+ std::string key_str = Key(rand_keys[0]);
+ Slice key = key_str;
+ std::string restored_value;
+ // This `ReadOptions` is for validation purposes. Ignore
+ // `FLAGS_rate_limit_user_ops` to avoid slowing any validation.
+ ReadOptions read_opts;
+ std::string ts_str;
+ Slice ts;
+ if (FLAGS_user_timestamp_size > 0) {
+ ts_str = GetNowNanos();
+ ts = ts_str;
+ read_opts.timestamp = &ts;
+ }
+ Status get_status = restored_db->Get(
+ read_opts, restored_cf_handles[rand_column_families[i]], key,
+ &restored_value);
+ bool exists = thread->shared->Exists(rand_column_families[i], rand_keys[0]);
+ if (get_status.ok()) {
+ if (!exists && from_latest && ShouldAcquireMutexOnKey()) {
+ std::ostringstream oss;
+ oss << "0x" << key.ToString(true)
+ << " exists in restore but not in original db";
+ s = Status::Corruption(oss.str());
+ }
+ } else if (get_status.IsNotFound()) {
+ if (exists && from_latest && ShouldAcquireMutexOnKey()) {
+ std::ostringstream oss;
+ oss << "0x" << key.ToString(true)
+ << " exists in original db but not in restore";
+ s = Status::Corruption(oss.str());
+ }
+ } else {
+ s = get_status;
+ if (!s.ok()) {
+ from = "DB::Get in backup/restore";
+ }
+ }
+ }
+ if (restored_db != nullptr) {
+ for (auto* cf_handle : restored_cf_handles) {
+ restored_db->DestroyColumnFamilyHandle(cf_handle);
+ }
+ delete restored_db;
+ restored_db = nullptr;
+ }
+ if (s.ok() && inplace_not_restore) {
+ // Purge late if inplace open read-only
+ uint32_t to_keep = 0;
+ if (allow_persistent) {
+ // allow one thread to keep up to 2 backups
+ to_keep = thread->rand.Uniform(3);
+ }
+ s = backup_engine->PurgeOldBackups(to_keep);
+ if (!s.ok()) {
+ from = "BackupEngine::PurgeOldBackups";
+ }
+ }
+ if (backup_engine != nullptr) {
+ delete backup_engine;
+ backup_engine = nullptr;
+ }
+ if (s.ok()) {
+ // Preserve directories on failure, or allowed persistent backup
+ if (!allow_persistent) {
+ s = DestroyDir(db_stress_env, backup_dir);
+ if (!s.ok()) {
+ from = "Destroy backup dir";
+ }
+ }
+ }
+ if (s.ok()) {
+ s = DestroyDir(db_stress_env, restore_dir);
+ if (!s.ok()) {
+ from = "Destroy restore dir";
+ }
+ }
+ if (!s.ok()) {
+ fprintf(stderr, "Failure in %s with: %s\n", from.c_str(),
+ s.ToString().c_str());
+ }
+ return s;
+}
+
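+// Exercises DB::GetApproximateSizes() over a randomly chosen key range with
+// randomized SizeApproximationOptions. Only the returned Status is checked;
+// the size estimate itself is not verified.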
+Status StressTest::TestApproximateSize(
+ ThreadState* thread, uint64_t iteration,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) {
+ // rand_keys likely only has one key. Just use the first one.
+ assert(!rand_keys.empty());
+ assert(!rand_column_families.empty());
+ int64_t key1 = rand_keys[0];
+ int64_t key2;
+ if (thread->rand.OneIn(2)) {
+ // Two totally random keys. This tends to cover large ranges.
+ key2 = GenerateOneKey(thread, iteration);
+ if (key2 < key1) {
+ std::swap(key1, key2);
+ }
+ } else {
+    // Unless users pass a very large FLAGS_max_key, we should not need to
+    // worry about overflow. This is for testing, so we skip the overflow
+    // check for simplicity.
+ key2 = key1 + static_cast<int64_t>(thread->rand.Uniform(1000));
+ }
+ std::string key1_str = Key(key1);
+ std::string key2_str = Key(key2);
+ Range range{Slice(key1_str), Slice(key2_str)};
+ SizeApproximationOptions sao;
+ sao.include_memtables = thread->rand.OneIn(2);
+ if (sao.include_memtables) {
+ sao.include_files = thread->rand.OneIn(2);
+ }
+ if (thread->rand.OneIn(2)) {
+ if (thread->rand.OneIn(2)) {
+ sao.files_size_error_margin = 0.0;
+ } else {
+ sao.files_size_error_margin =
+ static_cast<double>(thread->rand.Uniform(3));
+ }
+ }
+ uint64_t result;
+ return db_->GetApproximateSizes(
+ sao, column_families_[rand_column_families[0]], &range, 1, &result);
+}
+
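+// Creates a checkpoint of the DB under "<db>/.checkpoint<tid>", reopens the
+// checkpoint read-only, and verifies existence/non-existence of rand_keys[0]
+// in each chosen column family against the expected state. The checkpoint
+// directory is destroyed again on success.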
+Status StressTest::TestCheckpoint(ThreadState* thread,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) {
+ std::vector<std::unique_ptr<MutexLock>> locks;
+ if (ShouldAcquireMutexOnKey()) {
+ for (int rand_column_family : rand_column_families) {
+ // `rand_keys[0]` on each chosen CF will be verified.
+ locks.emplace_back(new MutexLock(
+ thread->shared->GetMutexForKey(rand_column_family, rand_keys[0])));
+ }
+ }
+
+ std::string checkpoint_dir =
+ FLAGS_db + "/.checkpoint" + std::to_string(thread->tid);
+ Options tmp_opts(options_);
+ tmp_opts.listeners.clear();
+ tmp_opts.env = db_stress_env;
+
+ DestroyDB(checkpoint_dir, tmp_opts);
+
+ if (db_stress_env->FileExists(checkpoint_dir).ok()) {
+    // If the directory still exists, try to delete its files one by one.
+    // Most likely a leftover trash file is still there.
+ Status my_s = DestroyDir(db_stress_env, checkpoint_dir);
+ if (!my_s.ok()) {
+      fprintf(stderr, "Failed to destroy directory before checkpoint: %s\n",
+ my_s.ToString().c_str());
+ }
+ }
+
+ Checkpoint* checkpoint = nullptr;
+ Status s = Checkpoint::Create(db_, &checkpoint);
+ if (s.ok()) {
+ s = checkpoint->CreateCheckpoint(checkpoint_dir);
+ if (!s.ok()) {
+      fprintf(stderr, "Failed to create checkpoint in %s\n",
+ checkpoint_dir.c_str());
+ std::vector<std::string> files;
+ Status my_s = db_stress_env->GetChildren(checkpoint_dir, &files);
+ if (my_s.ok()) {
+ for (const auto& f : files) {
+ fprintf(stderr, " %s\n", f.c_str());
+ }
+ } else {
+        fprintf(stderr,
+                "Failed to list files under the checkpoint directory: %s\n",
+                my_s.ToString().c_str());
+ }
+ }
+ }
+ delete checkpoint;
+ checkpoint = nullptr;
+ std::vector<ColumnFamilyHandle*> cf_handles;
+ DB* checkpoint_db = nullptr;
+ if (s.ok()) {
+ Options options(options_);
+ options.best_efforts_recovery = false;
+ options.listeners.clear();
+    // Avoid a race condition in trash handling after deleting checkpoint_db
+ options.sst_file_manager.reset();
+ std::vector<ColumnFamilyDescriptor> cf_descs;
+ // TODO(ajkr): `column_family_names_` is not safe to access here when
+ // `clear_column_family_one_in != 0`. But we can't easily switch to
+ // `ListColumnFamilies` to get names because it won't necessarily give
+ // the same order as `column_family_names_`.
+ assert(FLAGS_clear_column_family_one_in == 0);
+ if (FLAGS_clear_column_family_one_in == 0) {
+ for (const auto& name : column_family_names_) {
+ cf_descs.emplace_back(name, ColumnFamilyOptions(options));
+ }
+ s = DB::OpenForReadOnly(DBOptions(options), checkpoint_dir, cf_descs,
+ &cf_handles, &checkpoint_db);
+ }
+ }
+ if (checkpoint_db != nullptr) {
+ // Note the column families chosen by `rand_column_families` cannot be
+ // dropped while the locks for `rand_keys` are held. So we should not have
+ // to worry about accessing those column families throughout this function.
+ for (size_t i = 0; s.ok() && i < rand_column_families.size(); ++i) {
+ std::string key_str = Key(rand_keys[0]);
+ Slice key = key_str;
+ std::string ts_str;
+ Slice ts;
+ ReadOptions read_opts;
+ if (FLAGS_user_timestamp_size > 0) {
+ ts_str = GetNowNanos();
+ ts = ts_str;
+ read_opts.timestamp = &ts;
+ }
+ std::string value;
+ Status get_status = checkpoint_db->Get(
+ read_opts, cf_handles[rand_column_families[i]], key, &value);
+ bool exists =
+ thread->shared->Exists(rand_column_families[i], rand_keys[0]);
+ if (get_status.ok()) {
+ if (!exists && ShouldAcquireMutexOnKey()) {
+ std::ostringstream oss;
+ oss << "0x" << key.ToString(true) << " exists in checkpoint "
+ << checkpoint_dir << " but not in original db";
+ s = Status::Corruption(oss.str());
+ }
+ } else if (get_status.IsNotFound()) {
+ if (exists && ShouldAcquireMutexOnKey()) {
+ std::ostringstream oss;
+ oss << "0x" << key.ToString(true)
+ << " exists in original db but not in checkpoint "
+ << checkpoint_dir;
+ s = Status::Corruption(oss.str());
+ }
+ } else {
+ s = get_status;
+ }
+ }
+ for (auto cfh : cf_handles) {
+ delete cfh;
+ }
+ cf_handles.clear();
+ delete checkpoint_db;
+ checkpoint_db = nullptr;
+ }
+
+ if (!s.ok()) {
+ fprintf(stderr, "A checkpoint operation failed with: %s\n",
+ s.ToString().c_str());
+ } else {
+ DestroyDB(checkpoint_dir, tmp_opts);
+ }
+ return s;
+}
+
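+// Queries every property registered in InternalStats::ppt_name_to_info via
+// GetProperty(), plus the int and map variants where supported, and then the
+// per-level properties again with an explicit level suffix. A small set of
+// properties known to be unsupported in this context (suffix-requiring ones,
+// and blob cache properties when no blob cache is configured) is excluded
+// from failure reporting. Occasionally also checks that an invalid property
+// name is rejected.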
+void StressTest::TestGetProperty(ThreadState* thread) const {
+ std::unordered_set<std::string> levelPropertyNames = {
+ DB::Properties::kAggregatedTablePropertiesAtLevel,
+ DB::Properties::kCompressionRatioAtLevelPrefix,
+ DB::Properties::kNumFilesAtLevelPrefix,
+ };
+ std::unordered_set<std::string> unknownPropertyNames = {
+ DB::Properties::kEstimateOldestKeyTime,
+ DB::Properties::kOptionsStatistics,
+ DB::Properties::
+ kLiveSstFilesSizeAtTemperature, // similar to levelPropertyNames, it
+ // requires a number suffix
+ };
+ unknownPropertyNames.insert(levelPropertyNames.begin(),
+ levelPropertyNames.end());
+
+ std::unordered_set<std::string> blobCachePropertyNames = {
+ DB::Properties::kBlobCacheCapacity,
+ DB::Properties::kBlobCacheUsage,
+ DB::Properties::kBlobCachePinnedUsage,
+ };
+ if (db_->GetOptions().blob_cache == nullptr) {
+ unknownPropertyNames.insert(blobCachePropertyNames.begin(),
+ blobCachePropertyNames.end());
+ }
+
+ std::string prop;
+ for (const auto& ppt_name_and_info : InternalStats::ppt_name_to_info) {
+ bool res = db_->GetProperty(ppt_name_and_info.first, &prop);
+ if (unknownPropertyNames.find(ppt_name_and_info.first) ==
+ unknownPropertyNames.end()) {
+ if (!res) {
+ fprintf(stderr, "Failed to get DB property: %s\n",
+ ppt_name_and_info.first.c_str());
+ thread->shared->SetVerificationFailure();
+ }
+ if (ppt_name_and_info.second.handle_int != nullptr) {
+ uint64_t prop_int;
+ if (!db_->GetIntProperty(ppt_name_and_info.first, &prop_int)) {
+ fprintf(stderr, "Failed to get Int property: %s\n",
+ ppt_name_and_info.first.c_str());
+ thread->shared->SetVerificationFailure();
+ }
+ }
+ if (ppt_name_and_info.second.handle_map != nullptr) {
+ std::map<std::string, std::string> prop_map;
+ if (!db_->GetMapProperty(ppt_name_and_info.first, &prop_map)) {
+ fprintf(stderr, "Failed to get Map property: %s\n",
+ ppt_name_and_info.first.c_str());
+ thread->shared->SetVerificationFailure();
+ }
+ }
+ }
+ }
+
+ ROCKSDB_NAMESPACE::ColumnFamilyMetaData cf_meta_data;
+ db_->GetColumnFamilyMetaData(&cf_meta_data);
+ int level_size = static_cast<int>(cf_meta_data.levels.size());
+ for (int level = 0; level < level_size; level++) {
+ for (const auto& ppt_name : levelPropertyNames) {
+ bool res = db_->GetProperty(ppt_name + std::to_string(level), &prop);
+ if (!res) {
+ fprintf(stderr, "Failed to get DB property: %s\n",
+ (ppt_name + std::to_string(level)).c_str());
+ thread->shared->SetVerificationFailure();
+ }
+ }
+ }
+
+ // Test for an invalid property name
+ if (thread->rand.OneIn(100)) {
+ if (db_->GetProperty("rocksdb.invalid_property_name", &prop)) {
+ fprintf(stderr, "Failed to return false for invalid property name\n");
+ thread->shared->SetVerificationFailure();
+ }
+ }
+}
+
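+// Picks a random level and a random file in it that is not being compacted,
+// widens the input to up to three consecutive files, and manually compacts
+// them into the next level (bounded by the last level) via DB::CompactFiles().
+// Retries a few times if the chosen file is busy.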
+void StressTest::TestCompactFiles(ThreadState* thread,
+ ColumnFamilyHandle* column_family) {
+ ROCKSDB_NAMESPACE::ColumnFamilyMetaData cf_meta_data;
+ db_->GetColumnFamilyMetaData(column_family, &cf_meta_data);
+
+ if (cf_meta_data.levels.empty()) {
+ return;
+ }
+
+ // Randomly compact up to three consecutive files from a level
+ const int kMaxRetry = 3;
+ for (int attempt = 0; attempt < kMaxRetry; ++attempt) {
+ size_t random_level =
+ thread->rand.Uniform(static_cast<int>(cf_meta_data.levels.size()));
+
+ const auto& files = cf_meta_data.levels[random_level].files;
+ if (files.size() > 0) {
+ size_t random_file_index =
+ thread->rand.Uniform(static_cast<int>(files.size()));
+ if (files[random_file_index].being_compacted) {
+ // Retry as the selected file is currently being compacted
+ continue;
+ }
+
+ std::vector<std::string> input_files;
+ input_files.push_back(files[random_file_index].name);
+ if (random_file_index > 0 &&
+ !files[random_file_index - 1].being_compacted) {
+ input_files.push_back(files[random_file_index - 1].name);
+ }
+ if (random_file_index + 1 < files.size() &&
+ !files[random_file_index + 1].being_compacted) {
+ input_files.push_back(files[random_file_index + 1].name);
+ }
+
+ size_t output_level =
+ std::min(random_level + 1, cf_meta_data.levels.size() - 1);
+ auto s = db_->CompactFiles(CompactionOptions(), column_family,
+ input_files, static_cast<int>(output_level));
+ if (!s.ok()) {
+ fprintf(stdout, "Unable to perform CompactFiles(): %s\n",
+ s.ToString().c_str());
+ thread->stats.AddNumCompactFilesFailed(1);
+ } else {
+ thread->stats.AddNumCompactFilesSucceed(1);
+ }
+ break;
+ }
+ }
+}
+#endif // ROCKSDB_LITE
+
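+// Flushes either all column families (when atomic flush is enabled) or just
+// the randomly chosen ones.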
+Status StressTest::TestFlush(const std::vector<int>& rand_column_families) {
+ FlushOptions flush_opts;
+ if (FLAGS_atomic_flush) {
+ return db_->Flush(flush_opts, column_families_);
+ }
+ std::vector<ColumnFamilyHandle*> cfhs;
+ std::for_each(rand_column_families.begin(), rand_column_families.end(),
+ [this, &cfhs](int k) { cfhs.push_back(column_families_[k]); });
+ return db_->Flush(flush_opts, cfhs);
+}
+
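+// Pauses background work, sleeps in this thread for a randomized duration
+// that is heavily skewed toward short pauses, then resumes background work.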
+Status StressTest::TestPauseBackground(ThreadState* thread) {
+ Status status = db_->PauseBackgroundWork();
+ if (!status.ok()) {
+ return status;
+ }
+  // To avoid stalling/deadlocking ourselves in this thread, just
+  // sleep here during the pause and let other threads do db operations.
+  // Sleep up to ~16 seconds (2**24 microseconds), but heavily skewed
+  // toward short pauses. (1 chance in 25 of pausing >= 1s;
+  // 1 chance in 625 of pausing the full 16s.)
+ int pwr2_micros =
+ std::min(thread->rand.Uniform(25), thread->rand.Uniform(25));
+ clock_->SleepForMicroseconds(1 << pwr2_micros);
+ return db_->ContinueBackgroundWork();
+}
+
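+// Takes a snapshot (occasionally a write-conflict-boundary snapshot in
+// non-LITE builds), reads `keystr` under it, and queues the snapshot state so
+// the same read can be re-verified shortly before the snapshot is released.
+// With FLAGS_compare_full_db_state_snapshot, thread 0 also records which keys
+// exist at the snapshot so the whole DB state can be re-checked later.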
+void StressTest::TestAcquireSnapshot(ThreadState* thread,
+ int rand_column_family,
+ const std::string& keystr, uint64_t i) {
+ Slice key = keystr;
+ ColumnFamilyHandle* column_family = column_families_[rand_column_family];
+ // This `ReadOptions` is for validation purposes. Ignore
+ // `FLAGS_rate_limit_user_ops` to avoid slowing any validation.
+ ReadOptions ropt;
+#ifndef ROCKSDB_LITE
+ auto db_impl = static_cast_with_check<DBImpl>(db_->GetRootDB());
+ const bool ww_snapshot = thread->rand.OneIn(10);
+ const Snapshot* snapshot =
+ ww_snapshot ? db_impl->GetSnapshotForWriteConflictBoundary()
+ : db_->GetSnapshot();
+#else
+ const Snapshot* snapshot = db_->GetSnapshot();
+#endif // !ROCKSDB_LITE
+ ropt.snapshot = snapshot;
+
+  // Ideally, we want snapshot taking and timestamp generation to be atomic
+  // here, so that the snapshot corresponds to the timestamp. However, that
+  // is not possible with the current GetSnapshot() API.
+ std::string ts_str;
+ Slice ts;
+ if (FLAGS_user_timestamp_size > 0) {
+ ts_str = GetNowNanos();
+ ts = ts_str;
+ ropt.timestamp = &ts;
+ }
+
+ std::string value_at;
+ // When taking a snapshot, we also read a key from that snapshot. We
+ // will later read the same key before releasing the snapshot and
+ // verify that the results are the same.
+ auto status_at = db_->Get(ropt, column_family, key, &value_at);
+ std::vector<bool>* key_vec = nullptr;
+
+ if (FLAGS_compare_full_db_state_snapshot && (thread->tid == 0)) {
+ key_vec = new std::vector<bool>(FLAGS_max_key);
+    // When `prefix_extractor` is set, seeking to the beginning and scanning
+    // across prefixes are only supported with `total_order_seek` set.
+ ropt.total_order_seek = true;
+ std::unique_ptr<Iterator> iterator(db_->NewIterator(ropt));
+ for (iterator->SeekToFirst(); iterator->Valid(); iterator->Next()) {
+ uint64_t key_val;
+ if (GetIntVal(iterator->key().ToString(), &key_val)) {
+ (*key_vec)[key_val] = true;
+ }
+ }
+ }
+
+ ThreadState::SnapshotState snap_state = {snapshot,
+ rand_column_family,
+ column_family->GetName(),
+ keystr,
+ status_at,
+ value_at,
+ key_vec,
+ ts_str};
+ uint64_t hold_for = FLAGS_snapshot_hold_ops;
+ if (FLAGS_long_running_snapshots) {
+    // Hold 10% of snapshots 10x longer
+ if (thread->rand.OneIn(10)) {
+ assert(hold_for < std::numeric_limits<uint64_t>::max() / 10);
+ hold_for *= 10;
+      // Hold 1% of snapshots 100x longer
+ if (thread->rand.OneIn(10)) {
+ assert(hold_for < std::numeric_limits<uint64_t>::max() / 10);
+ hold_for *= 10;
+ }
+ }
+ }
+ uint64_t release_at = std::min(FLAGS_ops_per_thread - 1, i + hold_for);
+ thread->snapshot_queue.emplace(release_at, snap_state);
+}
+
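+// Releases any queued snapshots whose scheduled release point has been
+// reached, re-verifying each one via AssertSame() first.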
+Status StressTest::MaybeReleaseSnapshots(ThreadState* thread, uint64_t i) {
+ while (!thread->snapshot_queue.empty() &&
+ i >= thread->snapshot_queue.front().first) {
+ auto snap_state = thread->snapshot_queue.front().second;
+ assert(snap_state.snapshot);
+    // Note: this is unsafe as the cf might be dropped concurrently. But it
+    // is ok since unclean cf drop is currently not supported by write
+    // prepared transactions.
+ Status s = AssertSame(db_, column_families_[snap_state.cf_at], snap_state);
+ db_->ReleaseSnapshot(snap_state.snapshot);
+ delete snap_state.key_vec;
+ thread->snapshot_queue.pop();
+ if (!s.ok()) {
+ return s;
+ }
+ }
+ return Status::OK();
+}
+
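+// Runs CompactRange() from `start_key` up to a key roughly
+// FLAGS_compact_range_width beyond `rand_key`, with randomized
+// CompactRangeOptions. Half of the time a snapshot is taken first so the
+// contents of the range can be hashed before and after the compaction and
+// compared.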
+void StressTest::TestCompactRange(ThreadState* thread, int64_t rand_key,
+ const Slice& start_key,
+ ColumnFamilyHandle* column_family) {
+ int64_t end_key_num;
+ if (std::numeric_limits<int64_t>::max() - rand_key <
+ FLAGS_compact_range_width) {
+ end_key_num = std::numeric_limits<int64_t>::max();
+ } else {
+ end_key_num = FLAGS_compact_range_width + rand_key;
+ }
+ std::string end_key_buf = Key(end_key_num);
+ Slice end_key(end_key_buf);
+
+ CompactRangeOptions cro;
+ cro.exclusive_manual_compaction = static_cast<bool>(thread->rand.Next() % 2);
+ cro.change_level = static_cast<bool>(thread->rand.Next() % 2);
+ std::vector<BottommostLevelCompaction> bottom_level_styles = {
+ BottommostLevelCompaction::kSkip,
+ BottommostLevelCompaction::kIfHaveCompactionFilter,
+ BottommostLevelCompaction::kForce,
+ BottommostLevelCompaction::kForceOptimized};
+ cro.bottommost_level_compaction =
+ bottom_level_styles[thread->rand.Next() %
+ static_cast<uint32_t>(bottom_level_styles.size())];
+ cro.allow_write_stall = static_cast<bool>(thread->rand.Next() % 2);
+ cro.max_subcompactions = static_cast<uint32_t>(thread->rand.Next() % 4);
+ std::vector<BlobGarbageCollectionPolicy> blob_gc_policies = {
+ BlobGarbageCollectionPolicy::kForce,
+ BlobGarbageCollectionPolicy::kDisable,
+ BlobGarbageCollectionPolicy::kUseDefault};
+ cro.blob_garbage_collection_policy =
+ blob_gc_policies[thread->rand.Next() %
+ static_cast<uint32_t>(blob_gc_policies.size())];
+ cro.blob_garbage_collection_age_cutoff =
+ static_cast<double>(thread->rand.Next() % 100) / 100.0;
+
+ const Snapshot* pre_snapshot = nullptr;
+ uint32_t pre_hash = 0;
+ if (thread->rand.OneIn(2)) {
+    // Do some validation by taking a snapshot and comparing the data before
+    // and after the compaction
+ pre_snapshot = db_->GetSnapshot();
+ pre_hash =
+ GetRangeHash(thread, pre_snapshot, column_family, start_key, end_key);
+ }
+
+ Status status = db_->CompactRange(cro, column_family, &start_key, &end_key);
+
+ if (!status.ok()) {
+ fprintf(stdout, "Unable to perform CompactRange(): %s\n",
+ status.ToString().c_str());
+ }
+
+ if (pre_snapshot != nullptr) {
+ uint32_t post_hash =
+ GetRangeHash(thread, pre_snapshot, column_family, start_key, end_key);
+ if (pre_hash != post_hash) {
+ fprintf(stderr,
+ "Data hash different before and after compact range "
+ "start_key %s end_key %s\n",
+ start_key.ToString(true).c_str(), end_key.ToString(true).c_str());
+ thread->stats.AddErrors(1);
+ // Fail fast to preserve the DB state.
+ thread->shared->SetVerificationFailure();
+ }
+ db_->ReleaseSnapshot(pre_snapshot);
+ }
+}
+
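+// Computes a crc32c over all keys, values, and wide columns in
+// [start_key, end_key] as seen through `snapshot`; used to detect unexpected
+// data changes across a compaction.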
+uint32_t StressTest::GetRangeHash(ThreadState* thread, const Snapshot* snapshot,
+ ColumnFamilyHandle* column_family,
+ const Slice& start_key,
+ const Slice& end_key) {
+ // This `ReadOptions` is for validation purposes. Ignore
+ // `FLAGS_rate_limit_user_ops` to avoid slowing any validation.
+ ReadOptions ro;
+ ro.snapshot = snapshot;
+ ro.total_order_seek = true;
+ std::string ts_str;
+ Slice ts;
+ if (FLAGS_user_timestamp_size > 0) {
+ ts_str = GetNowNanos();
+ ts = ts_str;
+ ro.timestamp = &ts;
+ }
+
+ std::unique_ptr<Iterator> it(db_->NewIterator(ro, column_family));
+
+  constexpr char kCrcCalculatorSeparator = ';';
+
+ uint32_t crc = 0;
+
+ for (it->Seek(start_key);
+ it->Valid() && options_.comparator->Compare(it->key(), end_key) <= 0;
+ it->Next()) {
+ crc = crc32c::Extend(crc, it->key().data(), it->key().size());
+    crc = crc32c::Extend(crc, &kCrcCalculatorSeparator, sizeof(char));
+ crc = crc32c::Extend(crc, it->value().data(), it->value().size());
+    crc = crc32c::Extend(crc, &kCrcCalculatorSeparator, sizeof(char));
+
+ for (const auto& column : it->columns()) {
+ crc = crc32c::Extend(crc, column.name().data(), column.name().size());
+      crc = crc32c::Extend(crc, &kCrcCalculatorSeparator, sizeof(char));
+ crc = crc32c::Extend(crc, column.value().data(), column.value().size());
+      crc = crc32c::Extend(crc, &kCrcCalculatorSeparator, sizeof(char));
+ }
+ }
+
+ if (!it->status().ok()) {
+ fprintf(stderr, "Iterator non-OK when calculating range CRC: %s\n",
+ it->status().ToString().c_str());
+ thread->stats.AddErrors(1);
+ // Fail fast to preserve the DB state.
+ thread->shared->SetVerificationFailure();
+ }
+
+ return crc;
+}
+
+void StressTest::PrintEnv() const {
+ fprintf(stdout, "RocksDB version : %d.%d\n", kMajorVersion,
+ kMinorVersion);
+ fprintf(stdout, "Format version : %d\n", FLAGS_format_version);
+ fprintf(stdout, "TransactionDB : %s\n",
+ FLAGS_use_txn ? "true" : "false");
+
+ if (FLAGS_use_txn) {
+#ifndef ROCKSDB_LITE
+    fprintf(stdout, "Two write queues : %s\n",
+ FLAGS_two_write_queues ? "true" : "false");
+ fprintf(stdout, "Write policy : %d\n",
+ static_cast<int>(FLAGS_txn_write_policy));
+ if (static_cast<uint64_t>(TxnDBWritePolicy::WRITE_PREPARED) ==
+ FLAGS_txn_write_policy ||
+ static_cast<uint64_t>(TxnDBWritePolicy::WRITE_UNPREPARED) ==
+ FLAGS_txn_write_policy) {
+ fprintf(stdout, "Snapshot cache bits : %d\n",
+ static_cast<int>(FLAGS_wp_snapshot_cache_bits));
+ fprintf(stdout, "Commit cache bits : %d\n",
+ static_cast<int>(FLAGS_wp_commit_cache_bits));
+ }
+ fprintf(stdout, "last cwb for recovery : %s\n",
+ FLAGS_use_only_the_last_commit_time_batch_for_recovery ? "true"
+ : "false");
+#endif // !ROCKSDB_LITE
+ }
+
+#ifndef ROCKSDB_LITE
+ fprintf(stdout, "Stacked BlobDB : %s\n",
+ FLAGS_use_blob_db ? "true" : "false");
+#endif // !ROCKSDB_LITE
+ fprintf(stdout, "Read only mode : %s\n",
+ FLAGS_read_only ? "true" : "false");
+ fprintf(stdout, "Atomic flush : %s\n",
+ FLAGS_atomic_flush ? "true" : "false");
+ fprintf(stdout, "Manual WAL flush : %s\n",
+ FLAGS_manual_wal_flush_one_in > 0 ? "true" : "false");
+ fprintf(stdout, "Column families : %d\n", FLAGS_column_families);
+ if (!FLAGS_test_batches_snapshots) {
+ fprintf(stdout, "Clear CFs one in : %d\n",
+ FLAGS_clear_column_family_one_in);
+ }
+ fprintf(stdout, "Number of threads : %d\n", FLAGS_threads);
+ fprintf(stdout, "Ops per thread : %lu\n",
+ (unsigned long)FLAGS_ops_per_thread);
+ std::string ttl_state("unused");
+ if (FLAGS_ttl > 0) {
+ ttl_state = std::to_string(FLAGS_ttl);
+ }
+ fprintf(stdout, "Time to live(sec) : %s\n", ttl_state.c_str());
+ fprintf(stdout, "Read percentage : %d%%\n", FLAGS_readpercent);
+ fprintf(stdout, "Prefix percentage : %d%%\n", FLAGS_prefixpercent);
+ fprintf(stdout, "Write percentage : %d%%\n", FLAGS_writepercent);
+ fprintf(stdout, "Delete percentage : %d%%\n", FLAGS_delpercent);
+ fprintf(stdout, "Delete range percentage : %d%%\n", FLAGS_delrangepercent);
+ fprintf(stdout, "No overwrite percentage : %d%%\n",
+ FLAGS_nooverwritepercent);
+ fprintf(stdout, "Iterate percentage : %d%%\n", FLAGS_iterpercent);
+ fprintf(stdout, "Custom ops percentage : %d%%\n", FLAGS_customopspercent);
+ fprintf(stdout, "DB-write-buffer-size : %" PRIu64 "\n",
+ FLAGS_db_write_buffer_size);
+ fprintf(stdout, "Write-buffer-size : %d\n", FLAGS_write_buffer_size);
+ fprintf(stdout, "Iterations : %lu\n",
+ (unsigned long)FLAGS_num_iterations);
+ fprintf(stdout, "Max key : %lu\n",
+ (unsigned long)FLAGS_max_key);
+ fprintf(stdout, "Ratio #ops/#keys : %f\n",
+ (1.0 * FLAGS_ops_per_thread * FLAGS_threads) / FLAGS_max_key);
+ fprintf(stdout, "Num times DB reopens : %d\n", FLAGS_reopen);
+ fprintf(stdout, "Batches/snapshots : %d\n",
+ FLAGS_test_batches_snapshots);
+ fprintf(stdout, "Do update in place : %d\n", FLAGS_in_place_update);
+ fprintf(stdout, "Num keys per lock : %d\n",
+ 1 << FLAGS_log2_keys_per_lock);
+ std::string compression = CompressionTypeToString(compression_type_e);
+ fprintf(stdout, "Compression : %s\n", compression.c_str());
+ std::string bottommost_compression =
+ CompressionTypeToString(bottommost_compression_type_e);
+ fprintf(stdout, "Bottommost Compression : %s\n",
+ bottommost_compression.c_str());
+ std::string checksum = ChecksumTypeToString(checksum_type_e);
+ fprintf(stdout, "Checksum type : %s\n", checksum.c_str());
+ fprintf(stdout, "File checksum impl : %s\n",
+ FLAGS_file_checksum_impl.c_str());
+ fprintf(stdout, "Bloom bits / key : %s\n",
+ FormatDoubleParam(FLAGS_bloom_bits).c_str());
+ fprintf(stdout, "Max subcompactions : %" PRIu64 "\n",
+ FLAGS_subcompactions);
+ fprintf(stdout, "Use MultiGet : %s\n",
+ FLAGS_use_multiget ? "true" : "false");
+
+ const char* memtablerep = "";
+ switch (FLAGS_rep_factory) {
+ case kSkipList:
+ memtablerep = "skip_list";
+ break;
+ case kHashSkipList:
+ memtablerep = "prefix_hash";
+ break;
+ case kVectorRep:
+ memtablerep = "vector";
+ break;
+ }
+
+ fprintf(stdout, "Memtablerep : %s\n", memtablerep);
+
+#ifndef NDEBUG
+ KillPoint* kp = KillPoint::GetInstance();
+ fprintf(stdout, "Test kill odd : %d\n", kp->rocksdb_kill_odds);
+ if (!kp->rocksdb_kill_exclude_prefixes.empty()) {
+ fprintf(stdout, "Skipping kill points prefixes:\n");
+ for (auto& p : kp->rocksdb_kill_exclude_prefixes) {
+ fprintf(stdout, " %s\n", p.c_str());
+ }
+ }
+#endif
+ fprintf(stdout, "Periodic Compaction Secs : %" PRIu64 "\n",
+ FLAGS_periodic_compaction_seconds);
+ fprintf(stdout, "Compaction TTL : %" PRIu64 "\n",
+ FLAGS_compaction_ttl);
+ const char* compaction_pri = "";
+ switch (FLAGS_compaction_pri) {
+ case kByCompensatedSize:
+ compaction_pri = "kByCompensatedSize";
+ break;
+ case kOldestLargestSeqFirst:
+ compaction_pri = "kOldestLargestSeqFirst";
+ break;
+ case kOldestSmallestSeqFirst:
+ compaction_pri = "kOldestSmallestSeqFirst";
+ break;
+ case kMinOverlappingRatio:
+ compaction_pri = "kMinOverlappingRatio";
+ break;
+ case kRoundRobin:
+ compaction_pri = "kRoundRobin";
+ break;
+ }
+ fprintf(stdout, "Compaction Pri : %s\n", compaction_pri);
+ fprintf(stdout, "Background Purge : %d\n",
+ static_cast<int>(FLAGS_avoid_unnecessary_blocking_io));
+ fprintf(stdout, "Write DB ID to manifest : %d\n",
+ static_cast<int>(FLAGS_write_dbid_to_manifest));
+ fprintf(stdout, "Max Write Batch Group Size: %" PRIu64 "\n",
+ FLAGS_max_write_batch_group_size_bytes);
+ fprintf(stdout, "Use dynamic level : %d\n",
+ static_cast<int>(FLAGS_level_compaction_dynamic_level_bytes));
+ fprintf(stdout, "Read fault one in : %d\n", FLAGS_read_fault_one_in);
+ fprintf(stdout, "Write fault one in : %d\n", FLAGS_write_fault_one_in);
+ fprintf(stdout, "Open metadata write fault one in:\n");
+ fprintf(stdout, " %d\n",
+ FLAGS_open_metadata_write_fault_one_in);
+ fprintf(stdout, "Sync fault injection : %d\n",
+ FLAGS_sync_fault_injection);
+ fprintf(stdout, "Best efforts recovery : %d\n",
+ static_cast<int>(FLAGS_best_efforts_recovery));
+ fprintf(stdout, "Fail if OPTIONS file error: %d\n",
+ static_cast<int>(FLAGS_fail_if_options_file_error));
+ fprintf(stdout, "User timestamp size bytes : %d\n",
+ static_cast<int>(FLAGS_user_timestamp_size));
+ fprintf(stdout, "WAL compression : %s\n",
+ FLAGS_wal_compression.c_str());
+ fprintf(stdout, "Try verify sst unique id : %d\n",
+ static_cast<int>(FLAGS_verify_sst_unique_id_in_manifest));
+
+ fprintf(stdout, "------------------------------------------------\n");
+}
+
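+// Opens the DB according to the flags: plain or read-only DB, stacked BlobDB,
+// TransactionDB, or DBWithTTL, creating any missing column families. When
+// open fault injection is enabled, a failed open is retried after clearing
+// the injected errors.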
+void StressTest::Open(SharedState* shared) {
+ assert(db_ == nullptr);
+#ifndef ROCKSDB_LITE
+ assert(txn_db_ == nullptr);
+#else
+ (void)shared;
+#endif
+ if (!InitializeOptionsFromFile(options_)) {
+ InitializeOptionsFromFlags(cache_, compressed_cache_, filter_policy_,
+ options_);
+ }
+ InitializeOptionsGeneral(cache_, compressed_cache_, filter_policy_, options_);
+
+ if (FLAGS_prefix_size == 0 && FLAGS_rep_factory == kHashSkipList) {
+    fprintf(stderr,
+            "prefix_size cannot be zero if memtablerep == prefix_hash\n");
+ exit(1);
+ }
+ if (FLAGS_prefix_size != 0 && FLAGS_rep_factory != kHashSkipList) {
+ fprintf(stderr,
+ "WARNING: prefix_size is non-zero but "
+ "memtablerep != prefix_hash\n");
+ }
+
+ if ((options_.enable_blob_files || options_.enable_blob_garbage_collection ||
+ FLAGS_allow_setting_blob_options_dynamically) &&
+ FLAGS_best_efforts_recovery) {
+ fprintf(stderr,
+ "Integrated BlobDB is currently incompatible with best-effort "
+ "recovery\n");
+ exit(1);
+ }
+
+ fprintf(stdout,
+ "Integrated BlobDB: blob files enabled %d, min blob size %" PRIu64
+ ", blob file size %" PRIu64
+ ", blob compression type %s, blob GC enabled %d, cutoff %f, force "
+ "threshold %f, blob compaction readahead size %" PRIu64
+ ", blob file starting level %d\n",
+ options_.enable_blob_files, options_.min_blob_size,
+ options_.blob_file_size,
+ CompressionTypeToString(options_.blob_compression_type).c_str(),
+ options_.enable_blob_garbage_collection,
+ options_.blob_garbage_collection_age_cutoff,
+ options_.blob_garbage_collection_force_threshold,
+ options_.blob_compaction_readahead_size,
+ options_.blob_file_starting_level);
+
+ if (FLAGS_use_blob_cache) {
+ fprintf(stdout,
+ "Integrated BlobDB: blob cache enabled"
+ ", block and blob caches shared: %d",
+ FLAGS_use_shared_block_and_blob_cache);
+ if (!FLAGS_use_shared_block_and_blob_cache) {
+ fprintf(stdout,
+ ", blob cache size %" PRIu64 ", blob cache num shard bits: %d",
+ FLAGS_blob_cache_size, FLAGS_blob_cache_numshardbits);
+ }
+ fprintf(stdout, ", blob cache prepopulated: %d\n",
+ FLAGS_prepopulate_blob_cache);
+ } else {
+ fprintf(stdout, "Integrated BlobDB: blob cache disabled\n");
+ }
+
+ fprintf(stdout, "DB path: [%s]\n", FLAGS_db.c_str());
+
+ Status s;
+
+ if (FLAGS_ttl == -1) {
+ std::vector<std::string> existing_column_families;
+ s = DB::ListColumnFamilies(DBOptions(options_), FLAGS_db,
+ &existing_column_families); // ignore errors
+ if (!s.ok()) {
+ // DB doesn't exist
+ assert(existing_column_families.empty());
+ assert(column_family_names_.empty());
+ column_family_names_.push_back(kDefaultColumnFamilyName);
+ } else if (column_family_names_.empty()) {
+ // this is the first call to the function Open()
+ column_family_names_ = existing_column_families;
+ } else {
+ // this is a reopen. just assert that existing column_family_names are
+ // equivalent to what we remember
+ auto sorted_cfn = column_family_names_;
+ std::sort(sorted_cfn.begin(), sorted_cfn.end());
+ std::sort(existing_column_families.begin(),
+ existing_column_families.end());
+ if (sorted_cfn != existing_column_families) {
+ fprintf(stderr, "Expected column families differ from the existing:\n");
+ fprintf(stderr, "Expected: {");
+ for (auto cf : sorted_cfn) {
+ fprintf(stderr, "%s ", cf.c_str());
+ }
+ fprintf(stderr, "}\n");
+ fprintf(stderr, "Existing: {");
+ for (auto cf : existing_column_families) {
+ fprintf(stderr, "%s ", cf.c_str());
+ }
+ fprintf(stderr, "}\n");
+ }
+ assert(sorted_cfn == existing_column_families);
+ }
+ std::vector<ColumnFamilyDescriptor> cf_descriptors;
+ for (auto name : column_family_names_) {
+ if (name != kDefaultColumnFamilyName) {
+ new_column_family_name_ =
+ std::max(new_column_family_name_.load(), std::stoi(name) + 1);
+ }
+ cf_descriptors.emplace_back(name, ColumnFamilyOptions(options_));
+ }
+ while (cf_descriptors.size() < (size_t)FLAGS_column_families) {
+ std::string name = std::to_string(new_column_family_name_.load());
+ new_column_family_name_++;
+ cf_descriptors.emplace_back(name, ColumnFamilyOptions(options_));
+ column_family_names_.push_back(name);
+ }
+
+ options_.listeners.clear();
+#ifndef ROCKSDB_LITE
+ options_.listeners.emplace_back(new DbStressListener(
+ FLAGS_db, options_.db_paths, cf_descriptors, db_stress_listener_env));
+#endif // !ROCKSDB_LITE
+ RegisterAdditionalListeners();
+
+ if (!FLAGS_use_txn) {
+    // Determine whether we need to ingest file metadata write failures
+    // during DB reopen. If so, enable it.
+    // Only ingest metadata errors on reopen, since an initial open
+    // failure does not need to be handled.
+    // TODO: transaction DBs are not covered by this fault test yet.
+ bool ingest_meta_error = false;
+ bool ingest_write_error = false;
+ bool ingest_read_error = false;
+ if ((FLAGS_open_metadata_write_fault_one_in ||
+ FLAGS_open_write_fault_one_in || FLAGS_open_read_fault_one_in) &&
+ fault_fs_guard
+ ->FileExists(FLAGS_db + "/CURRENT", IOOptions(), nullptr)
+ .ok()) {
+ if (!FLAGS_sync) {
+        // When db_stress is not in sync mode, we expect all WAL writes to
+        // be durable. Buffering unsynced writes would cause false
+        // positives in crash tests. Until we figure out a way to
+        // solve that, skip the WAL in failure injection.
+ fault_fs_guard->SetSkipDirectWritableTypes({kWalFile});
+ }
+ ingest_meta_error = FLAGS_open_metadata_write_fault_one_in;
+ ingest_write_error = FLAGS_open_write_fault_one_in;
+ ingest_read_error = FLAGS_open_read_fault_one_in;
+ if (ingest_meta_error) {
+ fault_fs_guard->EnableMetadataWriteErrorInjection();
+ fault_fs_guard->SetRandomMetadataWriteError(
+ FLAGS_open_metadata_write_fault_one_in);
+ }
+ if (ingest_write_error) {
+ fault_fs_guard->SetFilesystemDirectWritable(false);
+ fault_fs_guard->EnableWriteErrorInjection();
+ fault_fs_guard->SetRandomWriteError(
+ static_cast<uint32_t>(FLAGS_seed), FLAGS_open_write_fault_one_in,
+ IOStatus::IOError("Injected Open Error"),
+ /*inject_for_all_file_types=*/true, /*types=*/{});
+ }
+ if (ingest_read_error) {
+ fault_fs_guard->SetRandomReadError(FLAGS_open_read_fault_one_in);
+ }
+ }
+ while (true) {
+#ifndef ROCKSDB_LITE
+ // StackableDB-based BlobDB
+ if (FLAGS_use_blob_db) {
+ blob_db::BlobDBOptions blob_db_options;
+ blob_db_options.min_blob_size = FLAGS_blob_db_min_blob_size;
+ blob_db_options.bytes_per_sync = FLAGS_blob_db_bytes_per_sync;
+ blob_db_options.blob_file_size = FLAGS_blob_db_file_size;
+ blob_db_options.enable_garbage_collection = FLAGS_blob_db_enable_gc;
+ blob_db_options.garbage_collection_cutoff = FLAGS_blob_db_gc_cutoff;
+
+ blob_db::BlobDB* blob_db = nullptr;
+ s = blob_db::BlobDB::Open(options_, blob_db_options, FLAGS_db,
+ cf_descriptors, &column_families_,
+ &blob_db);
+ if (s.ok()) {
+ db_ = blob_db;
+ }
+ } else
+#endif // !ROCKSDB_LITE
+ {
+ if (db_preload_finished_.load() && FLAGS_read_only) {
+ s = DB::OpenForReadOnly(DBOptions(options_), FLAGS_db,
+ cf_descriptors, &column_families_, &db_);
+ } else {
+ s = DB::Open(DBOptions(options_), FLAGS_db, cf_descriptors,
+ &column_families_, &db_);
+ }
+ }
+
+ if (ingest_meta_error || ingest_write_error || ingest_read_error) {
+ fault_fs_guard->SetFilesystemDirectWritable(true);
+ fault_fs_guard->DisableMetadataWriteErrorInjection();
+ fault_fs_guard->DisableWriteErrorInjection();
+ fault_fs_guard->SetSkipDirectWritableTypes({});
+ fault_fs_guard->SetRandomReadError(0);
+ if (s.ok()) {
+          // Ingested errors might happen in background compactions. We
+          // wait for all compactions to finish to make sure the DB is in a
+          // clean state before executing queries.
+ s = static_cast_with_check<DBImpl>(db_->GetRootDB())
+ ->WaitForCompact(true /* wait_unscheduled */);
+ if (!s.ok()) {
+ for (auto cf : column_families_) {
+ delete cf;
+ }
+ column_families_.clear();
+ delete db_;
+ db_ = nullptr;
+ }
+ }
+ if (!s.ok()) {
+          // After failing to open the DB due to an IO error, a retry should
+          // open it successfully with correct data as long as no further IO
+          // errors show up.
+ ingest_meta_error = false;
+ ingest_write_error = false;
+ ingest_read_error = false;
+
+ Random rand(static_cast<uint32_t>(FLAGS_seed));
+ if (rand.OneIn(2)) {
+ fault_fs_guard->DeleteFilesCreatedAfterLastDirSync(IOOptions(),
+ nullptr);
+ }
+ if (rand.OneIn(3)) {
+ fault_fs_guard->DropUnsyncedFileData();
+ } else if (rand.OneIn(2)) {
+ fault_fs_guard->DropRandomUnsyncedFileData(&rand);
+ }
+ continue;
+ }
+ }
+ break;
+ }
+ } else {
+#ifndef ROCKSDB_LITE
+ TransactionDBOptions txn_db_options;
+ assert(FLAGS_txn_write_policy <= TxnDBWritePolicy::WRITE_UNPREPARED);
+ txn_db_options.write_policy =
+ static_cast<TxnDBWritePolicy>(FLAGS_txn_write_policy);
+ if (FLAGS_unordered_write) {
+ assert(txn_db_options.write_policy == TxnDBWritePolicy::WRITE_PREPARED);
+ options_.unordered_write = true;
+ options_.two_write_queues = true;
+ txn_db_options.skip_concurrency_control = true;
+ } else {
+ options_.two_write_queues = FLAGS_two_write_queues;
+ }
+ txn_db_options.wp_snapshot_cache_bits =
+ static_cast<size_t>(FLAGS_wp_snapshot_cache_bits);
+ txn_db_options.wp_commit_cache_bits =
+ static_cast<size_t>(FLAGS_wp_commit_cache_bits);
+ PrepareTxnDbOptions(shared, txn_db_options);
+ s = TransactionDB::Open(options_, txn_db_options, FLAGS_db,
+ cf_descriptors, &column_families_, &txn_db_);
+ if (!s.ok()) {
+ fprintf(stderr, "Error in opening the TransactionDB [%s]\n",
+ s.ToString().c_str());
+ fflush(stderr);
+ }
+ assert(s.ok());
+
+ // Do not swap the order of the following.
+ {
+ db_ = txn_db_;
+ db_aptr_.store(txn_db_, std::memory_order_release);
+ }
+#endif
+ }
+ if (!s.ok()) {
+ fprintf(stderr, "Error in opening the DB [%s]\n", s.ToString().c_str());
+ fflush(stderr);
+ }
+ assert(s.ok());
+ assert(column_families_.size() ==
+ static_cast<size_t>(FLAGS_column_families));
+
+ // Secondary instance does not support write-prepared/write-unprepared
+ // transactions, thus just disable secondary instance if we use
+ // transaction.
+ if (s.ok() && FLAGS_test_secondary && !FLAGS_use_txn) {
+#ifndef ROCKSDB_LITE
+ Options tmp_opts;
+ // TODO(yanqin) support max_open_files != -1 for secondary instance.
+ tmp_opts.max_open_files = -1;
+ tmp_opts.env = db_stress_env;
+ const std::string& secondary_path = FLAGS_secondaries_base;
+ s = DB::OpenAsSecondary(tmp_opts, FLAGS_db, secondary_path,
+ cf_descriptors, &cmp_cfhs_, &cmp_db_);
+ assert(s.ok());
+ assert(cmp_cfhs_.size() == static_cast<size_t>(FLAGS_column_families));
+#else
+ fprintf(stderr, "Secondary is not supported in RocksDBLite\n");
+ exit(1);
+#endif // !ROCKSDB_LITE
+ }
+ } else {
+#ifndef ROCKSDB_LITE
+ DBWithTTL* db_with_ttl;
+ s = DBWithTTL::Open(options_, FLAGS_db, &db_with_ttl, FLAGS_ttl);
+ db_ = db_with_ttl;
+#else
+ fprintf(stderr, "TTL is not supported in RocksDBLite\n");
+ exit(1);
+#endif
+ }
+
+ if (FLAGS_preserve_unverified_changes) {
+ // Up until now, no live file should have become obsolete due to these
+ // options. After `DisableFileDeletions()` we can reenable auto compactions
+ // since, even if live files become obsolete, they won't be deleted.
+ assert(options_.avoid_flush_during_recovery);
+ assert(options_.disable_auto_compactions);
+ if (s.ok()) {
+ s = db_->DisableFileDeletions();
+ }
+ if (s.ok()) {
+ s = db_->EnableAutoCompaction(column_families_);
+ }
+ }
+
+ if (!s.ok()) {
+ fprintf(stderr, "open error: %s\n", s.ToString().c_str());
+ exit(1);
+ }
+}
+
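+// Closes and reopens the DB, optionally canceling background work first
+// (always for write-prepared/write-unprepared transactions), and re-enables
+// expected-state history tracing afterwards when applicable.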
+void StressTest::Reopen(ThreadState* thread) {
+#ifndef ROCKSDB_LITE
+  // BG jobs in WritePrepared must be canceled first because i) they can
+  // access the db via a callback and ii) they hold on to a snapshot, and the
+  // upcoming ::Close() would complain about it.
+ const bool write_prepared = FLAGS_use_txn && FLAGS_txn_write_policy != 0;
+ bool bg_canceled __attribute__((unused)) = false;
+ if (write_prepared || thread->rand.OneIn(2)) {
+ const bool wait =
+ write_prepared || static_cast<bool>(thread->rand.OneIn(2));
+ CancelAllBackgroundWork(db_, wait);
+ bg_canceled = wait;
+ }
+ assert(!write_prepared || bg_canceled);
+#else
+ (void)thread;
+#endif
+
+ for (auto cf : column_families_) {
+ delete cf;
+ }
+ column_families_.clear();
+
+#ifndef ROCKSDB_LITE
+ if (thread->rand.OneIn(2)) {
+ Status s = db_->Close();
+ if (!s.ok()) {
+ fprintf(stderr, "Non-ok close status: %s\n", s.ToString().c_str());
+ fflush(stderr);
+ }
+ assert(s.ok());
+ }
+#endif
+ delete db_;
+ db_ = nullptr;
+#ifndef ROCKSDB_LITE
+ txn_db_ = nullptr;
+#endif
+
+ num_times_reopened_++;
+ auto now = clock_->NowMicros();
+ fprintf(stdout, "%s Reopening database for the %dth time\n",
+ clock_->TimeToString(now / 1000000).c_str(), num_times_reopened_);
+ Open(thread->shared);
+
+ if ((FLAGS_sync_fault_injection || FLAGS_disable_wal ||
+ FLAGS_manual_wal_flush_one_in > 0) &&
+ IsStateTracked()) {
+ Status s = thread->shared->SaveAtAndAfter(db_);
+ if (!s.ok()) {
+ fprintf(stderr, "Error enabling history tracing: %s\n",
+ s.ToString().c_str());
+ exit(1);
+ }
+ }
+}
+
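+// With a 1-in-3 chance (and only when user-defined timestamps are enabled),
+// replaces the read timestamp with a random timestamp between the start of
+// the test and now, so that point lookups also exercise reads at older
+// timestamps. Returns true if an older timestamp was installed.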
+bool StressTest::MaybeUseOlderTimestampForPointLookup(ThreadState* thread,
+ std::string& ts_str,
+ Slice& ts_slice,
+ ReadOptions& read_opts) {
+ if (FLAGS_user_timestamp_size == 0) {
+ return false;
+ }
+
+ assert(thread);
+ if (!thread->rand.OneInOpt(3)) {
+ return false;
+ }
+
+ const SharedState* const shared = thread->shared;
+ assert(shared);
+ const uint64_t start_ts = shared->GetStartTimestamp();
+
+ uint64_t now = db_stress_env->NowNanos();
+
+ assert(now > start_ts);
+ uint64_t time_diff = now - start_ts;
+ uint64_t ts = start_ts + (thread->rand.Next64() % time_diff);
+ ts_str.clear();
+ PutFixed64(&ts_str, ts);
+ ts_slice = ts_str;
+ read_opts.timestamp = &ts_slice;
+ return true;
+}
+
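+// Same idea for range scans: occasionally scan at a random older timestamp.
+// Occasionally also set `iter_start_ts` to the test's start timestamp (while
+// restoring the original read timestamp) so the iterator returns multiple
+// versions of a key; this is skipped for Merge, which does not yet support
+// iter_start_ts.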
+void StressTest::MaybeUseOlderTimestampForRangeScan(ThreadState* thread,
+ std::string& ts_str,
+ Slice& ts_slice,
+ ReadOptions& read_opts) {
+ if (FLAGS_user_timestamp_size == 0) {
+ return;
+ }
+
+ assert(thread);
+ if (!thread->rand.OneInOpt(3)) {
+ return;
+ }
+
+ const Slice* const saved_ts = read_opts.timestamp;
+ assert(saved_ts != nullptr);
+
+ const SharedState* const shared = thread->shared;
+ assert(shared);
+ const uint64_t start_ts = shared->GetStartTimestamp();
+
+ uint64_t now = db_stress_env->NowNanos();
+
+ assert(now > start_ts);
+ uint64_t time_diff = now - start_ts;
+ uint64_t ts = start_ts + (thread->rand.Next64() % time_diff);
+ ts_str.clear();
+ PutFixed64(&ts_str, ts);
+ ts_slice = ts_str;
+ read_opts.timestamp = &ts_slice;
+
+ // TODO (yanqin): support Merge with iter_start_ts
+ if (!thread->rand.OneInOpt(3) || FLAGS_use_merge || FLAGS_use_full_merge_v1) {
+ return;
+ }
+
+ ts_str.clear();
+ PutFixed64(&ts_str, start_ts);
+ ts_slice = ts_str;
+ read_opts.iter_start_ts = &ts_slice;
+ read_opts.timestamp = saved_ts;
+}
+
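+// Validates that the current flag combination is compatible with user-defined
+// timestamps and installs the 64-bit timestamp-aware bytewise comparator.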
+void CheckAndSetOptionsForUserTimestamp(Options& options) {
+ assert(FLAGS_user_timestamp_size > 0);
+ const Comparator* const cmp = test::BytewiseComparatorWithU64TsWrapper();
+ assert(cmp);
+ if (FLAGS_user_timestamp_size != cmp->timestamp_size()) {
+ fprintf(stderr,
+ "Only -user_timestamp_size=%d is supported in stress test.\n",
+ static_cast<int>(cmp->timestamp_size()));
+ exit(1);
+ }
+ if (FLAGS_use_txn) {
+ fprintf(stderr, "TransactionDB does not support timestamp yet.\n");
+ exit(1);
+ }
+#ifndef ROCKSDB_LITE
+ if (FLAGS_enable_blob_files || FLAGS_use_blob_db) {
+ fprintf(stderr, "BlobDB not supported with timestamp.\n");
+ exit(1);
+ }
+#endif // !ROCKSDB_LITE
+ if (FLAGS_test_cf_consistency || FLAGS_test_batches_snapshots) {
+ fprintf(stderr,
+ "Due to per-key ts-seq ordering constraint, only the (default) "
+ "non-batched test is supported with timestamp.\n");
+ exit(1);
+ }
+ if (FLAGS_ingest_external_file_one_in > 0) {
+ fprintf(stderr, "Bulk loading may not support timestamp yet.\n");
+ exit(1);
+ }
+ options.comparator = cmp;
+}
+
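+// Loads DB options and column family options from FLAGS_options_file, if one
+// was given. Returns true if options were initialized from the file and false
+// if no options file was specified.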
+bool InitializeOptionsFromFile(Options& options) {
+#ifndef ROCKSDB_LITE
+ DBOptions db_options;
+ std::vector<ColumnFamilyDescriptor> cf_descriptors;
+ if (!FLAGS_options_file.empty()) {
+ Status s = LoadOptionsFromFile(FLAGS_options_file, db_stress_env,
+ &db_options, &cf_descriptors);
+ if (!s.ok()) {
+ fprintf(stderr, "Unable to load options file %s --- %s\n",
+ FLAGS_options_file.c_str(), s.ToString().c_str());
+ exit(1);
+ }
+ db_options.env = new DbStressEnvWrapper(db_stress_env);
+ options = Options(db_options, cf_descriptors[0].options);
+ return true;
+ }
+#else
+ (void)options;
+ fprintf(stderr, "--options_file not supported in lite mode\n");
+ exit(1);
+#endif //! ROCKSDB_LITE
+ return false;
+}
+
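+// Builds `options` purely from command-line flags, including the block-based
+// table options, compression settings, integrated BlobDB settings, and the
+// memtable representation.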
+void InitializeOptionsFromFlags(
+ const std::shared_ptr<Cache>& cache,
+ const std::shared_ptr<Cache>& block_cache_compressed,
+ const std::shared_ptr<const FilterPolicy>& filter_policy,
+ Options& options) {
+ BlockBasedTableOptions block_based_options;
+ block_based_options.block_cache = cache;
+ block_based_options.cache_index_and_filter_blocks =
+ FLAGS_cache_index_and_filter_blocks;
+ block_based_options.metadata_cache_options.top_level_index_pinning =
+ static_cast<PinningTier>(FLAGS_top_level_index_pinning);
+ block_based_options.metadata_cache_options.partition_pinning =
+ static_cast<PinningTier>(FLAGS_partition_pinning);
+ block_based_options.metadata_cache_options.unpartitioned_pinning =
+ static_cast<PinningTier>(FLAGS_unpartitioned_pinning);
+ block_based_options.block_cache_compressed = block_cache_compressed;
+ block_based_options.checksum = checksum_type_e;
+ block_based_options.block_size = FLAGS_block_size;
+ block_based_options.cache_usage_options.options_overrides.insert(
+ {CacheEntryRole::kCompressionDictionaryBuildingBuffer,
+ {/*.charged = */ FLAGS_charge_compression_dictionary_building_buffer
+ ? CacheEntryRoleOptions::Decision::kEnabled
+ : CacheEntryRoleOptions::Decision::kDisabled}});
+ block_based_options.cache_usage_options.options_overrides.insert(
+ {CacheEntryRole::kFilterConstruction,
+ {/*.charged = */ FLAGS_charge_filter_construction
+ ? CacheEntryRoleOptions::Decision::kEnabled
+ : CacheEntryRoleOptions::Decision::kDisabled}});
+ block_based_options.cache_usage_options.options_overrides.insert(
+ {CacheEntryRole::kBlockBasedTableReader,
+ {/*.charged = */ FLAGS_charge_table_reader
+ ? CacheEntryRoleOptions::Decision::kEnabled
+ : CacheEntryRoleOptions::Decision::kDisabled}});
+ block_based_options.cache_usage_options.options_overrides.insert(
+ {CacheEntryRole::kFileMetadata,
+ {/*.charged = */ FLAGS_charge_file_metadata
+ ? CacheEntryRoleOptions::Decision::kEnabled
+ : CacheEntryRoleOptions::Decision::kDisabled}});
+ block_based_options.cache_usage_options.options_overrides.insert(
+ {CacheEntryRole::kBlobCache,
+ {/*.charged = */ FLAGS_charge_blob_cache
+ ? CacheEntryRoleOptions::Decision::kEnabled
+ : CacheEntryRoleOptions::Decision::kDisabled}});
+ block_based_options.format_version =
+ static_cast<uint32_t>(FLAGS_format_version);
+ block_based_options.index_block_restart_interval =
+ static_cast<int32_t>(FLAGS_index_block_restart_interval);
+ block_based_options.filter_policy = filter_policy;
+ block_based_options.partition_filters = FLAGS_partition_filters;
+ block_based_options.optimize_filters_for_memory =
+ FLAGS_optimize_filters_for_memory;
+ block_based_options.detect_filter_construct_corruption =
+ FLAGS_detect_filter_construct_corruption;
+ block_based_options.index_type =
+ static_cast<BlockBasedTableOptions::IndexType>(FLAGS_index_type);
+ block_based_options.data_block_index_type =
+ static_cast<BlockBasedTableOptions::DataBlockIndexType>(
+ FLAGS_data_block_index_type);
+ block_based_options.prepopulate_block_cache =
+ static_cast<BlockBasedTableOptions::PrepopulateBlockCache>(
+ FLAGS_prepopulate_block_cache);
+ block_based_options.initial_auto_readahead_size =
+ FLAGS_initial_auto_readahead_size;
+ block_based_options.max_auto_readahead_size = FLAGS_max_auto_readahead_size;
+ block_based_options.num_file_reads_for_auto_readahead =
+ FLAGS_num_file_reads_for_auto_readahead;
+ options.table_factory.reset(NewBlockBasedTableFactory(block_based_options));
+ options.db_write_buffer_size = FLAGS_db_write_buffer_size;
+ options.write_buffer_size = FLAGS_write_buffer_size;
+ options.max_write_buffer_number = FLAGS_max_write_buffer_number;
+ options.min_write_buffer_number_to_merge =
+ FLAGS_min_write_buffer_number_to_merge;
+ options.max_write_buffer_number_to_maintain =
+ FLAGS_max_write_buffer_number_to_maintain;
+ options.max_write_buffer_size_to_maintain =
+ FLAGS_max_write_buffer_size_to_maintain;
+ options.memtable_prefix_bloom_size_ratio =
+ FLAGS_memtable_prefix_bloom_size_ratio;
+ options.memtable_whole_key_filtering = FLAGS_memtable_whole_key_filtering;
+ options.disable_auto_compactions = FLAGS_disable_auto_compactions;
+ options.max_background_compactions = FLAGS_max_background_compactions;
+ options.max_background_flushes = FLAGS_max_background_flushes;
+ options.compaction_style =
+ static_cast<ROCKSDB_NAMESPACE::CompactionStyle>(FLAGS_compaction_style);
+ options.compaction_pri =
+ static_cast<ROCKSDB_NAMESPACE::CompactionPri>(FLAGS_compaction_pri);
+ options.num_levels = FLAGS_num_levels;
+ if (FLAGS_prefix_size >= 0) {
+ options.prefix_extractor.reset(NewFixedPrefixTransform(FLAGS_prefix_size));
+ }
+ options.max_open_files = FLAGS_open_files;
+ options.statistics = dbstats;
+ options.env = db_stress_env;
+ options.use_fsync = FLAGS_use_fsync;
+ options.compaction_readahead_size = FLAGS_compaction_readahead_size;
+ options.allow_mmap_reads = FLAGS_mmap_read;
+ options.allow_mmap_writes = FLAGS_mmap_write;
+ options.use_direct_reads = FLAGS_use_direct_reads;
+ options.use_direct_io_for_flush_and_compaction =
+ FLAGS_use_direct_io_for_flush_and_compaction;
+ options.recycle_log_file_num =
+ static_cast<size_t>(FLAGS_recycle_log_file_num);
+ options.target_file_size_base = FLAGS_target_file_size_base;
+ options.target_file_size_multiplier = FLAGS_target_file_size_multiplier;
+ options.max_bytes_for_level_base = FLAGS_max_bytes_for_level_base;
+ options.max_bytes_for_level_multiplier = FLAGS_max_bytes_for_level_multiplier;
+ options.level0_stop_writes_trigger = FLAGS_level0_stop_writes_trigger;
+ options.level0_slowdown_writes_trigger = FLAGS_level0_slowdown_writes_trigger;
+ options.level0_file_num_compaction_trigger =
+ FLAGS_level0_file_num_compaction_trigger;
+ options.compression = compression_type_e;
+ options.bottommost_compression = bottommost_compression_type_e;
+ options.compression_opts.max_dict_bytes = FLAGS_compression_max_dict_bytes;
+ options.compression_opts.zstd_max_train_bytes =
+ FLAGS_compression_zstd_max_train_bytes;
+ options.compression_opts.parallel_threads =
+ FLAGS_compression_parallel_threads;
+ options.compression_opts.max_dict_buffer_bytes =
+ FLAGS_compression_max_dict_buffer_bytes;
+ if (ZSTD_FinalizeDictionarySupported()) {
+ options.compression_opts.use_zstd_dict_trainer =
+ FLAGS_compression_use_zstd_dict_trainer;
+ } else if (!FLAGS_compression_use_zstd_dict_trainer) {
+ fprintf(
+ stderr,
+ "WARNING: use_zstd_dict_trainer is false but zstd finalizeDictionary "
+ "cannot be used because ZSTD 1.4.5+ is not linked with the binary."
+ " zstd dictionary trainer will be used.\n");
+ }
+ options.max_manifest_file_size = FLAGS_max_manifest_file_size;
+ options.inplace_update_support = FLAGS_in_place_update;
+ options.max_subcompactions = static_cast<uint32_t>(FLAGS_subcompactions);
+ options.allow_concurrent_memtable_write =
+ FLAGS_allow_concurrent_memtable_write;
+ options.experimental_mempurge_threshold =
+ FLAGS_experimental_mempurge_threshold;
+ options.periodic_compaction_seconds = FLAGS_periodic_compaction_seconds;
+ options.stats_dump_period_sec =
+ static_cast<unsigned int>(FLAGS_stats_dump_period_sec);
+ options.ttl = FLAGS_compaction_ttl;
+ options.enable_pipelined_write = FLAGS_enable_pipelined_write;
+ options.enable_write_thread_adaptive_yield =
+ FLAGS_enable_write_thread_adaptive_yield;
+ options.compaction_options_universal.size_ratio = FLAGS_universal_size_ratio;
+ options.compaction_options_universal.min_merge_width =
+ FLAGS_universal_min_merge_width;
+ options.compaction_options_universal.max_merge_width =
+ FLAGS_universal_max_merge_width;
+ options.compaction_options_universal.max_size_amplification_percent =
+ FLAGS_universal_max_size_amplification_percent;
+ options.atomic_flush = FLAGS_atomic_flush;
+  options.manual_wal_flush = FLAGS_manual_wal_flush_one_in > 0;
+ options.avoid_unnecessary_blocking_io = FLAGS_avoid_unnecessary_blocking_io;
+ options.write_dbid_to_manifest = FLAGS_write_dbid_to_manifest;
+ options.avoid_flush_during_recovery = FLAGS_avoid_flush_during_recovery;
+ options.max_write_batch_group_size_bytes =
+ FLAGS_max_write_batch_group_size_bytes;
+ options.level_compaction_dynamic_level_bytes =
+ FLAGS_level_compaction_dynamic_level_bytes;
+ options.track_and_verify_wals_in_manifest = true;
+ options.verify_sst_unique_id_in_manifest =
+ FLAGS_verify_sst_unique_id_in_manifest;
+ options.memtable_protection_bytes_per_key =
+ FLAGS_memtable_protection_bytes_per_key;
+
+ // Integrated BlobDB
+ options.enable_blob_files = FLAGS_enable_blob_files;
+ options.min_blob_size = FLAGS_min_blob_size;
+ options.blob_file_size = FLAGS_blob_file_size;
+ options.blob_compression_type =
+ StringToCompressionType(FLAGS_blob_compression_type.c_str());
+ options.enable_blob_garbage_collection = FLAGS_enable_blob_garbage_collection;
+ options.blob_garbage_collection_age_cutoff =
+ FLAGS_blob_garbage_collection_age_cutoff;
+ options.blob_garbage_collection_force_threshold =
+ FLAGS_blob_garbage_collection_force_threshold;
+ options.blob_compaction_readahead_size = FLAGS_blob_compaction_readahead_size;
+ options.blob_file_starting_level = FLAGS_blob_file_starting_level;
+
+ if (FLAGS_use_blob_cache) {
+ if (FLAGS_use_shared_block_and_blob_cache) {
+ options.blob_cache = cache;
+ } else {
+ if (FLAGS_blob_cache_size > 0) {
+ LRUCacheOptions co;
+ co.capacity = FLAGS_blob_cache_size;
+ co.num_shard_bits = FLAGS_blob_cache_numshardbits;
+ options.blob_cache = NewLRUCache(co);
+ } else {
+ fprintf(stderr,
+ "Unable to create a standalone blob cache if blob_cache_size "
+ "<= 0.\n");
+ exit(1);
+ }
+ }
+ switch (FLAGS_prepopulate_blob_cache) {
+ case 0:
+ options.prepopulate_blob_cache = PrepopulateBlobCache::kDisable;
+ break;
+ case 1:
+ options.prepopulate_blob_cache = PrepopulateBlobCache::kFlushOnly;
+ break;
+ default:
+ fprintf(stderr, "Unknown prepopulate blob cache mode\n");
+ exit(1);
+ }
+ }
+
+ options.wal_compression =
+ StringToCompressionType(FLAGS_wal_compression.c_str());
+
+ if (FLAGS_enable_tiered_storage) {
+ options.bottommost_temperature = Temperature::kCold;
+ }
+ options.preclude_last_level_data_seconds =
+ FLAGS_preclude_last_level_data_seconds;
+ options.preserve_internal_time_seconds = FLAGS_preserve_internal_time_seconds;
+
+ switch (FLAGS_rep_factory) {
+ case kSkipList:
+ // no need to do anything
+ break;
+#ifndef ROCKSDB_LITE
+ case kHashSkipList:
+ options.memtable_factory.reset(NewHashSkipListRepFactory(10000));
+ break;
+ case kVectorRep:
+ options.memtable_factory.reset(new VectorRepFactory());
+ break;
+#else
+ default:
+      fprintf(stderr,
+              "RocksDBLite only supports the skip list memtable. Ignoring "
+              "--rep_factory\n");
+#endif // ROCKSDB_LITE
+ }
+
+ if (FLAGS_use_full_merge_v1) {
+ options.merge_operator = MergeOperators::CreateDeprecatedPutOperator();
+ } else {
+ options.merge_operator = MergeOperators::CreatePutOperator();
+ }
+
+ if (FLAGS_enable_compaction_filter) {
+ options.compaction_filter_factory =
+ std::make_shared<DbStressCompactionFilterFactory>();
+ }
+
+ options.best_efforts_recovery = FLAGS_best_efforts_recovery;
+ options.paranoid_file_checks = FLAGS_paranoid_file_checks;
+ options.fail_if_options_file_error = FLAGS_fail_if_options_file_error;
+
+ if (FLAGS_user_timestamp_size > 0) {
+ CheckAndSetOptionsForUserTimestamp(options);
+ }
+
+ options.allow_data_in_errors = FLAGS_allow_data_in_errors;
+}
+
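+// Applies the settings needed regardless of whether the options came from a
+// file or from flags: missing column family creation, statistics, env, block
+// caches and filter policy, rate limiter, file checksum factory,
+// SstFileManager, and the stress test's table properties collector.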
+void InitializeOptionsGeneral(
+ const std::shared_ptr<Cache>& cache,
+ const std::shared_ptr<Cache>& block_cache_compressed,
+ const std::shared_ptr<const FilterPolicy>& filter_policy,
+ Options& options) {
+ options.create_missing_column_families = true;
+ options.create_if_missing = true;
+
+ if (!options.statistics) {
+ options.statistics = dbstats;
+ }
+
+ if (options.env == Options().env) {
+ options.env = db_stress_env;
+ }
+
+ assert(options.table_factory);
+ auto table_options =
+ options.table_factory->GetOptions<BlockBasedTableOptions>();
+ if (table_options) {
+ if (FLAGS_cache_size > 0) {
+ table_options->block_cache = cache;
+ }
+ if (!table_options->block_cache_compressed &&
+ FLAGS_compressed_cache_size > 0) {
+ table_options->block_cache_compressed = block_cache_compressed;
+ }
+ if (!table_options->filter_policy) {
+ table_options->filter_policy = filter_policy;
+ }
+ }
+
+ // TODO: row_cache, thread-pool IO priority, CPU priority.
+
+ if (!options.rate_limiter) {
+ if (FLAGS_rate_limiter_bytes_per_sec > 0) {
+ options.rate_limiter.reset(NewGenericRateLimiter(
+ FLAGS_rate_limiter_bytes_per_sec, 1000 /* refill_period_us */,
+ 10 /* fairness */,
+ FLAGS_rate_limit_bg_reads ? RateLimiter::Mode::kReadsOnly
+ : RateLimiter::Mode::kWritesOnly));
+ }
+ }
+
+ if (!options.file_checksum_gen_factory) {
+ options.file_checksum_gen_factory =
+ GetFileChecksumImpl(FLAGS_file_checksum_impl);
+ }
+
+ if (FLAGS_sst_file_manager_bytes_per_sec > 0 ||
+ FLAGS_sst_file_manager_bytes_per_truncate > 0) {
+ Status status;
+ options.sst_file_manager.reset(NewSstFileManager(
+ db_stress_env, options.info_log, "" /* trash_dir */,
+ static_cast<int64_t>(FLAGS_sst_file_manager_bytes_per_sec),
+ true /* delete_existing_trash */, &status,
+ 0.25 /* max_trash_db_ratio */,
+ FLAGS_sst_file_manager_bytes_per_truncate));
+ if (!status.ok()) {
+ fprintf(stderr, "SstFileManager creation failed: %s\n",
+ status.ToString().c_str());
+ exit(1);
+ }
+ }
+
+ if (FLAGS_preserve_unverified_changes) {
+ if (!options.avoid_flush_during_recovery) {
+ fprintf(stderr,
+ "WARNING: flipping `avoid_flush_during_recovery` to true for "
+ "`preserve_unverified_changes` to keep all files\n");
+ options.avoid_flush_during_recovery = true;
+ }
+ // Together with `avoid_flush_during_recovery == true`, this will prevent
+ // live files from becoming obsolete and deleted between `DB::Open()` and
+ // `DisableFileDeletions()` due to flush or compaction. We do not need to
+ // warn the user since we will reenable compaction soon.
+ options.disable_auto_compactions = true;
+ }
+
+ options.table_properties_collector_factories.emplace_back(
+ std::make_shared<DbStressTablePropertiesCollectorFactory>());
+}
+
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/db_stress_test_base.h b/src/rocksdb/db_stress_tool/db_stress_test_base.h
new file mode 100644
index 000000000..81fbbe24b
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_test_base.h
@@ -0,0 +1,337 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifdef GFLAGS
+#pragma once
+
+#include "db_stress_tool/db_stress_common.h"
+#include "db_stress_tool/db_stress_shared_state.h"
+
+namespace ROCKSDB_NAMESPACE {
+class SystemClock;
+class Transaction;
+class TransactionDB;
+struct TransactionDBOptions;
+
+class StressTest {
+ public:
+ StressTest();
+
+ virtual ~StressTest();
+
+ std::shared_ptr<Cache> NewCache(size_t capacity, int32_t num_shard_bits);
+
+ static std::vector<std::string> GetBlobCompressionTags();
+
+ bool BuildOptionsTable();
+
+ void InitDb(SharedState*);
+ // The initialization work is split into two parts to avoid a circular
+ // dependency with `SharedState`.
+ virtual void FinishInitDb(SharedState*);
+ void TrackExpectedState(SharedState* shared);
+ void OperateDb(ThreadState* thread);
+ virtual void VerifyDb(ThreadState* thread) const = 0;
+ virtual void ContinuouslyVerifyDb(ThreadState* /*thread*/) const = 0;
+ void PrintStatistics();
+
+ protected:
+ Status AssertSame(DB* db, ColumnFamilyHandle* cf,
+ ThreadState::SnapshotState& snap_state);
+
+ // Currently PreloadDb has to be single-threaded.
+ void PreloadDbAndReopenAsReadOnly(int64_t number_of_keys,
+ SharedState* shared);
+
+ Status SetOptions(ThreadState* thread);
+
+#ifndef ROCKSDB_LITE
+  // For a TransactionDB, there can be txns that were prepared but not yet
+  // committed right before the previous stress run crashed.
+  // They will be recovered and processed through
+  // ProcessRecoveredPreparedTxnsHelper at the start of the current stress run.
+ void ProcessRecoveredPreparedTxns(SharedState* shared);
+
+  // The default implementation will first update ExpectedState to
+  // `SharedState::UNKNOWN` for each key in `txn` and then randomly
+ // commit or rollback `txn`.
+ virtual void ProcessRecoveredPreparedTxnsHelper(Transaction* txn,
+ SharedState* shared);
+
+ Status NewTxn(WriteOptions& write_opts, Transaction** txn);
+
+ Status CommitTxn(Transaction* txn, ThreadState* thread = nullptr);
+
+ Status RollbackTxn(Transaction* txn);
+#endif
+
+ virtual void MaybeClearOneColumnFamily(ThreadState* /* thread */) {}
+
+ virtual bool ShouldAcquireMutexOnKey() const { return false; }
+
+ // Returns true if DB state is tracked by the stress test.
+ virtual bool IsStateTracked() const = 0;
+
+ virtual std::vector<int> GenerateColumnFamilies(
+ const int /* num_column_families */, int rand_column_family) const {
+ return {rand_column_family};
+ }
+
+ virtual std::vector<int64_t> GenerateKeys(int64_t rand_key) const {
+ return {rand_key};
+ }
+
+ virtual Status TestGet(ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) = 0;
+
+ virtual std::vector<Status> TestMultiGet(
+ ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) = 0;
+
+ virtual Status TestPrefixScan(ThreadState* thread,
+ const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) = 0;
+
+ virtual Status TestPut(ThreadState* thread, WriteOptions& write_opts,
+ const ReadOptions& read_opts,
+ const std::vector<int>& cf_ids,
+ const std::vector<int64_t>& keys,
+ char (&value)[100]) = 0;
+
+ virtual Status TestDelete(ThreadState* thread, WriteOptions& write_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) = 0;
+
+ virtual Status TestDeleteRange(ThreadState* thread, WriteOptions& write_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) = 0;
+
+ virtual void TestIngestExternalFile(
+ ThreadState* thread, const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) = 0;
+
+ // Issue compact range, starting with start_key, whose integer value
+ // is rand_key.
+ virtual void TestCompactRange(ThreadState* thread, int64_t rand_key,
+ const Slice& start_key,
+ ColumnFamilyHandle* column_family);
+
+ // Calculate a hash value for all keys in range [start_key, end_key]
+ // at a certain snapshot.
+ uint32_t GetRangeHash(ThreadState* thread, const Snapshot* snapshot,
+ ColumnFamilyHandle* column_family,
+ const Slice& start_key, const Slice& end_key);
+
+  // Return a column family handle that mirrors the one identified by
+  // `column_family_id`; it will be used to validate that the data is correct.
+ // By default, the column family itself will be returned.
+ virtual ColumnFamilyHandle* GetControlCfh(ThreadState* /* thread*/,
+ int column_family_id) {
+ return column_families_[column_family_id];
+ }
+
+#ifndef ROCKSDB_LITE
+  // Generate a list of keys close to the boundaries of SST file keys.
+  // If there isn't any SST file in the DB, return an empty list.
+ std::vector<std::string> GetWhiteBoxKeys(ThreadState* thread, DB* db,
+ ColumnFamilyHandle* cfh,
+ size_t num_keys);
+#else // !ROCKSDB_LITE
+ std::vector<std::string> GetWhiteBoxKeys(ThreadState*, DB*,
+ ColumnFamilyHandle*, size_t) {
+ // Not supported in LITE mode.
+ return {};
+ }
+#endif // !ROCKSDB_LITE
+
+ // Given a key K, this creates an iterator which scans to K and then
+ // does a random sequence of Next/Prev operations.
+ virtual Status TestIterate(ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys);
+
+ virtual Status TestIterateAgainstExpected(
+ ThreadState* /* thread */, const ReadOptions& /* read_opts */,
+ const std::vector<int>& /* rand_column_families */,
+ const std::vector<int64_t>& /* rand_keys */) {
+ return Status::NotSupported();
+ }
+
+ // Enum used by VerifyIterator() to identify the mode to validate.
+ enum LastIterateOp {
+ kLastOpSeek,
+ kLastOpSeekForPrev,
+ kLastOpNextOrPrev,
+ kLastOpSeekToFirst,
+ kLastOpSeekToLast
+ };
+
+  // Compare the two iterators, iter and cmp_iter, which are expected to be at
+  // the same position, unless iter may have been invalidated or left undefined
+  // because of upper or lower bounds, or the prefix extractor.
+  // Flags a failure if the verification fails.
+  // *diverged is set to true once the two iterators have diverged.
+  // op_logs is the information to print when validation fails.
+ void VerifyIterator(ThreadState* thread, ColumnFamilyHandle* cmp_cfh,
+ const ReadOptions& ro, Iterator* iter, Iterator* cmp_iter,
+ LastIterateOp op, const Slice& seek_key,
+ const std::string& op_logs, bool* diverged);
+
+ virtual Status TestBackupRestore(ThreadState* thread,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys);
+
+ virtual Status TestCheckpoint(ThreadState* thread,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys);
+
+ void TestCompactFiles(ThreadState* thread, ColumnFamilyHandle* column_family);
+
+ Status TestFlush(const std::vector<int>& rand_column_families);
+
+ Status TestPauseBackground(ThreadState* thread);
+
+ void TestAcquireSnapshot(ThreadState* thread, int rand_column_family,
+ const std::string& keystr, uint64_t i);
+
+ Status MaybeReleaseSnapshots(ThreadState* thread, uint64_t i);
+#ifndef ROCKSDB_LITE
+ Status VerifyGetLiveFiles() const;
+ Status VerifyGetSortedWalFiles() const;
+ Status VerifyGetCurrentWalFile() const;
+ void TestGetProperty(ThreadState* thread) const;
+
+ virtual Status TestApproximateSize(
+ ThreadState* thread, uint64_t iteration,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys);
+#endif // !ROCKSDB_LITE
+
+ virtual Status TestCustomOperations(
+ ThreadState* /*thread*/,
+ const std::vector<int>& /*rand_column_families*/) {
+ return Status::NotSupported("TestCustomOperations() must be overridden");
+ }
+
+ void VerificationAbort(SharedState* shared, std::string msg, Status s) const;
+
+ void VerificationAbort(SharedState* shared, std::string msg, int cf,
+ int64_t key) const;
+
+ void VerificationAbort(SharedState* shared, std::string msg, int cf,
+ int64_t key, Slice value_from_db,
+ Slice value_from_expected) const;
+
+ void VerificationAbort(SharedState* shared, int cf, int64_t key,
+ const Slice& value, const WideColumns& columns,
+ const WideColumns& expected_columns) const;
+
+ static std::string DebugString(const Slice& value, const WideColumns& columns,
+ const WideColumns& expected_columns);
+
+ void PrintEnv() const;
+
+ void Open(SharedState* shared);
+
+ void Reopen(ThreadState* thread);
+
+ virtual void RegisterAdditionalListeners() {}
+
+#ifndef ROCKSDB_LITE
+ virtual void PrepareTxnDbOptions(SharedState* /*shared*/,
+ TransactionDBOptions& /*txn_db_opts*/) {}
+#endif
+
+ // Returns whether the timestamp of read_opts is updated.
+ bool MaybeUseOlderTimestampForPointLookup(ThreadState* thread,
+ std::string& ts_str,
+ Slice& ts_slice,
+ ReadOptions& read_opts);
+
+ void MaybeUseOlderTimestampForRangeScan(ThreadState* thread,
+ std::string& ts_str, Slice& ts_slice,
+ ReadOptions& read_opts);
+
+ std::shared_ptr<Cache> cache_;
+ std::shared_ptr<Cache> compressed_cache_;
+ std::shared_ptr<const FilterPolicy> filter_policy_;
+ DB* db_;
+#ifndef ROCKSDB_LITE
+ TransactionDB* txn_db_;
+#endif
+
+ // Currently only used in MultiOpsTxnsStressTest
+ std::atomic<DB*> db_aptr_;
+
+ Options options_;
+ SystemClock* clock_;
+ std::vector<ColumnFamilyHandle*> column_families_;
+ std::vector<std::string> column_family_names_;
+ std::atomic<int> new_column_family_name_;
+ int num_times_reopened_;
+ std::unordered_map<std::string, std::vector<std::string>> options_table_;
+ std::vector<std::string> options_index_;
+ std::atomic<bool> db_preload_finished_;
+
+ // Fields used for continuous verification from another thread
+ DB* cmp_db_;
+ std::vector<ColumnFamilyHandle*> cmp_cfhs_;
+ bool is_db_stopped_;
+};
+
+// Load options from OPTIONS file and populate `options`.
+extern bool InitializeOptionsFromFile(Options& options);
+
+// Initialize `options` using command line arguments.
+// When this function is called, `cache`, `block_cache_compressed`,
+// `filter_policy` have all been initialized. Therefore, we just pass them as
+// input arguments.
+extern void InitializeOptionsFromFlags(
+ const std::shared_ptr<Cache>& cache,
+ const std::shared_ptr<Cache>& block_cache_compressed,
+ const std::shared_ptr<const FilterPolicy>& filter_policy, Options& options);
+
+// Initialize `options` on which `InitializeOptionsFromFile()` and
+// `InitializeOptionsFromFlags()` have both been called already.
+// There are two cases.
+// Case 1: OPTIONS file is not specified. Command line arguments have been used
+// to initialize `options`. InitializeOptionsGeneral() will use
+// `cache`, `block_cache_compressed` and `filter_policy` to initialize
+// corresponding fields of `options`. InitializeOptionsGeneral() will
+// also set up other fields of `options` so that stress test can run.
+// Examples include `create_if_missing` and
+// `create_missing_column_families`, etc.
+// Case 2: OPTIONS file is specified. It is possible that, after loading from
+// the given OPTIONS files, some shared object fields are still not
+// initialized because they are not set in the OPTIONS file. In this
+// case, if command line arguments indicate that the user wants to set
+// up such shared objects, e.g. block cache, compressed block cache,
+// row cache, filter policy, then InitializeOptionsGeneral() will honor
+// the user's choice, thus passing `cache`, `block_cache_compressed`,
+// `filter_policy` as input arguments.
+//
+// InitializeOptionsGeneral() must not overwrite fields of `options` loaded
+// from the OPTIONS file.
+extern void InitializeOptionsGeneral(
+ const std::shared_ptr<Cache>& cache,
+ const std::shared_ptr<Cache>& block_cache_compressed,
+ const std::shared_ptr<const FilterPolicy>& filter_policy, Options& options);
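+
+// Illustrative only (not part of the upstream sources): a minimal sketch of
+// one plausible call order for the three initialization helpers above,
+// assuming `cache`, `block_cache_compressed` and `filter_policy` were created
+// elsewhere (e.g. via StressTest::NewCache()).
+inline void InitializeOptionsExampleSketch(
+    const std::shared_ptr<Cache>& cache,
+    const std::shared_ptr<Cache>& block_cache_compressed,
+    const std::shared_ptr<const FilterPolicy>& filter_policy,
+    Options& options) {
+  if (!InitializeOptionsFromFile(options)) {
+    // No OPTIONS file was specified (Case 1): command line flags fully
+    // determine the options.
+    InitializeOptionsFromFlags(cache, block_cache_compressed, filter_policy,
+                               options);
+  }
+  // In both cases, finish with the general setup (shared objects such as the
+  // block cache, plus `create_if_missing`, etc.). It must not overwrite
+  // fields loaded from the OPTIONS file.
+  InitializeOptionsGeneral(cache, block_cache_compressed, filter_policy,
+                           options);
+}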
+
+// If no OPTIONS file is specified, set up `options` so that we can test
+// user-defined timestamps, which require `-user_timestamp_size=8`.
+// This function also checks for features that are currently known to be
+// incompatible with user-defined timestamps.
+extern void CheckAndSetOptionsForUserTimestamp(Options& options);
+
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/db_stress_tool.cc b/src/rocksdb/db_stress_tool/db_stress_tool.cc
new file mode 100644
index 000000000..6c5e952db
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/db_stress_tool.cc
@@ -0,0 +1,365 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// The test uses an array to compare against values written to the database.
+// Values written to the array are in 1:1 correspondence with the actual values
+// in the database according to the formula in the function GenerateValue.
+
+// Space is reserved in the array from 0 to FLAGS_max_key and values are
+// randomly written/deleted/read from those positions. During verification we
+// compare all the positions in the array. To shorten/elongate the running
+// time, you could change the settings: FLAGS_max_key, FLAGS_ops_per_thread,
+// (sometimes also FLAGS_threads).
+//
+// NOTE that if FLAGS_test_batches_snapshots is set, the test will have
+// different behavior. See comment of the flag for details.
+
+#ifdef GFLAGS
+#include "db_stress_tool/db_stress_common.h"
+#include "db_stress_tool/db_stress_driver.h"
+#include "rocksdb/convenience.h"
+#include "utilities/fault_injection_fs.h"
+
+namespace ROCKSDB_NAMESPACE {
+namespace {
+static std::shared_ptr<ROCKSDB_NAMESPACE::Env> env_guard;
+static std::shared_ptr<ROCKSDB_NAMESPACE::DbStressEnvWrapper> env_wrapper_guard;
+static std::shared_ptr<ROCKSDB_NAMESPACE::DbStressEnvWrapper>
+ dbsl_env_wrapper_guard;
+static std::shared_ptr<CompositeEnvWrapper> fault_env_guard;
+} // namespace
+
+KeyGenContext key_gen_ctx;
+
+int db_stress_tool(int argc, char** argv) {
+ SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
+ " [OPTIONS]...");
+ ParseCommandLineFlags(&argc, &argv, true);
+
+ SanitizeDoubleParam(&FLAGS_bloom_bits);
+ SanitizeDoubleParam(&FLAGS_memtable_prefix_bloom_size_ratio);
+ SanitizeDoubleParam(&FLAGS_max_bytes_for_level_multiplier);
+
+#ifndef NDEBUG
+ if (FLAGS_mock_direct_io) {
+ SetupSyncPointsToMockDirectIO();
+ }
+#endif
+ if (FLAGS_statistics) {
+ dbstats = ROCKSDB_NAMESPACE::CreateDBStatistics();
+ if (FLAGS_test_secondary) {
+ dbstats_secondaries = ROCKSDB_NAMESPACE::CreateDBStatistics();
+ }
+ }
+ compression_type_e = StringToCompressionType(FLAGS_compression_type.c_str());
+ bottommost_compression_type_e =
+ StringToCompressionType(FLAGS_bottommost_compression_type.c_str());
+ checksum_type_e = StringToChecksumType(FLAGS_checksum_type.c_str());
+
+ Env* raw_env;
+
+ int env_opts = !FLAGS_env_uri.empty() + !FLAGS_fs_uri.empty();
+ if (env_opts > 1) {
+ fprintf(stderr, "Error: --env_uri and --fs_uri are mutually exclusive\n");
+ exit(1);
+ }
+
+ Status s = Env::CreateFromUri(ConfigOptions(), FLAGS_env_uri, FLAGS_fs_uri,
+ &raw_env, &env_guard);
+ if (!s.ok()) {
+ fprintf(stderr, "Error Creating Env URI: %s: %s\n", FLAGS_env_uri.c_str(),
+ s.ToString().c_str());
+ exit(1);
+ }
+ dbsl_env_wrapper_guard = std::make_shared<DbStressEnvWrapper>(raw_env);
+ db_stress_listener_env = dbsl_env_wrapper_guard.get();
+
+ if (FLAGS_read_fault_one_in || FLAGS_sync_fault_injection ||
+ FLAGS_write_fault_one_in || FLAGS_open_metadata_write_fault_one_in ||
+ FLAGS_open_write_fault_one_in || FLAGS_open_read_fault_one_in) {
+ FaultInjectionTestFS* fs =
+ new FaultInjectionTestFS(raw_env->GetFileSystem());
+ fault_fs_guard.reset(fs);
+ if (FLAGS_write_fault_one_in) {
+ fault_fs_guard->SetFilesystemDirectWritable(false);
+ } else {
+ fault_fs_guard->SetFilesystemDirectWritable(true);
+ }
+ fault_env_guard =
+ std::make_shared<CompositeEnvWrapper>(raw_env, fault_fs_guard);
+ raw_env = fault_env_guard.get();
+ }
+
+ env_wrapper_guard = std::make_shared<DbStressEnvWrapper>(raw_env);
+ db_stress_env = env_wrapper_guard.get();
+
+ if (FLAGS_write_fault_one_in) {
+    // In the write injection case, we need to use the FS interface, which
+    // returns an IOStatus with different errors and flags. Therefore,
+    // DbStressEnvWrapper cannot be used, as it would swallow the FS
+    // implementation. We should directly use raw_env, which is the
+    // CompositeEnvWrapper of env and fault_fs.
+ db_stress_env = raw_env;
+ }
+
+ FLAGS_rep_factory = StringToRepFactory(FLAGS_memtablerep.c_str());
+
+  // The number of background threads should be at least as large as the
+  // max number of concurrent compactions.
+ db_stress_env->SetBackgroundThreads(FLAGS_max_background_compactions,
+ ROCKSDB_NAMESPACE::Env::Priority::LOW);
+ db_stress_env->SetBackgroundThreads(FLAGS_num_bottom_pri_threads,
+ ROCKSDB_NAMESPACE::Env::Priority::BOTTOM);
+ if (FLAGS_prefixpercent > 0 && FLAGS_prefix_size < 0) {
+ fprintf(stderr,
+ "Error: prefixpercent is non-zero while prefix_size is "
+ "not positive!\n");
+ exit(1);
+ }
+ if (FLAGS_test_batches_snapshots && FLAGS_prefix_size <= 0) {
+ fprintf(stderr,
+ "Error: please specify prefix_size for "
+ "test_batches_snapshots test!\n");
+ exit(1);
+ }
+ if (FLAGS_memtable_prefix_bloom_size_ratio > 0.0 && FLAGS_prefix_size < 0 &&
+ !FLAGS_memtable_whole_key_filtering) {
+ fprintf(stderr,
+ "Error: please specify positive prefix_size or enable whole key "
+ "filtering in order to use memtable_prefix_bloom_size_ratio\n");
+ exit(1);
+ }
+ if ((FLAGS_readpercent + FLAGS_prefixpercent + FLAGS_writepercent +
+ FLAGS_delpercent + FLAGS_delrangepercent + FLAGS_iterpercent +
+ FLAGS_customopspercent) != 100) {
+ fprintf(
+ stderr,
+ "Error: "
+ "Read(-readpercent=%d)+Prefix(-prefixpercent=%d)+Write(-writepercent=%"
+ "d)+Delete(-delpercent=%d)+DeleteRange(-delrangepercent=%d)"
+ "+Iterate(-iterpercent=%d)+CustomOps(-customopspercent=%d) percents != "
+ "100!\n",
+ FLAGS_readpercent, FLAGS_prefixpercent, FLAGS_writepercent,
+ FLAGS_delpercent, FLAGS_delrangepercent, FLAGS_iterpercent,
+ FLAGS_customopspercent);
+ exit(1);
+ }
+ if (FLAGS_disable_wal == 1 && FLAGS_reopen > 0) {
+ fprintf(stderr, "Error: Db cannot reopen safely with disable_wal set!\n");
+ exit(1);
+ }
+ if ((unsigned)FLAGS_reopen >= FLAGS_ops_per_thread) {
+ fprintf(stderr,
+ "Error: #DB-reopens should be < ops_per_thread\n"
+ "Provided reopens = %d and ops_per_thread = %lu\n",
+ FLAGS_reopen, (unsigned long)FLAGS_ops_per_thread);
+ exit(1);
+ }
+ if (FLAGS_test_batches_snapshots && FLAGS_delrangepercent > 0) {
+ fprintf(stderr,
+ "Error: nonzero delrangepercent unsupported in "
+ "test_batches_snapshots mode\n");
+ exit(1);
+ }
+ if (FLAGS_active_width > FLAGS_max_key) {
+ fprintf(stderr, "Error: active_width can be at most max_key\n");
+ exit(1);
+ } else if (FLAGS_active_width == 0) {
+ FLAGS_active_width = FLAGS_max_key;
+ }
+ if (FLAGS_value_size_mult * kRandomValueMaxFactor > kValueMaxLen) {
+ fprintf(stderr, "Error: value_size_mult can be at most %d\n",
+ kValueMaxLen / kRandomValueMaxFactor);
+ exit(1);
+ }
+ if (FLAGS_use_merge && FLAGS_nooverwritepercent == 100) {
+ fprintf(
+ stderr,
+ "Error: nooverwritepercent must not be 100 when using merge operands");
+ exit(1);
+ }
+ if (FLAGS_ingest_external_file_one_in > 0 &&
+ FLAGS_nooverwritepercent == 100) {
+ fprintf(
+ stderr,
+ "Error: nooverwritepercent must not be 100 when using file ingestion");
+ exit(1);
+ }
+ if (FLAGS_clear_column_family_one_in > 0 && FLAGS_backup_one_in > 0) {
+ fprintf(stderr,
+ "Error: clear_column_family_one_in must be 0 when using backup\n");
+ exit(1);
+ }
+ if (FLAGS_test_cf_consistency && FLAGS_disable_wal) {
+ FLAGS_atomic_flush = true;
+ }
+
+ if (FLAGS_read_only) {
+ if (FLAGS_writepercent != 0 || FLAGS_delpercent != 0 ||
+ FLAGS_delrangepercent != 0) {
+ fprintf(stderr, "Error: updates are not supported in read only mode\n");
+ exit(1);
+ } else if (FLAGS_checkpoint_one_in > 0 &&
+ FLAGS_clear_column_family_one_in > 0) {
+ fprintf(stdout,
+ "Warn: checkpoint won't be validated since column families may "
+ "be dropped.\n");
+ }
+ }
+
+ // Choose a location for the test database if none given with --db=<path>
+ if (FLAGS_db.empty()) {
+ std::string default_db_path;
+ db_stress_env->GetTestDirectory(&default_db_path);
+ default_db_path += "/dbstress";
+ FLAGS_db = default_db_path;
+ }
+
+ if ((FLAGS_test_secondary || FLAGS_continuous_verification_interval > 0) &&
+ FLAGS_secondaries_base.empty()) {
+ std::string default_secondaries_path;
+ db_stress_env->GetTestDirectory(&default_secondaries_path);
+ default_secondaries_path += "/dbstress_secondaries";
+ s = db_stress_env->CreateDirIfMissing(default_secondaries_path);
+ if (!s.ok()) {
+ fprintf(stderr, "Failed to create directory %s: %s\n",
+ default_secondaries_path.c_str(), s.ToString().c_str());
+ exit(1);
+ }
+ FLAGS_secondaries_base = default_secondaries_path;
+ }
+
+ if (FLAGS_best_efforts_recovery && !FLAGS_skip_verifydb &&
+ !FLAGS_disable_wal) {
+ fprintf(stderr,
+ "With best-efforts recovery, either skip_verifydb or disable_wal "
+ "should be set to true.\n");
+ exit(1);
+ }
+ if (FLAGS_skip_verifydb) {
+ if (FLAGS_verify_db_one_in > 0) {
+ fprintf(stderr,
+ "Must set -verify_db_one_in=0 if skip_verifydb is true.\n");
+ exit(1);
+ }
+ if (FLAGS_continuous_verification_interval > 0) {
+ fprintf(stderr,
+ "Must set -continuous_verification_interval=0 if skip_verifydb "
+ "is true.\n");
+ exit(1);
+ }
+ }
+ if (FLAGS_enable_compaction_filter &&
+ (FLAGS_acquire_snapshot_one_in > 0 || FLAGS_compact_range_one_in > 0 ||
+ FLAGS_iterpercent > 0 || FLAGS_test_batches_snapshots ||
+ FLAGS_test_cf_consistency)) {
+ fprintf(
+ stderr,
+ "Error: acquire_snapshot_one_in, compact_range_one_in, iterpercent, "
+ "test_batches_snapshots must all be 0 when using compaction filter\n");
+ exit(1);
+ }
+ if (FLAGS_test_multi_ops_txns) {
+ CheckAndSetOptionsForMultiOpsTxnStressTest();
+ }
+
+ if (FLAGS_create_timestamped_snapshot_one_in > 0) {
+ if (!FLAGS_use_txn) {
+ fprintf(stderr, "timestamped snapshot supported only in TransactionDB\n");
+ exit(1);
+ } else if (FLAGS_txn_write_policy != 0) {
+ fprintf(stderr,
+ "timestamped snapshot supported only in write-committed\n");
+ exit(1);
+ }
+ }
+
+ if (FLAGS_preserve_unverified_changes && FLAGS_reopen != 0) {
+ fprintf(stderr,
+ "Reopen DB is incompatible with preserving unverified changes\n");
+ exit(1);
+ }
+
+ if (FLAGS_use_txn && FLAGS_sync_fault_injection &&
+ FLAGS_txn_write_policy != 0) {
+ fprintf(stderr,
+ "For TransactionDB, correctness testing with unsync data loss is "
+ "currently compatible with only write committed policy\n");
+ exit(1);
+ }
+
+ if (FLAGS_use_put_entity_one_in > 0 &&
+ (FLAGS_ingest_external_file_one_in > 0 || FLAGS_use_merge ||
+ FLAGS_use_full_merge_v1 || FLAGS_use_txn || FLAGS_test_multi_ops_txns ||
+ FLAGS_user_timestamp_size > 0)) {
+ fprintf(stderr,
+ "PutEntity is currently incompatible with SstFileWriter, Merge,"
+ " transactions, and user-defined timestamps\n");
+ exit(1);
+ }
+
+#ifndef NDEBUG
+ KillPoint* kp = KillPoint::GetInstance();
+ kp->rocksdb_kill_odds = FLAGS_kill_random_test;
+ kp->rocksdb_kill_exclude_prefixes = SplitString(FLAGS_kill_exclude_prefixes);
+#endif
+
+ unsigned int levels = FLAGS_max_key_len;
+ std::vector<std::string> weights;
+ uint64_t scale_factor = FLAGS_key_window_scale_factor;
+ key_gen_ctx.window = scale_factor * 100;
+ if (!FLAGS_key_len_percent_dist.empty()) {
+ weights = SplitString(FLAGS_key_len_percent_dist);
+ if (weights.size() != levels) {
+ fprintf(stderr,
+ "Number of weights in key_len_dist should be equal to"
+ " max_key_len");
+ exit(1);
+ }
+
+ uint64_t total_weight = 0;
+ for (std::string& weight : weights) {
+ uint64_t val = std::stoull(weight);
+ key_gen_ctx.weights.emplace_back(val * scale_factor);
+ total_weight += val;
+ }
+ if (total_weight != 100) {
+ fprintf(stderr, "Sum of all weights in key_len_dist should be 100");
+ exit(1);
+ }
+ } else {
+ uint64_t keys_per_level = key_gen_ctx.window / levels;
+ for (unsigned int level = 0; level + 1 < levels; ++level) {
+ key_gen_ctx.weights.emplace_back(keys_per_level);
+ }
+ key_gen_ctx.weights.emplace_back(key_gen_ctx.window -
+ keys_per_level * (levels - 1));
+ }
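+  // For example (illustrative numbers): with FLAGS_max_key_len=3,
+  // FLAGS_key_window_scale_factor=10 and FLAGS_key_len_percent_dist="60,30,10",
+  // the window is 1000 and the weights become {600, 300, 100}. Without a
+  // distribution the window is split evenly, e.g. {333, 333, 334} for three
+  // levels.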
+
+ std::unique_ptr<ROCKSDB_NAMESPACE::StressTest> stress;
+ if (FLAGS_test_cf_consistency) {
+ stress.reset(CreateCfConsistencyStressTest());
+ } else if (FLAGS_test_batches_snapshots) {
+ stress.reset(CreateBatchedOpsStressTest());
+ } else if (FLAGS_test_multi_ops_txns) {
+ stress.reset(CreateMultiOpsTxnsStressTest());
+ } else {
+ stress.reset(CreateNonBatchedOpsStressTest());
+ }
+ // Initialize the Zipfian pre-calculated array
+ InitializeHotKeyGenerator(FLAGS_hot_key_alpha);
+ if (RunStressTest(stress.get())) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/expected_state.cc b/src/rocksdb/db_stress_tool/expected_state.cc
new file mode 100644
index 000000000..d08403b76
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/expected_state.cc
@@ -0,0 +1,761 @@
+// Copyright (c) 2021-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#ifdef GFLAGS
+
+#include "db_stress_tool/expected_state.h"
+
+#include "db/wide/wide_column_serialization.h"
+#include "db_stress_tool/db_stress_common.h"
+#include "db_stress_tool/db_stress_shared_state.h"
+#include "rocksdb/trace_reader_writer.h"
+#include "rocksdb/trace_record_result.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+ExpectedState::ExpectedState(size_t max_key, size_t num_column_families)
+ : max_key_(max_key),
+ num_column_families_(num_column_families),
+ values_(nullptr) {}
+
+void ExpectedState::ClearColumnFamily(int cf) {
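+  // `Value(cf + 1, 0)` is the one-past-the-end slot for column family `cf` in
+  // the flat expected-values array, so this resets every key of `cf` to the
+  // deletion sentinel.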
+ std::fill(&Value(cf, 0 /* key */), &Value(cf + 1, 0 /* key */),
+ SharedState::DELETION_SENTINEL);
+}
+
+void ExpectedState::Put(int cf, int64_t key, uint32_t value_base,
+ bool pending) {
+ if (!pending) {
+ // prevent expected-value update from reordering before Write
+ std::atomic_thread_fence(std::memory_order_release);
+ }
+ Value(cf, key).store(pending ? SharedState::UNKNOWN_SENTINEL : value_base,
+ std::memory_order_relaxed);
+ if (pending) {
+ // prevent Write from reordering before expected-value update
+ std::atomic_thread_fence(std::memory_order_release);
+ }
+}
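+
+// Typical usage of the `pending` flag in the stress test (an illustrative
+// sketch, not a verbatim excerpt): mark the key pending before issuing the DB
+// write, then record the final value base once the write is known to have
+// finished, e.g.
+//
+//   expected->Put(cf, key, value_base, true /* pending */);
+//   Status s = db->Put(write_opts, cfh, key_slice, value_slice);
+//   if (s.ok()) {
+//     expected->Put(cf, key, value_base, false /* pending */);
+//   }
+//
+// If the process crashes in between, the key stays at UNKNOWN_SENTINEL, which
+// the verification logic is designed to tolerate.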
+
+uint32_t ExpectedState::Get(int cf, int64_t key) const {
+ return Value(cf, key);
+}
+
+bool ExpectedState::Delete(int cf, int64_t key, bool pending) {
+ if (Value(cf, key) == SharedState::DELETION_SENTINEL) {
+ return false;
+ }
+ Put(cf, key, SharedState::DELETION_SENTINEL, pending);
+ return true;
+}
+
+bool ExpectedState::SingleDelete(int cf, int64_t key, bool pending) {
+ return Delete(cf, key, pending);
+}
+
+int ExpectedState::DeleteRange(int cf, int64_t begin_key, int64_t end_key,
+ bool pending) {
+ int covered = 0;
+ for (int64_t key = begin_key; key < end_key; ++key) {
+ if (Delete(cf, key, pending)) {
+ ++covered;
+ }
+ }
+ return covered;
+}
+
+bool ExpectedState::Exists(int cf, int64_t key) {
+  // UNKNOWN_SENTINEL counts as exists. That ensures a key for which overwrite
+  // is disallowed can't be accidentally added a second time, in which case
+ // SingleDelete wouldn't be able to properly delete the key. It does allow
+ // the case where a SingleDelete might be added which covers nothing, but
+ // that's not a correctness issue.
+ uint32_t expected_value = Value(cf, key).load();
+ return expected_value != SharedState::DELETION_SENTINEL;
+}
+
+void ExpectedState::Reset() {
+ for (size_t i = 0; i < num_column_families_; ++i) {
+ for (size_t j = 0; j < max_key_; ++j) {
+ Value(static_cast<int>(i), j)
+ .store(SharedState::DELETION_SENTINEL, std::memory_order_relaxed);
+ }
+ }
+}
+
+FileExpectedState::FileExpectedState(std::string expected_state_file_path,
+ size_t max_key, size_t num_column_families)
+ : ExpectedState(max_key, num_column_families),
+ expected_state_file_path_(expected_state_file_path) {}
+
+Status FileExpectedState::Open(bool create) {
+ size_t expected_values_size = GetValuesLen();
+
+ Env* default_env = Env::Default();
+
+ Status status;
+ if (create) {
+ std::unique_ptr<WritableFile> wfile;
+ const EnvOptions soptions;
+ status = default_env->NewWritableFile(expected_state_file_path_, &wfile,
+ soptions);
+ if (status.ok()) {
+ std::string buf(expected_values_size, '\0');
+ status = wfile->Append(buf);
+ }
+ }
+ if (status.ok()) {
+ status = default_env->NewMemoryMappedFileBuffer(
+ expected_state_file_path_, &expected_state_mmap_buffer_);
+ }
+ if (status.ok()) {
+ assert(expected_state_mmap_buffer_->GetLen() == expected_values_size);
+ values_ = static_cast<std::atomic<uint32_t>*>(
+ expected_state_mmap_buffer_->GetBase());
+ assert(values_ != nullptr);
+ if (create) {
+ Reset();
+ }
+ } else {
+ assert(values_ == nullptr);
+ }
+ return status;
+}
+
+AnonExpectedState::AnonExpectedState(size_t max_key, size_t num_column_families)
+ : ExpectedState(max_key, num_column_families) {}
+
+#ifndef NDEBUG
+Status AnonExpectedState::Open(bool create) {
+#else
+Status AnonExpectedState::Open(bool /* create */) {
+#endif
+ // AnonExpectedState only supports being freshly created.
+ assert(create);
+ values_allocation_.reset(
+ new std::atomic<uint32_t>[GetValuesLen() /
+ sizeof(std::atomic<uint32_t>)]);
+ values_ = &values_allocation_[0];
+ Reset();
+ return Status::OK();
+}
+
+ExpectedStateManager::ExpectedStateManager(size_t max_key,
+ size_t num_column_families)
+ : max_key_(max_key),
+ num_column_families_(num_column_families),
+ latest_(nullptr) {}
+
+ExpectedStateManager::~ExpectedStateManager() {}
+
+const std::string FileExpectedStateManager::kLatestBasename = "LATEST";
+const std::string FileExpectedStateManager::kStateFilenameSuffix = ".state";
+const std::string FileExpectedStateManager::kTraceFilenameSuffix = ".trace";
+const std::string FileExpectedStateManager::kTempFilenamePrefix = ".";
+const std::string FileExpectedStateManager::kTempFilenameSuffix = ".tmp";
+
+FileExpectedStateManager::FileExpectedStateManager(
+ size_t max_key, size_t num_column_families,
+ std::string expected_state_dir_path)
+ : ExpectedStateManager(max_key, num_column_families),
+ expected_state_dir_path_(std::move(expected_state_dir_path)) {
+ assert(!expected_state_dir_path_.empty());
+}
+
+Status FileExpectedStateManager::Open() {
+ // Before doing anything, sync directory state with ours. That is, determine
+ // `saved_seqno_`, and create any necessary missing files.
+ std::vector<std::string> expected_state_dir_children;
+ Status s = Env::Default()->GetChildren(expected_state_dir_path_,
+ &expected_state_dir_children);
+ bool found_trace = false;
+ if (s.ok()) {
+ for (size_t i = 0; i < expected_state_dir_children.size(); ++i) {
+ const auto& filename = expected_state_dir_children[i];
+ if (filename.size() >= kStateFilenameSuffix.size() &&
+ filename.rfind(kStateFilenameSuffix) ==
+ filename.size() - kStateFilenameSuffix.size() &&
+ filename.rfind(kLatestBasename, 0) == std::string::npos) {
+ SequenceNumber found_seqno = ParseUint64(
+ filename.substr(0, filename.size() - kStateFilenameSuffix.size()));
+ if (saved_seqno_ == kMaxSequenceNumber || found_seqno > saved_seqno_) {
+ saved_seqno_ = found_seqno;
+ }
+ }
+ }
+ // Check if crash happened after creating state file but before creating
+ // trace file.
+ if (saved_seqno_ != kMaxSequenceNumber) {
+ std::string saved_seqno_trace_path = GetPathForFilename(
+ std::to_string(saved_seqno_) + kTraceFilenameSuffix);
+ Status exists_status = Env::Default()->FileExists(saved_seqno_trace_path);
+ if (exists_status.ok()) {
+ found_trace = true;
+ } else if (exists_status.IsNotFound()) {
+ found_trace = false;
+ } else {
+ s = exists_status;
+ }
+ }
+ }
+ if (s.ok() && saved_seqno_ != kMaxSequenceNumber && !found_trace) {
+ // Create an empty trace file so later logic does not need to distinguish
+ // missing vs. empty trace file.
+ std::unique_ptr<WritableFile> wfile;
+ const EnvOptions soptions;
+ std::string saved_seqno_trace_path =
+ GetPathForFilename(std::to_string(saved_seqno_) + kTraceFilenameSuffix);
+ s = Env::Default()->NewWritableFile(saved_seqno_trace_path, &wfile,
+ soptions);
+ }
+
+ if (s.ok()) {
+ s = Clean();
+ }
+
+ std::string expected_state_file_path =
+ GetPathForFilename(kLatestBasename + kStateFilenameSuffix);
+ bool found = false;
+ if (s.ok()) {
+ Status exists_status = Env::Default()->FileExists(expected_state_file_path);
+ if (exists_status.ok()) {
+ found = true;
+ } else if (exists_status.IsNotFound()) {
+ found = false;
+ } else {
+ s = exists_status;
+ }
+ }
+
+ if (!found) {
+ // Initialize the file in a temp path and then rename it. That way, in case
+ // this process is killed during setup, `Clean()` will take care of removing
+ // the incomplete expected values file.
+ std::string temp_expected_state_file_path =
+ GetTempPathForFilename(kLatestBasename + kStateFilenameSuffix);
+ FileExpectedState temp_expected_state(temp_expected_state_file_path,
+ max_key_, num_column_families_);
+ if (s.ok()) {
+ s = temp_expected_state.Open(true /* create */);
+ }
+ if (s.ok()) {
+ s = Env::Default()->RenameFile(temp_expected_state_file_path,
+ expected_state_file_path);
+ }
+ }
+
+ if (s.ok()) {
+ latest_.reset(new FileExpectedState(std::move(expected_state_file_path),
+ max_key_, num_column_families_));
+ s = latest_->Open(false /* create */);
+ }
+ return s;
+}
+
+#ifndef ROCKSDB_LITE
+Status FileExpectedStateManager::SaveAtAndAfter(DB* db) {
+ SequenceNumber seqno = db->GetLatestSequenceNumber();
+
+ std::string state_filename = std::to_string(seqno) + kStateFilenameSuffix;
+ std::string state_file_temp_path = GetTempPathForFilename(state_filename);
+ std::string state_file_path = GetPathForFilename(state_filename);
+
+ std::string latest_file_path =
+ GetPathForFilename(kLatestBasename + kStateFilenameSuffix);
+
+ std::string trace_filename = std::to_string(seqno) + kTraceFilenameSuffix;
+ std::string trace_file_path = GetPathForFilename(trace_filename);
+
+ // Populate a tempfile and then rename it to atomically create "<seqno>.state"
+ // with contents from "LATEST.state"
+ Status s = CopyFile(FileSystem::Default(), latest_file_path,
+ state_file_temp_path, 0 /* size */, false /* use_fsync */,
+ nullptr /* io_tracer */, Temperature::kUnknown);
+ if (s.ok()) {
+ s = FileSystem::Default()->RenameFile(state_file_temp_path, state_file_path,
+ IOOptions(), nullptr /* dbg */);
+ }
+ SequenceNumber old_saved_seqno = 0;
+ if (s.ok()) {
+ old_saved_seqno = saved_seqno_;
+ saved_seqno_ = seqno;
+ }
+
+ // If there is a crash now, i.e., after "<seqno>.state" was created but before
+ // "<seqno>.trace" is created, it will be treated as if "<seqno>.trace" were
+ // present but empty.
+
+ // Create "<seqno>.trace" directly. It is initially empty so no need for
+ // tempfile.
+ std::unique_ptr<TraceWriter> trace_writer;
+ if (s.ok()) {
+ EnvOptions soptions;
+ // Disable buffering so traces will not get stuck in application buffer.
+ soptions.writable_file_max_buffer_size = 0;
+ s = NewFileTraceWriter(Env::Default(), soptions, trace_file_path,
+ &trace_writer);
+ }
+ if (s.ok()) {
+ TraceOptions trace_opts;
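+    // Read operations do not affect the expected state, so filter them out of
+    // the trace; only write operations need to be replayed by `Restore()`.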
+ trace_opts.filter |= kTraceFilterGet;
+ trace_opts.filter |= kTraceFilterMultiGet;
+ trace_opts.filter |= kTraceFilterIteratorSeek;
+ trace_opts.filter |= kTraceFilterIteratorSeekForPrev;
+ trace_opts.preserve_write_order = true;
+ s = db->StartTrace(trace_opts, std::move(trace_writer));
+ }
+
+ // Delete old state/trace files. Deletion order does not matter since we only
+ // delete after successfully saving new files, so old files will never be used
+ // again, even if we crash.
+ if (s.ok() && old_saved_seqno != kMaxSequenceNumber &&
+ old_saved_seqno != saved_seqno_) {
+ s = Env::Default()->DeleteFile(GetPathForFilename(
+ std::to_string(old_saved_seqno) + kStateFilenameSuffix));
+ }
+ if (s.ok() && old_saved_seqno != kMaxSequenceNumber &&
+ old_saved_seqno != saved_seqno_) {
+ s = Env::Default()->DeleteFile(GetPathForFilename(
+ std::to_string(old_saved_seqno) + kTraceFilenameSuffix));
+ }
+ return s;
+}
+#else // ROCKSDB_LITE
+Status FileExpectedStateManager::SaveAtAndAfter(DB* /* db */) {
+ return Status::NotSupported();
+}
+#endif // ROCKSDB_LITE
+
+bool FileExpectedStateManager::HasHistory() {
+ return saved_seqno_ != kMaxSequenceNumber;
+}
+
+#ifndef ROCKSDB_LITE
+
+namespace {
+
+// An `ExpectedStateTraceRecordHandler` applies a configurable number of
+// write operation trace records to the configured expected state. It is used in
+// `FileExpectedStateManager::Restore()` to sync the expected state with the
+// DB's post-recovery state.
+class ExpectedStateTraceRecordHandler : public TraceRecord::Handler,
+ public WriteBatch::Handler {
+ public:
+ ExpectedStateTraceRecordHandler(uint64_t max_write_ops, ExpectedState* state)
+ : max_write_ops_(max_write_ops),
+ state_(state),
+ buffered_writes_(nullptr) {}
+
+ ~ExpectedStateTraceRecordHandler() { assert(IsDone()); }
+
+ // True if we have already reached the limit on write operations to apply.
+ bool IsDone() { return num_write_ops_ == max_write_ops_; }
+
+ Status Handle(const WriteQueryTraceRecord& record,
+ std::unique_ptr<TraceRecordResult>* /* result */) override {
+ if (IsDone()) {
+ return Status::OK();
+ }
+ WriteBatch batch(record.GetWriteBatchRep().ToString());
+ return batch.Iterate(this);
+ }
+
+ // Ignore reads.
+ Status Handle(const GetQueryTraceRecord& /* record */,
+ std::unique_ptr<TraceRecordResult>* /* result */) override {
+ return Status::OK();
+ }
+
+ // Ignore reads.
+ Status Handle(const IteratorSeekQueryTraceRecord& /* record */,
+ std::unique_ptr<TraceRecordResult>* /* result */) override {
+ return Status::OK();
+ }
+
+ // Ignore reads.
+ Status Handle(const MultiGetQueryTraceRecord& /* record */,
+ std::unique_ptr<TraceRecordResult>* /* result */) override {
+ return Status::OK();
+ }
+
+ // Below are the WriteBatch::Handler overrides. We could use a separate
+ // object, but it's convenient and works to share state with the
+ // `TraceRecord::Handler`.
+
+ Status PutCF(uint32_t column_family_id, const Slice& key_with_ts,
+ const Slice& value) override {
+ Slice key =
+ StripTimestampFromUserKey(key_with_ts, FLAGS_user_timestamp_size);
+ uint64_t key_id;
+ if (!GetIntVal(key.ToString(), &key_id)) {
+ return Status::Corruption("unable to parse key", key.ToString());
+ }
+ uint32_t value_id = GetValueBase(value);
+
+ bool should_buffer_write = !(buffered_writes_ == nullptr);
+ if (should_buffer_write) {
+ return WriteBatchInternal::Put(buffered_writes_.get(), column_family_id,
+ key, value);
+ }
+
+ state_->Put(column_family_id, static_cast<int64_t>(key_id), value_id,
+ false /* pending */);
+ ++num_write_ops_;
+ return Status::OK();
+ }
+
+ Status PutEntityCF(uint32_t column_family_id, const Slice& key_with_ts,
+ const Slice& entity) override {
+ Slice key =
+ StripTimestampFromUserKey(key_with_ts, FLAGS_user_timestamp_size);
+
+ uint64_t key_id = 0;
+ if (!GetIntVal(key.ToString(), &key_id)) {
+ return Status::Corruption("Unable to parse key", key.ToString());
+ }
+
+ Slice entity_copy = entity;
+ WideColumns columns;
+ if (!WideColumnSerialization::Deserialize(entity_copy, columns).ok()) {
+ return Status::Corruption("Unable to deserialize entity",
+ entity.ToString(/* hex */ true));
+ }
+
+ if (columns.empty() || columns[0].name() != kDefaultWideColumnName) {
+ return Status::Corruption("Cannot find default column in entity",
+ entity.ToString(/* hex */ true));
+ }
+
+ const Slice& value_of_default = columns[0].value();
+
+ const uint32_t value_base = GetValueBase(value_of_default);
+
+ if (columns != GenerateExpectedWideColumns(value_base, value_of_default)) {
+ return Status::Corruption("Wide columns in entity inconsistent",
+ entity.ToString(/* hex */ true));
+ }
+
+ if (buffered_writes_) {
+ return WriteBatchInternal::PutEntity(buffered_writes_.get(),
+ column_family_id, key, columns);
+ }
+
+ state_->Put(column_family_id, static_cast<int64_t>(key_id), value_base,
+ false /* pending */);
+
+ ++num_write_ops_;
+
+ return Status::OK();
+ }
+
+ Status DeleteCF(uint32_t column_family_id,
+ const Slice& key_with_ts) override {
+ Slice key =
+ StripTimestampFromUserKey(key_with_ts, FLAGS_user_timestamp_size);
+ uint64_t key_id;
+ if (!GetIntVal(key.ToString(), &key_id)) {
+ return Status::Corruption("unable to parse key", key.ToString());
+ }
+
+ bool should_buffer_write = !(buffered_writes_ == nullptr);
+ if (should_buffer_write) {
+ return WriteBatchInternal::Delete(buffered_writes_.get(),
+ column_family_id, key);
+ }
+
+ state_->Delete(column_family_id, static_cast<int64_t>(key_id),
+ false /* pending */);
+ ++num_write_ops_;
+ return Status::OK();
+ }
+
+ Status SingleDeleteCF(uint32_t column_family_id,
+ const Slice& key_with_ts) override {
+ bool should_buffer_write = !(buffered_writes_ == nullptr);
+ if (should_buffer_write) {
+ Slice key =
+ StripTimestampFromUserKey(key_with_ts, FLAGS_user_timestamp_size);
+ Slice ts =
+ ExtractTimestampFromUserKey(key_with_ts, FLAGS_user_timestamp_size);
+ std::array<Slice, 2> key_with_ts_arr{{key, ts}};
+ return WriteBatchInternal::SingleDelete(
+ buffered_writes_.get(), column_family_id,
+ SliceParts(key_with_ts_arr.data(), 2));
+ }
+
+ return DeleteCF(column_family_id, key_with_ts);
+ }
+
+ Status DeleteRangeCF(uint32_t column_family_id,
+ const Slice& begin_key_with_ts,
+ const Slice& end_key_with_ts) override {
+ Slice begin_key =
+ StripTimestampFromUserKey(begin_key_with_ts, FLAGS_user_timestamp_size);
+ Slice end_key =
+ StripTimestampFromUserKey(end_key_with_ts, FLAGS_user_timestamp_size);
+ uint64_t begin_key_id, end_key_id;
+ if (!GetIntVal(begin_key.ToString(), &begin_key_id)) {
+ return Status::Corruption("unable to parse begin key",
+ begin_key.ToString());
+ }
+ if (!GetIntVal(end_key.ToString(), &end_key_id)) {
+ return Status::Corruption("unable to parse end key", end_key.ToString());
+ }
+
+ bool should_buffer_write = !(buffered_writes_ == nullptr);
+ if (should_buffer_write) {
+ return WriteBatchInternal::DeleteRange(
+ buffered_writes_.get(), column_family_id, begin_key, end_key);
+ }
+
+ state_->DeleteRange(column_family_id, static_cast<int64_t>(begin_key_id),
+ static_cast<int64_t>(end_key_id), false /* pending */);
+ ++num_write_ops_;
+ return Status::OK();
+ }
+
+ Status MergeCF(uint32_t column_family_id, const Slice& key_with_ts,
+ const Slice& value) override {
+ Slice key =
+ StripTimestampFromUserKey(key_with_ts, FLAGS_user_timestamp_size);
+
+ bool should_buffer_write = !(buffered_writes_ == nullptr);
+ if (should_buffer_write) {
+ return WriteBatchInternal::Merge(buffered_writes_.get(), column_family_id,
+ key, value);
+ }
+
+ return PutCF(column_family_id, key, value);
+ }
+
+ Status MarkBeginPrepare(bool = false) override {
+ assert(!buffered_writes_);
+ buffered_writes_.reset(new WriteBatch());
+ return Status::OK();
+ }
+
+ Status MarkEndPrepare(const Slice& xid) override {
+ assert(buffered_writes_);
+ std::string xid_str = xid.ToString();
+ assert(xid_to_buffered_writes_.find(xid_str) ==
+ xid_to_buffered_writes_.end());
+
+ xid_to_buffered_writes_[xid_str].swap(buffered_writes_);
+
+ buffered_writes_.reset();
+
+ return Status::OK();
+ }
+
+ Status MarkCommit(const Slice& xid) override {
+ std::string xid_str = xid.ToString();
+ assert(xid_to_buffered_writes_.find(xid_str) !=
+ xid_to_buffered_writes_.end());
+ assert(xid_to_buffered_writes_.at(xid_str));
+
+ Status s = xid_to_buffered_writes_.at(xid_str)->Iterate(this);
+ xid_to_buffered_writes_.erase(xid_str);
+
+ return s;
+ }
+
+ Status MarkRollback(const Slice& xid) override {
+ std::string xid_str = xid.ToString();
+ assert(xid_to_buffered_writes_.find(xid_str) !=
+ xid_to_buffered_writes_.end());
+ assert(xid_to_buffered_writes_.at(xid_str));
+ xid_to_buffered_writes_.erase(xid_str);
+
+ return Status::OK();
+ }
+
+ private:
+ uint64_t num_write_ops_ = 0;
+ uint64_t max_write_ops_;
+ ExpectedState* state_;
+ std::unordered_map<std::string, std::unique_ptr<WriteBatch>>
+ xid_to_buffered_writes_;
+ std::unique_ptr<WriteBatch> buffered_writes_;
+};
+
+} // anonymous namespace
+
+Status FileExpectedStateManager::Restore(DB* db) {
+ assert(HasHistory());
+ SequenceNumber seqno = db->GetLatestSequenceNumber();
+ if (seqno < saved_seqno_) {
+ return Status::Corruption("DB is older than any restorable expected state");
+ }
+
+ std::string state_filename =
+ std::to_string(saved_seqno_) + kStateFilenameSuffix;
+ std::string state_file_path = GetPathForFilename(state_filename);
+
+ std::string latest_file_temp_path =
+ GetTempPathForFilename(kLatestBasename + kStateFilenameSuffix);
+ std::string latest_file_path =
+ GetPathForFilename(kLatestBasename + kStateFilenameSuffix);
+
+ std::string trace_filename =
+ std::to_string(saved_seqno_) + kTraceFilenameSuffix;
+ std::string trace_file_path = GetPathForFilename(trace_filename);
+
+ std::unique_ptr<TraceReader> trace_reader;
+ Status s = NewFileTraceReader(Env::Default(), EnvOptions(), trace_file_path,
+ &trace_reader);
+
+ if (s.ok()) {
+ // We are going to replay on top of "`seqno`.state" to create a new
+ // "LATEST.state". Start off by creating a tempfile so we can later make the
+ // new "LATEST.state" appear atomically using `RenameFile()`.
+ s = CopyFile(FileSystem::Default(), state_file_path, latest_file_temp_path,
+ 0 /* size */, false /* use_fsync */, nullptr /* io_tracer */,
+ Temperature::kUnknown);
+ }
+
+ {
+ std::unique_ptr<Replayer> replayer;
+ std::unique_ptr<ExpectedState> state;
+ std::unique_ptr<ExpectedStateTraceRecordHandler> handler;
+ if (s.ok()) {
+ state.reset(new FileExpectedState(latest_file_temp_path, max_key_,
+ num_column_families_));
+ s = state->Open(false /* create */);
+ }
+ if (s.ok()) {
+ handler.reset(new ExpectedStateTraceRecordHandler(seqno - saved_seqno_,
+ state.get()));
+ // TODO(ajkr): An API limitation requires we provide `handles` although
+ // they will be unused since we only use the replayer for reading records.
+ // Just give a default CFH for now to satisfy the requirement.
+ s = db->NewDefaultReplayer({db->DefaultColumnFamily()} /* handles */,
+ std::move(trace_reader), &replayer);
+ }
+
+ if (s.ok()) {
+ s = replayer->Prepare();
+ }
+ for (;;) {
+ std::unique_ptr<TraceRecord> record;
+ s = replayer->Next(&record);
+ if (!s.ok()) {
+ break;
+ }
+ std::unique_ptr<TraceRecordResult> res;
+ record->Accept(handler.get(), &res);
+ }
+ if (s.IsCorruption() && handler->IsDone()) {
+ // There could be a corruption reading the tail record of the trace due to
+ // `db_stress` crashing while writing it. It shouldn't matter as long as
+ // we already found all the write ops we need to catch up the expected
+ // state.
+ s = Status::OK();
+ }
+ if (s.IsIncomplete()) {
+ // OK because `Status::Incomplete` is expected upon finishing all the
+ // trace records.
+ s = Status::OK();
+ }
+ }
+
+ if (s.ok()) {
+ s = FileSystem::Default()->RenameFile(latest_file_temp_path,
+ latest_file_path, IOOptions(),
+ nullptr /* dbg */);
+ }
+ if (s.ok()) {
+ latest_.reset(new FileExpectedState(latest_file_path, max_key_,
+ num_column_families_));
+ s = latest_->Open(false /* create */);
+ }
+
+ // Delete old state/trace files. We must delete the state file first.
+  // Otherwise, a crash-recovery immediately after deleting the trace file
+  // could leave `Restore()` unable to replay to `seqno`.
+ if (s.ok()) {
+ s = Env::Default()->DeleteFile(state_file_path);
+ }
+ if (s.ok()) {
+ saved_seqno_ = kMaxSequenceNumber;
+ s = Env::Default()->DeleteFile(trace_file_path);
+ }
+ return s;
+}
+#else // ROCKSDB_LITE
+Status FileExpectedStateManager::Restore(DB* /* db */) {
+ return Status::NotSupported();
+}
+#endif // ROCKSDB_LITE
+
+Status FileExpectedStateManager::Clean() {
+ std::vector<std::string> expected_state_dir_children;
+ Status s = Env::Default()->GetChildren(expected_state_dir_path_,
+ &expected_state_dir_children);
+ // An incomplete `Open()` or incomplete `SaveAtAndAfter()` could have left
+ // behind invalid temporary files. An incomplete `SaveAtAndAfter()` could have
+ // also left behind stale state/trace files. An incomplete `Restore()` could
+ // have left behind stale trace files.
+ for (size_t i = 0; s.ok() && i < expected_state_dir_children.size(); ++i) {
+ const auto& filename = expected_state_dir_children[i];
+ if (filename.rfind(kTempFilenamePrefix, 0 /* pos */) == 0 &&
+ filename.size() >= kTempFilenameSuffix.size() &&
+ filename.rfind(kTempFilenameSuffix) ==
+ filename.size() - kTempFilenameSuffix.size()) {
+ // Delete all temp files.
+ s = Env::Default()->DeleteFile(GetPathForFilename(filename));
+ } else if (filename.size() >= kStateFilenameSuffix.size() &&
+ filename.rfind(kStateFilenameSuffix) ==
+ filename.size() - kStateFilenameSuffix.size() &&
+ filename.rfind(kLatestBasename, 0) == std::string::npos &&
+ ParseUint64(filename.substr(
+ 0, filename.size() - kStateFilenameSuffix.size())) <
+ saved_seqno_) {
+ assert(saved_seqno_ != kMaxSequenceNumber);
+ // Delete stale state files.
+ s = Env::Default()->DeleteFile(GetPathForFilename(filename));
+ } else if (filename.size() >= kTraceFilenameSuffix.size() &&
+ filename.rfind(kTraceFilenameSuffix) ==
+ filename.size() - kTraceFilenameSuffix.size() &&
+ ParseUint64(filename.substr(
+ 0, filename.size() - kTraceFilenameSuffix.size())) <
+ saved_seqno_) {
+ // Delete stale trace files.
+ s = Env::Default()->DeleteFile(GetPathForFilename(filename));
+ }
+ }
+ return s;
+}
+
+std::string FileExpectedStateManager::GetTempPathForFilename(
+ const std::string& filename) {
+ assert(!expected_state_dir_path_.empty());
+ std::string expected_state_dir_path_slash =
+ expected_state_dir_path_.back() == '/' ? expected_state_dir_path_
+ : expected_state_dir_path_ + "/";
+ return expected_state_dir_path_slash + kTempFilenamePrefix + filename +
+ kTempFilenameSuffix;
+}
+
+std::string FileExpectedStateManager::GetPathForFilename(
+ const std::string& filename) {
+ assert(!expected_state_dir_path_.empty());
+ std::string expected_state_dir_path_slash =
+ expected_state_dir_path_.back() == '/' ? expected_state_dir_path_
+ : expected_state_dir_path_ + "/";
+ return expected_state_dir_path_slash + filename;
+}
+
+AnonExpectedStateManager::AnonExpectedStateManager(size_t max_key,
+ size_t num_column_families)
+ : ExpectedStateManager(max_key, num_column_families) {}
+
+Status AnonExpectedStateManager::Open() {
+ latest_.reset(new AnonExpectedState(max_key_, num_column_families_));
+ return latest_->Open(true /* create */);
+}
+
+} // namespace ROCKSDB_NAMESPACE
+
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/expected_state.h b/src/rocksdb/db_stress_tool/expected_state.h
new file mode 100644
index 000000000..41d747e76
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/expected_state.h
@@ -0,0 +1,287 @@
+// Copyright (c) 2021-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#ifdef GFLAGS
+
+#pragma once
+
+#include <stdint.h>
+
+#include <atomic>
+#include <memory>
+
+#include "db/dbformat.h"
+#include "file/file_util.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/file_system.h"
+#include "rocksdb/rocksdb_namespace.h"
+#include "rocksdb/types.h"
+#include "util/string_util.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+// An `ExpectedState` provides read/write access to expected values for every
+// key.
+class ExpectedState {
+ public:
+ explicit ExpectedState(size_t max_key, size_t num_column_families);
+
+ virtual ~ExpectedState() {}
+
+ // Requires external locking preventing concurrent execution with any other
+ // member function.
+ virtual Status Open(bool create) = 0;
+
+ // Requires external locking covering all keys in `cf`.
+ void ClearColumnFamily(int cf);
+
+ // @param pending True if the update may have started but is not yet
+ // guaranteed finished. This is useful for crash-recovery testing when the
+ // process may crash before updating the expected values array.
+ //
+ // Requires external locking covering `key` in `cf`.
+ void Put(int cf, int64_t key, uint32_t value_base, bool pending);
+
+ // Requires external locking covering `key` in `cf`.
+ uint32_t Get(int cf, int64_t key) const;
+
+ // @param pending See comment above Put()
+ // Returns true if the key was not yet deleted.
+ //
+ // Requires external locking covering `key` in `cf`.
+ bool Delete(int cf, int64_t key, bool pending);
+
+ // @param pending See comment above Put()
+ // Returns true if the key was not yet deleted.
+ //
+ // Requires external locking covering `key` in `cf`.
+ bool SingleDelete(int cf, int64_t key, bool pending);
+
+ // @param pending See comment above Put()
+ // Returns number of keys deleted by the call.
+ //
+ // Requires external locking covering keys in `[begin_key, end_key)` in `cf`.
+ int DeleteRange(int cf, int64_t begin_key, int64_t end_key, bool pending);
+
+ // Requires external locking covering `key` in `cf`.
+ bool Exists(int cf, int64_t key);
+
+ private:
+ // Requires external locking covering `key` in `cf`.
+ std::atomic<uint32_t>& Value(int cf, int64_t key) const {
+ return values_[cf * max_key_ + key];
+ }
+
+ const size_t max_key_;
+ const size_t num_column_families_;
+
+ protected:
+ size_t GetValuesLen() const {
+ return sizeof(std::atomic<uint32_t>) * num_column_families_ * max_key_;
+ }
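+  // For example: with 5 column families, max_key = 1,000,000 and a typical
+  // 4-byte std::atomic<uint32_t>, the expected-values buffer (and the file
+  // backing `FileExpectedState`) is about 20 MB.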
+
+ // Requires external locking preventing concurrent execution with any other
+ // member function.
+ void Reset();
+
+ std::atomic<uint32_t>* values_;
+};
+
+// A `FileExpectedState` implements `ExpectedState` backed by a file.
+class FileExpectedState : public ExpectedState {
+ public:
+ explicit FileExpectedState(std::string expected_state_file_path,
+ size_t max_key, size_t num_column_families);
+
+ // Requires external locking preventing concurrent execution with any other
+ // member function.
+ Status Open(bool create) override;
+
+ private:
+ const std::string expected_state_file_path_;
+ std::unique_ptr<MemoryMappedFileBuffer> expected_state_mmap_buffer_;
+};
+
+// An `AnonExpectedState` implements `ExpectedState` backed by a memory
+// allocation.
+class AnonExpectedState : public ExpectedState {
+ public:
+ explicit AnonExpectedState(size_t max_key, size_t num_column_families);
+
+ // Requires external locking preventing concurrent execution with any other
+ // member function.
+ Status Open(bool create) override;
+
+ private:
+ std::unique_ptr<std::atomic<uint32_t>[]> values_allocation_;
+};
+
+// An `ExpectedStateManager` manages data about the expected state of the
+// database. It exposes operations for reading and modifying the latest
+// expected state.
+class ExpectedStateManager {
+ public:
+ explicit ExpectedStateManager(size_t max_key, size_t num_column_families);
+
+ virtual ~ExpectedStateManager();
+
+ // Requires external locking preventing concurrent execution with any other
+ // member function.
+ virtual Status Open() = 0;
+
+ // Saves expected values for the current state of `db` and begins tracking
+ // changes. Following a successful `SaveAtAndAfter()`, `Restore()` can be
+ // called on the same DB, as long as its state does not roll back to before
+ // its current state.
+ //
+ // Requires external locking preventing concurrent execution with any other
+ // member function. Furthermore, `db` must not be mutated while this function
+ // is executing.
+ virtual Status SaveAtAndAfter(DB* db) = 0;
+
+ // Returns true if at least one state of historical expected values can be
+ // restored.
+ //
+ // Requires external locking preventing concurrent execution with any other
+ // member function.
+ virtual bool HasHistory() = 0;
+
+ // Restores expected values according to the current state of `db`. See
+ // `SaveAtAndAfter()` for conditions where this can be called.
+ //
+ // Requires external locking preventing concurrent execution with any other
+ // member function. Furthermore, `db` must not be mutated while this function
+ // is executing.
+ virtual Status Restore(DB* db) = 0;
+
+ // Requires external locking covering all keys in `cf`.
+ void ClearColumnFamily(int cf) { return latest_->ClearColumnFamily(cf); }
+
+ // @param pending True if the update may have started but is not yet
+ // guaranteed finished. This is useful for crash-recovery testing when the
+ // process may crash before updating the expected values array.
+ //
+ // Requires external locking covering `key` in `cf`.
+ void Put(int cf, int64_t key, uint32_t value_base, bool pending) {
+ return latest_->Put(cf, key, value_base, pending);
+ }
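+
+  // A minimal usage sketch of the `pending` flag (the names `expected`, `db`,
+  // `key_slice` and `value_slice` are illustrative, not from this file): mark
+  // the update pending before issuing the DB write, then mark it final once
+  // the write is known to have completed, so a crash in between leaves the
+  // expected value in the "pending" state.
+  //
+  //   expected.Put(cf, key, value_base, true /* pending */);
+  //   Status s = db->Put(write_opts, key_slice, value_slice);
+  //   if (s.ok()) {
+  //     expected.Put(cf, key, value_base, false /* pending */);
+  //   }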
+
+ // Requires external locking covering `key` in `cf`.
+ uint32_t Get(int cf, int64_t key) const { return latest_->Get(cf, key); }
+
+ // @param pending See comment above Put()
+ // Returns true if the key was not yet deleted.
+ //
+ // Requires external locking covering `key` in `cf`.
+ bool Delete(int cf, int64_t key, bool pending) {
+ return latest_->Delete(cf, key, pending);
+ }
+
+ // @param pending See comment above Put()
+ // Returns true if the key was not yet deleted.
+ //
+ // Requires external locking covering `key` in `cf`.
+ bool SingleDelete(int cf, int64_t key, bool pending) {
+ return latest_->SingleDelete(cf, key, pending);
+ }
+
+ // @param pending See comment above Put()
+ // Returns number of keys deleted by the call.
+ //
+ // Requires external locking covering keys in `[begin_key, end_key)` in `cf`.
+ int DeleteRange(int cf, int64_t begin_key, int64_t end_key, bool pending) {
+ return latest_->DeleteRange(cf, begin_key, end_key, pending);
+ }
+
+ // Requires external locking covering `key` in `cf`.
+ bool Exists(int cf, int64_t key) { return latest_->Exists(cf, key); }
+
+ protected:
+ const size_t max_key_;
+ const size_t num_column_families_;
+ std::unique_ptr<ExpectedState> latest_;
+};
+
+// A `FileExpectedStateManager` implements an `ExpectedStateManager` backed by
+// a directory of files containing data about the expected state of the
+// database.
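+//
+// In the simplest case the directory holds just "LATEST.state"; after one or
+// more calls to `SaveAtAndAfter()` it may also hold "<seqno>.state" /
+// "<seqno>.trace" pairs that a later `Restore()` can replay (see below).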
+class FileExpectedStateManager : public ExpectedStateManager {
+ public:
+ explicit FileExpectedStateManager(size_t max_key, size_t num_column_families,
+ std::string expected_state_dir_path);
+
+ // Requires external locking preventing concurrent execution with any other
+ // member function.
+ Status Open() override;
+
+ // See `ExpectedStateManager::SaveAtAndAfter()` API doc.
+ //
+ // This implementation makes a copy of "LATEST.state" into
+ // "<current seqno>.state", and starts a trace in "<current seqno>.trace".
+  // Because the expected state and trace are persisted as files, a later
+  // `Restore()` can be performed even from a different process.
+ Status SaveAtAndAfter(DB* db) override;
+
+ // See `ExpectedStateManager::HasHistory()` API doc.
+ bool HasHistory() override;
+
+ // See `ExpectedStateManager::Restore()` API doc.
+ //
+ // Say `db->GetLatestSequenceNumber()` was `a` last time `SaveAtAndAfter()`
+ // was called and now it is `b`. Then this function replays `b - a` write
+ // operations from "`a`.trace" onto "`a`.state", and then copies the resulting
+ // file into "LATEST.state".
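+  //
+  // For example, if `a` was 100 when `SaveAtAndAfter()` last ran and `b` is
+  // now 140, this replays the 40 traced writes from "100.trace" onto
+  // "100.state" and installs the result as the new "LATEST.state".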
+ Status Restore(DB* db) override;
+
+ private:
+ // Requires external locking preventing concurrent execution with any other
+ // member function.
+ Status Clean();
+
+ std::string GetTempPathForFilename(const std::string& filename);
+ std::string GetPathForFilename(const std::string& filename);
+
+ static const std::string kLatestBasename;
+ static const std::string kStateFilenameSuffix;
+ static const std::string kTraceFilenameSuffix;
+ static const std::string kTempFilenamePrefix;
+ static const std::string kTempFilenameSuffix;
+
+ const std::string expected_state_dir_path_;
+ SequenceNumber saved_seqno_ = kMaxSequenceNumber;
+};
+
+// An `AnonExpectedStateManager` implements an `ExpectedStateManager` backed by
+// a memory allocation containing data about the expected state of the database.
+class AnonExpectedStateManager : public ExpectedStateManager {
+ public:
+ explicit AnonExpectedStateManager(size_t max_key, size_t num_column_families);
+
+ // See `ExpectedStateManager::SaveAtAndAfter()` API doc.
+ //
+ // This implementation returns `Status::NotSupported` since we do not
+ // currently have a need to keep history of expected state within a process.
+ Status SaveAtAndAfter(DB* /* db */) override {
+ return Status::NotSupported();
+ }
+
+ // See `ExpectedStateManager::HasHistory()` API doc.
+ bool HasHistory() override { return false; }
+
+ // See `ExpectedStateManager::Restore()` API doc.
+ //
+ // This implementation returns `Status::NotSupported` since we do not
+ // currently have a need to keep history of expected state within a process.
+ Status Restore(DB* /* db */) override { return Status::NotSupported(); }
+
+ // Requires external locking preventing concurrent execution with any other
+ // member function.
+ Status Open() override;
+};
+
+} // namespace ROCKSDB_NAMESPACE
+
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/multi_ops_txns_stress.cc b/src/rocksdb/db_stress_tool/multi_ops_txns_stress.cc
new file mode 100644
index 000000000..7db5e8942
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/multi_ops_txns_stress.cc
@@ -0,0 +1,1808 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifdef GFLAGS
+#include "db_stress_tool/multi_ops_txns_stress.h"
+
+#include "rocksdb/utilities/write_batch_with_index.h"
+#include "util/defer.h"
+#include "utilities/fault_injection_fs.h"
+#include "utilities/transactions/write_prepared_txn_db.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+// The description of A and C can be found in multi_ops_txns_stress.h
+DEFINE_int32(lb_a, 0, "(Inclusive) lower bound of A");
+DEFINE_int32(ub_a, 1000, "(Exclusive) upper bound of A");
+DEFINE_int32(lb_c, 0, "(Inclusive) lower bound of C");
+DEFINE_int32(ub_c, 1000, "(Exclusive) upper bound of C");
+
+DEFINE_string(key_spaces_path, "",
+ "Path to file describing the lower and upper bounds of A and C");
+
+DEFINE_int32(delay_snapshot_read_one_in, 0,
+ "With a chance of 1/N, inject a random delay between taking "
+ "snapshot and read.");
+
+DEFINE_int32(rollback_one_in, 0,
+ "If non-zero, rollback non-read-only transactions with a "
+ "probability of 1/N.");
+
+DEFINE_int32(clear_wp_commit_cache_one_in, 0,
+ "If non-zero, evict all commit entries from commit cache with a "
+             "probability of 1/N. This option applies to write-prepared and "
+ "write-unprepared transactions.");
+
+extern "C" bool rocksdb_write_prepared_TEST_ShouldClearCommitCache(void) {
+ static Random rand(static_cast<uint32_t>(db_stress_env->NowMicros()));
+ return FLAGS_clear_wp_commit_cache_one_in > 0 &&
+ rand.OneIn(FLAGS_clear_wp_commit_cache_one_in);
+}
+
+// MultiOpsTxnsStressTest can either operate on a database with pre-populated
+// data (possibly from previous runs), or create a new db and preload it with
+// data specified via `-lb_a`, `-ub_a`, `-lb_c`, `-ub_c`, etc. Among these, we
+// define the test key spaces as two key ranges: [lb_a, ub_a) and [lb_c, ub_c).
+// The key spaces specification is persisted in a file whose absolute path can
+// be specified via `-key_spaces_path`.
+//
+// Whether an existing db is used or a new one is created, key_spaces_path will
+// be used. In the former case, the test reads the key spaces specification
+// from `-key_spaces_path` and decodes [lb_a, ub_a) and [lb_c, ub_c). In the
+// latter case, the test writes a key spaces specification to a file at the
+// location, and this file will be used by future runs until a new db is
+// created.
+//
+// There are two cases:
+// 1. Create a fresh database (-destroy_db_initially=1, or there is no
+//    database at the location specified by -db). See PreloadDb().
+// 2. Use an existing, non-empty database. See ScanExistingDb().
+//
+// This test is multi-threaded, and thread count can be specified via
+// `-threads`. For simplicity, we partition the key ranges and each thread
+// operates on a subrange independently.
+// Within each subrange, a KeyGenerator object is responsible for key
+// generation. A KeyGenerator maintains two sets: the existing keys and the
+// non-existing keys, both within its subrange [low, high). Test
+// initialization makes sure there is at least one non-existing key;
+// otherwise the test reports an error and exits before any test thread is
+// spawned.
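+//
+// As a sketch of the partitioning described above (the helper name is
+// illustrative, not part of the test), thread `tid` out of `threads` owns
+// the subrange [low, high), with the last thread absorbing any remainder:
+//
+//   std::pair<uint32_t, uint32_t> SubrangeForThread(uint32_t lb, uint32_t ub,
+//                                                   int threads, int tid) {
+//     const uint32_t per_thread = (ub - lb) / threads;
+//     const uint32_t low = lb + tid * per_thread;
+//     const uint32_t high = (tid < threads - 1) ? low + per_thread : ub;
+//     return {low, high};
+//   }
+//
+// E.g. with [0, 10000) and 32 threads, thread 31 owns [9672, 10000).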
+
+void MultiOpsTxnsStressTest::KeyGenerator::FinishInit() {
+ assert(existing_.empty());
+ assert(!existing_uniq_.empty());
+ assert(low_ < high_);
+ for (auto v : existing_uniq_) {
+ assert(low_ <= v);
+ assert(high_ > v);
+ existing_.push_back(v);
+ }
+ if (non_existing_uniq_.empty()) {
+ fprintf(
+ stderr,
+        "Cannot allocate key in [%u, %u)\nStart with a new DB or try changing "
+ "the number of threads for testing via -threads=<#threads>\n",
+ static_cast<unsigned int>(low_), static_cast<unsigned int>(high_));
+ fflush(stdout);
+ fflush(stderr);
+ assert(false);
+ }
+ initialized_ = true;
+}
+
+std::pair<uint32_t, uint32_t>
+MultiOpsTxnsStressTest::KeyGenerator::ChooseExisting() {
+ assert(initialized_);
+ const size_t N = existing_.size();
+ assert(N > 0);
+ uint32_t rnd = rand_.Uniform(static_cast<int>(N));
+ assert(rnd < N);
+ return std::make_pair(existing_[rnd], rnd);
+}
+
+uint32_t MultiOpsTxnsStressTest::KeyGenerator::Allocate() {
+ assert(initialized_);
+ auto it = non_existing_uniq_.begin();
+ assert(non_existing_uniq_.end() != it);
+ uint32_t ret = *it;
+ // Remove this element from non_existing_.
+ // Need to call UndoAllocation() if the calling transaction does not commit.
+ non_existing_uniq_.erase(it);
+ return ret;
+}
+
+void MultiOpsTxnsStressTest::KeyGenerator::Replace(uint32_t old_val,
+ uint32_t old_pos,
+ uint32_t new_val) {
+ assert(initialized_);
+ {
+ auto it = existing_uniq_.find(old_val);
+ assert(it != existing_uniq_.end());
+ existing_uniq_.erase(it);
+ }
+
+ {
+ assert(0 == existing_uniq_.count(new_val));
+ existing_uniq_.insert(new_val);
+ existing_[old_pos] = new_val;
+ }
+
+ {
+ assert(0 == non_existing_uniq_.count(old_val));
+ non_existing_uniq_.insert(old_val);
+ }
+}
+
+void MultiOpsTxnsStressTest::KeyGenerator::UndoAllocation(uint32_t new_val) {
+ assert(initialized_);
+ assert(0 == non_existing_uniq_.count(new_val));
+ non_existing_uniq_.insert(new_val);
+}
+
+std::string MultiOpsTxnsStressTest::Record::EncodePrimaryKey(uint32_t a) {
+ std::string ret;
+ PutFixed32(&ret, kPrimaryIndexId);
+ PutFixed32(&ret, a);
+
+ char* const buf = &ret[0];
+ std::reverse(buf, buf + sizeof(kPrimaryIndexId));
+ std::reverse(buf + sizeof(kPrimaryIndexId),
+ buf + sizeof(kPrimaryIndexId) + sizeof(a));
+ return ret;
+}
+
+std::string MultiOpsTxnsStressTest::Record::EncodeSecondaryKey(uint32_t c) {
+ std::string ret;
+ PutFixed32(&ret, kSecondaryIndexId);
+ PutFixed32(&ret, c);
+
+ char* const buf = &ret[0];
+ std::reverse(buf, buf + sizeof(kSecondaryIndexId));
+ std::reverse(buf + sizeof(kSecondaryIndexId),
+ buf + sizeof(kSecondaryIndexId) + sizeof(c));
+ return ret;
+}
+
+std::string MultiOpsTxnsStressTest::Record::EncodeSecondaryKey(uint32_t c,
+ uint32_t a) {
+ std::string ret;
+ PutFixed32(&ret, kSecondaryIndexId);
+ PutFixed32(&ret, c);
+ PutFixed32(&ret, a);
+
+ char* const buf = &ret[0];
+ std::reverse(buf, buf + sizeof(kSecondaryIndexId));
+ std::reverse(buf + sizeof(kSecondaryIndexId),
+ buf + sizeof(kSecondaryIndexId) + sizeof(c));
+ std::reverse(buf + sizeof(kSecondaryIndexId) + sizeof(c),
+ buf + sizeof(kSecondaryIndexId) + sizeof(c) + sizeof(a));
+ return ret;
+}
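+
+// A worked example of the layout produced above (the index id value is
+// illustrative): PutFixed32() appends each 32-bit field in little-endian
+// order, and the std::reverse() calls then flip each field to big-endian so
+// that keys compare in numeric order under the default bytewise comparator.
+// With an index id of 0x0000000B, c = 0x00000002 and a = 0x00000100, the
+// secondary key bytes are
+//
+//   00 00 00 0B | 00 00 00 02 | 00 00 01 00
+//
+// i.e. all entries of an index are contiguous, ordered by c and then by a.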
+
+std::tuple<Status, uint32_t, uint32_t>
+MultiOpsTxnsStressTest::Record::DecodePrimaryIndexValue(
+ Slice primary_index_value) {
+ if (primary_index_value.size() != 8) {
+ return std::tuple<Status, uint32_t, uint32_t>{Status::Corruption(""), 0, 0};
+ }
+ uint32_t b = 0;
+ uint32_t c = 0;
+ if (!GetFixed32(&primary_index_value, &b) ||
+ !GetFixed32(&primary_index_value, &c)) {
+ assert(false);
+ return std::tuple<Status, uint32_t, uint32_t>{Status::Corruption(""), 0, 0};
+ }
+ return std::tuple<Status, uint32_t, uint32_t>{Status::OK(), b, c};
+}
+
+std::pair<Status, uint32_t>
+MultiOpsTxnsStressTest::Record::DecodeSecondaryIndexValue(
+ Slice secondary_index_value) {
+ if (secondary_index_value.size() != 4) {
+ return std::make_pair(Status::Corruption(""), 0);
+ }
+ uint32_t crc = 0;
+ bool result __attribute__((unused)) =
+ GetFixed32(&secondary_index_value, &crc);
+ assert(result);
+ return std::make_pair(Status::OK(), crc);
+}
+
+std::pair<std::string, std::string>
+MultiOpsTxnsStressTest::Record::EncodePrimaryIndexEntry() const {
+ std::string primary_index_key = EncodePrimaryKey();
+ std::string primary_index_value = EncodePrimaryIndexValue();
+ return std::make_pair(primary_index_key, primary_index_value);
+}
+
+std::string MultiOpsTxnsStressTest::Record::EncodePrimaryKey() const {
+ return EncodePrimaryKey(a_);
+}
+
+std::string MultiOpsTxnsStressTest::Record::EncodePrimaryIndexValue() const {
+ std::string ret;
+ PutFixed32(&ret, b_);
+ PutFixed32(&ret, c_);
+ return ret;
+}
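+
+// Note the asymmetry in the code above: key fields are byte-reversed to
+// big-endian so that keys sort numerically, while the 8-byte primary index
+// value keeps PutFixed32's little-endian layout, i.e. fixed32(b) followed by
+// fixed32(c).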
+
+std::pair<std::string, std::string>
+MultiOpsTxnsStressTest::Record::EncodeSecondaryIndexEntry() const {
+ std::string secondary_index_key = EncodeSecondaryKey(c_, a_);
+
+ // Secondary index value is always 4-byte crc32 of the secondary key
+ std::string secondary_index_value;
+ uint32_t crc =
+ crc32c::Value(secondary_index_key.data(), secondary_index_key.size());
+ PutFixed32(&secondary_index_value, crc);
+ return std::make_pair(std::move(secondary_index_key), secondary_index_value);
+}
+
+std::string MultiOpsTxnsStressTest::Record::EncodeSecondaryKey() const {
+ return EncodeSecondaryKey(c_, a_);
+}
+
+Status MultiOpsTxnsStressTest::Record::DecodePrimaryIndexEntry(
+ Slice primary_index_key, Slice primary_index_value) {
+ if (primary_index_key.size() != 8) {
+ assert(false);
+ return Status::Corruption("Primary index key length is not 8");
+ }
+
+ uint32_t index_id = 0;
+
+ [[maybe_unused]] bool res = GetFixed32(&primary_index_key, &index_id);
+ assert(res);
+ index_id = EndianSwapValue(index_id);
+
+ if (index_id != kPrimaryIndexId) {
+ std::ostringstream oss;
+ oss << "Unexpected primary index id: " << index_id;
+ return Status::Corruption(oss.str());
+ }
+
+ res = GetFixed32(&primary_index_key, &a_);
+ assert(res);
+ a_ = EndianSwapValue(a_);
+ assert(primary_index_key.empty());
+
+ if (primary_index_value.size() != 8) {
+ return Status::Corruption("Primary index value length is not 8");
+ }
+ GetFixed32(&primary_index_value, &b_);
+ GetFixed32(&primary_index_value, &c_);
+ return Status::OK();
+}
+
+Status MultiOpsTxnsStressTest::Record::DecodeSecondaryIndexEntry(
+ Slice secondary_index_key, Slice secondary_index_value) {
+ if (secondary_index_key.size() != 12) {
+ return Status::Corruption("Secondary index key length is not 12");
+ }
+ uint32_t crc =
+ crc32c::Value(secondary_index_key.data(), secondary_index_key.size());
+
+ uint32_t index_id = 0;
+
+ [[maybe_unused]] bool res = GetFixed32(&secondary_index_key, &index_id);
+ assert(res);
+ index_id = EndianSwapValue(index_id);
+
+ if (index_id != kSecondaryIndexId) {
+ std::ostringstream oss;
+ oss << "Unexpected secondary index id: " << index_id;
+ return Status::Corruption(oss.str());
+ }
+
+ assert(secondary_index_key.size() == 8);
+ res = GetFixed32(&secondary_index_key, &c_);
+ assert(res);
+ c_ = EndianSwapValue(c_);
+
+ assert(secondary_index_key.size() == 4);
+ res = GetFixed32(&secondary_index_key, &a_);
+ assert(res);
+ a_ = EndianSwapValue(a_);
+ assert(secondary_index_key.empty());
+
+ if (secondary_index_value.size() != 4) {
+ return Status::Corruption("Secondary index value length is not 4");
+ }
+ uint32_t val = 0;
+ GetFixed32(&secondary_index_value, &val);
+ if (val != crc) {
+ std::ostringstream oss;
+ oss << "Secondary index key checksum mismatch, stored: " << val
+ << ", recomputed: " << crc;
+ return Status::Corruption(oss.str());
+ }
+ return Status::OK();
+}
+
+void MultiOpsTxnsStressTest::FinishInitDb(SharedState* shared) {
+ if (FLAGS_enable_compaction_filter) {
+ // TODO (yanqin) enable compaction filter
+ }
+#ifndef ROCKSDB_LITE
+ ProcessRecoveredPreparedTxns(shared);
+#endif
+
+ ReopenAndPreloadDbIfNeeded(shared);
+ // TODO (yanqin) parallelize if key space is large
+ for (auto& key_gen : key_gen_for_a_) {
+ assert(key_gen);
+ key_gen->FinishInit();
+ }
+ // TODO (yanqin) parallelize if key space is large
+ for (auto& key_gen : key_gen_for_c_) {
+ assert(key_gen);
+ key_gen->FinishInit();
+ }
+}
+
+void MultiOpsTxnsStressTest::ReopenAndPreloadDbIfNeeded(SharedState* shared) {
+ (void)shared;
+#ifndef ROCKSDB_LITE
+ bool db_empty = false;
+ {
+ std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
+ iter->SeekToFirst();
+ if (!iter->Valid()) {
+ db_empty = true;
+ }
+ }
+
+ if (db_empty) {
+ PreloadDb(shared, FLAGS_threads, FLAGS_lb_a, FLAGS_ub_a, FLAGS_lb_c,
+ FLAGS_ub_c);
+ } else {
+ fprintf(stdout,
+ "Key ranges will be read from %s.\n-lb_a, -ub_a, -lb_c, -ub_c will "
+ "be ignored\n",
+ FLAGS_key_spaces_path.c_str());
+ fflush(stdout);
+ ScanExistingDb(shared, FLAGS_threads);
+ }
+#endif // !ROCKSDB_LITE
+}
+
+// Used for point-lookup transaction
+Status MultiOpsTxnsStressTest::TestGet(
+ ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& /*rand_column_families*/,
+ const std::vector<int64_t>& /*rand_keys*/) {
+ uint32_t a = 0;
+ uint32_t pos = 0;
+ std::tie(a, pos) = ChooseExistingA(thread);
+ return PointLookupTxn(thread, read_opts, a);
+}
+
+// Not used.
+std::vector<Status> MultiOpsTxnsStressTest::TestMultiGet(
+ ThreadState* /*thread*/, const ReadOptions& /*read_opts*/,
+ const std::vector<int>& /*rand_column_families*/,
+ const std::vector<int64_t>& /*rand_keys*/) {
+ return std::vector<Status>{Status::NotSupported()};
+}
+
+Status MultiOpsTxnsStressTest::TestPrefixScan(
+ ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) {
+ (void)thread;
+ (void)read_opts;
+ (void)rand_column_families;
+ (void)rand_keys;
+ return Status::OK();
+}
+
+// Given a key K, this creates an iterator which scans to K and then
+// does a random sequence of Next/Prev operations.
+Status MultiOpsTxnsStressTest::TestIterate(
+ ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& /*rand_column_families*/,
+ const std::vector<int64_t>& /*rand_keys*/) {
+ uint32_t c = 0;
+ uint32_t pos = 0;
+ std::tie(c, pos) = ChooseExistingC(thread);
+ return RangeScanTxn(thread, read_opts, c);
+}
+
+// Not intended for use.
+Status MultiOpsTxnsStressTest::TestPut(ThreadState* /*thread*/,
+ WriteOptions& /*write_opts*/,
+ const ReadOptions& /*read_opts*/,
+ const std::vector<int>& /*cf_ids*/,
+ const std::vector<int64_t>& /*keys*/,
+ char (&value)[100]) {
+ (void)value;
+ return Status::NotSupported();
+}
+
+// Not intended for use.
+Status MultiOpsTxnsStressTest::TestDelete(
+ ThreadState* /*thread*/, WriteOptions& /*write_opts*/,
+ const std::vector<int>& /*rand_column_families*/,
+ const std::vector<int64_t>& /*rand_keys*/) {
+ return Status::NotSupported();
+}
+
+// Not intended for use.
+Status MultiOpsTxnsStressTest::TestDeleteRange(
+ ThreadState* /*thread*/, WriteOptions& /*write_opts*/,
+ const std::vector<int>& /*rand_column_families*/,
+ const std::vector<int64_t>& /*rand_keys*/) {
+ return Status::NotSupported();
+}
+
+void MultiOpsTxnsStressTest::TestIngestExternalFile(
+ ThreadState* thread, const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& /*rand_keys*/) {
+ // TODO (yanqin)
+ (void)thread;
+ (void)rand_column_families;
+}
+
+void MultiOpsTxnsStressTest::TestCompactRange(
+ ThreadState* thread, int64_t /*rand_key*/, const Slice& /*start_key*/,
+ ColumnFamilyHandle* column_family) {
+ // TODO (yanqin).
+ // May use GetRangeHash() for validation before and after DB::CompactRange()
+ // completes.
+ (void)thread;
+ (void)column_family;
+}
+
+Status MultiOpsTxnsStressTest::TestBackupRestore(
+ ThreadState* thread, const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& /*rand_keys*/) {
+ // TODO (yanqin)
+ (void)thread;
+ (void)rand_column_families;
+ return Status::OK();
+}
+
+Status MultiOpsTxnsStressTest::TestCheckpoint(
+ ThreadState* thread, const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& /*rand_keys*/) {
+ // TODO (yanqin)
+ (void)thread;
+ (void)rand_column_families;
+ return Status::OK();
+}
+
+#ifndef ROCKSDB_LITE
+Status MultiOpsTxnsStressTest::TestApproximateSize(
+ ThreadState* thread, uint64_t iteration,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& /*rand_keys*/) {
+ // TODO (yanqin)
+ (void)thread;
+ (void)iteration;
+ (void)rand_column_families;
+ return Status::OK();
+}
+#endif // !ROCKSDB_LITE
+
+Status MultiOpsTxnsStressTest::TestCustomOperations(
+ ThreadState* thread, const std::vector<int>& rand_column_families) {
+ (void)rand_column_families;
+ // Randomly choose from 0, 1, and 2.
+ // TODO (yanqin) allow user to configure probability of each operation.
+ uint32_t rand = thread->rand.Uniform(3);
+ Status s;
+ if (0 == rand) {
+ // Update primary key.
+ uint32_t old_a = 0;
+ uint32_t pos = 0;
+ std::tie(old_a, pos) = ChooseExistingA(thread);
+ uint32_t new_a = GenerateNextA(thread);
+ s = PrimaryKeyUpdateTxn(thread, old_a, pos, new_a);
+ } else if (1 == rand) {
+ // Update secondary key.
+ uint32_t old_c = 0;
+ uint32_t pos = 0;
+ std::tie(old_c, pos) = ChooseExistingC(thread);
+ uint32_t new_c = GenerateNextC(thread);
+ s = SecondaryKeyUpdateTxn(thread, old_c, pos, new_c);
+ } else if (2 == rand) {
+ // Update primary index value.
+ uint32_t a = 0;
+ uint32_t pos = 0;
+ std::tie(a, pos) = ChooseExistingA(thread);
+ s = UpdatePrimaryIndexValueTxn(thread, a, /*b_delta=*/1);
+ } else {
+ // Should never reach here.
+ assert(false);
+ }
+
+ return s;
+}
+
+void MultiOpsTxnsStressTest::RegisterAdditionalListeners() {
+ options_.listeners.emplace_back(new MultiOpsTxnsStressListener(this));
+}
+
+#ifndef ROCKSDB_LITE
+void MultiOpsTxnsStressTest::PrepareTxnDbOptions(
+ SharedState* /*shared*/, TransactionDBOptions& txn_db_opts) {
+  // MultiOpsTxnStressTest uses SingleDelete to delete secondary keys, so we
+  // register this callback to tell the TransactionDB that, when rolling back
+  // a transaction, it should use SingleDelete (rather than Delete) to cancel
+  // a prior Put on a secondary-index key from the same transaction.
+ txn_db_opts.rollback_deletion_type_callback =
+ [](TransactionDB* /*db*/, ColumnFamilyHandle* /*column_family*/,
+ const Slice& key) {
+ Slice ks = key;
+ uint32_t index_id = 0;
+ [[maybe_unused]] bool res = GetFixed32(&ks, &index_id);
+ assert(res);
+ index_id = EndianSwapValue(index_id);
+ assert(index_id <= Record::kSecondaryIndexId);
+ return index_id == Record::kSecondaryIndexId;
+ };
+}
+#endif // !ROCKSDB_LITE
+
+Status MultiOpsTxnsStressTest::PrimaryKeyUpdateTxn(ThreadState* thread,
+ uint32_t old_a,
+ uint32_t old_a_pos,
+ uint32_t new_a) {
+#ifdef ROCKSDB_LITE
+ (void)thread;
+ (void)old_a;
+ (void)old_a_pos;
+ (void)new_a;
+ return Status::NotSupported();
+#else
+ std::string old_pk = Record::EncodePrimaryKey(old_a);
+ std::string new_pk = Record::EncodePrimaryKey(new_a);
+ Transaction* txn = nullptr;
+ WriteOptions wopts;
+ Status s = NewTxn(wopts, &txn);
+ if (!s.ok()) {
+ assert(!txn);
+ thread->stats.AddErrors(1);
+ return s;
+ }
+
+ assert(txn);
+ txn->SetSnapshotOnNextOperation(/*notifier=*/nullptr);
+
+ const Defer cleanup([new_a, &s, thread, txn, this]() {
+ if (s.ok()) {
+ // Two gets, one for existing pk, one for locking potential new pk.
+ thread->stats.AddGets(/*ngets=*/2, /*nfounds=*/1);
+ thread->stats.AddDeletes(1);
+ thread->stats.AddBytesForWrites(
+ /*nwrites=*/2,
+ Record::kPrimaryIndexEntrySize + Record::kSecondaryIndexEntrySize);
+ thread->stats.AddSingleDeletes(1);
+ return;
+ }
+ if (s.IsNotFound()) {
+ thread->stats.AddGets(/*ngets=*/1, /*nfounds=*/0);
+ } else if (s.IsBusy() || s.IsIncomplete()) {
+ // ignore.
+ // Incomplete also means rollback by application. See the transaction
+ // implementations.
+ } else {
+ thread->stats.AddErrors(1);
+ }
+ auto& key_gen = key_gen_for_a_[thread->tid];
+ key_gen->UndoAllocation(new_a);
+ RollbackTxn(txn).PermitUncheckedError();
+ });
+
+ ReadOptions ropts;
+ ropts.rate_limiter_priority =
+ FLAGS_rate_limit_user_ops ? Env::IO_USER : Env::IO_TOTAL;
+ std::string value;
+ s = txn->GetForUpdate(ropts, old_pk, &value);
+ if (!s.ok()) {
+ return s;
+ }
+ std::string empty_value;
+ s = txn->GetForUpdate(ropts, new_pk, &empty_value);
+ if (s.ok()) {
+ assert(!empty_value.empty());
+ s = Status::Busy();
+ return s;
+ } else if (!s.IsNotFound()) {
+ return s;
+ }
+
+ auto result = Record::DecodePrimaryIndexValue(value);
+ s = std::get<0>(result);
+ if (!s.ok()) {
+ return s;
+ }
+ uint32_t b = std::get<1>(result);
+ uint32_t c = std::get<2>(result);
+
+ ColumnFamilyHandle* cf = db_->DefaultColumnFamily();
+ s = txn->Delete(cf, old_pk, /*assume_tracked=*/true);
+ if (!s.ok()) {
+ return s;
+ }
+ s = txn->Put(cf, new_pk, value, /*assume_tracked=*/true);
+ if (!s.ok()) {
+ return s;
+ }
+
+ auto* wb = txn->GetWriteBatch();
+ assert(wb);
+
+ std::string old_sk = Record::EncodeSecondaryKey(c, old_a);
+ s = wb->SingleDelete(old_sk);
+ if (!s.ok()) {
+ return s;
+ }
+
+ Record record(new_a, b, c);
+ std::string new_sk;
+ std::string new_crc;
+ std::tie(new_sk, new_crc) = record.EncodeSecondaryIndexEntry();
+ s = wb->Put(new_sk, new_crc);
+ if (!s.ok()) {
+ return s;
+ }
+
+ s = txn->Prepare();
+
+ if (!s.ok()) {
+ return s;
+ }
+
+ if (FLAGS_rollback_one_in > 0 && thread->rand.OneIn(FLAGS_rollback_one_in)) {
+ s = Status::Incomplete();
+ return s;
+ }
+
+ s = WriteToCommitTimeWriteBatch(*txn);
+ if (!s.ok()) {
+ return s;
+ }
+
+ s = CommitAndCreateTimestampedSnapshotIfNeeded(thread, *txn);
+
+ auto& key_gen = key_gen_for_a_.at(thread->tid);
+ if (s.ok()) {
+ delete txn;
+ key_gen->Replace(old_a, old_a_pos, new_a);
+ }
+ return s;
+#endif // !ROCKSDB_LITE
+}
+
+Status MultiOpsTxnsStressTest::SecondaryKeyUpdateTxn(ThreadState* thread,
+ uint32_t old_c,
+ uint32_t old_c_pos,
+ uint32_t new_c) {
+#ifdef ROCKSDB_LITE
+ (void)thread;
+ (void)old_c;
+ (void)old_c_pos;
+ (void)new_c;
+ return Status::NotSupported();
+#else
+ Transaction* txn = nullptr;
+ WriteOptions wopts;
+ Status s = NewTxn(wopts, &txn);
+ if (!s.ok()) {
+ assert(!txn);
+ thread->stats.AddErrors(1);
+ return s;
+ }
+
+ assert(txn);
+
+ Iterator* it = nullptr;
+ long iterations = 0;
+ const Defer cleanup([new_c, &s, thread, &it, txn, this, &iterations]() {
+ delete it;
+ if (s.ok()) {
+ thread->stats.AddIterations(iterations);
+ thread->stats.AddGets(/*ngets=*/1, /*nfounds=*/1);
+ thread->stats.AddSingleDeletes(1);
+ thread->stats.AddBytesForWrites(
+ /*nwrites=*/2,
+ Record::kPrimaryIndexEntrySize + Record::kSecondaryIndexEntrySize);
+ return;
+ } else if (s.IsBusy() || s.IsTimedOut() || s.IsTryAgain() ||
+ s.IsMergeInProgress() || s.IsIncomplete()) {
+ // ww-conflict detected, or
+ // lock cannot be acquired, or
+ // memtable history is not large enough for conflict checking, or
+ // Merge operation cannot be resolved, or
+ // application rollback.
+ // TODO (yanqin) add stats for other cases?
+ } else if (s.IsNotFound()) {
+ // ignore.
+ } else {
+ thread->stats.AddErrors(1);
+ }
+ auto& key_gen = key_gen_for_c_[thread->tid];
+ key_gen->UndoAllocation(new_c);
+ RollbackTxn(txn).PermitUncheckedError();
+ });
+
+ // TODO (yanqin) try SetSnapshotOnNextOperation(). We currently need to take
+ // a snapshot here because we will later verify that point lookup in the
+ // primary index using GetForUpdate() returns the same value for 'c' as the
+ // iterator. The iterator does not need a snapshot though, because it will be
+ // assigned the current latest (published) sequence in the db, which will be
+ // no smaller than the snapshot created here. The GetForUpdate will perform
+ // ww conflict checking to ensure GetForUpdate() (using the snapshot) sees
+ // the same data as this iterator.
+ txn->SetSnapshot();
+ std::string old_sk_prefix = Record::EncodeSecondaryKey(old_c);
+ std::string iter_ub_str = Record::EncodeSecondaryKey(old_c + 1);
+ Slice iter_ub = iter_ub_str;
+ ReadOptions ropts;
+ ropts.snapshot = txn->GetSnapshot();
+ ropts.total_order_seek = true;
+ ropts.iterate_upper_bound = &iter_ub;
+ ropts.rate_limiter_priority =
+ FLAGS_rate_limit_user_ops ? Env::IO_USER : Env::IO_TOTAL;
+ it = txn->GetIterator(ropts);
+
+ assert(it);
+ it->Seek(old_sk_prefix);
+ if (!it->Valid()) {
+ s = Status::NotFound();
+ return s;
+ }
+ auto* wb = txn->GetWriteBatch();
+ assert(wb);
+
+ do {
+ ++iterations;
+ Record record;
+ s = record.DecodeSecondaryIndexEntry(it->key(), it->value());
+ if (!s.ok()) {
+ fprintf(stderr, "Cannot decode secondary key (%s => %s): %s\n",
+ it->key().ToString(true).c_str(),
+ it->value().ToString(true).c_str(), s.ToString().c_str());
+ assert(false);
+ break;
+ }
+ // At this point, record.b is not known yet, thus we need to access
+ // primary index.
+ std::string pk = Record::EncodePrimaryKey(record.a_value());
+ std::string value;
+ ReadOptions read_opts;
+ read_opts.rate_limiter_priority =
+ FLAGS_rate_limit_user_ops ? Env::IO_USER : Env::IO_TOTAL;
+ read_opts.snapshot = txn->GetSnapshot();
+ s = txn->GetForUpdate(read_opts, pk, &value);
+ if (s.IsBusy() || s.IsTimedOut() || s.IsTryAgain() ||
+ s.IsMergeInProgress()) {
+ // Write conflict, or cannot acquire lock, or memtable size is not large
+ // enough, or merge cannot be resolved.
+ break;
+ } else if (s.IsNotFound()) {
+ // We can also fail verification here.
+ std::ostringstream oss;
+ auto* dbimpl = static_cast_with_check<DBImpl>(db_->GetRootDB());
+ assert(dbimpl);
+ oss << "snap " << read_opts.snapshot->GetSequenceNumber()
+ << " (published " << dbimpl->GetLastPublishedSequence()
+ << "), pk should exist: " << Slice(pk).ToString(true);
+ fprintf(stderr, "%s\n", oss.str().c_str());
+ assert(false);
+ break;
+ }
+ if (!s.ok()) {
+ std::ostringstream oss;
+ auto* dbimpl = static_cast_with_check<DBImpl>(db_->GetRootDB());
+ assert(dbimpl);
+ oss << "snap " << read_opts.snapshot->GetSequenceNumber()
+ << " (published " << dbimpl->GetLastPublishedSequence() << "), "
+ << s.ToString();
+ fprintf(stderr, "%s\n", oss.str().c_str());
+ assert(false);
+ break;
+ }
+ auto result = Record::DecodePrimaryIndexValue(value);
+ s = std::get<0>(result);
+ if (!s.ok()) {
+ fprintf(stderr, "Cannot decode primary index value %s: %s\n",
+ Slice(value).ToString(true).c_str(), s.ToString().c_str());
+ assert(false);
+ break;
+ }
+ uint32_t b = std::get<1>(result);
+ uint32_t c = std::get<2>(result);
+ if (c != old_c) {
+ std::ostringstream oss;
+ auto* dbimpl = static_cast_with_check<DBImpl>(db_->GetRootDB());
+ assert(dbimpl);
+ oss << "snap " << read_opts.snapshot->GetSequenceNumber()
+ << " (published " << dbimpl->GetLastPublishedSequence()
+ << "), pk/sk mismatch. pk: (a=" << record.a_value() << ", "
+ << "c=" << c << "), sk: (c=" << old_c << ")";
+ s = Status::Corruption();
+ fprintf(stderr, "%s\n", oss.str().c_str());
+ assert(false);
+ break;
+ }
+ Record new_rec(record.a_value(), b, new_c);
+ std::string new_primary_index_value = new_rec.EncodePrimaryIndexValue();
+ ColumnFamilyHandle* cf = db_->DefaultColumnFamily();
+ s = txn->Put(cf, pk, new_primary_index_value, /*assume_tracked=*/true);
+ if (!s.ok()) {
+ break;
+ }
+ std::string old_sk = it->key().ToString(/*hex=*/false);
+ std::string new_sk;
+ std::string new_crc;
+ std::tie(new_sk, new_crc) = new_rec.EncodeSecondaryIndexEntry();
+ s = wb->SingleDelete(old_sk);
+ if (!s.ok()) {
+ break;
+ }
+ s = wb->Put(new_sk, new_crc);
+ if (!s.ok()) {
+ break;
+ }
+
+ it->Next();
+ } while (it->Valid());
+
+ if (!s.ok()) {
+ return s;
+ }
+
+ s = txn->Prepare();
+
+ if (!s.ok()) {
+ return s;
+ }
+
+ if (FLAGS_rollback_one_in > 0 && thread->rand.OneIn(FLAGS_rollback_one_in)) {
+ s = Status::Incomplete();
+ return s;
+ }
+
+ s = WriteToCommitTimeWriteBatch(*txn);
+ if (!s.ok()) {
+ return s;
+ }
+
+ s = CommitAndCreateTimestampedSnapshotIfNeeded(thread, *txn);
+
+ if (s.ok()) {
+ delete txn;
+ auto& key_gen = key_gen_for_c_.at(thread->tid);
+ key_gen->Replace(old_c, old_c_pos, new_c);
+ }
+
+ return s;
+#endif // !ROCKSDB_LITE
+}
+
+Status MultiOpsTxnsStressTest::UpdatePrimaryIndexValueTxn(ThreadState* thread,
+ uint32_t a,
+ uint32_t b_delta) {
+#ifdef ROCKSDB_LITE
+ (void)thread;
+ (void)a;
+ (void)b_delta;
+ return Status::NotSupported();
+#else
+ std::string pk_str = Record::EncodePrimaryKey(a);
+ Transaction* txn = nullptr;
+ WriteOptions wopts;
+ Status s = NewTxn(wopts, &txn);
+ if (!s.ok()) {
+ assert(!txn);
+ thread->stats.AddErrors(1);
+ return s;
+ }
+
+ assert(txn);
+
+ const Defer cleanup([&s, thread, txn, this]() {
+ if (s.ok()) {
+ thread->stats.AddGets(/*ngets=*/1, /*nfounds=*/1);
+ thread->stats.AddBytesForWrites(
+ /*nwrites=*/1, /*nbytes=*/Record::kPrimaryIndexEntrySize);
+ return;
+ }
+ if (s.IsNotFound()) {
+ thread->stats.AddGets(/*ngets=*/1, /*nfounds=*/0);
+ } else if (s.IsInvalidArgument()) {
+ // ignored.
+ } else if (s.IsBusy() || s.IsTimedOut() || s.IsTryAgain() ||
+ s.IsMergeInProgress() || s.IsIncomplete()) {
+ // ignored.
+ } else {
+ thread->stats.AddErrors(1);
+ }
+ RollbackTxn(txn).PermitUncheckedError();
+ });
+ ReadOptions ropts;
+ ropts.rate_limiter_priority =
+ FLAGS_rate_limit_user_ops ? Env::IO_USER : Env::IO_TOTAL;
+ std::string value;
+ s = txn->GetForUpdate(ropts, pk_str, &value);
+ if (!s.ok()) {
+ return s;
+ }
+ auto result = Record::DecodePrimaryIndexValue(value);
+ if (!std::get<0>(result).ok()) {
+ s = std::get<0>(result);
+ fprintf(stderr, "Cannot decode primary index value %s: %s\n",
+ Slice(value).ToString(true).c_str(), s.ToString().c_str());
+ assert(false);
+ return s;
+ }
+ uint32_t b = std::get<1>(result) + b_delta;
+ uint32_t c = std::get<2>(result);
+ Record record(a, b, c);
+ std::string primary_index_value = record.EncodePrimaryIndexValue();
+ ColumnFamilyHandle* cf = db_->DefaultColumnFamily();
+ s = txn->Put(cf, pk_str, primary_index_value, /*assume_tracked=*/true);
+ if (!s.ok()) {
+ return s;
+ }
+ s = txn->Prepare();
+ if (!s.ok()) {
+ return s;
+ }
+
+ if (FLAGS_rollback_one_in > 0 && thread->rand.OneIn(FLAGS_rollback_one_in)) {
+ s = Status::Incomplete();
+ return s;
+ }
+
+ s = WriteToCommitTimeWriteBatch(*txn);
+ if (!s.ok()) {
+ return s;
+ }
+
+ s = CommitAndCreateTimestampedSnapshotIfNeeded(thread, *txn);
+
+ if (s.ok()) {
+ delete txn;
+ }
+ return s;
+#endif // !ROCKSDB_LITE
+}
+
+Status MultiOpsTxnsStressTest::PointLookupTxn(ThreadState* thread,
+ ReadOptions ropts, uint32_t a) {
+#ifdef ROCKSDB_LITE
+ (void)thread;
+ (void)ropts;
+ (void)a;
+ return Status::NotSupported();
+#else
+ std::string pk_str = Record::EncodePrimaryKey(a);
+ // pk may or may not exist
+ PinnableSlice value;
+
+ Transaction* txn = nullptr;
+ WriteOptions wopts;
+ Status s = NewTxn(wopts, &txn);
+ if (!s.ok()) {
+ assert(!txn);
+ thread->stats.AddErrors(1);
+ return s;
+ }
+
+ assert(txn);
+
+ const Defer cleanup([&s, thread, txn, this]() {
+ if (s.ok()) {
+ thread->stats.AddGets(/*ngets=*/1, /*nfounds=*/1);
+ return;
+ } else if (s.IsNotFound()) {
+ thread->stats.AddGets(/*ngets=*/1, /*nfounds=*/0);
+ } else {
+ thread->stats.AddErrors(1);
+ }
+ RollbackTxn(txn).PermitUncheckedError();
+ });
+
+ std::shared_ptr<const Snapshot> snapshot;
+ SetupSnapshot(thread, ropts, *txn, snapshot);
+
+ if (FLAGS_delay_snapshot_read_one_in > 0 &&
+ thread->rand.OneIn(FLAGS_delay_snapshot_read_one_in)) {
+ uint64_t delay_ms = thread->rand.Uniform(100) + 1;
+ db_->GetDBOptions().env->SleepForMicroseconds(
+ static_cast<int>(delay_ms * 1000));
+ }
+
+ s = txn->Get(ropts, db_->DefaultColumnFamily(), pk_str, &value);
+ if (s.ok()) {
+ s = txn->Commit();
+ }
+ if (s.ok()) {
+ delete txn;
+ }
+ return s;
+#endif // !ROCKSDB_LITE
+}
+
+Status MultiOpsTxnsStressTest::RangeScanTxn(ThreadState* thread,
+ ReadOptions ropts, uint32_t c) {
+#ifdef ROCKSDB_LITE
+ (void)thread;
+ (void)ropts;
+ (void)c;
+ return Status::NotSupported();
+#else
+ std::string sk = Record::EncodeSecondaryKey(c);
+
+ Transaction* txn = nullptr;
+ WriteOptions wopts;
+ Status s = NewTxn(wopts, &txn);
+ if (!s.ok()) {
+ assert(!txn);
+ thread->stats.AddErrors(1);
+ return s;
+ }
+
+ assert(txn);
+
+ const Defer cleanup([&s, thread, txn, this]() {
+ if (s.ok()) {
+ thread->stats.AddIterations(1);
+ return;
+ }
+ thread->stats.AddErrors(1);
+ RollbackTxn(txn).PermitUncheckedError();
+ });
+
+ std::shared_ptr<const Snapshot> snapshot;
+ SetupSnapshot(thread, ropts, *txn, snapshot);
+
+ if (FLAGS_delay_snapshot_read_one_in > 0 &&
+ thread->rand.OneIn(FLAGS_delay_snapshot_read_one_in)) {
+ uint64_t delay_ms = thread->rand.Uniform(100) + 1;
+ db_->GetDBOptions().env->SleepForMicroseconds(
+ static_cast<int>(delay_ms * 1000));
+ }
+
+ std::unique_ptr<Iterator> iter(txn->GetIterator(ropts));
+
+ constexpr size_t total_nexts = 10;
+ size_t nexts = 0;
+ for (iter->Seek(sk);
+ iter->Valid() && nexts < total_nexts && iter->status().ok();
+ iter->Next(), ++nexts) {
+ }
+
+ if (iter->status().ok()) {
+ s = txn->Commit();
+ } else {
+ s = iter->status();
+ }
+
+ if (s.ok()) {
+ delete txn;
+ }
+
+ return s;
+#endif // !ROCKSDB_LITE
+}
+
+void MultiOpsTxnsStressTest::VerifyDb(ThreadState* thread) const {
+ if (thread->shared->HasVerificationFailedYet()) {
+ return;
+ }
+ const Snapshot* const snapshot = db_->GetSnapshot();
+ assert(snapshot);
+ ManagedSnapshot snapshot_guard(db_, snapshot);
+
+ std::ostringstream oss;
+ oss << "[snap=" << snapshot->GetSequenceNumber() << ",";
+
+ auto* dbimpl = static_cast_with_check<DBImpl>(db_->GetRootDB());
+ assert(dbimpl);
+
+ oss << " last_published=" << dbimpl->GetLastPublishedSequence() << "] ";
+
+ if (FLAGS_delay_snapshot_read_one_in > 0 &&
+ thread->rand.OneIn(FLAGS_delay_snapshot_read_one_in)) {
+ uint64_t delay_ms = thread->rand.Uniform(100) + 1;
+ db_->GetDBOptions().env->SleepForMicroseconds(
+ static_cast<int>(delay_ms * 1000));
+ }
+
+ // TODO (yanqin) with a probability, we can use either forward or backward
+ // iterator in subsequent checks. We can also use more advanced features in
+ // range scan. For now, let's just use simple forward iteration with
+ // total_order_seek = true.
+
+ // First, iterate primary index.
+ size_t primary_index_entries_count = 0;
+ {
+ std::string iter_ub_str;
+ PutFixed32(&iter_ub_str, Record::kPrimaryIndexId + 1);
+ std::reverse(iter_ub_str.begin(), iter_ub_str.end());
+ Slice iter_ub = iter_ub_str;
+
+ std::string start_key;
+ PutFixed32(&start_key, Record::kPrimaryIndexId);
+ std::reverse(start_key.begin(), start_key.end());
+
+ // This `ReadOptions` is for validation purposes. Ignore
+ // `FLAGS_rate_limit_user_ops` to avoid slowing any validation.
+ ReadOptions ropts;
+ ropts.snapshot = snapshot;
+ ropts.total_order_seek = true;
+ ropts.iterate_upper_bound = &iter_ub;
+
+ std::unique_ptr<Iterator> it(db_->NewIterator(ropts));
+ for (it->Seek(start_key); it->Valid(); it->Next()) {
+ Record record;
+ Status s = record.DecodePrimaryIndexEntry(it->key(), it->value());
+ if (!s.ok()) {
+ oss << "Cannot decode primary index entry " << it->key().ToString(true)
+ << "=>" << it->value().ToString(true);
+ VerificationAbort(thread->shared, oss.str(), s);
+ assert(false);
+ return;
+ }
+ ++primary_index_entries_count;
+
+ // Search secondary index.
+ uint32_t a = record.a_value();
+ uint32_t c = record.c_value();
+ char sk_buf[12];
+ EncodeFixed32(sk_buf, Record::kSecondaryIndexId);
+ std::reverse(sk_buf, sk_buf + sizeof(uint32_t));
+ EncodeFixed32(sk_buf + sizeof(uint32_t), c);
+ std::reverse(sk_buf + sizeof(uint32_t), sk_buf + 2 * sizeof(uint32_t));
+ EncodeFixed32(sk_buf + 2 * sizeof(uint32_t), a);
+ std::reverse(sk_buf + 2 * sizeof(uint32_t), sk_buf + sizeof(sk_buf));
+ Slice sk(sk_buf, sizeof(sk_buf));
+ std::string value;
+ s = db_->Get(ropts, sk, &value);
+ if (!s.ok()) {
+ oss << "Cannot find secondary index entry " << sk.ToString(true);
+ VerificationAbort(thread->shared, oss.str(), s);
+ assert(false);
+ return;
+ }
+ }
+ }
+
+ // Second, iterate secondary index.
+ size_t secondary_index_entries_count = 0;
+ {
+ std::string start_key;
+ PutFixed32(&start_key, Record::kSecondaryIndexId);
+ std::reverse(start_key.begin(), start_key.end());
+
+ // This `ReadOptions` is for validation purposes. Ignore
+ // `FLAGS_rate_limit_user_ops` to avoid slowing any validation.
+ ReadOptions ropts;
+ ropts.snapshot = snapshot;
+ ropts.total_order_seek = true;
+
+ std::unique_ptr<Iterator> it(db_->NewIterator(ropts));
+ for (it->Seek(start_key); it->Valid(); it->Next()) {
+ ++secondary_index_entries_count;
+ Record record;
+ Status s = record.DecodeSecondaryIndexEntry(it->key(), it->value());
+ if (!s.ok()) {
+ oss << "Cannot decode secondary index entry "
+ << it->key().ToString(true) << "=>" << it->value().ToString(true);
+ VerificationAbort(thread->shared, oss.str(), s);
+ assert(false);
+ return;
+ }
+ // After decoding secondary index entry, we know a and c. Crc is verified
+ // in decoding phase.
+ //
+ // Form a primary key and search in the primary index.
+ std::string pk = Record::EncodePrimaryKey(record.a_value());
+ std::string value;
+ s = db_->Get(ropts, pk, &value);
+ if (!s.ok()) {
+ oss << "Error searching pk " << Slice(pk).ToString(true) << ". "
+ << s.ToString() << ". sk " << it->key().ToString(true);
+ VerificationAbort(thread->shared, oss.str(), s);
+ assert(false);
+ return;
+ }
+ auto result = Record::DecodePrimaryIndexValue(value);
+ s = std::get<0>(result);
+ if (!s.ok()) {
+ oss << "Error decoding primary index value "
+ << Slice(value).ToString(true) << ". " << s.ToString();
+ VerificationAbort(thread->shared, oss.str(), s);
+ assert(false);
+ return;
+ }
+ uint32_t c_in_primary = std::get<2>(result);
+ if (c_in_primary != record.c_value()) {
+ oss << "Pk/sk mismatch. pk: " << Slice(pk).ToString(true) << "=>"
+ << Slice(value).ToString(true) << " (a=" << record.a_value()
+ << ", c=" << c_in_primary << "), sk: " << it->key().ToString(true)
+ << " (c=" << record.c_value() << ")";
+ VerificationAbort(thread->shared, oss.str(), s);
+ assert(false);
+ return;
+ }
+ }
+ }
+
+ if (secondary_index_entries_count != primary_index_entries_count) {
+ oss << "Pk/sk mismatch: primary index has " << primary_index_entries_count
+ << " entries. Secondary index has " << secondary_index_entries_count
+ << " entries.";
+ VerificationAbort(thread->shared, oss.str(), Status::OK());
+ assert(false);
+ return;
+ }
+}
+
+// VerifyPkSkFast() can be called from MultiOpsTxnsStressListener's callbacks,
+// which may run before TransactionDB::Open() has returned to the caller.
+// At that point, db_ and txn_db_ may still be nullptr, so the caller must
+// make sure this race condition does not happen.
+void MultiOpsTxnsStressTest::VerifyPkSkFast(int job_id) {
+ DB* const db = db_aptr_.load(std::memory_order_acquire);
+ if (db == nullptr) {
+ return;
+ }
+
+ assert(db_ == db);
+ assert(db_ != nullptr);
+
+ const Snapshot* const snapshot = db_->GetSnapshot();
+ assert(snapshot);
+ ManagedSnapshot snapshot_guard(db_, snapshot);
+
+ std::ostringstream oss;
+ auto* dbimpl = static_cast_with_check<DBImpl>(db_->GetRootDB());
+ assert(dbimpl);
+
+ oss << "Job " << job_id << ": [" << snapshot->GetSequenceNumber() << ","
+ << dbimpl->GetLastPublishedSequence() << "] ";
+
+ std::string start_key;
+ PutFixed32(&start_key, Record::kSecondaryIndexId);
+ std::reverse(start_key.begin(), start_key.end());
+
+ // This `ReadOptions` is for validation purposes. Ignore
+ // `FLAGS_rate_limit_user_ops` to avoid slowing any validation.
+ ReadOptions ropts;
+ ropts.snapshot = snapshot;
+ ropts.total_order_seek = true;
+
+ std::unique_ptr<Iterator> it(db_->NewIterator(ropts));
+ for (it->Seek(start_key); it->Valid(); it->Next()) {
+ Record record;
+ Status s = record.DecodeSecondaryIndexEntry(it->key(), it->value());
+ if (!s.ok()) {
+ oss << "Cannot decode secondary index entry " << it->key().ToString(true)
+ << "=>" << it->value().ToString(true);
+ fprintf(stderr, "%s\n", oss.str().c_str());
+ fflush(stderr);
+ assert(false);
+ }
+ // After decoding secondary index entry, we know a and c. Crc is verified
+ // in decoding phase.
+ //
+ // Form a primary key and search in the primary index.
+ std::string pk = Record::EncodePrimaryKey(record.a_value());
+ std::string value;
+ s = db_->Get(ropts, pk, &value);
+ if (!s.ok()) {
+ oss << "Error searching pk " << Slice(pk).ToString(true) << ". "
+ << s.ToString() << ". sk " << it->key().ToString(true);
+ fprintf(stderr, "%s\n", oss.str().c_str());
+ fflush(stderr);
+ assert(false);
+ }
+ auto result = Record::DecodePrimaryIndexValue(value);
+ s = std::get<0>(result);
+ if (!s.ok()) {
+ oss << "Error decoding primary index value "
+ << Slice(value).ToString(true) << ". " << s.ToString();
+ fprintf(stderr, "%s\n", oss.str().c_str());
+ fflush(stderr);
+ assert(false);
+ }
+ uint32_t c_in_primary = std::get<2>(result);
+ if (c_in_primary != record.c_value()) {
+ oss << "Pk/sk mismatch. pk: " << Slice(pk).ToString(true) << "=>"
+ << Slice(value).ToString(true) << " (a=" << record.a_value()
+ << ", c=" << c_in_primary << "), sk: " << it->key().ToString(true)
+ << " (c=" << record.c_value() << ")";
+ fprintf(stderr, "%s\n", oss.str().c_str());
+ fflush(stderr);
+ assert(false);
+ }
+ }
+}
+
+std::pair<uint32_t, uint32_t> MultiOpsTxnsStressTest::ChooseExistingA(
+ ThreadState* thread) {
+ uint32_t tid = thread->tid;
+ auto& key_gen = key_gen_for_a_.at(tid);
+ return key_gen->ChooseExisting();
+}
+
+uint32_t MultiOpsTxnsStressTest::GenerateNextA(ThreadState* thread) {
+ uint32_t tid = thread->tid;
+ auto& key_gen = key_gen_for_a_.at(tid);
+ return key_gen->Allocate();
+}
+
+std::pair<uint32_t, uint32_t> MultiOpsTxnsStressTest::ChooseExistingC(
+ ThreadState* thread) {
+ uint32_t tid = thread->tid;
+ auto& key_gen = key_gen_for_c_.at(tid);
+ return key_gen->ChooseExisting();
+}
+
+uint32_t MultiOpsTxnsStressTest::GenerateNextC(ThreadState* thread) {
+ uint32_t tid = thread->tid;
+ auto& key_gen = key_gen_for_c_.at(tid);
+ return key_gen->Allocate();
+}
+
+#ifndef ROCKSDB_LITE
+void MultiOpsTxnsStressTest::ProcessRecoveredPreparedTxnsHelper(
+ Transaction* txn, SharedState*) {
+ thread_local Random rand(static_cast<uint32_t>(FLAGS_seed));
+ if (rand.OneIn(2)) {
+ Status s = txn->Commit();
+ assert(s.ok());
+ } else {
+ Status s = txn->Rollback();
+ assert(s.ok());
+ }
+}
+
+Status MultiOpsTxnsStressTest::WriteToCommitTimeWriteBatch(Transaction& txn) {
+ WriteBatch* ctwb = txn.GetCommitTimeWriteBatch();
+ assert(ctwb);
+ // Do not change the content in key_buf.
+ static constexpr char key_buf[sizeof(Record::kMetadataPrefix) + 4] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\xff'};
+
+ uint64_t counter_val = counter_.Next();
+ char val_buf[sizeof(counter_val)];
+ EncodeFixed64(val_buf, counter_val);
+ return ctwb->Put(Slice(key_buf, sizeof(key_buf)),
+ Slice(val_buf, sizeof(val_buf)));
+}
+
+Status MultiOpsTxnsStressTest::CommitAndCreateTimestampedSnapshotIfNeeded(
+ ThreadState* thread, Transaction& txn) {
+ Status s;
+ if (FLAGS_create_timestamped_snapshot_one_in > 0 &&
+ thread->rand.OneInOpt(FLAGS_create_timestamped_snapshot_one_in)) {
+ uint64_t ts = db_stress_env->NowNanos();
+ std::shared_ptr<const Snapshot> snapshot;
+ s = txn.CommitAndTryCreateSnapshot(/*notifier=*/nullptr, ts, &snapshot);
+ } else {
+ s = txn.Commit();
+ }
+ assert(txn_db_);
+ if (FLAGS_create_timestamped_snapshot_one_in > 0 &&
+ thread->rand.OneInOpt(50000)) {
+ uint64_t now = db_stress_env->NowNanos();
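+    // time_diff below is 1 second, in the nanosecond units of NowNanos(),
+    // so timestamped snapshots older than one second are released.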
+ constexpr uint64_t time_diff = static_cast<uint64_t>(1000) * 1000 * 1000;
+ txn_db_->ReleaseTimestampedSnapshotsOlderThan(now - time_diff);
+ }
+ return s;
+}
+
+void MultiOpsTxnsStressTest::SetupSnapshot(
+ ThreadState* thread, ReadOptions& read_opts, Transaction& txn,
+ std::shared_ptr<const Snapshot>& snapshot) {
+ if (thread->rand.OneInOpt(2)) {
+ snapshot = txn_db_->GetLatestTimestampedSnapshot();
+ }
+
+ if (snapshot) {
+ read_opts.snapshot = snapshot.get();
+ } else {
+ txn.SetSnapshot();
+ read_opts.snapshot = txn.GetSnapshot();
+ }
+}
+#endif // !ROCKSDB_LITE
+
+std::string MultiOpsTxnsStressTest::KeySpaces::EncodeTo() const {
+ std::string result;
+ PutFixed32(&result, lb_a);
+ PutFixed32(&result, ub_a);
+ PutFixed32(&result, lb_c);
+ PutFixed32(&result, ub_c);
+ return result;
+}
+
+bool MultiOpsTxnsStressTest::KeySpaces::DecodeFrom(Slice data) {
+ if (!GetFixed32(&data, &lb_a) || !GetFixed32(&data, &ub_a) ||
+ !GetFixed32(&data, &lb_c) || !GetFixed32(&data, &ub_c)) {
+ return false;
+ }
+ return true;
+}
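+
+// Each of the four bounds is a little-endian fixed32, so the encoded key
+// spaces descriptor is exactly 16 bytes; ReadKeySpacesDesc() reads it back
+// with a single 16-byte read.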
+
+void MultiOpsTxnsStressTest::PersistKeySpacesDesc(
+ const std::string& key_spaces_path, uint32_t lb_a, uint32_t ub_a,
+ uint32_t lb_c, uint32_t ub_c) {
+ KeySpaces key_spaces(lb_a, ub_a, lb_c, ub_c);
+ std::string key_spaces_rep = key_spaces.EncodeTo();
+
+ std::unique_ptr<WritableFile> wfile;
+ Status s1 =
+ Env::Default()->NewWritableFile(key_spaces_path, &wfile, EnvOptions());
+ assert(s1.ok());
+ assert(wfile);
+ s1 = wfile->Append(key_spaces_rep);
+ assert(s1.ok());
+}
+
+MultiOpsTxnsStressTest::KeySpaces MultiOpsTxnsStressTest::ReadKeySpacesDesc(
+ const std::string& key_spaces_path) {
+ KeySpaces key_spaces;
+ std::unique_ptr<SequentialFile> sfile;
+ Status s1 =
+ Env::Default()->NewSequentialFile(key_spaces_path, &sfile, EnvOptions());
+ assert(s1.ok());
+ assert(sfile);
+ char buf[16];
+ Slice result;
+ s1 = sfile->Read(sizeof(buf), &result, buf);
+ assert(s1.ok());
+ if (!key_spaces.DecodeFrom(result)) {
+ assert(false);
+ }
+ return key_spaces;
+}
+
+// Create an empty database if necessary and preload it with initial test data.
+// Key range [lb_a, ub_a), [lb_c, ub_c). The key ranges will be shared by
+// 'threads' threads.
+// PreloadDb() also sets up KeyGenerator objects for each sub key range
+// operated on by each thread.
+// Both [lb_a, ub_a) and [lb_c, ub_c) are partitioned. Each thread operates on
+// one sub range, using KeyGenerators to generate keys.
+// For example, we choose a from [0, 10000) and c from [0, 100). Number of
+// threads is 32, their tids range from 0 to 31.
+// Thread k chooses a from [312*k,312*(k+1)) and c from [3*k,3*(k+1)) if k<31.
+// Thread 31 chooses a from [9672, 10000) and c from [93, 100).
+// Within each subrange, a is drawn from [low1, high1) and c from [low2,
+// high2), where high1 - low1 > high2 - low2.
+// We reserve {high1 - 1} and {high2 - 1} as unallocated.
+// The records are <low1,low2>, <low1+1,low2+1>, ...,
+// <low1+k,low2+k%(high2-low2-1)>, <low1+k+1,low2+(k+1)%(high2-low2-1)>, ...
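+// For instance, in the 32-thread example above, thread 0 owns a in [0, 312)
+// and c in [0, 3) (c = 2 stays unallocated), so it preloads <0,0>, <1,1>,
+// <2,0>, <3,1>, ..., <310,0>, and a = 311 is left unallocated.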
+void MultiOpsTxnsStressTest::PreloadDb(SharedState* shared, int threads,
+ uint32_t lb_a, uint32_t ub_a,
+ uint32_t lb_c, uint32_t ub_c) {
+#ifdef ROCKSDB_LITE
+ (void)shared;
+ (void)threads;
+ (void)lb_a;
+ (void)ub_a;
+ (void)lb_c;
+ (void)ub_c;
+#else
+ key_gen_for_a_.resize(threads);
+ key_gen_for_c_.resize(threads);
+
+ assert(ub_a > lb_a && ub_a > lb_a + threads);
+ assert(ub_c > lb_c && ub_c > lb_c + threads);
+
+ PersistKeySpacesDesc(FLAGS_key_spaces_path, lb_a, ub_a, lb_c, ub_c);
+
+ fprintf(stdout, "a from [%u, %u), c from [%u, %u)\n",
+ static_cast<unsigned int>(lb_a), static_cast<unsigned int>(ub_a),
+ static_cast<unsigned int>(lb_c), static_cast<unsigned int>(ub_c));
+
+ const uint32_t num_c = ub_c - lb_c;
+ const uint32_t num_c_per_thread = num_c / threads;
+ const uint32_t num_a = ub_a - lb_a;
+ const uint32_t num_a_per_thread = num_a / threads;
+
+ WriteOptions wopts;
+ wopts.disableWAL = FLAGS_disable_wal;
+ Random rnd(shared->GetSeed());
+ assert(txn_db_);
+
+ std::vector<KeySet> existing_a_uniqs(threads);
+ std::vector<KeySet> non_existing_a_uniqs(threads);
+ std::vector<KeySet> existing_c_uniqs(threads);
+ std::vector<KeySet> non_existing_c_uniqs(threads);
+
+ for (uint32_t a = lb_a; a < ub_a; ++a) {
+ uint32_t tid = (a - lb_a) / num_a_per_thread;
+ if (tid >= static_cast<uint32_t>(threads)) {
+ tid = threads - 1;
+ }
+
+ uint32_t a_base = lb_a + tid * num_a_per_thread;
+ uint32_t a_hi = (tid < static_cast<uint32_t>(threads - 1))
+ ? (a_base + num_a_per_thread)
+ : ub_a;
+ uint32_t a_delta = a - a_base;
+
+ if (a == a_hi - 1) {
+ non_existing_a_uniqs[tid].insert(a);
+ continue;
+ }
+
+ uint32_t c_base = lb_c + tid * num_c_per_thread;
+ uint32_t c_hi = (tid < static_cast<uint32_t>(threads - 1))
+ ? (c_base + num_c_per_thread)
+ : ub_c;
+ uint32_t c_delta = a_delta % (c_hi - c_base - 1);
+ uint32_t c = c_base + c_delta;
+
+ uint32_t b = rnd.Next();
+ Record record(a, b, c);
+ WriteBatch wb;
+ const auto primary_index_entry = record.EncodePrimaryIndexEntry();
+ Status s = wb.Put(primary_index_entry.first, primary_index_entry.second);
+ assert(s.ok());
+
+ const auto secondary_index_entry = record.EncodeSecondaryIndexEntry();
+ s = wb.Put(secondary_index_entry.first, secondary_index_entry.second);
+ assert(s.ok());
+
+ s = txn_db_->Write(wopts, &wb);
+ assert(s.ok());
+
+ // TODO (yanqin): make the following check optional, especially when data
+ // size is large.
+ Record tmp_rec;
+ tmp_rec.SetB(record.b_value());
+ s = tmp_rec.DecodeSecondaryIndexEntry(secondary_index_entry.first,
+ secondary_index_entry.second);
+ assert(s.ok());
+ assert(tmp_rec == record);
+
+ existing_a_uniqs[tid].insert(a);
+ existing_c_uniqs[tid].insert(c);
+ }
+
+ for (int i = 0; i < threads; ++i) {
+ uint32_t my_seed = i + shared->GetSeed();
+
+ auto& key_gen_for_a = key_gen_for_a_[i];
+ assert(!key_gen_for_a);
+ uint32_t low = lb_a + i * num_a_per_thread;
+ uint32_t high = (i < threads - 1) ? (low + num_a_per_thread) : ub_a;
+ assert(existing_a_uniqs[i].size() == high - low - 1);
+ assert(non_existing_a_uniqs[i].size() == 1);
+ key_gen_for_a = std::make_unique<KeyGenerator>(
+ my_seed, low, high, std::move(existing_a_uniqs[i]),
+ std::move(non_existing_a_uniqs[i]));
+
+ auto& key_gen_for_c = key_gen_for_c_[i];
+ assert(!key_gen_for_c);
+ low = lb_c + i * num_c_per_thread;
+ high = (i < threads - 1) ? (low + num_c_per_thread) : ub_c;
+ non_existing_c_uniqs[i].insert(high - 1);
+ assert(existing_c_uniqs[i].size() == high - low - 1);
+ assert(non_existing_c_uniqs[i].size() == 1);
+ key_gen_for_c = std::make_unique<KeyGenerator>(
+ my_seed, low, high, std::move(existing_c_uniqs[i]),
+ std::move(non_existing_c_uniqs[i]));
+ }
+#endif // !ROCKSDB_LITE
+}
+
+// Scan an existing, non-empty database.
+// Set up [lb_a, ub_a) and [lb_c, ub_c) as test key ranges.
+// Set up KeyGenerator objects for each sub key range operated on by each
+// thread.
+// Scan the entire database and, for each subrange, populate the sets of
+// existing and non-existing keys. We currently require that the set of
+// non-existing keys be non-empty after initialization.
+void MultiOpsTxnsStressTest::ScanExistingDb(SharedState* shared, int threads) {
+ key_gen_for_a_.resize(threads);
+ key_gen_for_c_.resize(threads);
+
+ KeySpaces key_spaces = ReadKeySpacesDesc(FLAGS_key_spaces_path);
+
+ const uint32_t lb_a = key_spaces.lb_a;
+ const uint32_t ub_a = key_spaces.ub_a;
+ const uint32_t lb_c = key_spaces.lb_c;
+ const uint32_t ub_c = key_spaces.ub_c;
+
+ assert(lb_a < ub_a && lb_c < ub_c);
+
+ fprintf(stdout, "a from [%u, %u), c from [%u, %u)\n",
+ static_cast<unsigned int>(lb_a), static_cast<unsigned int>(ub_a),
+ static_cast<unsigned int>(lb_c), static_cast<unsigned int>(ub_c));
+
+ assert(ub_a > lb_a && ub_a > lb_a + threads);
+ assert(ub_c > lb_c && ub_c > lb_c + threads);
+
+ const uint32_t num_c = ub_c - lb_c;
+ const uint32_t num_c_per_thread = num_c / threads;
+ const uint32_t num_a = ub_a - lb_a;
+ const uint32_t num_a_per_thread = num_a / threads;
+
+ assert(db_);
+ ReadOptions ropts;
+ std::vector<KeySet> existing_a_uniqs(threads);
+ std::vector<KeySet> non_existing_a_uniqs(threads);
+ std::vector<KeySet> existing_c_uniqs(threads);
+ std::vector<KeySet> non_existing_c_uniqs(threads);
+ {
+ std::string pk_lb_str = Record::EncodePrimaryKey(0);
+ std::string pk_ub_str =
+ Record::EncodePrimaryKey(std::numeric_limits<uint32_t>::max());
+ Slice pk_lb = pk_lb_str;
+ Slice pk_ub = pk_ub_str;
+ ropts.iterate_lower_bound = &pk_lb;
+ ropts.iterate_upper_bound = &pk_ub;
+ ropts.total_order_seek = true;
+ std::unique_ptr<Iterator> it(db_->NewIterator(ropts));
+
+ for (it->SeekToFirst(); it->Valid(); it->Next()) {
+ Record record;
+ Status s = record.DecodePrimaryIndexEntry(it->key(), it->value());
+ if (!s.ok()) {
+ fprintf(stderr, "Cannot decode primary index entry (%s => %s): %s\n",
+ it->key().ToString(true).c_str(),
+ it->value().ToString(true).c_str(), s.ToString().c_str());
+ assert(false);
+ }
+ uint32_t a = record.a_value();
+ assert(a >= lb_a);
+ assert(a < ub_a);
+ uint32_t tid = (a - lb_a) / num_a_per_thread;
+ if (tid >= static_cast<uint32_t>(threads)) {
+ tid = threads - 1;
+ }
+
+ existing_a_uniqs[tid].insert(a);
+
+ uint32_t c = record.c_value();
+ assert(c >= lb_c);
+ assert(c < ub_c);
+ tid = (c - lb_c) / num_c_per_thread;
+ if (tid >= static_cast<uint32_t>(threads)) {
+ tid = threads - 1;
+ }
+ auto& existing_c_uniq = existing_c_uniqs[tid];
+ existing_c_uniq.insert(c);
+ }
+
+ for (uint32_t a = lb_a; a < ub_a; ++a) {
+ uint32_t tid = (a - lb_a) / num_a_per_thread;
+ if (tid >= static_cast<uint32_t>(threads)) {
+ tid = threads - 1;
+ }
+ if (0 == existing_a_uniqs[tid].count(a)) {
+ non_existing_a_uniqs[tid].insert(a);
+ }
+ }
+
+ for (uint32_t c = lb_c; c < ub_c; ++c) {
+ uint32_t tid = (c - lb_c) / num_c_per_thread;
+ if (tid >= static_cast<uint32_t>(threads)) {
+ tid = threads - 1;
+ }
+ if (0 == existing_c_uniqs[tid].count(c)) {
+ non_existing_c_uniqs[tid].insert(c);
+ }
+ }
+
+ for (int i = 0; i < threads; ++i) {
+ uint32_t my_seed = i + shared->GetSeed();
+ auto& key_gen_for_a = key_gen_for_a_[i];
+ assert(!key_gen_for_a);
+ uint32_t low = lb_a + i * num_a_per_thread;
+ uint32_t high = (i < threads - 1) ? (low + num_a_per_thread) : ub_a;
+
+ // The following two assertions assume the test thread count and key
+ // space remain the same across different runs. Will need to relax.
+ assert(existing_a_uniqs[i].size() == high - low - 1);
+ assert(non_existing_a_uniqs[i].size() == 1);
+
+ key_gen_for_a = std::make_unique<KeyGenerator>(
+ my_seed, low, high, std::move(existing_a_uniqs[i]),
+ std::move(non_existing_a_uniqs[i]));
+
+ auto& key_gen_for_c = key_gen_for_c_[i];
+ assert(!key_gen_for_c);
+ low = lb_c + i * num_c_per_thread;
+ high = (i < threads - 1) ? (low + num_c_per_thread) : ub_c;
+
+ // The following two assertions assume the test thread count and key
+ // space remain the same across different runs. Will need to relax.
+ assert(existing_c_uniqs[i].size() == high - low - 1);
+ assert(non_existing_c_uniqs[i].size() == 1);
+
+ key_gen_for_c = std::make_unique<KeyGenerator>(
+ my_seed, low, high, std::move(existing_c_uniqs[i]),
+ std::move(non_existing_c_uniqs[i]));
+ }
+ }
+}
+
+StressTest* CreateMultiOpsTxnsStressTest() {
+ return new MultiOpsTxnsStressTest();
+}
+
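+// An invocation that passes the checks performed below might look like the
+// following (an illustrative sketch; flag spellings follow the error messages
+// printed below, the path is made up, and other required db_stress flags such
+// as the read/iterate percentages are omitted):
+//
+//   db_stress -test_multi_ops_txns -use_txn -key_spaces_path=/tmp/key_spaces \
+//             -writepercent=0 -delpercent=0 -delrangepercent=0 \
+//             -column_families=1
+//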
+void CheckAndSetOptionsForMultiOpsTxnStressTest() {
+#ifndef ROCKSDB_LITE
+ if (FLAGS_test_batches_snapshots || FLAGS_test_cf_consistency) {
+ fprintf(stderr,
+ "-test_multi_ops_txns is not compatible with "
+            "-test_batches_snapshots and -test_cf_consistency\n");
+ exit(1);
+ }
+ if (!FLAGS_use_txn) {
+ fprintf(stderr, "-use_txn must be true if -test_multi_ops_txns\n");
+ exit(1);
+ } else if (FLAGS_test_secondary > 0) {
+ fprintf(
+ stderr,
+ "secondary instance does not support replaying logs (MANIFEST + WAL) "
+ "of TransactionDB with write-prepared/write-unprepared policy\n");
+ exit(1);
+ }
+ if (FLAGS_clear_column_family_one_in > 0) {
+ fprintf(stderr,
+ "-test_multi_ops_txns is not compatible with clearing column "
+ "families\n");
+ exit(1);
+ }
+ if (FLAGS_column_families > 1) {
+ // TODO (yanqin) support separating primary index and secondary index in
+ // different column families.
+ fprintf(stderr,
+ "-test_multi_ops_txns currently does not use more than one column "
+ "family\n");
+ exit(1);
+ }
+ if (FLAGS_writepercent > 0 || FLAGS_delpercent > 0 ||
+ FLAGS_delrangepercent > 0) {
+ fprintf(stderr,
+ "-test_multi_ops_txns requires that -writepercent, -delpercent and "
+ "-delrangepercent be 0\n");
+ exit(1);
+ }
+ if (FLAGS_key_spaces_path.empty()) {
+ fprintf(stderr,
+ "Must specify a file to store ranges of A and C via "
+ "-key_spaces_path\n");
+ exit(1);
+ }
+ if (FLAGS_create_timestamped_snapshot_one_in > 0) {
+ if (FLAGS_txn_write_policy !=
+ static_cast<uint64_t>(TxnDBWritePolicy::WRITE_COMMITTED)) {
+ fprintf(stderr,
+ "Timestamped snapshot is not yet supported by "
+ "write-prepared/write-unprepared transactions\n");
+ exit(1);
+ }
+ }
+ if (FLAGS_sync_fault_injection == 1) {
+ fprintf(stderr,
+ "Sync fault injection is currently not supported in "
+ "-test_multi_ops_txns\n");
+ exit(1);
+ }
+#else
+ fprintf(stderr, "-test_multi_ops_txns not supported in ROCKSDB_LITE mode\n");
+ exit(1);
+#endif // !ROCKSDB_LITE
+}
+} // namespace ROCKSDB_NAMESPACE
+
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/multi_ops_txns_stress.h b/src/rocksdb/db_stress_tool/multi_ops_txns_stress.h
new file mode 100644
index 000000000..7463d05d7
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/multi_ops_txns_stress.h
@@ -0,0 +1,444 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifdef GFLAGS
+#include "db_stress_tool/db_stress_common.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+// This file defines MultiOpsTxnsStress so that we can stress test RocksDB
+// transactions on a simple, emulated relational table.
+//
+// The record format is similar to the example found at
+// https://github.com/facebook/mysql-5.6/wiki/MyRocks-record-format.
+//
+// The table is created by
+// ```
+// create table t1 (
+// a int primary key,
+// b int,
+// c int,
+// key(c),
+// )
+// ```
+//
+// (For simplicity, we use uint32_t for int here.)
+//
+// For this table, there is a primary index using `a`, as well as a secondary
+// index using `c` and `a`.
+//
+// Primary key format:
+// | index id | M(a) |
+// Primary index value:
+// | b | c |
+// M(a) represents the big-endian format of a.
+//
+// Secondary key format:
+// | index id | M(c) | M(a) |
+// Secondary index value:
+// | crc32 |
+// Similarly to M(a), M(c) is the big-endian format of c.
+//
+// The in-memory representation of a record is defined in class
+// MultiOpsTxnsStressTest::Record, which includes a number of helper methods
+// to encode/decode primary index keys, primary index values, secondary index
+// keys, secondary index values, etc.
+//
+// The primary index and the secondary index may reside in different column
+// families or share the same one. The current implementation puts them in
+// the same (default) column family; this is subject to change if we find it
+// interesting to test the other case.
+//
+// Class MultiOpsTxnsStressTest has the following transactions for testing.
+//
+// 1. Primary key update
+// UPDATE t1 SET a = 3 WHERE a = 2;
+// ```
+// tx->GetForUpdate(primary key a=2)
+// tx->GetForUpdate(primary key a=3)
+// tx->Delete(primary key a=2)
+// tx->Put(primary key a=3, value)
+// tx->batch->SingleDelete(secondary key a=2)
+// tx->batch->Put(secondary key a=3, value)
+// tx->Prepare()
+// tx->Commit()
+// ```
+//
+// 2. Secondary key update
+// UPDATE t1 SET c = 3 WHERE c = 2;
+// ```
+// iter->Seek(secondary key)
+// // Get corresponding primary key value(s) from iterator
+// tx->GetForUpdate(primary key)
+// tx->Put(primary key, value c=3)
+// tx->batch->SingleDelete(secondary key c=2)
+// tx->batch->Put(secondary key c=3)
+// tx->Prepare()
+// tx->Commit()
+// ```
+//
+// 3. Primary index value update
+// UPDATE t1 SET b = b + 1 WHERE a = 2;
+// ```
+// tx->GetForUpdate(primary key a=2)
+// tx->Put(primary key a=2, value b=b+1)
+// tx->Prepare()
+// tx->Commit()
+// ```
+//
+// 4. Point lookup
+// SELECT * FROM t1 WHERE a = 3;
+// ```
+// tx->Get(primary key a=3)
+// tx->Commit()
+// ```
+//
+// 5. Range scan
+// SELECT * FROM t1 WHERE c = 2;
+// ```
+// it = tx->GetIterator()
+// it->Seek(secondary key c=2)
+// tx->Commit()
+// ```
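+//
+// As a rough illustration of the on-disk layout (a sketch only; the field
+// widths follow the constants in Record below, and the exact encoding of the
+// index id prefix is an assumption), the row (a=2, b=10, c=5) becomes:
+// ```
+// primary entry:
+//   key   = | kPrimaryIndexId | M(2) |          (4 + 4 = 8 bytes)
+//   value = | 10 | 5 |                          (4 + 4 = 8 bytes)
+// secondary entry:
+//   key   = | kSecondaryIndexId | M(5) | M(2) | (4 + 4 + 4 = 12 bytes)
+//   value = | crc32 |                           (4 bytes)
+// ```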
+
+class MultiOpsTxnsStressTest : public StressTest {
+ public:
+ class Record {
+ public:
+ static constexpr uint32_t kMetadataPrefix = 0;
+ static constexpr uint32_t kPrimaryIndexId = 1;
+ static constexpr uint32_t kSecondaryIndexId = 2;
+
+ static constexpr size_t kPrimaryIndexEntrySize = 8 + 8;
+ static constexpr size_t kSecondaryIndexEntrySize = 12 + 4;
+
+ static_assert(kPrimaryIndexId < kSecondaryIndexId,
+ "kPrimaryIndexId must be smaller than kSecondaryIndexId");
+
+ static_assert(sizeof(kPrimaryIndexId) == sizeof(uint32_t),
+ "kPrimaryIndexId must be 4 bytes");
+ static_assert(sizeof(kSecondaryIndexId) == sizeof(uint32_t),
+ "kSecondaryIndexId must be 4 bytes");
+
+ // Used for generating search key to probe primary index.
+ static std::string EncodePrimaryKey(uint32_t a);
+ // Used for generating search prefix to probe secondary index.
+ static std::string EncodeSecondaryKey(uint32_t c);
+ // Used for generating search key to probe secondary index.
+ static std::string EncodeSecondaryKey(uint32_t c, uint32_t a);
+
+ static std::tuple<Status, uint32_t, uint32_t> DecodePrimaryIndexValue(
+ Slice primary_index_value);
+
+ static std::pair<Status, uint32_t> DecodeSecondaryIndexValue(
+ Slice secondary_index_value);
+
+ Record() = default;
+ Record(uint32_t _a, uint32_t _b, uint32_t _c) : a_(_a), b_(_b), c_(_c) {}
+
+ bool operator==(const Record& other) const {
+ return a_ == other.a_ && b_ == other.b_ && c_ == other.c_;
+ }
+
+ bool operator!=(const Record& other) const { return !(*this == other); }
+
+ std::pair<std::string, std::string> EncodePrimaryIndexEntry() const;
+
+ std::string EncodePrimaryKey() const;
+
+ std::string EncodePrimaryIndexValue() const;
+
+ std::pair<std::string, std::string> EncodeSecondaryIndexEntry() const;
+
+ std::string EncodeSecondaryKey() const;
+
+ Status DecodePrimaryIndexEntry(Slice primary_index_key,
+ Slice primary_index_value);
+
+ Status DecodeSecondaryIndexEntry(Slice secondary_index_key,
+ Slice secondary_index_value);
+
+ uint32_t a_value() const { return a_; }
+ uint32_t b_value() const { return b_; }
+ uint32_t c_value() const { return c_; }
+
+ void SetA(uint32_t _a) { a_ = _a; }
+ void SetB(uint32_t _b) { b_ = _b; }
+ void SetC(uint32_t _c) { c_ = _c; }
+
+ std::string ToString() const {
+ std::string ret("(");
+ ret.append(std::to_string(a_));
+ ret.append(",");
+ ret.append(std::to_string(b_));
+ ret.append(",");
+ ret.append(std::to_string(c_));
+ ret.append(")");
+ return ret;
+ }
+
+ private:
+ friend class InvariantChecker;
+
+ uint32_t a_{0};
+ uint32_t b_{0};
+ uint32_t c_{0};
+ };
+
+ MultiOpsTxnsStressTest() {}
+
+ ~MultiOpsTxnsStressTest() override {}
+
+ void FinishInitDb(SharedState*) override;
+
+ void ReopenAndPreloadDbIfNeeded(SharedState* shared);
+
+ bool IsStateTracked() const override { return false; }
+
+ Status TestGet(ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override;
+
+ std::vector<Status> TestMultiGet(
+ ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override;
+
+ Status TestPrefixScan(ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override;
+
+ // Given a key K, this creates an iterator which scans to K and then
+ // does a random sequence of Next/Prev operations.
+ Status TestIterate(ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override;
+
+ Status TestPut(ThreadState* thread, WriteOptions& write_opts,
+ const ReadOptions& read_opts, const std::vector<int>& cf_ids,
+ const std::vector<int64_t>& keys, char (&value)[100]) override;
+
+ Status TestDelete(ThreadState* thread, WriteOptions& write_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override;
+
+ Status TestDeleteRange(ThreadState* thread, WriteOptions& write_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override;
+
+ void TestIngestExternalFile(ThreadState* thread,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override;
+
+ void TestCompactRange(ThreadState* thread, int64_t rand_key,
+ const Slice& start_key,
+ ColumnFamilyHandle* column_family) override;
+
+ Status TestBackupRestore(ThreadState* thread,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override;
+
+ Status TestCheckpoint(ThreadState* thread,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override;
+
+#ifndef ROCKSDB_LITE
+ Status TestApproximateSize(ThreadState* thread, uint64_t iteration,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override;
+#endif // !ROCKSDB_LITE
+
+ Status TestCustomOperations(
+ ThreadState* thread,
+ const std::vector<int>& rand_column_families) override;
+
+ void RegisterAdditionalListeners() override;
+
+#ifndef ROCKSDB_LITE
+ void PrepareTxnDbOptions(SharedState* /*shared*/,
+ TransactionDBOptions& txn_db_opts) override;
+#endif // !ROCKSDB_LITE
+
+ Status PrimaryKeyUpdateTxn(ThreadState* thread, uint32_t old_a,
+ uint32_t old_a_pos, uint32_t new_a);
+
+ Status SecondaryKeyUpdateTxn(ThreadState* thread, uint32_t old_c,
+ uint32_t old_c_pos, uint32_t new_c);
+
+ Status UpdatePrimaryIndexValueTxn(ThreadState* thread, uint32_t a,
+ uint32_t b_delta);
+
+ Status PointLookupTxn(ThreadState* thread, ReadOptions ropts, uint32_t a);
+
+ Status RangeScanTxn(ThreadState* thread, ReadOptions ropts, uint32_t c);
+
+ void VerifyDb(ThreadState* thread) const override;
+
+ void ContinuouslyVerifyDb(ThreadState* thread) const override {
+ VerifyDb(thread);
+ }
+
+ void VerifyPkSkFast(int job_id);
+
+ protected:
+ class Counter {
+ public:
+ uint64_t Next() { return value_.fetch_add(1); }
+
+ private:
+ std::atomic<uint64_t> value_ = Env::Default()->NowNanos();
+ };
+
+ using KeySet = std::set<uint32_t>;
+ class KeyGenerator {
+ public:
+ explicit KeyGenerator(uint32_t s, uint32_t low, uint32_t high,
+ KeySet&& existing_uniq, KeySet&& non_existing_uniq)
+ : rand_(s),
+ low_(low),
+ high_(high),
+ existing_uniq_(std::move(existing_uniq)),
+ non_existing_uniq_(std::move(non_existing_uniq)) {}
+ ~KeyGenerator() {
+ assert(!existing_uniq_.empty());
+ assert(!non_existing_uniq_.empty());
+ }
+ void FinishInit();
+
+ std::pair<uint32_t, uint32_t> ChooseExisting();
+ void Replace(uint32_t old_val, uint32_t old_pos, uint32_t new_val);
+ uint32_t Allocate();
+ void UndoAllocation(uint32_t new_val);
+
+ std::string ToString() const {
+ std::ostringstream oss;
+ oss << "[" << low_ << ", " << high_ << "): " << existing_.size()
+ << " elements, " << existing_uniq_.size() << " unique values, "
+ << non_existing_uniq_.size() << " unique non-existing values";
+ return oss.str();
+ }
+
+ private:
+ Random rand_;
+ uint32_t low_ = 0;
+ uint32_t high_ = 0;
+ std::vector<uint32_t> existing_{};
+ KeySet existing_uniq_{};
+ KeySet non_existing_uniq_{};
+ bool initialized_ = false;
+ };
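+
+  // Intended lifecycle of KeyGenerator (a hedged sketch inferred from the
+  // interface above, not the exact call sequence): ChooseExisting() picks a
+  // currently existing key, Allocate() reserves a currently non-existing
+  // key, and after a key-update transaction either Replace(old, pos, new)
+  // records the swap on success or UndoAllocation(new) releases the
+  // reservation on failure.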
+
+ // Return <a, pos>
+ std::pair<uint32_t, uint32_t> ChooseExistingA(ThreadState* thread);
+
+ uint32_t GenerateNextA(ThreadState* thread);
+
+ // Return <c, pos>
+ std::pair<uint32_t, uint32_t> ChooseExistingC(ThreadState* thread);
+
+ uint32_t GenerateNextC(ThreadState* thread);
+
+#ifndef ROCKSDB_LITE
+ // Randomly commit or rollback `txn`
+ void ProcessRecoveredPreparedTxnsHelper(Transaction* txn,
+ SharedState*) override;
+
+  // Some applications, e.g. MyRocks, write a KV pair to the database via the
+  // commit-time write batch (ctwb) in addition to the transaction's regular
+  // write batch. The key is usually a constant representing some system
+  // metadata, while the value is monotonically increasing and represents the
+  // actual value of the metadata. WriteToCommitTimeWriteBatch() emulates
+  // this scenario.
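+  //
+  // A minimal sketch of that pattern (illustrative only; the key name is
+  // made up and this is not the actual implementation):
+  // ```
+  // WriteBatch* ctwb = txn.GetCommitTimeWriteBatch();
+  // ctwb->Put("some_metadata_key", std::to_string(counter_.Next()));
+  // // later: txn.Prepare(); txn.Commit();
+  // ```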
+ Status WriteToCommitTimeWriteBatch(Transaction& txn);
+
+ Status CommitAndCreateTimestampedSnapshotIfNeeded(ThreadState* thread,
+ Transaction& txn);
+
+ void SetupSnapshot(ThreadState* thread, ReadOptions& read_opts,
+ Transaction& txn,
+ std::shared_ptr<const Snapshot>& snapshot);
+#endif  // !ROCKSDB_LITE
+
+ std::vector<std::unique_ptr<KeyGenerator>> key_gen_for_a_;
+ std::vector<std::unique_ptr<KeyGenerator>> key_gen_for_c_;
+
+ Counter counter_{};
+
+ private:
+ struct KeySpaces {
+ uint32_t lb_a = 0;
+ uint32_t ub_a = 0;
+ uint32_t lb_c = 0;
+ uint32_t ub_c = 0;
+
+ explicit KeySpaces() = default;
+ explicit KeySpaces(uint32_t _lb_a, uint32_t _ub_a, uint32_t _lb_c,
+ uint32_t _ub_c)
+ : lb_a(_lb_a), ub_a(_ub_a), lb_c(_lb_c), ub_c(_ub_c) {}
+
+ std::string EncodeTo() const;
+ bool DecodeFrom(Slice data);
+ };
+
+ void PersistKeySpacesDesc(const std::string& key_spaces_path, uint32_t lb_a,
+ uint32_t ub_a, uint32_t lb_c, uint32_t ub_c);
+
+ KeySpaces ReadKeySpacesDesc(const std::string& key_spaces_path);
+
+ void PreloadDb(SharedState* shared, int threads, uint32_t lb_a, uint32_t ub_a,
+ uint32_t lb_c, uint32_t ub_c);
+
+ void ScanExistingDb(SharedState* shared, int threads);
+};
+
+class InvariantChecker {
+ public:
+ static_assert(sizeof(MultiOpsTxnsStressTest::Record().a_) == sizeof(uint32_t),
+ "MultiOpsTxnsStressTest::Record::a_ must be 4 bytes");
+ static_assert(sizeof(MultiOpsTxnsStressTest::Record().b_) == sizeof(uint32_t),
+ "MultiOpsTxnsStressTest::Record::b_ must be 4 bytes");
+ static_assert(sizeof(MultiOpsTxnsStressTest::Record().c_) == sizeof(uint32_t),
+ "MultiOpsTxnsStressTest::Record::c_ must be 4 bytes");
+};
+
+class MultiOpsTxnsStressListener : public EventListener {
+ public:
+ explicit MultiOpsTxnsStressListener(MultiOpsTxnsStressTest* stress_test)
+ : stress_test_(stress_test) {
+ assert(stress_test_);
+ }
+
+#ifndef ROCKSDB_LITE
+ ~MultiOpsTxnsStressListener() override {}
+
+ void OnFlushCompleted(DB* db, const FlushJobInfo& info) override {
+ assert(db);
+#ifdef NDEBUG
+ (void)db;
+#endif
+ assert(info.cf_id == 0);
+ stress_test_->VerifyPkSkFast(info.job_id);
+ }
+
+ void OnCompactionCompleted(DB* db, const CompactionJobInfo& info) override {
+ assert(db);
+#ifdef NDEBUG
+ (void)db;
+#endif
+ assert(info.cf_id == 0);
+ stress_test_->VerifyPkSkFast(info.job_id);
+ }
+#endif  // !ROCKSDB_LITE
+
+ private:
+ MultiOpsTxnsStressTest* const stress_test_ = nullptr;
+};
+
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS
diff --git a/src/rocksdb/db_stress_tool/no_batched_ops_stress.cc b/src/rocksdb/db_stress_tool/no_batched_ops_stress.cc
new file mode 100644
index 000000000..bf01b788f
--- /dev/null
+++ b/src/rocksdb/db_stress_tool/no_batched_ops_stress.cc
@@ -0,0 +1,1505 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifdef GFLAGS
+#include "db_stress_tool/db_stress_common.h"
+#include "rocksdb/utilities/transaction_db.h"
+#include "utilities/fault_injection_fs.h"
+
+namespace ROCKSDB_NAMESPACE {
+class NonBatchedOpsStressTest : public StressTest {
+ public:
+ NonBatchedOpsStressTest() {}
+
+ virtual ~NonBatchedOpsStressTest() {}
+
+ void VerifyDb(ThreadState* thread) const override {
+ // This `ReadOptions` is for validation purposes. Ignore
+ // `FLAGS_rate_limit_user_ops` to avoid slowing any validation.
+ ReadOptions options(FLAGS_verify_checksum, true);
+ std::string ts_str;
+ Slice ts;
+ if (FLAGS_user_timestamp_size > 0) {
+ ts_str = GetNowNanos();
+ ts = ts_str;
+ options.timestamp = &ts;
+ }
+
+ auto shared = thread->shared;
+ const int64_t max_key = shared->GetMaxKey();
+ const int64_t keys_per_thread = max_key / shared->GetNumThreads();
+ int64_t start = keys_per_thread * thread->tid;
+ int64_t end = start + keys_per_thread;
+ uint64_t prefix_to_use =
+ (FLAGS_prefix_size < 0) ? 1 : static_cast<size_t>(FLAGS_prefix_size);
+
+ if (thread->tid == shared->GetNumThreads() - 1) {
+ end = max_key;
+ }
+
+ for (size_t cf = 0; cf < column_families_.size(); ++cf) {
+ if (thread->shared->HasVerificationFailedYet()) {
+ break;
+ }
+
+ enum class VerificationMethod {
+ kIterator,
+ kGet,
+ kMultiGet,
+ kGetMergeOperands,
+ // Add any new items above kNumberOfMethods
+ kNumberOfMethods
+ };
+
+ constexpr int num_methods =
+ static_cast<int>(VerificationMethod::kNumberOfMethods);
+
+ const VerificationMethod method =
+ static_cast<VerificationMethod>(thread->rand.Uniform(
+ (FLAGS_user_timestamp_size > 0) ? num_methods - 1 : num_methods));
+
+ if (method == VerificationMethod::kIterator) {
+ std::unique_ptr<Iterator> iter(
+ db_->NewIterator(options, column_families_[cf]));
+
+ std::string seek_key = Key(start);
+ iter->Seek(seek_key);
+
+ Slice prefix(seek_key.data(), prefix_to_use);
+
+ for (int64_t i = start; i < end; ++i) {
+ if (thread->shared->HasVerificationFailedYet()) {
+ break;
+ }
+
+ const std::string key = Key(i);
+ const Slice k(key);
+ const Slice pfx(key.data(), prefix_to_use);
+
+ // Reseek when the prefix changes
+ if (prefix_to_use > 0 && prefix.compare(pfx) != 0) {
+ iter->Seek(k);
+ seek_key = key;
+ prefix = Slice(seek_key.data(), prefix_to_use);
+ }
+
+ Status s = iter->status();
+
+ std::string from_db;
+
+ if (iter->Valid()) {
+ const int diff = iter->key().compare(k);
+
+ if (diff > 0) {
+ s = Status::NotFound();
+ } else if (diff == 0) {
+ const WideColumns expected_columns = GenerateExpectedWideColumns(
+ GetValueBase(iter->value()), iter->value());
+ if (iter->columns() != expected_columns) {
+ VerificationAbort(shared, static_cast<int>(cf), i,
+ iter->value(), iter->columns(),
+ expected_columns);
+ break;
+ }
+
+ from_db = iter->value().ToString();
+ iter->Next();
+ } else {
+ assert(diff < 0);
+
+ VerificationAbort(shared, "An out of range key was found",
+ static_cast<int>(cf), i);
+ }
+ } else {
+ // The iterator found no value for the key in question, so do not
+ // move to the next item in the iterator
+ s = Status::NotFound();
+ }
+
+ VerifyOrSyncValue(static_cast<int>(cf), i, options, shared, from_db,
+ s, /* strict */ true);
+
+ if (!from_db.empty()) {
+ PrintKeyValue(static_cast<int>(cf), static_cast<uint32_t>(i),
+ from_db.data(), from_db.size());
+ }
+ }
+ } else if (method == VerificationMethod::kGet) {
+ for (int64_t i = start; i < end; ++i) {
+ if (thread->shared->HasVerificationFailedYet()) {
+ break;
+ }
+
+ const std::string key = Key(i);
+ std::string from_db;
+
+ Status s = db_->Get(options, column_families_[cf], key, &from_db);
+
+ VerifyOrSyncValue(static_cast<int>(cf), i, options, shared, from_db,
+ s, /* strict */ true);
+
+ if (!from_db.empty()) {
+ PrintKeyValue(static_cast<int>(cf), static_cast<uint32_t>(i),
+ from_db.data(), from_db.size());
+ }
+ }
+ } else if (method == VerificationMethod::kMultiGet) {
+ for (int64_t i = start; i < end;) {
+ if (thread->shared->HasVerificationFailedYet()) {
+ break;
+ }
+
+ // Keep the batch size to some reasonable value
+ size_t batch_size = thread->rand.Uniform(128) + 1;
+ batch_size = std::min<size_t>(batch_size, end - i);
+
+ std::vector<std::string> keystrs(batch_size);
+ std::vector<Slice> keys(batch_size);
+ std::vector<PinnableSlice> values(batch_size);
+ std::vector<Status> statuses(batch_size);
+
+ for (size_t j = 0; j < batch_size; ++j) {
+ keystrs[j] = Key(i + j);
+ keys[j] = Slice(keystrs[j].data(), keystrs[j].size());
+ }
+
+ db_->MultiGet(options, column_families_[cf], batch_size, keys.data(),
+ values.data(), statuses.data());
+
+ for (size_t j = 0; j < batch_size; ++j) {
+ const std::string from_db = values[j].ToString();
+
+ VerifyOrSyncValue(static_cast<int>(cf), i + j, options, shared,
+ from_db, statuses[j], /* strict */ true);
+
+ if (!from_db.empty()) {
+ PrintKeyValue(static_cast<int>(cf), static_cast<uint32_t>(i + j),
+ from_db.data(), from_db.size());
+ }
+ }
+
+ i += batch_size;
+ }
+ } else {
+ assert(method == VerificationMethod::kGetMergeOperands);
+
+ // Start off with small size that will be increased later if necessary
+ std::vector<PinnableSlice> values(4);
+
+ GetMergeOperandsOptions merge_operands_info;
+ merge_operands_info.expected_max_number_of_operands =
+ static_cast<int>(values.size());
+
+ for (int64_t i = start; i < end; ++i) {
+ if (thread->shared->HasVerificationFailedYet()) {
+ break;
+ }
+
+ const std::string key = Key(i);
+ const Slice k(key);
+ std::string from_db;
+ int number_of_operands = 0;
+
+ Status s = db_->GetMergeOperands(options, column_families_[cf], k,
+ values.data(), &merge_operands_info,
+ &number_of_operands);
+
+ if (s.IsIncomplete()) {
+ // Need to resize values as there are more than values.size() merge
+ // operands on this key. Should only happen a few times when we
+ // encounter a key that had more merge operands than any key seen so
+ // far
+ values.resize(number_of_operands);
+ merge_operands_info.expected_max_number_of_operands =
+ static_cast<int>(number_of_operands);
+ s = db_->GetMergeOperands(options, column_families_[cf], k,
+ values.data(), &merge_operands_info,
+ &number_of_operands);
+ }
+          // It is assumed here that GetMergeOperands always sets
+          // number_of_operands.
+ if (number_of_operands) {
+ from_db = values[number_of_operands - 1].ToString();
+ }
+
+ VerifyOrSyncValue(static_cast<int>(cf), i, options, shared, from_db,
+ s, /* strict */ true);
+
+ if (!from_db.empty()) {
+ PrintKeyValue(static_cast<int>(cf), static_cast<uint32_t>(i),
+ from_db.data(), from_db.size());
+ }
+ }
+ }
+ }
+ }
+
+#ifndef ROCKSDB_LITE
+ void ContinuouslyVerifyDb(ThreadState* thread) const override {
+ if (!cmp_db_) {
+ return;
+ }
+ assert(cmp_db_);
+ assert(!cmp_cfhs_.empty());
+ Status s = cmp_db_->TryCatchUpWithPrimary();
+ if (!s.ok()) {
+ assert(false);
+ exit(1);
+ }
+
+ const auto checksum_column_family = [](Iterator* iter,
+ uint32_t* checksum) -> Status {
+ assert(nullptr != checksum);
+ uint32_t ret = 0;
+ for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+ ret = crc32c::Extend(ret, iter->key().data(), iter->key().size());
+ ret = crc32c::Extend(ret, iter->value().data(), iter->value().size());
+ }
+ *checksum = ret;
+ return iter->status();
+ };
+
+ auto* shared = thread->shared;
+ assert(shared);
+ const int64_t max_key = shared->GetMaxKey();
+ ReadOptions read_opts(FLAGS_verify_checksum, true);
+ std::string ts_str;
+ Slice ts;
+ if (FLAGS_user_timestamp_size > 0) {
+ ts_str = GetNowNanos();
+ ts = ts_str;
+ read_opts.timestamp = &ts;
+ }
+
+ static Random64 rand64(shared->GetSeed());
+
+ {
+ uint32_t crc = 0;
+ std::unique_ptr<Iterator> it(cmp_db_->NewIterator(read_opts));
+ s = checksum_column_family(it.get(), &crc);
+ if (!s.ok()) {
+ fprintf(stderr, "Computing checksum of default cf: %s\n",
+ s.ToString().c_str());
+ assert(false);
+ }
+ }
+
+ for (auto* handle : cmp_cfhs_) {
+ if (thread->rand.OneInOpt(3)) {
+ // Use Get()
+ uint64_t key = rand64.Uniform(static_cast<uint64_t>(max_key));
+ std::string key_str = Key(key);
+ std::string value;
+ std::string key_ts;
+ s = cmp_db_->Get(read_opts, handle, key_str, &value,
+ FLAGS_user_timestamp_size > 0 ? &key_ts : nullptr);
+ s.PermitUncheckedError();
+ } else {
+ // Use range scan
+ std::unique_ptr<Iterator> iter(cmp_db_->NewIterator(read_opts, handle));
+ uint32_t rnd = (thread->rand.Next()) % 4;
+ if (0 == rnd) {
+ // SeekToFirst() + Next()*5
+ read_opts.total_order_seek = true;
+ iter->SeekToFirst();
+ for (int i = 0; i < 5 && iter->Valid(); ++i, iter->Next()) {
+ }
+ } else if (1 == rnd) {
+ // SeekToLast() + Prev()*5
+ read_opts.total_order_seek = true;
+ iter->SeekToLast();
+ for (int i = 0; i < 5 && iter->Valid(); ++i, iter->Prev()) {
+ }
+ } else if (2 == rnd) {
+ // Seek() +Next()*5
+ uint64_t key = rand64.Uniform(static_cast<uint64_t>(max_key));
+ std::string key_str = Key(key);
+ iter->Seek(key_str);
+ for (int i = 0; i < 5 && iter->Valid(); ++i, iter->Next()) {
+ }
+ } else {
+ // SeekForPrev() + Prev()*5
+ uint64_t key = rand64.Uniform(static_cast<uint64_t>(max_key));
+ std::string key_str = Key(key);
+ iter->SeekForPrev(key_str);
+ for (int i = 0; i < 5 && iter->Valid(); ++i, iter->Prev()) {
+ }
+ }
+ }
+ }
+ }
+#else
+ void ContinuouslyVerifyDb(ThreadState* /*thread*/) const override {}
+#endif // ROCKSDB_LITE
+
+ void MaybeClearOneColumnFamily(ThreadState* thread) override {
+ if (FLAGS_column_families > 1) {
+ if (thread->rand.OneInOpt(FLAGS_clear_column_family_one_in)) {
+ // drop column family and then create it again (can't drop default)
+ int cf = thread->rand.Next() % (FLAGS_column_families - 1) + 1;
+ std::string new_name =
+ std::to_string(new_column_family_name_.fetch_add(1));
+ {
+ MutexLock l(thread->shared->GetMutex());
+ fprintf(
+ stdout,
+ "[CF %d] Dropping and recreating column family. new name: %s\n",
+ cf, new_name.c_str());
+ }
+ thread->shared->LockColumnFamily(cf);
+ Status s = db_->DropColumnFamily(column_families_[cf]);
+ delete column_families_[cf];
+ if (!s.ok()) {
+ fprintf(stderr, "dropping column family error: %s\n",
+ s.ToString().c_str());
+ std::terminate();
+ }
+ s = db_->CreateColumnFamily(ColumnFamilyOptions(options_), new_name,
+ &column_families_[cf]);
+ column_family_names_[cf] = new_name;
+ thread->shared->ClearColumnFamily(cf);
+ if (!s.ok()) {
+ fprintf(stderr, "creating column family error: %s\n",
+ s.ToString().c_str());
+ std::terminate();
+ }
+ thread->shared->UnlockColumnFamily(cf);
+ }
+ }
+ }
+
+ bool ShouldAcquireMutexOnKey() const override { return true; }
+
+ bool IsStateTracked() const override { return true; }
+
+ Status TestGet(ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ auto cfh = column_families_[rand_column_families[0]];
+ std::string key_str = Key(rand_keys[0]);
+ Slice key = key_str;
+ std::string from_db;
+ int error_count = 0;
+
+ if (fault_fs_guard) {
+ fault_fs_guard->EnableErrorInjection();
+ SharedState::ignore_read_error = false;
+ }
+
+ std::unique_ptr<MutexLock> lock(new MutexLock(
+ thread->shared->GetMutexForKey(rand_column_families[0], rand_keys[0])));
+
+ ReadOptions read_opts_copy = read_opts;
+ std::string read_ts_str;
+ Slice read_ts_slice;
+ bool read_older_ts = MaybeUseOlderTimestampForPointLookup(
+ thread, read_ts_str, read_ts_slice, read_opts_copy);
+
+ Status s = db_->Get(read_opts_copy, cfh, key, &from_db);
+ if (fault_fs_guard) {
+ error_count = fault_fs_guard->GetAndResetErrorCount();
+ }
+ if (s.ok()) {
+ if (fault_fs_guard) {
+ if (error_count && !SharedState::ignore_read_error) {
+ // Grab mutex so multiple thread don't try to print the
+ // stack trace at the same time
+ MutexLock l(thread->shared->GetMutex());
+ fprintf(stderr, "Didn't get expected error from Get\n");
+ fprintf(stderr, "Callstack that injected the fault\n");
+ fault_fs_guard->PrintFaultBacktrace();
+ std::terminate();
+ }
+ }
+ // found case
+ thread->stats.AddGets(1, 1);
+ // we only have the latest expected state
+ if (!FLAGS_skip_verifydb && !read_opts_copy.timestamp &&
+ thread->shared->Get(rand_column_families[0], rand_keys[0]) ==
+ SharedState::DELETION_SENTINEL) {
+ thread->shared->SetVerificationFailure();
+ fprintf(stderr,
+ "error : inconsistent values for key %s: Get returns %s, "
+ "expected state does not have the key.\n",
+ key.ToString(true).c_str(), StringToHex(from_db).c_str());
+ }
+ } else if (s.IsNotFound()) {
+ // not found case
+ thread->stats.AddGets(1, 0);
+ if (!FLAGS_skip_verifydb && !read_older_ts) {
+ auto expected =
+ thread->shared->Get(rand_column_families[0], rand_keys[0]);
+ if (expected != SharedState::DELETION_SENTINEL &&
+ expected != SharedState::UNKNOWN_SENTINEL) {
+ thread->shared->SetVerificationFailure();
+ fprintf(stderr,
+ "error : inconsistent values for key %s: expected state has "
+ "the key, Get() returns NotFound.\n",
+ key.ToString(true).c_str());
+ }
+ }
+ } else {
+ if (error_count == 0) {
+ // errors case
+ thread->stats.AddErrors(1);
+ } else {
+ thread->stats.AddVerifiedErrors(1);
+ }
+ }
+ if (fault_fs_guard) {
+ fault_fs_guard->DisableErrorInjection();
+ }
+ return s;
+ }
+
+ std::vector<Status> TestMultiGet(
+ ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ size_t num_keys = rand_keys.size();
+ std::vector<std::string> key_str;
+ std::vector<Slice> keys;
+ key_str.reserve(num_keys);
+ keys.reserve(num_keys);
+ std::vector<PinnableSlice> values(num_keys);
+ std::vector<Status> statuses(num_keys);
+ ColumnFamilyHandle* cfh = column_families_[rand_column_families[0]];
+ int error_count = 0;
+ // Do a consistency check between Get and MultiGet. Don't do it too
+ // often as it will slow db_stress down
+ bool do_consistency_check = thread->rand.OneIn(4);
+
+ ReadOptions readoptionscopy = read_opts;
+ if (do_consistency_check) {
+ readoptionscopy.snapshot = db_->GetSnapshot();
+ }
+
+ std::string read_ts_str;
+ Slice read_ts_slice;
+ MaybeUseOlderTimestampForPointLookup(thread, read_ts_str, read_ts_slice,
+ readoptionscopy);
+
+ readoptionscopy.rate_limiter_priority =
+ FLAGS_rate_limit_user_ops ? Env::IO_USER : Env::IO_TOTAL;
+
+ // To appease clang analyzer
+ const bool use_txn = FLAGS_use_txn;
+
+ // Create a transaction in order to write some data. The purpose is to
+ // exercise WriteBatchWithIndex::MultiGetFromBatchAndDB. The transaction
+ // will be rolled back once MultiGet returns.
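+  // Roughly (an illustrative sketch, not the code below):
+  //   txn->Put(cfh, keys[i], v);   // lands in the txn's write batch
+  //   txn->MultiGet(...);          // served from the batch and the DB
+  //   RollbackTxn(txn);            // nothing is persisted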
+#ifndef ROCKSDB_LITE
+ Transaction* txn = nullptr;
+ if (use_txn) {
+ WriteOptions wo;
+ if (FLAGS_rate_limit_auto_wal_flush) {
+ wo.rate_limiter_priority = Env::IO_USER;
+ }
+ Status s = NewTxn(wo, &txn);
+ if (!s.ok()) {
+ fprintf(stderr, "NewTxn: %s\n", s.ToString().c_str());
+ std::terminate();
+ }
+ }
+#endif
+ for (size_t i = 0; i < num_keys; ++i) {
+ key_str.emplace_back(Key(rand_keys[i]));
+ keys.emplace_back(key_str.back());
+#ifndef ROCKSDB_LITE
+ if (use_txn) {
+        // With a 1 in 10 probability, also insert the key just added to the
+        // MultiGet batch into the transaction. This creates an overlap with
+        // the MultiGet keys and exercises some corner cases in the code.
+ if (thread->rand.OneIn(10)) {
+ int op = thread->rand.Uniform(2);
+ Status s;
+ switch (op) {
+ case 0:
+ case 1: {
+ uint32_t value_base =
+ thread->rand.Next() % thread->shared->UNKNOWN_SENTINEL;
+ char value[100];
+ size_t sz = GenerateValue(value_base, value, sizeof(value));
+ Slice v(value, sz);
+ if (op == 0) {
+ s = txn->Put(cfh, keys.back(), v);
+ } else {
+ s = txn->Merge(cfh, keys.back(), v);
+ }
+ break;
+ }
+ case 2:
+ s = txn->Delete(cfh, keys.back());
+ break;
+ default:
+ assert(false);
+ }
+ if (!s.ok()) {
+ fprintf(stderr, "Transaction put: %s\n", s.ToString().c_str());
+ std::terminate();
+ }
+ }
+ }
+#endif
+ }
+
+ if (!use_txn) {
+ if (fault_fs_guard) {
+ fault_fs_guard->EnableErrorInjection();
+ SharedState::ignore_read_error = false;
+ }
+ db_->MultiGet(readoptionscopy, cfh, num_keys, keys.data(), values.data(),
+ statuses.data());
+ if (fault_fs_guard) {
+ error_count = fault_fs_guard->GetAndResetErrorCount();
+ }
+ } else {
+#ifndef ROCKSDB_LITE
+ txn->MultiGet(readoptionscopy, cfh, num_keys, keys.data(), values.data(),
+ statuses.data());
+#endif
+ }
+
+ if (fault_fs_guard && error_count && !SharedState::ignore_read_error) {
+ int stat_nok = 0;
+ for (const auto& s : statuses) {
+ if (!s.ok() && !s.IsNotFound()) {
+ stat_nok++;
+ }
+ }
+
+ if (stat_nok < error_count) {
+ // Grab mutex so multiple thread don't try to print the
+ // stack trace at the same time
+ MutexLock l(thread->shared->GetMutex());
+ fprintf(stderr, "Didn't get expected error from MultiGet. \n");
+ fprintf(stderr, "num_keys %zu Expected %d errors, seen %d\n", num_keys,
+ error_count, stat_nok);
+ fprintf(stderr, "Callstack that injected the fault\n");
+ fault_fs_guard->PrintFaultBacktrace();
+ std::terminate();
+ }
+ }
+ if (fault_fs_guard) {
+ fault_fs_guard->DisableErrorInjection();
+ }
+
+ for (size_t i = 0; i < statuses.size(); ++i) {
+ Status s = statuses[i];
+ bool is_consistent = true;
+ // Only do the consistency check if no error was injected and MultiGet
+ // didn't return an unexpected error
+ if (do_consistency_check && !error_count && (s.ok() || s.IsNotFound())) {
+ Status tmp_s;
+ std::string value;
+
+ if (use_txn) {
+#ifndef ROCKSDB_LITE
+ tmp_s = txn->Get(readoptionscopy, cfh, keys[i], &value);
+#endif // ROCKSDB_LITE
+ } else {
+ tmp_s = db_->Get(readoptionscopy, cfh, keys[i], &value);
+ }
+ if (!tmp_s.ok() && !tmp_s.IsNotFound()) {
+          fprintf(stderr, "Get error: %s\n", tmp_s.ToString().c_str());
+ is_consistent = false;
+ } else if (!s.ok() && tmp_s.ok()) {
+ fprintf(stderr, "MultiGet returned different results with key %s\n",
+ keys[i].ToString(true).c_str());
+ fprintf(stderr, "Get returned ok, MultiGet returned not found\n");
+ is_consistent = false;
+ } else if (s.ok() && tmp_s.IsNotFound()) {
+ fprintf(stderr, "MultiGet returned different results with key %s\n",
+ keys[i].ToString(true).c_str());
+ fprintf(stderr, "MultiGet returned ok, Get returned not found\n");
+ is_consistent = false;
+ } else if (s.ok() && value != values[i].ToString()) {
+ fprintf(stderr, "MultiGet returned different results with key %s\n",
+ keys[i].ToString(true).c_str());
+ fprintf(stderr, "MultiGet returned value %s\n",
+ values[i].ToString(true).c_str());
+ fprintf(stderr, "Get returned value %s\n",
+ Slice(value).ToString(true /* hex */).c_str());
+ is_consistent = false;
+ }
+ }
+
+ if (!is_consistent) {
+ fprintf(stderr, "TestMultiGet error: is_consistent is false\n");
+ thread->stats.AddErrors(1);
+ // Fail fast to preserve the DB state
+ thread->shared->SetVerificationFailure();
+ break;
+ } else if (s.ok()) {
+ // found case
+ thread->stats.AddGets(1, 1);
+ } else if (s.IsNotFound()) {
+ // not found case
+ thread->stats.AddGets(1, 0);
+ } else if (s.IsMergeInProgress() && use_txn) {
+ // With txn this is sometimes expected.
+ thread->stats.AddGets(1, 1);
+ } else {
+ if (error_count == 0) {
+ // errors case
+ fprintf(stderr, "MultiGet error: %s\n", s.ToString().c_str());
+ thread->stats.AddErrors(1);
+ } else {
+ thread->stats.AddVerifiedErrors(1);
+ }
+ }
+ }
+
+ if (readoptionscopy.snapshot) {
+ db_->ReleaseSnapshot(readoptionscopy.snapshot);
+ }
+ if (use_txn) {
+#ifndef ROCKSDB_LITE
+ RollbackTxn(txn);
+#endif
+ }
+ return statuses;
+ }
+
+ Status TestPrefixScan(ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ assert(!rand_column_families.empty());
+ assert(!rand_keys.empty());
+
+ ColumnFamilyHandle* const cfh = column_families_[rand_column_families[0]];
+ assert(cfh);
+
+ const std::string key = Key(rand_keys[0]);
+ const Slice prefix(key.data(), FLAGS_prefix_size);
+
+ std::string upper_bound;
+ Slice ub_slice;
+ ReadOptions ro_copy = read_opts;
+
+ // Get the next prefix first and then see if we want to set upper bound.
+ // We'll use the next prefix in an assertion later on
+ if (GetNextPrefix(prefix, &upper_bound) && thread->rand.OneIn(2)) {
+ // For half of the time, set the upper bound to the next prefix
+ ub_slice = Slice(upper_bound);
+ ro_copy.iterate_upper_bound = &ub_slice;
+ }
+
+ std::string read_ts_str;
+ Slice read_ts_slice;
+ MaybeUseOlderTimestampForRangeScan(thread, read_ts_str, read_ts_slice,
+ ro_copy);
+
+ std::unique_ptr<Iterator> iter(db_->NewIterator(ro_copy, cfh));
+
+ uint64_t count = 0;
+ Status s;
+
+ for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix);
+ iter->Next()) {
+ ++count;
+
+ // When iter_start_ts is set, iterator exposes internal keys, including
+ // tombstones; however, we want to perform column validation only for
+ // value-like types.
+ if (ro_copy.iter_start_ts) {
+ const ValueType value_type = ExtractValueType(iter->key());
+ if (value_type != kTypeValue && value_type != kTypeBlobIndex &&
+ value_type != kTypeWideColumnEntity) {
+ continue;
+ }
+ }
+
+ const WideColumns expected_columns = GenerateExpectedWideColumns(
+ GetValueBase(iter->value()), iter->value());
+ if (iter->columns() != expected_columns) {
+ s = Status::Corruption(
+ "Value and columns inconsistent",
+ DebugString(iter->value(), iter->columns(), expected_columns));
+ break;
+ }
+ }
+
+ if (ro_copy.iter_start_ts == nullptr) {
+ assert(count <= GetPrefixKeyCount(prefix.ToString(), upper_bound));
+ }
+
+ if (s.ok()) {
+ s = iter->status();
+ }
+
+ if (!s.ok()) {
+ fprintf(stderr, "TestPrefixScan error: %s\n", s.ToString().c_str());
+ thread->stats.AddErrors(1);
+
+ return s;
+ }
+
+ thread->stats.AddPrefixes(1, count);
+
+ return Status::OK();
+ }
+
+ Status TestPut(ThreadState* thread, WriteOptions& write_opts,
+ const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys,
+ char (&value)[100]) override {
+ assert(!rand_column_families.empty());
+ assert(!rand_keys.empty());
+
+ auto shared = thread->shared;
+ assert(shared);
+
+ const int64_t max_key = shared->GetMaxKey();
+
+ int64_t rand_key = rand_keys[0];
+ int rand_column_family = rand_column_families[0];
+ std::string write_ts;
+
+ std::unique_ptr<MutexLock> lock(
+ new MutexLock(shared->GetMutexForKey(rand_column_family, rand_key)));
+ while (!shared->AllowsOverwrite(rand_key) &&
+ (FLAGS_use_merge || shared->Exists(rand_column_family, rand_key))) {
+ lock.reset();
+
+ rand_key = thread->rand.Next() % max_key;
+ rand_column_family = thread->rand.Next() % FLAGS_column_families;
+
+ lock.reset(
+ new MutexLock(shared->GetMutexForKey(rand_column_family, rand_key)));
+ if (FLAGS_user_timestamp_size > 0) {
+ write_ts = GetNowNanos();
+ }
+ }
+
+ if (write_ts.empty() && FLAGS_user_timestamp_size) {
+ write_ts = GetNowNanos();
+ }
+
+ const std::string k = Key(rand_key);
+
+ ColumnFamilyHandle* const cfh = column_families_[rand_column_family];
+ assert(cfh);
+
+ if (FLAGS_verify_before_write) {
+ std::string from_db;
+ Status s = db_->Get(read_opts, cfh, k, &from_db);
+ if (!VerifyOrSyncValue(rand_column_family, rand_key, read_opts, shared,
+ from_db, s, /* strict */ true)) {
+ return s;
+ }
+ }
+
+ const uint32_t value_base = thread->rand.Next() % shared->UNKNOWN_SENTINEL;
+ const size_t sz = GenerateValue(value_base, value, sizeof(value));
+ const Slice v(value, sz);
+
+ shared->Put(rand_column_family, rand_key, value_base, true /* pending */);
+
+ Status s;
+
+ if (FLAGS_use_merge) {
+ if (!FLAGS_use_txn) {
+ if (FLAGS_user_timestamp_size == 0) {
+ s = db_->Merge(write_opts, cfh, k, v);
+ } else {
+ s = db_->Merge(write_opts, cfh, k, write_ts, v);
+ }
+ } else {
+#ifndef ROCKSDB_LITE
+ Transaction* txn;
+ s = NewTxn(write_opts, &txn);
+ if (s.ok()) {
+ s = txn->Merge(cfh, k, v);
+ if (s.ok()) {
+ s = CommitTxn(txn, thread);
+ }
+ }
+#endif
+ }
+ } else if (FLAGS_use_put_entity_one_in > 0 &&
+ (value_base % FLAGS_use_put_entity_one_in) == 0) {
+ s = db_->PutEntity(write_opts, cfh, k,
+ GenerateWideColumns(value_base, v));
+ } else {
+ if (!FLAGS_use_txn) {
+ if (FLAGS_user_timestamp_size == 0) {
+ s = db_->Put(write_opts, cfh, k, v);
+ } else {
+ s = db_->Put(write_opts, cfh, k, write_ts, v);
+ }
+ } else {
+#ifndef ROCKSDB_LITE
+ Transaction* txn;
+ s = NewTxn(write_opts, &txn);
+ if (s.ok()) {
+ s = txn->Put(cfh, k, v);
+ if (s.ok()) {
+ s = CommitTxn(txn, thread);
+ }
+ }
+#endif
+ }
+ }
+
+ shared->Put(rand_column_family, rand_key, value_base, false /* pending */);
+
+ if (!s.ok()) {
+ if (FLAGS_injest_error_severity >= 2) {
+ if (!is_db_stopped_ && s.severity() >= Status::Severity::kFatalError) {
+ is_db_stopped_ = true;
+ } else if (!is_db_stopped_ ||
+ s.severity() < Status::Severity::kFatalError) {
+ fprintf(stderr, "put or merge error: %s\n", s.ToString().c_str());
+ std::terminate();
+ }
+ } else {
+ fprintf(stderr, "put or merge error: %s\n", s.ToString().c_str());
+ std::terminate();
+ }
+ }
+
+ thread->stats.AddBytesForWrites(1, sz);
+ PrintKeyValue(rand_column_family, static_cast<uint32_t>(rand_key), value,
+ sz);
+ return s;
+ }
+
+ Status TestDelete(ThreadState* thread, WriteOptions& write_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ int64_t rand_key = rand_keys[0];
+ int rand_column_family = rand_column_families[0];
+ auto shared = thread->shared;
+
+ std::unique_ptr<MutexLock> lock(
+ new MutexLock(shared->GetMutexForKey(rand_column_family, rand_key)));
+
+ // OPERATION delete
+ std::string write_ts_str = GetNowNanos();
+ Slice write_ts = write_ts_str;
+
+ std::string key_str = Key(rand_key);
+ Slice key = key_str;
+ auto cfh = column_families_[rand_column_family];
+
+    // Use Delete if the key may be overwritten and a single deletion
+    // otherwise: keys for which overwrites are disallowed are written at
+    // most once, which satisfies SingleDelete's requirement.
+ Status s;
+ if (shared->AllowsOverwrite(rand_key)) {
+ shared->Delete(rand_column_family, rand_key, true /* pending */);
+ if (!FLAGS_use_txn) {
+ if (FLAGS_user_timestamp_size == 0) {
+ s = db_->Delete(write_opts, cfh, key);
+ } else {
+ s = db_->Delete(write_opts, cfh, key, write_ts);
+ }
+ } else {
+#ifndef ROCKSDB_LITE
+ Transaction* txn;
+ s = NewTxn(write_opts, &txn);
+ if (s.ok()) {
+ s = txn->Delete(cfh, key);
+ if (s.ok()) {
+ s = CommitTxn(txn, thread);
+ }
+ }
+#endif
+ }
+ shared->Delete(rand_column_family, rand_key, false /* pending */);
+ thread->stats.AddDeletes(1);
+ if (!s.ok()) {
+ if (FLAGS_injest_error_severity >= 2) {
+ if (!is_db_stopped_ &&
+ s.severity() >= Status::Severity::kFatalError) {
+ is_db_stopped_ = true;
+ } else if (!is_db_stopped_ ||
+ s.severity() < Status::Severity::kFatalError) {
+ fprintf(stderr, "delete error: %s\n", s.ToString().c_str());
+ std::terminate();
+ }
+ } else {
+ fprintf(stderr, "delete error: %s\n", s.ToString().c_str());
+ std::terminate();
+ }
+ }
+ } else {
+ shared->SingleDelete(rand_column_family, rand_key, true /* pending */);
+ if (!FLAGS_use_txn) {
+ if (FLAGS_user_timestamp_size == 0) {
+ s = db_->SingleDelete(write_opts, cfh, key);
+ } else {
+ s = db_->SingleDelete(write_opts, cfh, key, write_ts);
+ }
+ } else {
+#ifndef ROCKSDB_LITE
+ Transaction* txn;
+ s = NewTxn(write_opts, &txn);
+ if (s.ok()) {
+ s = txn->SingleDelete(cfh, key);
+ if (s.ok()) {
+ s = CommitTxn(txn, thread);
+ }
+ }
+#endif
+ }
+ shared->SingleDelete(rand_column_family, rand_key, false /* pending */);
+ thread->stats.AddSingleDeletes(1);
+ if (!s.ok()) {
+ if (FLAGS_injest_error_severity >= 2) {
+ if (!is_db_stopped_ &&
+ s.severity() >= Status::Severity::kFatalError) {
+ is_db_stopped_ = true;
+ } else if (!is_db_stopped_ ||
+ s.severity() < Status::Severity::kFatalError) {
+ fprintf(stderr, "single delete error: %s\n", s.ToString().c_str());
+ std::terminate();
+ }
+ } else {
+ fprintf(stderr, "single delete error: %s\n", s.ToString().c_str());
+ std::terminate();
+ }
+ }
+ }
+ return s;
+ }
+
+ Status TestDeleteRange(ThreadState* thread, WriteOptions& write_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ // OPERATION delete range
+ std::vector<std::unique_ptr<MutexLock>> range_locks;
+    // DeleteRange does not respect disallowed overwrites. The keys for
+    // which overwrites are disallowed are randomly distributed, so it
+    // could be expensive to find a range where every key allows
+    // overwrites.
+ int64_t rand_key = rand_keys[0];
+ int rand_column_family = rand_column_families[0];
+ auto shared = thread->shared;
+ int64_t max_key = shared->GetMaxKey();
+ if (rand_key > max_key - FLAGS_range_deletion_width) {
+ rand_key =
+ thread->rand.Next() % (max_key - FLAGS_range_deletion_width + 1);
+ }
+ for (int j = 0; j < FLAGS_range_deletion_width; ++j) {
+ if (j == 0 ||
+ ((rand_key + j) & ((1 << FLAGS_log2_keys_per_lock) - 1)) == 0) {
+ range_locks.emplace_back(new MutexLock(
+ shared->GetMutexForKey(rand_column_family, rand_key + j)));
+ }
+ }
+ shared->DeleteRange(rand_column_family, rand_key,
+ rand_key + FLAGS_range_deletion_width,
+ true /* pending */);
+
+ std::string keystr = Key(rand_key);
+ Slice key = keystr;
+ auto cfh = column_families_[rand_column_family];
+ std::string end_keystr = Key(rand_key + FLAGS_range_deletion_width);
+ Slice end_key = end_keystr;
+ std::string write_ts_str;
+ Slice write_ts;
+ Status s;
+ if (FLAGS_user_timestamp_size) {
+ write_ts_str = GetNowNanos();
+ write_ts = write_ts_str;
+ s = db_->DeleteRange(write_opts, cfh, key, end_key, write_ts);
+ } else {
+ s = db_->DeleteRange(write_opts, cfh, key, end_key);
+ }
+ if (!s.ok()) {
+ if (FLAGS_injest_error_severity >= 2) {
+ if (!is_db_stopped_ && s.severity() >= Status::Severity::kFatalError) {
+ is_db_stopped_ = true;
+ } else if (!is_db_stopped_ ||
+ s.severity() < Status::Severity::kFatalError) {
+ fprintf(stderr, "delete range error: %s\n", s.ToString().c_str());
+ std::terminate();
+ }
+ } else {
+ fprintf(stderr, "delete range error: %s\n", s.ToString().c_str());
+ std::terminate();
+ }
+ }
+ int covered = shared->DeleteRange(rand_column_family, rand_key,
+ rand_key + FLAGS_range_deletion_width,
+ false /* pending */);
+ thread->stats.AddRangeDeletions(1);
+ thread->stats.AddCoveredByRangeDeletions(covered);
+ return s;
+ }
+
+#ifdef ROCKSDB_LITE
+ void TestIngestExternalFile(
+ ThreadState* /* thread */,
+ const std::vector<int>& /* rand_column_families */,
+ const std::vector<int64_t>& /* rand_keys */) override {
+ assert(false);
+ fprintf(stderr,
+ "RocksDB lite does not support "
+ "TestIngestExternalFile\n");
+ std::terminate();
+ }
+#else
+ void TestIngestExternalFile(ThreadState* thread,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ const std::string sst_filename =
+ FLAGS_db + "/." + std::to_string(thread->tid) + ".sst";
+ Status s;
+ if (db_stress_env->FileExists(sst_filename).ok()) {
+ // Maybe we terminated abnormally before, so cleanup to give this file
+ // ingestion a clean slate
+ s = db_stress_env->DeleteFile(sst_filename);
+ }
+
+ SstFileWriter sst_file_writer(EnvOptions(options_), options_);
+ if (s.ok()) {
+ s = sst_file_writer.Open(sst_filename);
+ }
+ int64_t key_base = rand_keys[0];
+ int column_family = rand_column_families[0];
+ std::vector<std::unique_ptr<MutexLock>> range_locks;
+ range_locks.reserve(FLAGS_ingest_external_file_width);
+ std::vector<int64_t> keys;
+ keys.reserve(FLAGS_ingest_external_file_width);
+ std::vector<uint32_t> values;
+ values.reserve(FLAGS_ingest_external_file_width);
+ SharedState* shared = thread->shared;
+
+ assert(FLAGS_nooverwritepercent < 100);
+ // Grab locks, set pending state on expected values, and add keys
+ for (int64_t key = key_base;
+ s.ok() && key < shared->GetMaxKey() &&
+ static_cast<int32_t>(keys.size()) < FLAGS_ingest_external_file_width;
+ ++key) {
+ if (key == key_base ||
+ (key & ((1 << FLAGS_log2_keys_per_lock) - 1)) == 0) {
+ range_locks.emplace_back(
+ new MutexLock(shared->GetMutexForKey(column_family, key)));
+ }
+ if (!shared->AllowsOverwrite(key)) {
+        // We could alternatively include `key` on the condition that its
+        // current value is `DELETION_SENTINEL`.
+ continue;
+ }
+ keys.push_back(key);
+
+ uint32_t value_base = thread->rand.Next() % shared->UNKNOWN_SENTINEL;
+ values.push_back(value_base);
+ shared->Put(column_family, key, value_base, true /* pending */);
+
+ char value[100];
+ size_t value_len = GenerateValue(value_base, value, sizeof(value));
+ auto key_str = Key(key);
+ s = sst_file_writer.Put(Slice(key_str), Slice(value, value_len));
+ }
+
+ if (s.ok() && keys.empty()) {
+ return;
+ }
+
+ if (s.ok()) {
+ s = sst_file_writer.Finish();
+ }
+ if (s.ok()) {
+ s = db_->IngestExternalFile(column_families_[column_family],
+ {sst_filename}, IngestExternalFileOptions());
+ }
+ if (!s.ok()) {
+ fprintf(stderr, "file ingestion error: %s\n", s.ToString().c_str());
+ std::terminate();
+ }
+ for (size_t i = 0; i < keys.size(); ++i) {
+ shared->Put(column_family, keys[i], values[i], false /* pending */);
+ }
+ }
+#endif // ROCKSDB_LITE
+
+ // Given a key K, this creates an iterator which scans the range
+ // [K, K + FLAGS_num_iterations) forward and backward.
+ // Then does a random sequence of Next/Prev operations.
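+  // While scanning, whenever the iterator skips from one key to a larger
+  // one, every key in between is checked against the expected state and must
+  // be absent there (e.g. if the iterator moves from key 5 to key 9, keys
+  // 6..8 must be deletion or unknown sentinels); see check_no_key_in_range
+  // below.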
+ Status TestIterateAgainstExpected(
+ ThreadState* thread, const ReadOptions& read_opts,
+ const std::vector<int>& rand_column_families,
+ const std::vector<int64_t>& rand_keys) override {
+ assert(thread);
+ assert(!rand_column_families.empty());
+ assert(!rand_keys.empty());
+
+ auto shared = thread->shared;
+ assert(shared);
+
+ int64_t max_key = shared->GetMaxKey();
+
+ const int64_t num_iter = static_cast<int64_t>(FLAGS_num_iterations);
+
+ int64_t lb = rand_keys[0];
+ if (lb > max_key - num_iter) {
+ lb = thread->rand.Next() % (max_key - num_iter + 1);
+ }
+
+ const int64_t ub = lb + num_iter;
+
+ // Lock the whole range over which we might iterate to ensure it doesn't
+ // change under us.
+ const int rand_column_family = rand_column_families[0];
+ std::vector<std::unique_ptr<MutexLock>> range_locks =
+ shared->GetLocksForKeyRange(rand_column_family, lb, ub);
+
+ ReadOptions ro(read_opts);
+ ro.total_order_seek = true;
+
+ std::string read_ts_str;
+ Slice read_ts;
+ if (FLAGS_user_timestamp_size > 0) {
+ read_ts_str = GetNowNanos();
+ read_ts = read_ts_str;
+ ro.timestamp = &read_ts;
+ }
+
+ std::string max_key_str;
+ Slice max_key_slice;
+ if (!FLAGS_destroy_db_initially) {
+ max_key_str = Key(max_key);
+ max_key_slice = max_key_str;
+      // Restrict the iterator from reading keys written in
+      // batched_ops_stress that do not have their expected state updated and
+      // may not be parseable by GetIntVal().
+ ro.iterate_upper_bound = &max_key_slice;
+ }
+
+ ColumnFamilyHandle* const cfh = column_families_[rand_column_family];
+ assert(cfh);
+
+ std::unique_ptr<Iterator> iter(db_->NewIterator(ro, cfh));
+
+ std::string op_logs;
+
+ auto check_columns = [&]() {
+ assert(iter);
+ assert(iter->Valid());
+
+ const WideColumns expected_columns = GenerateExpectedWideColumns(
+ GetValueBase(iter->value()), iter->value());
+ if (iter->columns() != expected_columns) {
+ shared->SetVerificationFailure();
+
+ fprintf(stderr,
+ "Verification failed for key %s: "
+ "Value and columns inconsistent: %s\n",
+ Slice(iter->key()).ToString(/* hex */ true).c_str(),
+ DebugString(iter->value(), iter->columns(), expected_columns)
+ .c_str());
+ fprintf(stderr, "Column family: %s, op_logs: %s\n",
+ cfh->GetName().c_str(), op_logs.c_str());
+
+ thread->stats.AddErrors(1);
+
+ return false;
+ }
+
+ return true;
+ };
+
+ auto check_no_key_in_range = [&](int64_t start, int64_t end) {
+ for (auto j = std::max(start, lb); j < std::min(end, ub); ++j) {
+ auto expected_value =
+ shared->Get(rand_column_family, static_cast<int64_t>(j));
+ if (expected_value != shared->DELETION_SENTINEL &&
+ expected_value != shared->UNKNOWN_SENTINEL) {
+ // Fail fast to preserve the DB state.
+ thread->shared->SetVerificationFailure();
+ if (iter->Valid()) {
+ fprintf(stderr,
+ "Expected state has key %s, iterator is at key %s\n",
+ Slice(Key(j)).ToString(true).c_str(),
+ iter->key().ToString(true).c_str());
+ } else {
+ fprintf(stderr, "Expected state has key %s, iterator is invalid\n",
+ Slice(Key(j)).ToString(true).c_str());
+ }
+ fprintf(stderr, "Column family: %s, op_logs: %s\n",
+ cfh->GetName().c_str(), op_logs.c_str());
+ thread->stats.AddErrors(1);
+ return false;
+ }
+ }
+ return true;
+ };
+
+    // Forward and backward scans ensure we cover the entire range [lb, ub).
+    // The random Next/Prev sequence tested below tends to cover only a very
+    // short range.
+ int64_t last_key = lb - 1;
+
+ std::string key_str = Key(lb);
+ iter->Seek(key_str);
+
+ op_logs += "S " + Slice(key_str).ToString(true) + " ";
+
+ uint64_t curr = 0;
+ while (true) {
+ if (!iter->Valid()) {
+ if (!iter->status().ok()) {
+ thread->shared->SetVerificationFailure();
+ fprintf(stderr, "TestIterate against expected state error: %s\n",
+ iter->status().ToString().c_str());
+ fprintf(stderr, "Column family: %s, op_logs: %s\n",
+ cfh->GetName().c_str(), op_logs.c_str());
+ thread->stats.AddErrors(1);
+ return iter->status();
+ }
+ if (!check_no_key_in_range(last_key + 1, ub)) {
+ return Status::OK();
+ }
+ break;
+ }
+
+ if (!check_columns()) {
+ return Status::OK();
+ }
+
+ // iter is valid, the range (last_key, current key) was skipped
+ GetIntVal(iter->key().ToString(), &curr);
+ if (!check_no_key_in_range(last_key + 1, static_cast<int64_t>(curr))) {
+ return Status::OK();
+ }
+
+ last_key = static_cast<int64_t>(curr);
+ if (last_key >= ub - 1) {
+ break;
+ }
+
+ iter->Next();
+
+ op_logs += "N";
+ }
+
+ // backward scan
+ key_str = Key(ub - 1);
+ iter->SeekForPrev(key_str);
+
+ op_logs += " SFP " + Slice(key_str).ToString(true) + " ";
+
+ last_key = ub;
+ while (true) {
+ if (!iter->Valid()) {
+ if (!iter->status().ok()) {
+ thread->shared->SetVerificationFailure();
+ fprintf(stderr, "TestIterate against expected state error: %s\n",
+ iter->status().ToString().c_str());
+ fprintf(stderr, "Column family: %s, op_logs: %s\n",
+ cfh->GetName().c_str(), op_logs.c_str());
+ thread->stats.AddErrors(1);
+ return iter->status();
+ }
+ if (!check_no_key_in_range(lb, last_key)) {
+ return Status::OK();
+ }
+ break;
+ }
+
+ if (!check_columns()) {
+ return Status::OK();
+ }
+
+ // the range (current key, last_key) was skipped, so it must be empty in
+ // the expected state
+ GetIntVal(iter->key().ToString(), &curr);
+ if (!check_no_key_in_range(static_cast<int64_t>(curr + 1), last_key)) {
+ return Status::OK();
+ }
+
+ last_key = static_cast<int64_t>(curr);
+ if (last_key <= lb) {
+ break;
+ }
+
+ iter->Prev();
+
+ op_logs += "P";
+ }
+
+ if (thread->rand.OneIn(2)) {
+ // Refresh after the forward/backward scans to increase the chance of a
+ // SuperVersion (SV) change. It is safe to refresh since the key range
+ // under test is locked.
+ iter->Refresh();
+ }
+
+ // Start from the middle of [lb, ub); otherwise it is easy to iterate out
+ // of the locked range.
+ const int64_t mid = lb + num_iter / 2;
+
+ key_str = Key(mid);
+ const Slice key(key_str);
+
+ if (thread->rand.OneIn(2)) {
+ iter->Seek(key);
+ op_logs += " S " + key.ToString(true) + " ";
+ if (!iter->Valid() && iter->status().ok()) {
+ if (!check_no_key_in_range(mid, ub)) {
+ return Status::OK();
+ }
+ }
+ } else {
+ iter->SeekForPrev(key);
+ op_logs += " SFP " + key.ToString(true) + " ";
+ if (!iter->Valid() && iter->status().ok()) {
+ // iterator says nothing <= mid
+ if (!check_no_key_in_range(lb, mid + 1)) {
+ return Status::OK();
+ }
+ }
+ }
+
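+ // Random sequence of Next/Prev steps starting near the middle of the
+ // range; each step cross-checks any skipped keys against the expected
+ // state.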
+ for (int64_t i = 0; i < num_iter && iter->Valid(); ++i) {
+ if (!check_columns()) {
+ return Status::OK();
+ }
+
+ GetIntVal(iter->key().ToString(), &curr);
+ if (static_cast<int64_t>(curr) < lb) {
+ iter->Next();
+ op_logs += "N";
+ } else if (static_cast<int64_t>(curr) >= ub) {
+ iter->Prev();
+ op_logs += "P";
+ } else {
+ const uint32_t expected_value =
+ shared->Get(rand_column_family, static_cast<int64_t>(curr));
+ if (expected_value == shared->DELETION_SENTINEL) {
+ // Fail fast to preserve the DB state.
+ thread->shared->SetVerificationFailure();
+ fprintf(stderr, "Iterator has key %s, but expected state does not.\n",
+ iter->key().ToString(true).c_str());
+ fprintf(stderr, "Column family: %s, op_logs: %s\n",
+ cfh->GetName().c_str(), op_logs.c_str());
+ thread->stats.AddErrors(1);
+ break;
+ }
+
+ if (thread->rand.OneIn(2)) {
+ iter->Next();
+ op_logs += "N";
+ if (!iter->Valid()) {
+ break;
+ }
+ uint64_t next = 0;
+ GetIntVal(iter->key().ToString(), &next);
+ if (!check_no_key_in_range(static_cast<int64_t>(curr + 1),
+ static_cast<int64_t>(next))) {
+ return Status::OK();
+ }
+ } else {
+ iter->Prev();
+ op_logs += "P";
+ if (!iter->Valid()) {
+ break;
+ }
+ uint64_t prev = 0;
+ GetIntVal(iter->key().ToString(), &prev);
+ if (!check_no_key_in_range(static_cast<int64_t>(prev + 1),
+ static_cast<int64_t>(curr))) {
+ return Status::OK();
+ }
+ }
+ }
+ }
+
+ if (!iter->status().ok()) {
+ thread->shared->SetVerificationFailure();
+ fprintf(stderr, "TestIterate against expected state error: %s\n",
+ iter->status().ToString().c_str());
+ fprintf(stderr, "Column family: %s, op_logs: %s\n",
+ cfh->GetName().c_str(), op_logs.c_str());
+ thread->stats.AddErrors(1);
+ return iter->status();
+ }
+
+ thread->stats.AddIterations(1);
+
+ return Status::OK();
+ }
+
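+ // Verifies value_from_db against the expected state for (cf, key). If the
+ // expected value is unknown, the expected state is synced to whatever the
+ // DB returned instead of reporting a failure.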
+ bool VerifyOrSyncValue(int cf, int64_t key, const ReadOptions& /*opts*/,
+ SharedState* shared, const std::string& value_from_db,
+ const Status& s, bool strict = false) const {
+ if (shared->HasVerificationFailedYet()) {
+ return false;
+ }
+ // compare value_from_db with the value in the shared state
+ uint32_t value_base = shared->Get(cf, key);
+ if (value_base == SharedState::UNKNOWN_SENTINEL) {
+ if (s.ok()) {
+ // Value exists in db, update state to reflect that
+ Slice slice(value_from_db);
+ value_base = GetValueBase(slice);
+ shared->Put(cf, key, value_base, false);
+ } else if (s.IsNotFound()) {
+ // Value doesn't exist in db, update state to reflect that
+ shared->SingleDelete(cf, key, false);
+ }
+ return true;
+ }
+ if (value_base == SharedState::DELETION_SENTINEL && !strict) {
+ return true;
+ }
+
+ if (s.ok()) {
+ char value[kValueMaxLen];
+ if (value_base == SharedState::DELETION_SENTINEL) {
+ VerificationAbort(shared, "Unexpected value found", cf, key,
+ value_from_db, "");
+ return false;
+ }
+ size_t sz = GenerateValue(value_base, value, sizeof(value));
+ if (value_from_db.length() != sz) {
+ VerificationAbort(shared, "Length of value read is not equal", cf, key,
+ value_from_db, Slice(value, sz));
+ return false;
+ }
+ if (memcmp(value_from_db.data(), value, sz) != 0) {
+ VerificationAbort(shared, "Contents of value read don't match", cf, key,
+ value_from_db, Slice(value, sz));
+ return false;
+ }
+ } else {
+ if (value_base != SharedState::DELETION_SENTINEL) {
+ char value[kValueMaxLen];
+ size_t sz = GenerateValue(value_base, value, sizeof(value));
+ VerificationAbort(shared, "Value not found: " + s.ToString(), cf, key,
+ "", Slice(value, sz));
+ return false;
+ }
+ }
+ return true;
+ }
+
+#ifndef ROCKSDB_LITE
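+ // The rollback deletion-type callback returns true exactly for keys the
+ // stress test never overwrites, so those keys can be rolled back with
+ // SingleDelete.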
+ void PrepareTxnDbOptions(SharedState* shared,
+ TransactionDBOptions& txn_db_opts) override {
+ txn_db_opts.rollback_deletion_type_callback =
+ [shared](TransactionDB*, ColumnFamilyHandle*, const Slice& key) {
+ assert(shared);
+ uint64_t key_num = 0;
+ bool ok = GetIntVal(key.ToString(), &key_num);
+ assert(ok);
+ (void)ok;
+ return !shared->AllowsOverwrite(key_num);
+ };
+ }
+#endif // ROCKSDB_LITE
+};
+
+StressTest* CreateNonBatchedOpsStressTest() {
+ return new NonBatchedOpsStressTest();
+}
+
+} // namespace ROCKSDB_NAMESPACE
+#endif // GFLAGS