summaryrefslogtreecommitdiffstats
path: root/src/test/crimson
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:45:59 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:45:59 +0000
commit19fcec84d8d7d21e796c7624e521b60d28ee21ed (patch)
tree42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /src/test/crimson
parentInitial commit. (diff)
downloadceph-upstream.tar.xz
ceph-upstream.zip
Adding upstream version 16.2.11+ds.upstream/16.2.11+dsupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/test/crimson')
-rw-r--r--src/test/crimson/CMakeLists.txt90
-rw-r--r--src/test/crimson/cbt/radosbench_4K_read.yaml36
-rw-r--r--src/test/crimson/cbt/radosbench_4K_write.yaml34
-rwxr-xr-xsrc/test/crimson/cbt/t2c.py78
-rw-r--r--src/test/crimson/gtest_seastar.cc23
-rw-r--r--src/test/crimson/gtest_seastar.h35
-rw-r--r--src/test/crimson/seastar_runner.h71
-rw-r--r--src/test/crimson/seastore/CMakeLists.txt51
-rw-r--r--src/test/crimson/seastore/onode_tree/CMakeLists.txt15
-rw-r--r--src/test/crimson/seastore/onode_tree/test_node.cc207
-rw-r--r--src/test/crimson/seastore/onode_tree/test_staged_fltree.cc1204
-rw-r--r--src/test/crimson/seastore/test_block.cc25
-rw-r--r--src/test/crimson/seastore/test_block.h147
-rw-r--r--src/test/crimson/seastore/test_btree_lba_manager.cc429
-rw-r--r--src/test/crimson/seastore/test_extmap_manager.cc283
-rw-r--r--src/test/crimson/seastore/test_seastore_cache.cc235
-rw-r--r--src/test/crimson/seastore/test_seastore_journal.cc260
-rw-r--r--src/test/crimson/seastore/test_transaction_manager.cc495
-rw-r--r--src/test/crimson/seastore/transaction_manager_test_state.h82
-rw-r--r--src/test/crimson/test_alien_echo.cc306
-rw-r--r--src/test/crimson/test_alienstore_thread_pool.cc78
-rw-r--r--src/test/crimson/test_async_echo.cc235
-rw-r--r--src/test/crimson/test_backfill.cc500
-rw-r--r--src/test/crimson/test_buffer.cc50
-rw-r--r--src/test/crimson/test_cmds.h76
-rw-r--r--src/test/crimson/test_config.cc108
-rw-r--r--src/test/crimson/test_denc.cc53
-rw-r--r--src/test/crimson/test_errorator.cc52
-rw-r--r--src/test/crimson/test_fixed_kv_node_layout.cc376
-rw-r--r--src/test/crimson/test_lru.cc213
-rw-r--r--src/test/crimson/test_messenger.cc3668
-rw-r--r--src/test/crimson/test_messenger_peer.cc447
-rw-r--r--src/test/crimson/test_monc.cc90
-rw-r--r--src/test/crimson/test_perfcounters.cc62
-rw-r--r--src/test/crimson/test_socket.cc490
35 files changed, 10604 insertions, 0 deletions
diff --git a/src/test/crimson/CMakeLists.txt b/src/test/crimson/CMakeLists.txt
new file mode 100644
index 000000000..5a0de43d2
--- /dev/null
+++ b/src/test/crimson/CMakeLists.txt
@@ -0,0 +1,90 @@
+# the crimson's backfill doesn't need nor use seastar
+add_executable(unittest-crimson-backfill
+ test_backfill.cc
+ ${PROJECT_SOURCE_DIR}/src/auth/Crypto.cc
+ ${PROJECT_SOURCE_DIR}/src/crimson/osd/backfill_state.cc
+ ${PROJECT_SOURCE_DIR}/src/osd/recovery_types.cc)
+add_ceph_unittest(unittest-crimson-backfill
+ --memory 256M --smp 1)
+target_link_libraries(unittest-crimson-backfill crimson GTest::Main)
+
+add_executable(unittest-seastar-buffer
+ test_buffer.cc)
+add_ceph_unittest(unittest-seastar-buffer
+ --memory 256M --smp 1)
+target_link_libraries(unittest-seastar-buffer crimson)
+
+add_executable(unittest-seastar-denc
+ test_denc.cc)
+add_ceph_unittest(unittest-seastar-denc --memory 256M --smp 1)
+target_link_libraries(unittest-seastar-denc crimson GTest::Main)
+
+add_executable(unittest-seastar-socket test_socket.cc)
+add_ceph_unittest(unittest-seastar-socket
+ --memory 256M --smp 2)
+target_link_libraries(unittest-seastar-socket crimson)
+
+add_executable(unittest-seastar-messenger test_messenger.cc)
+add_ceph_unittest(unittest-seastar-messenger
+ --memory 256M --smp 1)
+target_link_libraries(unittest-seastar-messenger crimson)
+
+add_executable(test-seastar-messenger-peer test_messenger_peer.cc)
+target_link_libraries(test-seastar-messenger-peer ceph-common global ${ALLOC_LIBS})
+
+add_executable(test-seastar-echo
+ test_alien_echo.cc)
+target_link_libraries(test-seastar-echo crimson)
+
+add_executable(test-async-echo
+ test_async_echo.cc)
+target_link_libraries(test-async-echo ceph-common global)
+
+add_executable(unittest-seastar-alienstore-thread-pool
+ test_alienstore_thread_pool.cc)
+add_ceph_unittest(unittest-seastar-alienstore-thread-pool
+ --memory 256M --smp 1)
+target_link_libraries(unittest-seastar-alienstore-thread-pool
+ crimson-alienstore
+ crimson)
+
+add_executable(unittest-seastar-config
+ test_config.cc)
+add_ceph_unittest(unittest-seastar-config
+ --memory 256M --smp 4)
+target_link_libraries(unittest-seastar-config crimson)
+
+add_executable(unittest-seastar-monc
+ test_monc.cc)
+target_link_libraries(unittest-seastar-monc crimson)
+
+add_executable(unittest-seastar-perfcounters
+ test_perfcounters.cc)
+add_ceph_unittest(unittest-seastar-perfcounters
+ --memory 256M --smp 1)
+target_link_libraries(unittest-seastar-perfcounters crimson)
+
+add_executable(unittest-seastar-lru
+ test_lru.cc)
+add_ceph_unittest(unittest-seastar-lru
+ --memory 256M --smp 1)
+target_link_libraries(unittest-seastar-lru crimson GTest::Main)
+
+add_executable(unittest-fixed-kv-node-layout
+ test_fixed_kv_node_layout.cc)
+add_ceph_unittest(unittest-fixed-kv-node-layout)
+
+add_subdirectory(seastore)
+
+add_library(crimson-gtest STATIC
+ gtest_seastar.cc)
+target_link_libraries(crimson-gtest crimson-common GTest::GTest)
+add_library(crimson::gtest ALIAS crimson-gtest)
+
+add_executable(unittest-seastar-errorator
+ test_errorator.cc)
+target_link_libraries(
+ unittest-seastar-errorator
+ crimson::gtest)
+add_ceph_unittest(unittest-seastar-errorator
+ --memory 256M --smp 1)
diff --git a/src/test/crimson/cbt/radosbench_4K_read.yaml b/src/test/crimson/cbt/radosbench_4K_read.yaml
new file mode 100644
index 000000000..219ce643a
--- /dev/null
+++ b/src/test/crimson/cbt/radosbench_4K_read.yaml
@@ -0,0 +1,36 @@
+meta:
+- desc: |
+ Run radosbench benchmark using cbt.
+ 4K read workload.
+
+tasks:
+- cbt:
+ benchmarks:
+ radosbench:
+ concurrent_ops: 16
+ concurrent_procs: 2
+ op_size: [4096]
+ pool_profile: 'replicated'
+ read_time: 30
+ read_only: true
+ readmode: 'rand'
+ prefill_time: 3
+ acceptable:
+ bandwidth: '(or (greater) (near 0.05))'
+ iops_avg: '(or (greater) (near 0.05))'
+ iops_stddev: '(or (less) (near 2.00))'
+ latency_avg: '(or (less) (near 0.05))'
+ cpu_cycles_per_op: '(or (less) (near 0.05))'
+ monitoring_profiles:
+ perf:
+ nodes:
+ - osds
+ args: 'stat -p {pid} -o {perf_dir}/perf_stat.{pid}'
+ cluster:
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ replicated:
+ pg_size: 128
+ pgp_size: 128
+ replication: 'replicated'
diff --git a/src/test/crimson/cbt/radosbench_4K_write.yaml b/src/test/crimson/cbt/radosbench_4K_write.yaml
new file mode 100644
index 000000000..526982b10
--- /dev/null
+++ b/src/test/crimson/cbt/radosbench_4K_write.yaml
@@ -0,0 +1,34 @@
+meta:
+- desc: |
+ Run radosbench benchmark using cbt.
+ 4K write workload.
+
+tasks:
+- cbt:
+ benchmarks:
+ radosbench:
+ concurrent_ops: 16
+ concurrent_procs: 2
+ op_size: [4096]
+ pool_profile: 'replicated'
+ write_time: 3
+ write_only: true
+ acceptable:
+ bandwidth: '(or (greater) (near 0.05))'
+ iops_avg: '(or (greater) (near 0.05))'
+ iops_stddev: '(or (less) (near 2.00))'
+ latency_avg: '(or (less) (near 0.05))'
+ cpu_cycles_per_op: '(or (less) (near 0.05))'
+ monitoring_profiles:
+ perf:
+ nodes:
+ - osds
+ args: 'stat -p {pid} -o {perf_dir}/perf_stat.{pid}'
+ cluster:
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ replicated:
+ pg_size: 128
+ pgp_size: 128
+ replication: 'replicated'
diff --git a/src/test/crimson/cbt/t2c.py b/src/test/crimson/cbt/t2c.py
new file mode 100755
index 000000000..0d4ee49e5
--- /dev/null
+++ b/src/test/crimson/cbt/t2c.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+
+from __future__ import print_function
+import argparse
+import os
+import os.path
+import socket
+import sys
+import yaml
+
+
+class Translator(object):
+ def __init__(self, build_dir):
+ self.build_dir = build_dir
+
+ def translate(self, config):
+ cluster = config.get('cluster', {})
+ benchmarks = config.get('benchmarks', [])
+ monitoring_profiles = config.get('monitoring_profiles', {})
+ return dict(cluster=self._create_cluster_config(cluster),
+ benchmarks=benchmarks,
+ monitoring_profiles=monitoring_profiles)
+
+ def _create_cluster_config(self, cluster):
+ # prepare the "cluster" section consumed by CBT
+ localhost = socket.getfqdn()
+ num_osds = cluster.get('osds_per_node', 3)
+ items_to_copy = ['iterations', 'pool_profiles']
+ conf = dict((k, cluster[k]) for k in items_to_copy if k in cluster)
+ conf.update(dict(
+ head=localhost,
+ osds=[localhost],
+ osds_per_node=num_osds,
+ mons=[localhost],
+ clients=[localhost],
+ rebuild_every_test=False,
+ conf_file=os.path.join(self.build_dir, 'ceph.conf'),
+ ceph_cmd=os.path.join(self.build_dir, 'bin', 'ceph'),
+ rados_cmd=os.path.join(self.build_dir, 'bin', 'rados'),
+ pid_dir=os.path.join(self.build_dir, 'out')
+ ))
+ return conf
+
+def get_cbt_tasks(path):
+ with open(path) as input:
+ teuthology_config = yaml.load(input)
+ for task in teuthology_config['tasks']:
+ for name, conf in task.items():
+ if name == 'cbt':
+ yield conf
+
+def main():
+ parser = argparse.ArgumentParser(description='translate teuthology yaml to CBT yaml')
+ parser.add_argument('--build-dir',
+ default=os.getcwd(),
+ required=False,
+ help='Directory where CMakeCache.txt is located')
+ parser.add_argument('--input',
+ required=True,
+ help='The path to the input YAML file')
+ parser.add_argument('--output',
+ required=True,
+ help='The path to the output YAML file')
+ options = parser.parse_args(sys.argv[1:])
+ cbt_tasks = [task for task in get_cbt_tasks(options.input)]
+ if not cbt_tasks:
+ print('cbt not found in "tasks" section', file=sys.stderr)
+ return sys.exit(1)
+ elif len(cbt_tasks) > 1:
+ print('more than one cbt task found in "tasks" section', file=sys.stderr)
+ return sys.exit(1)
+ translator = Translator(options.build_dir)
+ cbt_config = translator.translate(cbt_tasks[0])
+ with open(options.output, 'w') as output:
+ yaml.dump(cbt_config, output)
+
+if __name__ == '__main__':
+ main()
diff --git a/src/test/crimson/gtest_seastar.cc b/src/test/crimson/gtest_seastar.cc
new file mode 100644
index 000000000..9d43fc6be
--- /dev/null
+++ b/src/test/crimson/gtest_seastar.cc
@@ -0,0 +1,23 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "include/ceph_assert.h"
+#include "gtest_seastar.h"
+
+SeastarRunner seastar_test_suite_t::seastar_env;
+
+int main(int argc, char **argv)
+{
+ ::testing::InitGoogleTest(&argc, argv);
+
+ seastar_test_suite_t::seastar_env.init(argc, argv);
+
+ seastar::global_logger_registry().set_all_loggers_level(
+ seastar::log_level::debug
+ );
+
+ int ret = RUN_ALL_TESTS();
+
+ seastar_test_suite_t::seastar_env.stop();
+ return ret;
+}
diff --git a/src/test/crimson/gtest_seastar.h b/src/test/crimson/gtest_seastar.h
new file mode 100644
index 000000000..20709a3ee
--- /dev/null
+++ b/src/test/crimson/gtest_seastar.h
@@ -0,0 +1,35 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#include "gtest/gtest.h"
+
+#include "seastar_runner.h"
+
+struct seastar_test_suite_t : public ::testing::Test {
+ static SeastarRunner seastar_env;
+
+ template <typename Func>
+ void run(Func &&func) {
+ return seastar_env.run(std::forward<Func>(func));
+ }
+
+ template <typename Func>
+ void run_async(Func &&func) {
+ run(
+ [func=std::forward<Func>(func)]() mutable {
+ return seastar::async(std::forward<Func>(func));
+ });
+ }
+
+ virtual seastar::future<> set_up_fut() { return seastar::now(); }
+ void SetUp() final {
+ return run([this] { return set_up_fut(); });
+ }
+
+ virtual seastar::future<> tear_down_fut() { return seastar::now(); }
+ void TearDown() final {
+ return run([this] { return tear_down_fut(); });
+ }
+};
diff --git a/src/test/crimson/seastar_runner.h b/src/test/crimson/seastar_runner.h
new file mode 100644
index 000000000..5a430554e
--- /dev/null
+++ b/src/test/crimson/seastar_runner.h
@@ -0,0 +1,71 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#include <stdio.h>
+#include <signal.h>
+#include <thread>
+
+#include <seastar/core/app-template.hh>
+#include <seastar/core/future-util.hh>
+#include <seastar/core/reactor.hh>
+#include <seastar/core/alien.hh>
+#include <seastar/core/thread.hh>
+
+struct SeastarRunner {
+ seastar::app_template app;
+ seastar::file_desc begin_fd;
+ std::unique_ptr<seastar::readable_eventfd> on_end;
+
+ std::thread thread;
+
+ SeastarRunner() :
+ begin_fd{seastar::file_desc::eventfd(0, 0)} {}
+
+ ~SeastarRunner() {}
+
+ void init(int argc, char **argv)
+ {
+ thread = std::thread([argc, argv, this] { reactor(argc, argv); });
+ eventfd_t result = 0;
+ if (int r = ::eventfd_read(begin_fd.get(), &result); r < 0) {
+ std::cerr << "unable to eventfd_read():" << errno << std::endl;
+ throw std::runtime_error("Cannot start seastar");
+ }
+ }
+
+ void stop()
+ {
+ run([this] {
+ on_end->write_side().signal(1);
+ return seastar::now();
+ });
+ thread.join();
+ }
+
+ void reactor(int argc, char **argv)
+ {
+ app.run(argc, argv, [this] {
+ on_end.reset(new seastar::readable_eventfd);
+ return seastar::now().then([this] {
+ ::eventfd_write(begin_fd.get(), 1);
+ return seastar::now();
+ }).then([this] {
+ return on_end->wait().then([](size_t){});
+ }).handle_exception([](auto ep) {
+ std::cerr << "Error: " << ep << std::endl;
+ }).finally([this] {
+ on_end.reset();
+ });
+ });
+ }
+
+ template <typename Func>
+ void run(Func &&func) {
+ auto fut = seastar::alien::submit_to(0, std::forward<Func>(func));
+ fut.get();
+ }
+};
+
+
diff --git a/src/test/crimson/seastore/CMakeLists.txt b/src/test/crimson/seastore/CMakeLists.txt
new file mode 100644
index 000000000..6c21ac7c5
--- /dev/null
+++ b/src/test/crimson/seastore/CMakeLists.txt
@@ -0,0 +1,51 @@
+add_executable(unittest-transaction-manager
+ test_block.cc
+ test_transaction_manager.cc
+ ../gtest_seastar.cc)
+add_ceph_unittest(unittest-transaction-manager
+ --memory 256M --smp 1)
+target_link_libraries(
+ unittest-transaction-manager
+ ${CMAKE_DL_LIBS}
+ crimson-seastore)
+
+add_executable(unittest-btree-lba-manager
+ test_btree_lba_manager.cc
+ ../gtest_seastar.cc)
+add_ceph_unittest(unittest-btree-lba-manager
+ --memory 256M --smp 1)
+target_link_libraries(
+ unittest-btree-lba-manager
+ ${CMAKE_DL_LIBS}
+ crimson-seastore)
+
+add_executable(unittest-seastore-journal
+ test_seastore_journal.cc)
+add_ceph_test(unittest-seastore-journal
+ unittest-seastore-journal --memory 256M --smp 1)
+target_link_libraries(
+ unittest-seastore-journal
+ crimson::gtest
+ crimson-seastore)
+
+add_executable(unittest-seastore-cache
+ test_block.cc
+ test_seastore_cache.cc)
+add_ceph_test(unittest-seastore-cache
+ unittest-seastore-cache --memory 256M --smp 1)
+target_link_libraries(
+ unittest-seastore-cache
+ crimson::gtest
+ crimson-seastore)
+
+add_executable(unittest-extmap-manager
+ test_extmap_manager.cc
+ ../gtest_seastar.cc)
+add_ceph_unittest(unittest-extmap-manager
+ --memory 256M --smp 1)
+target_link_libraries(
+ unittest-extmap-manager
+ ${CMAKE_DL_LIBS}
+ crimson-seastore)
+
+add_subdirectory(onode_tree)
diff --git a/src/test/crimson/seastore/onode_tree/CMakeLists.txt b/src/test/crimson/seastore/onode_tree/CMakeLists.txt
new file mode 100644
index 000000000..0886d2fb6
--- /dev/null
+++ b/src/test/crimson/seastore/onode_tree/CMakeLists.txt
@@ -0,0 +1,15 @@
+add_executable(test-seastore-onode-tree-node
+ test_node.cc)
+add_ceph_unittest(test-seastore-onode-tree-node
+ --memory 256M --smp 1)
+target_link_libraries(test-seastore-onode-tree-node
+ crimson-seastore
+ GTest::Main)
+
+add_executable(unittest-staged-fltree
+ test_staged_fltree.cc
+ ../../gtest_seastar.cc)
+add_ceph_unittest(unittest-staged-fltree
+ --memory 256M --smp 1)
+target_link_libraries(unittest-staged-fltree
+ crimson-seastore)
diff --git a/src/test/crimson/seastore/onode_tree/test_node.cc b/src/test/crimson/seastore/onode_tree/test_node.cc
new file mode 100644
index 000000000..178f78365
--- /dev/null
+++ b/src/test/crimson/seastore/onode_tree/test_node.cc
@@ -0,0 +1,207 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <gtest/gtest.h>
+
+#include "crimson/os/seastore/onode_manager/simple-fltree/onode_node.h"
+
+using crimson::os::seastore::Onode;
+using crimson::os::seastore::OnodeRef;
+
+TEST(OnodeNode, denc)
+{
+ Onode onode{"hello"};
+ bufferlist bl;
+ ceph::encode(onode, bl);
+ bl.rebuild();
+ auto flattened = reinterpret_cast<const onode_t*>(bl.c_str());
+ auto actual_onode = flattened->decode();
+ ASSERT_EQ(*actual_onode, onode);
+}
+
+TEST(OnodeNode, lookup)
+{
+ static constexpr size_t BLOCK_SIZE = 512;
+ char buf[BLOCK_SIZE];
+ using leaf_node_0 = node_t<BLOCK_SIZE, 0, ntype_t::leaf>;
+ auto leaf = new (buf) leaf_node_0;
+ ghobject_t oid{hobject_t{object_t{"saturn"}, "", 0, 0, 0, "solar"}};
+ {
+ auto [slot, found] = leaf->lower_bound(oid);
+ ASSERT_FALSE(found);
+ ASSERT_EQ(0, slot);
+ }
+ Onode onode{"hello"};
+ bufferlist bl;
+ ceph::encode(onode, bl);
+ bl.rebuild();
+ auto flattened = reinterpret_cast<const onode_t*>(bl.c_str());
+ leaf->insert_at(0, oid, *flattened);
+ {
+ auto [slot, found] = leaf->lower_bound(oid);
+ ASSERT_TRUE(found);
+ ASSERT_EQ(0, slot);
+ const auto& [key1, key2] = leaf->key_at(slot);
+ auto& item = leaf->item_at(key1);
+ auto actual_onode = item.decode();
+ ASSERT_EQ(*actual_onode, onode);
+ }
+}
+
+TEST(OnodeNode, grab_from_right)
+{
+ static constexpr size_t BLOCK_SIZE = 512;
+ char buf1[BLOCK_SIZE];
+ char buf2[BLOCK_SIZE];
+ using leaf_node_0 = node_t<BLOCK_SIZE, 0, ntype_t::leaf>;
+ auto leaf1 = new (buf1) leaf_node_0;
+ auto leaf2 = new (buf2) leaf_node_0;
+ auto& dummy_parent = *leaf1;
+
+ ghobject_t oid1{hobject_t{object_t{"earth"}, "", 0, 0, 0, "solar"}};
+ ghobject_t oid2{hobject_t{object_t{"jupiter"}, "", 0, 0, 0, "solar"}};
+ ghobject_t oid3{hobject_t{object_t{"saturn"}, "", 0, 0, 0, "solar"}};
+ Onode onode{"hello"};
+ bufferlist bl;
+ ceph::encode(onode, bl);
+ bl.rebuild();
+ auto flattened = reinterpret_cast<const onode_t*>(bl.c_str());
+ // so they are ordered as they should
+ leaf1->insert_at(0, oid1, *flattened);
+ ASSERT_EQ(1, leaf1->count);
+ {
+ auto [slot, found] = leaf1->lower_bound(oid1);
+ ASSERT_TRUE(found);
+ ASSERT_EQ(0, slot);
+ }
+ {
+ leaf2->insert_at(0, oid2, *flattened);
+ auto [slot, found] = leaf2->lower_bound(oid2);
+ ASSERT_TRUE(found);
+ ASSERT_EQ(0, slot);
+ }
+ {
+ leaf2->insert_at(1, oid3, *flattened);
+ auto [slot, found] = leaf2->lower_bound(oid3);
+ ASSERT_TRUE(found);
+ ASSERT_EQ(1, slot);
+ }
+ ASSERT_EQ(2, leaf2->count);
+
+ // normally we let left merge right, so we just need to remove an
+ // entry in parent, let's keep this convention here
+ auto mover = make_mover(dummy_parent, *leaf2, *leaf1, 0);
+ // just grab a single item from right
+ mover.move_from(0, 1, 1);
+ auto to_delta = mover.to_delta();
+ ASSERT_EQ(to_delta.op_t::insert_back, to_delta.op);
+ leaf1->insert_back(std::move(to_delta.keys), std::move(to_delta.cells));
+
+ ASSERT_EQ(2, leaf1->count);
+ {
+ auto [slot, found] = leaf1->lower_bound(oid2);
+ ASSERT_TRUE(found);
+ ASSERT_EQ(1, slot);
+ }
+
+ auto from_delta = mover.from_delta();
+ ASSERT_EQ(from_delta.op_t::shift_left, from_delta.op);
+ leaf2->shift_left(from_delta.n, 0);
+ ASSERT_EQ(1, leaf2->count);
+}
+
+TEST(OnodeNode, merge_right)
+{
+ static constexpr size_t BLOCK_SIZE = 512;
+ char buf1[BLOCK_SIZE];
+ char buf2[BLOCK_SIZE];
+ using leaf_node_0 = node_t<BLOCK_SIZE, 0, ntype_t::leaf>;
+ auto leaf1 = new (buf1) leaf_node_0;
+ auto leaf2 = new (buf2) leaf_node_0;
+ auto& dummy_parent = leaf1;
+
+ ghobject_t oid1{hobject_t{object_t{"earth"}, "", 0, 0, 0, "solar"}};
+ ghobject_t oid2{hobject_t{object_t{"jupiter"}, "", 0, 0, 0, "solar"}};
+ ghobject_t oid3{hobject_t{object_t{"saturn"}, "", 0, 0, 0, "solar"}};
+ Onode onode{"hello"};
+ bufferlist bl;
+ ceph::encode(onode, bl);
+ bl.rebuild();
+ auto flattened = reinterpret_cast<const onode_t*>(bl.c_str());
+ // so they are ordered as they should
+ leaf1->insert_at(0, oid1, *flattened);
+ ASSERT_EQ(1, leaf1->count);
+ {
+ auto [slot, found] = leaf1->lower_bound(oid1);
+ ASSERT_TRUE(found);
+ ASSERT_EQ(0, slot);
+ }
+ {
+ leaf2->insert_at(0, oid2, *flattened);
+ auto [slot, found] = leaf2->lower_bound(oid2);
+ ASSERT_TRUE(found);
+ ASSERT_EQ(0, slot);
+ }
+ {
+ leaf2->insert_at(1, oid3, *flattened);
+ auto [slot, found] = leaf2->lower_bound(oid3);
+ ASSERT_TRUE(found);
+ ASSERT_EQ(1, slot);
+ }
+ ASSERT_EQ(2, leaf2->count);
+
+ // normally we let left merge right, so we just need to remove an
+ // entry in parent, let's keep this convention here
+ auto mover = make_mover(dummy_parent, *leaf2, *leaf1, 0);
+ // just grab a single item from right
+ mover.move_from(0, 1, 2);
+ auto to_delta = mover.to_delta();
+ ASSERT_EQ(to_delta.op_t::insert_back, to_delta.op);
+ leaf1->insert_back(std::move(to_delta.keys), std::move(to_delta.cells));
+
+ ASSERT_EQ(3, leaf1->count);
+ {
+ auto [slot, found] = leaf1->lower_bound(oid2);
+ ASSERT_TRUE(found);
+ ASSERT_EQ(1, slot);
+ }
+ {
+ auto [slot, found] = leaf1->lower_bound(oid3);
+ ASSERT_TRUE(found);
+ ASSERT_EQ(2, slot);
+ }
+
+ // its onode tree's responsibility to retire the node
+ auto from_delta = mover.from_delta();
+ ASSERT_EQ(from_delta.op_t::nop, from_delta.op);
+}
+
+TEST(OnodeNode, remove_basic)
+{
+ static constexpr size_t BLOCK_SIZE = 512;
+ char buf[BLOCK_SIZE];
+ using leaf_node_0 = node_t<BLOCK_SIZE, 0, ntype_t::leaf>;
+ auto leaf = new (buf) leaf_node_0;
+ ghobject_t oid{hobject_t{object_t{"saturn"}, "", 0, 0, 0, "solar"}};
+ {
+ auto [slot, found] = leaf->lower_bound(oid);
+ ASSERT_FALSE(found);
+ ASSERT_EQ(0, slot);
+ }
+ Onode onode{"hello"};
+ bufferlist bl;
+ ceph::encode(onode, bl);
+ bl.rebuild();
+ auto flattened = reinterpret_cast<const onode_t*>(bl.c_str());
+ leaf->insert_at(0, oid, *flattened);
+ {
+ auto [slot, found] = leaf->lower_bound(oid);
+ ASSERT_TRUE(found);
+ ASSERT_EQ(0, slot);
+ leaf->remove_from(slot);
+ }
+ {
+ auto [slot, found] = leaf->lower_bound(oid);
+ ASSERT_FALSE(found);
+ }
+}
diff --git a/src/test/crimson/seastore/onode_tree/test_staged_fltree.cc b/src/test/crimson/seastore/onode_tree/test_staged_fltree.cc
new file mode 100644
index 000000000..da7422bcb
--- /dev/null
+++ b/src/test/crimson/seastore/onode_tree/test_staged_fltree.cc
@@ -0,0 +1,1204 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <array>
+#include <cstring>
+#include <memory>
+#include <set>
+#include <sstream>
+#include <vector>
+
+#include "crimson/common/log.h"
+#include "crimson/os/seastore/onode_manager/staged-fltree/node.h"
+#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager.h"
+#include "crimson/os/seastore/onode_manager/staged-fltree/node_layout.h"
+#include "crimson/os/seastore/onode_manager/staged-fltree/tree.h"
+#include "crimson/os/seastore/onode_manager/staged-fltree/tree_utils.h"
+
+#include "test/crimson/gtest_seastar.h"
+#include "test/crimson/seastore/transaction_manager_test_state.h"
+
+using namespace crimson::os::seastore::onode;
+
+namespace {
+ constexpr bool IS_DUMMY_SYNC = false;
+
+ [[maybe_unused]] seastar::logger& logger() {
+ return crimson::get_logger(ceph_subsys_test);
+ }
+
+ ghobject_t make_ghobj(
+ shard_t shard, pool_t pool, crush_hash_t crush,
+ std::string ns, std::string oid, snap_t snap, gen_t gen) {
+ return ghobject_t{shard_id_t{shard}, pool, crush, ns, oid, snap, gen};
+ }
+
+ // return a key_view_t and its underlying memory buffer.
+ // the buffer needs to be freed manually.
+ std::pair<key_view_t, void*> build_key_view(const ghobject_t& hobj) {
+ key_hobj_t key_hobj(hobj);
+ size_t key_size = sizeof(shard_pool_crush_t) + sizeof(snap_gen_t) +
+ ns_oid_view_t::estimate_size<KeyT::HOBJ>(key_hobj);
+ void* p_mem = std::malloc(key_size);
+
+ key_view_t key_view;
+ char* p_fill = (char*)p_mem + key_size;
+
+ auto spc = shard_pool_crush_t::from_key<KeyT::HOBJ>(key_hobj);
+ p_fill -= sizeof(shard_pool_crush_t);
+ std::memcpy(p_fill, &spc, sizeof(shard_pool_crush_t));
+ key_view.set(*reinterpret_cast<const shard_pool_crush_t*>(p_fill));
+
+ auto p_ns_oid = p_fill;
+ ns_oid_view_t::test_append<KeyT::HOBJ>(key_hobj, p_fill);
+ ns_oid_view_t ns_oid_view(p_ns_oid);
+ key_view.set(ns_oid_view);
+
+ auto sg = snap_gen_t::from_key<KeyT::HOBJ>(key_hobj);
+ p_fill -= sizeof(snap_gen_t);
+ ceph_assert(p_fill == (char*)p_mem);
+ std::memcpy(p_fill, &sg, sizeof(snap_gen_t));
+ key_view.set(*reinterpret_cast<const snap_gen_t*>(p_fill));
+
+ return {key_view, p_mem};
+ }
+}
+
+struct a_basic_test_t : public seastar_test_suite_t {};
+
+TEST_F(a_basic_test_t, 1_basic_sizes)
+{
+ logger().info("\n"
+ "Bytes of struct:\n"
+ " node_header_t: {}\n"
+ " shard_pool_t: {}\n"
+ " shard_pool_crush_t: {}\n"
+ " crush_t: {}\n"
+ " snap_gen_t: {}\n"
+ " slot_0_t: {}\n"
+ " slot_1_t: {}\n"
+ " slot_3_t: {}\n"
+ " node_fields_0_t: {}\n"
+ " node_fields_1_t: {}\n"
+ " node_fields_2_t: {}\n"
+ " internal_fields_3_t: {}\n"
+ " leaf_fields_3_t: {}\n"
+ " internal_sub_item_t: {}",
+ sizeof(node_header_t), sizeof(shard_pool_t),
+ sizeof(shard_pool_crush_t), sizeof(crush_t), sizeof(snap_gen_t),
+ sizeof(slot_0_t), sizeof(slot_1_t), sizeof(slot_3_t),
+ sizeof(node_fields_0_t), sizeof(node_fields_1_t), sizeof(node_fields_2_t),
+ sizeof(internal_fields_3_t), sizeof(leaf_fields_3_t), sizeof(internal_sub_item_t)
+ );
+
+ auto hobj = make_ghobj(0, 0, 0, "n", "o", 0, 0);
+ key_hobj_t key(hobj);
+ auto [key_view, p_mem] = build_key_view(hobj);
+ onode_t value = {2};
+#define _STAGE_T(NodeType) node_to_stage_t<typename NodeType::node_stage_t>
+#define NXT_T(StageType) staged<typename StageType::next_param_t>
+ laddr_packed_t i_value{0};
+ logger().info("\n"
+ "Bytes of a key-value insertion (full-string):\n"
+ " s-p-c, 'n'-'o', s-g => onode_t(2): typically internal 41B, leaf 35B\n"
+ " InternalNode0: {} {} {}\n"
+ " InternalNode1: {} {} {}\n"
+ " InternalNode2: {} {}\n"
+ " InternalNode3: {}\n"
+ " LeafNode0: {} {} {}\n"
+ " LeafNode1: {} {} {}\n"
+ " LeafNode2: {} {}\n"
+ " LeafNode3: {}",
+ _STAGE_T(InternalNode0)::template insert_size<KeyT::VIEW>(key_view, i_value),
+ NXT_T(_STAGE_T(InternalNode0))::template insert_size<KeyT::VIEW>(key_view, i_value),
+ NXT_T(NXT_T(_STAGE_T(InternalNode0)))::template insert_size<KeyT::VIEW>(key_view, i_value),
+ _STAGE_T(InternalNode1)::template insert_size<KeyT::VIEW>(key_view, i_value),
+ NXT_T(_STAGE_T(InternalNode1))::template insert_size<KeyT::VIEW>(key_view, i_value),
+ NXT_T(NXT_T(_STAGE_T(InternalNode1)))::template insert_size<KeyT::VIEW>(key_view, i_value),
+ _STAGE_T(InternalNode2)::template insert_size<KeyT::VIEW>(key_view, i_value),
+ NXT_T(_STAGE_T(InternalNode2))::template insert_size<KeyT::VIEW>(key_view, i_value),
+ _STAGE_T(InternalNode3)::template insert_size<KeyT::VIEW>(key_view, i_value),
+ _STAGE_T(LeafNode0)::template insert_size<KeyT::HOBJ>(key, value),
+ NXT_T(_STAGE_T(LeafNode0))::template insert_size<KeyT::HOBJ>(key, value),
+ NXT_T(NXT_T(_STAGE_T(LeafNode0)))::template insert_size<KeyT::HOBJ>(key, value),
+ _STAGE_T(LeafNode1)::template insert_size<KeyT::HOBJ>(key, value),
+ NXT_T(_STAGE_T(LeafNode1))::template insert_size<KeyT::HOBJ>(key, value),
+ NXT_T(NXT_T(_STAGE_T(LeafNode1)))::template insert_size<KeyT::HOBJ>(key, value),
+ _STAGE_T(LeafNode2)::template insert_size<KeyT::HOBJ>(key, value),
+ NXT_T(_STAGE_T(LeafNode2))::template insert_size<KeyT::HOBJ>(key, value),
+ _STAGE_T(LeafNode3)::template insert_size<KeyT::HOBJ>(key, value)
+ );
+ std::free(p_mem);
+}
+
+TEST_F(a_basic_test_t, 2_node_sizes)
+{
+ run_async([this] {
+ auto nm = NodeExtentManager::create_dummy(IS_DUMMY_SYNC);
+ auto t = make_transaction();
+ context_t c{*nm, *t};
+ std::array<std::pair<NodeImplURef, NodeExtentMutable>, 16> nodes = {
+ InternalNode0::allocate(c, false, 1u).unsafe_get0().make_pair(),
+ InternalNode1::allocate(c, false, 1u).unsafe_get0().make_pair(),
+ InternalNode2::allocate(c, false, 1u).unsafe_get0().make_pair(),
+ InternalNode3::allocate(c, false, 1u).unsafe_get0().make_pair(),
+ InternalNode0::allocate(c, true, 1u).unsafe_get0().make_pair(),
+ InternalNode1::allocate(c, true, 1u).unsafe_get0().make_pair(),
+ InternalNode2::allocate(c, true, 1u).unsafe_get0().make_pair(),
+ InternalNode3::allocate(c, true, 1u).unsafe_get0().make_pair(),
+ LeafNode0::allocate(c, false, 0u).unsafe_get0().make_pair(),
+ LeafNode1::allocate(c, false, 0u).unsafe_get0().make_pair(),
+ LeafNode2::allocate(c, false, 0u).unsafe_get0().make_pair(),
+ LeafNode3::allocate(c, false, 0u).unsafe_get0().make_pair(),
+ LeafNode0::allocate(c, true, 0u).unsafe_get0().make_pair(),
+ LeafNode1::allocate(c, true, 0u).unsafe_get0().make_pair(),
+ LeafNode2::allocate(c, true, 0u).unsafe_get0().make_pair(),
+ LeafNode3::allocate(c, true, 0u).unsafe_get0().make_pair()
+ };
+ std::ostringstream oss;
+ oss << "\nallocated nodes:";
+ for (auto iter = nodes.begin(); iter != nodes.end(); ++iter) {
+ oss << "\n ";
+ auto& ref_node = iter->first;
+ ref_node->dump_brief(oss);
+ }
+ logger().info("{}", oss.str());
+ });
+}
+
+struct b_dummy_tree_test_t : public seastar_test_suite_t {
+ NodeExtentManagerURef moved_nm;
+ TransactionRef ref_t;
+ Transaction& t;
+ context_t c;
+ Btree tree;
+
+ b_dummy_tree_test_t()
+ : moved_nm{NodeExtentManager::create_dummy(IS_DUMMY_SYNC)},
+ ref_t{make_transaction()},
+ t{*ref_t},
+ c{*moved_nm, t},
+ tree{std::move(moved_nm)} {}
+
+ seastar::future<> set_up_fut() override final {
+ return tree.mkfs(t).handle_error(
+ crimson::ct_error::all_same_way([] {
+ ASSERT_FALSE("Unable to mkfs");
+ })
+ );
+ }
+};
+
+TEST_F(b_dummy_tree_test_t, 3_random_insert_leaf_node)
+{
+ run_async([this] {
+ logger().info("\n---------------------------------------------"
+ "\nrandomized leaf node insert:\n");
+ auto key_s = make_ghobj(0, 0, 0, "ns", "oid", 0, 0);
+ auto key_e = make_ghobj(
+ std::numeric_limits<shard_t>::max(), 0, 0, "ns", "oid", 0, 0);
+ ASSERT_TRUE(tree.find(t, key_s).unsafe_get0().is_end());
+ ASSERT_TRUE(tree.begin(t).unsafe_get0().is_end());
+ ASSERT_TRUE(tree.last(t).unsafe_get0().is_end());
+
+ std::vector<std::tuple<ghobject_t,
+ const onode_t*,
+ Btree::Cursor>> insert_history;
+ auto f_validate_insert_new = [this, &insert_history] (
+ const ghobject_t& key, const onode_t& value) {
+ auto [cursor, success] = tree.insert(t, key, value).unsafe_get0();
+ ceph_assert(success);
+ insert_history.emplace_back(key, &value, cursor);
+ Onodes::validate_cursor(cursor, key, value);
+ auto cursor_ = tree.lower_bound(t, key).unsafe_get0();
+ ceph_assert(cursor_.get_ghobj() == key);
+ ceph_assert(cursor_.value() == cursor.value());
+ return cursor.value();
+ };
+ auto onodes = Onodes(15);
+
+ // insert key1, onode1 at STAGE_LEFT
+ auto key1 = make_ghobj(3, 3, 3, "ns3", "oid3", 3, 3);
+ auto& onode1 = onodes.pick();
+ auto p_value1 = f_validate_insert_new(key1, onode1);
+
+ // validate lookup
+ {
+ auto cursor1_s = tree.lower_bound(t, key_s).unsafe_get0();
+ ASSERT_EQ(cursor1_s.get_ghobj(), key1);
+ ASSERT_EQ(cursor1_s.value(), p_value1);
+ auto cursor1_e = tree.lower_bound(t, key_e).unsafe_get0();
+ ASSERT_TRUE(cursor1_e.is_end());
+ }
+
+ // insert the same key1 with a different onode
+ {
+ auto& onode1_dup = onodes.pick();
+ auto [cursor1_dup, ret1_dup] = tree.insert(
+ t, key1, onode1_dup).unsafe_get0();
+ ASSERT_FALSE(ret1_dup);
+ Onodes::validate_cursor(cursor1_dup, key1, onode1);
+ }
+
+ // insert key2, onode2 to key1's left at STAGE_LEFT
+ // insert node front at STAGE_LEFT
+ auto key2 = make_ghobj(2, 2, 2, "ns3", "oid3", 3, 3);
+ auto& onode2 = onodes.pick();
+ f_validate_insert_new(key2, onode2);
+
+ // insert key3, onode3 to key1's right at STAGE_LEFT
+ // insert node last at STAGE_LEFT
+ auto key3 = make_ghobj(4, 4, 4, "ns3", "oid3", 3, 3);
+ auto& onode3 = onodes.pick();
+ f_validate_insert_new(key3, onode3);
+
+ // insert key4, onode4 to key1's left at STAGE_STRING (collision)
+ auto key4 = make_ghobj(3, 3, 3, "ns2", "oid2", 3, 3);
+ auto& onode4 = onodes.pick();
+ f_validate_insert_new(key4, onode4);
+
+ // insert key5, onode5 to key1's right at STAGE_STRING (collision)
+ auto key5 = make_ghobj(3, 3, 3, "ns4", "oid4", 3, 3);
+ auto& onode5 = onodes.pick();
+ f_validate_insert_new(key5, onode5);
+
+ // insert key6, onode6 to key1's left at STAGE_RIGHT
+ auto key6 = make_ghobj(3, 3, 3, "ns3", "oid3", 2, 2);
+ auto& onode6 = onodes.pick();
+ f_validate_insert_new(key6, onode6);
+
+ // insert key7, onode7 to key1's right at STAGE_RIGHT
+ auto key7 = make_ghobj(3, 3, 3, "ns3", "oid3", 4, 4);
+ auto& onode7 = onodes.pick();
+ f_validate_insert_new(key7, onode7);
+
+ // insert node front at STAGE_RIGHT
+ auto key8 = make_ghobj(2, 2, 2, "ns3", "oid3", 2, 2);
+ auto& onode8 = onodes.pick();
+ f_validate_insert_new(key8, onode8);
+
+ // insert node front at STAGE_STRING (collision)
+ auto key9 = make_ghobj(2, 2, 2, "ns2", "oid2", 3, 3);
+ auto& onode9 = onodes.pick();
+ f_validate_insert_new(key9, onode9);
+
+ // insert node last at STAGE_RIGHT
+ auto key10 = make_ghobj(4, 4, 4, "ns3", "oid3", 4, 4);
+ auto& onode10 = onodes.pick();
+ f_validate_insert_new(key10, onode10);
+
+ // insert node last at STAGE_STRING (collision)
+ auto key11 = make_ghobj(4, 4, 4, "ns4", "oid4", 3, 3);
+ auto& onode11 = onodes.pick();
+ f_validate_insert_new(key11, onode11);
+
+ // insert key, value randomly until a perfect 3-ary tree is formed
+ std::vector<std::pair<ghobject_t, const onode_t*>> kvs{
+ {make_ghobj(2, 2, 2, "ns2", "oid2", 2, 2), &onodes.pick()},
+ {make_ghobj(2, 2, 2, "ns2", "oid2", 4, 4), &onodes.pick()},
+ {make_ghobj(2, 2, 2, "ns3", "oid3", 4, 4), &onodes.pick()},
+ {make_ghobj(2, 2, 2, "ns4", "oid4", 2, 2), &onodes.pick()},
+ {make_ghobj(2, 2, 2, "ns4", "oid4", 3, 3), &onodes.pick()},
+ {make_ghobj(2, 2, 2, "ns4", "oid4", 4, 4), &onodes.pick()},
+ {make_ghobj(3, 3, 3, "ns2", "oid2", 2, 2), &onodes.pick()},
+ {make_ghobj(3, 3, 3, "ns2", "oid2", 4, 4), &onodes.pick()},
+ {make_ghobj(3, 3, 3, "ns4", "oid4", 2, 2), &onodes.pick()},
+ {make_ghobj(3, 3, 3, "ns4", "oid4", 4, 4), &onodes.pick()},
+ {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2), &onodes.pick()},
+ {make_ghobj(4, 4, 4, "ns2", "oid2", 3, 3), &onodes.pick()},
+ {make_ghobj(4, 4, 4, "ns2", "oid2", 4, 4), &onodes.pick()},
+ {make_ghobj(4, 4, 4, "ns3", "oid3", 2, 2), &onodes.pick()},
+ {make_ghobj(4, 4, 4, "ns4", "oid4", 2, 2), &onodes.pick()},
+ {make_ghobj(4, 4, 4, "ns4", "oid4", 4, 4), &onodes.pick()}};
+ auto [smallest_key, smallest_value] = kvs[0];
+ auto [largest_key, largest_value] = kvs[kvs.size() - 1];
+ std::random_shuffle(kvs.begin(), kvs.end());
+ std::for_each(kvs.begin(), kvs.end(), [&f_validate_insert_new] (auto& kv) {
+ f_validate_insert_new(kv.first, *kv.second);
+ });
+ ASSERT_EQ(tree.height(t).unsafe_get0(), 1);
+ ASSERT_FALSE(tree.test_is_clean());
+
+ for (auto& [k, v, c] : insert_history) {
+ // validate values in tree keep intact
+ auto cursor = tree.lower_bound(t, k).unsafe_get0();
+ Onodes::validate_cursor(cursor, k, *v);
+ // validate values in cursors keep intact
+ Onodes::validate_cursor(c, k, *v);
+ }
+ Onodes::validate_cursor(
+ tree.lower_bound(t, key_s).unsafe_get0(), smallest_key, *smallest_value);
+ Onodes::validate_cursor(
+ tree.begin(t).unsafe_get0(), smallest_key, *smallest_value);
+ Onodes::validate_cursor(
+ tree.last(t).unsafe_get0(), largest_key, *largest_value);
+
+ std::ostringstream oss;
+ tree.dump(t, oss);
+ logger().info("\n{}\n", oss.str());
+
+ insert_history.clear();
+ });
+}
+
+static std::set<ghobject_t> build_key_set(
+ std::pair<unsigned, unsigned> range_2,
+ std::pair<unsigned, unsigned> range_1,
+ std::pair<unsigned, unsigned> range_0,
+ std::string padding = "",
+ bool is_internal = false) {
+ ceph_assert(range_1.second <= 10);
+ std::set<ghobject_t> ret;
+ ghobject_t key;
+ for (unsigned i = range_2.first; i < range_2.second; ++i) {
+ for (unsigned j = range_1.first; j < range_1.second; ++j) {
+ for (unsigned k = range_0.first; k < range_0.second; ++k) {
+ std::ostringstream os_ns;
+ os_ns << "ns" << j;
+ std::ostringstream os_oid;
+ os_oid << "oid" << j << padding;
+ key = make_ghobj(i, i, i, os_ns.str(), os_oid.str(), k, k);
+ ret.insert(key);
+ }
+ }
+ }
+ if (is_internal) {
+ ret.insert(make_ghobj(9, 9, 9, "ns~last", "oid~last", 9, 9));
+ }
+ return ret;
+}
+
+class TestTree {
+ public:
+ TestTree()
+ : moved_nm{NodeExtentManager::create_dummy(IS_DUMMY_SYNC)},
+ ref_t{make_transaction()},
+ t{*ref_t},
+ c{*moved_nm, t},
+ tree{std::move(moved_nm)},
+ onodes{0} {}
+
+ seastar::future<> build_tree(
+ std::pair<unsigned, unsigned> range_2,
+ std::pair<unsigned, unsigned> range_1,
+ std::pair<unsigned, unsigned> range_0,
+ size_t onode_size) {
+ return seastar::async([this, range_2, range_1, range_0, onode_size] {
+ tree.mkfs(t).unsafe_get0();
+ //logger().info("\n---------------------------------------------"
+ // "\nbefore leaf node split:\n");
+ auto keys = build_key_set(range_2, range_1, range_0);
+ for (auto& key : keys) {
+ auto& value = onodes.create(onode_size);
+ insert_tree(key, value).get0();
+ }
+ ASSERT_EQ(tree.height(t).unsafe_get0(), 1);
+ ASSERT_FALSE(tree.test_is_clean());
+ //std::ostringstream oss;
+ //tree.dump(t, oss);
+ //logger().info("\n{}\n", oss.str());
+ });
+ }
+
+ seastar::future<> build_tree(
+ const std::vector<ghobject_t>& keys, const std::vector<const onode_t*>& values) {
+ return seastar::async([this, keys, values] {
+ tree.mkfs(t).unsafe_get0();
+ //logger().info("\n---------------------------------------------"
+ // "\nbefore leaf node split:\n");
+ ASSERT_EQ(keys.size(), values.size());
+ auto key_iter = keys.begin();
+ auto value_iter = values.begin();
+ while (key_iter != keys.end()) {
+ insert_tree(*key_iter, **value_iter).get0();
+ ++key_iter;
+ ++value_iter;
+ }
+ ASSERT_EQ(tree.height(t).unsafe_get0(), 1);
+ ASSERT_FALSE(tree.test_is_clean());
+ //std::ostringstream oss;
+ //tree.dump(t, oss);
+ //logger().info("\n{}\n", oss.str());
+ });
+ }
+
+ seastar::future<> split(const ghobject_t& key, const onode_t& value,
+ const split_expectation_t& expected) {
+ return seastar::async([this, key, &value, expected] {
+ Btree tree_clone(NodeExtentManager::create_dummy(IS_DUMMY_SYNC));
+ auto ref_t_clone = make_transaction();
+ Transaction& t_clone = *ref_t_clone;
+ tree_clone.test_clone_from(t_clone, t, tree).unsafe_get0();
+
+ logger().info("insert {}:", key_hobj_t(key));
+ auto [cursor, success] = tree_clone.insert(t_clone, key, value).unsafe_get0();
+ ASSERT_TRUE(success);
+ Onodes::validate_cursor(cursor, key, value);
+
+ std::ostringstream oss;
+ tree_clone.dump(t_clone, oss);
+ logger().info("dump new root:\n{}", oss.str());
+ EXPECT_EQ(tree_clone.height(t_clone).unsafe_get0(), 2);
+
+ for (auto& [k, v, c] : insert_history) {
+ auto result = tree_clone.lower_bound(t_clone, k).unsafe_get0();
+ Onodes::validate_cursor(result, k, *v);
+ }
+ auto result = tree_clone.lower_bound(t_clone, key).unsafe_get0();
+ Onodes::validate_cursor(result, key, value);
+ EXPECT_TRUE(last_split.match(expected));
+ });
+ }
+
+ const onode_t& create_onode(size_t size) {
+ return onodes.create(size);
+ }
+
+ private:
+ seastar::future<> insert_tree(const ghobject_t& key, const onode_t& value) {
+ return seastar::async([this, &key, &value] {
+ auto [cursor, success] = tree.insert(t, key, value).unsafe_get0();
+ ASSERT_TRUE(success);
+ Onodes::validate_cursor(cursor, key, value);
+ insert_history.emplace_back(key, &value, cursor);
+ });
+ }
+
+ NodeExtentManagerURef moved_nm;
+ TransactionRef ref_t;
+ Transaction& t;
+ context_t c;
+ Btree tree;
+ Onodes onodes;
+ std::vector<std::tuple<
+ ghobject_t, const onode_t*, Btree::Cursor>> insert_history;
+};
+
+struct c_dummy_test_t : public seastar_test_suite_t {};
+
+TEST_F(c_dummy_test_t, 4_split_leaf_node)
+{
+ run_async([this] {
+ {
+ TestTree test;
+ test.build_tree({2, 5}, {2, 5}, {2, 5}, 120).get0();
+
+ auto& onode = test.create_onode(1144);
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 2; insert to left front at stage 2, 1, 0\n");
+ test.split(make_ghobj(1, 1, 1, "ns3", "oid3", 3, 3), onode,
+ {2u, 2u, true, InsertType::BEGIN}).get0();
+ test.split(make_ghobj(2, 2, 2, "ns1", "oid1", 3, 3), onode,
+ {2u, 1u, true, InsertType::BEGIN}).get0();
+ test.split(make_ghobj(2, 2, 2, "ns2", "oid2", 1, 1), onode,
+ {2u, 0u, true, InsertType::BEGIN}).get0();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 2; insert to left back at stage 0, 1, 2, 1, 0\n");
+ test.split(make_ghobj(2, 2, 2, "ns4", "oid4", 5, 5), onode,
+ {2u, 0u, true, InsertType::LAST}).get0();
+ test.split(make_ghobj(2, 2, 2, "ns5", "oid5", 3, 3), onode,
+ {2u, 1u, true, InsertType::LAST}).get0();
+ test.split(make_ghobj(2, 3, 3, "ns3", "oid3", 3, 3), onode,
+ {2u, 2u, true, InsertType::LAST}).get0();
+ test.split(make_ghobj(3, 3, 3, "ns1", "oid1", 3, 3), onode,
+ {2u, 1u, true, InsertType::LAST}).get0();
+ test.split(make_ghobj(3, 3, 3, "ns2", "oid2", 1, 1), onode,
+ {2u, 0u, true, InsertType::LAST}).get0();
+
+ auto& onode0 = test.create_onode(1416);
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 2; insert to right front at stage 0, 1, 2, 1, 0\n");
+ test.split(make_ghobj(3, 3, 3, "ns4", "oid4", 5, 5), onode0,
+ {2u, 0u, false, InsertType::BEGIN}).get0();
+ test.split(make_ghobj(3, 3, 3, "ns5", "oid5", 3, 3), onode0,
+ {2u, 1u, false, InsertType::BEGIN}).get0();
+ test.split(make_ghobj(3, 4, 4, "ns3", "oid3", 3, 3), onode0,
+ {2u, 2u, false, InsertType::BEGIN}).get0();
+ test.split(make_ghobj(4, 4, 4, "ns1", "oid1", 3, 3), onode0,
+ {2u, 1u, false, InsertType::BEGIN}).get0();
+ test.split(make_ghobj(4, 4, 4, "ns2", "oid2", 1, 1), onode0,
+ {2u, 0u, false, InsertType::BEGIN}).get0();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 2; insert to right back at stage 0, 1, 2\n");
+ test.split(make_ghobj(4, 4, 4, "ns4", "oid4", 5, 5), onode0,
+ {2u, 0u, false, InsertType::LAST}).get0();
+ test.split(make_ghobj(4, 4, 4, "ns5", "oid5", 3, 3), onode0,
+ {2u, 1u, false, InsertType::LAST}).get0();
+ test.split(make_ghobj(5, 5, 5, "ns3", "oid3", 3, 3), onode0,
+ {2u, 2u, false, InsertType::LAST}).get0();
+
+ auto& onode1 = test.create_onode(316);
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 1; insert to left middle at stage 0, 1, 2, 1, 0\n");
+ test.split(make_ghobj(2, 2, 2, "ns4", "oid4", 5, 5), onode1,
+ {1u, 0u, true, InsertType::MID}).get0();
+ test.split(make_ghobj(2, 2, 2, "ns5", "oid5", 3, 3), onode1,
+ {1u, 1u, true, InsertType::MID}).get0();
+ test.split(make_ghobj(2, 2, 3, "ns3", "oid3", 3, 3), onode1,
+ {1u, 2u, true, InsertType::MID}).get0();
+ test.split(make_ghobj(3, 3, 3, "ns1", "oid1", 3, 3), onode1,
+ {1u, 1u, true, InsertType::MID}).get0();
+ test.split(make_ghobj(3, 3, 3, "ns2", "oid2", 1, 1), onode1,
+ {1u, 0u, true, InsertType::MID}).get0();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 1; insert to left back at stage 0, 1, 0\n");
+ test.split(make_ghobj(3, 3, 3, "ns2", "oid2", 5, 5), onode1,
+ {1u, 0u, true, InsertType::LAST}).get0();
+ test.split(make_ghobj(3, 3, 3, "ns2", "oid3", 3, 3), onode1,
+ {1u, 1u, true, InsertType::LAST}).get0();
+ test.split(make_ghobj(3, 3, 3, "ns3", "oid3", 1, 1), onode1,
+ {1u, 0u, true, InsertType::LAST}).get0();
+
+ auto& onode2 = test.create_onode(452);
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 1; insert to right front at stage 0, 1, 0\n");
+ test.split(make_ghobj(3, 3, 3, "ns3", "oid3", 5, 5), onode2,
+ {1u, 0u, false, InsertType::BEGIN}).get0();
+ test.split(make_ghobj(3, 3, 3, "ns3", "oid4", 3, 3), onode2,
+ {1u, 1u, false, InsertType::BEGIN}).get0();
+ test.split(make_ghobj(3, 3, 3, "ns4", "oid4", 1, 1), onode2,
+ {1u, 0u, false, InsertType::BEGIN}).get0();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 1; insert to right middle at stage 0, 1, 2, 1, 0\n");
+ test.split(make_ghobj(3, 3, 3, "ns4", "oid4", 5, 5), onode2,
+ {1u, 0u, false, InsertType::MID}).get0();
+ test.split(make_ghobj(3, 3, 3, "ns5", "oid5", 3, 3), onode2,
+ {1u, 1u, false, InsertType::MID}).get0();
+ test.split(make_ghobj(3, 3, 4, "ns3", "oid3", 3, 3), onode2,
+ {1u, 2u, false, InsertType::MID}).get0();
+ test.split(make_ghobj(4, 4, 4, "ns1", "oid1", 3, 3), onode2,
+ {1u, 1u, false, InsertType::MID}).get0();
+ test.split(make_ghobj(4, 4, 4, "ns2", "oid2", 1, 1), onode2,
+ {1u, 0u, false, InsertType::MID}).get0();
+
+ auto& onode3 = test.create_onode(834);
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 0; insert to right middle at stage 0, 1, 2, 1, 0\n");
+ test.split(make_ghobj(3, 3, 3, "ns4", "oid4", 5, 5), onode3,
+ {0u, 0u, false, InsertType::MID}).get0();
+ test.split(make_ghobj(3, 3, 3, "ns5", "oid5", 3, 3), onode3,
+ {0u, 1u, false, InsertType::MID}).get0();
+ test.split(make_ghobj(3, 3, 4, "ns3", "oid3", 3, 3), onode3,
+ {0u, 2u, false, InsertType::MID}).get0();
+ test.split(make_ghobj(4, 4, 4, "ns1", "oid1", 3, 3), onode3,
+ {0u, 1u, false, InsertType::MID}).get0();
+ test.split(make_ghobj(4, 4, 4, "ns2", "oid2", 1, 1), onode3,
+ {0u, 0u, false, InsertType::MID}).get0();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 0; insert to right front at stage 0\n");
+ test.split(make_ghobj(3, 3, 3, "ns4", "oid4", 2, 3), onode3,
+ {0u, 0u, false, InsertType::BEGIN}).get0();
+
+ auto& onode4 = test.create_onode(572);
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 0; insert to left back at stage 0\n");
+ test.split(make_ghobj(3, 3, 3, "ns2", "oid2", 3, 4), onode4,
+ {0u, 0u, true, InsertType::LAST}).get0();
+ }
+
+ {
+ TestTree test;
+ test.build_tree({2, 4}, {2, 4}, {2, 4}, 232).get0();
+ auto& onode = test.create_onode(1996);
+ logger().info("\n---------------------------------------------"
+ "\nsplit at [0, 0, 0]; insert to left front at stage 2, 1, 0\n");
+ test.split(make_ghobj(1, 1, 1, "ns3", "oid3", 3, 3), onode,
+ {2u, 2u, true, InsertType::BEGIN}).get0();
+ EXPECT_TRUE(last_split.match_split_pos({0, {0, {0}}}));
+ test.split(make_ghobj(2, 2, 2, "ns1", "oid1", 3, 3), onode,
+ {2u, 1u, true, InsertType::BEGIN}).get0();
+ EXPECT_TRUE(last_split.match_split_pos({0, {0, {0}}}));
+ test.split(make_ghobj(2, 2, 2, "ns2", "oid2", 1, 1), onode,
+ {2u, 0u, true, InsertType::BEGIN}).get0();
+ EXPECT_TRUE(last_split.match_split_pos({0, {0, {0}}}));
+ }
+
+ {
+ TestTree test;
+ std::vector<ghobject_t> keys = {
+ make_ghobj(2, 2, 2, "ns3", "oid3", 3, 3),
+ make_ghobj(3, 3, 3, "ns3", "oid3", 3, 3)};
+ std::vector<const onode_t*> values = {
+ &test.create_onode(1360),
+ &test.create_onode(1632)};
+ test.build_tree(keys, values).get0();
+ auto& onode = test.create_onode(1640);
+ logger().info("\n---------------------------------------------"
+ "\nsplit at [END, END, END]; insert to right at stage 0, 1, 2\n");
+ test.split(make_ghobj(3, 3, 3, "ns3", "oid3", 4, 4), onode,
+ {0u, 0u, false, InsertType::BEGIN}).get0();
+ EXPECT_TRUE(last_split.match_split_pos({1, {0, {1}}}));
+ test.split(make_ghobj(3, 3, 3, "ns4", "oid4", 3, 3), onode,
+ {1u, 1u, false, InsertType::BEGIN}).get0();
+ EXPECT_TRUE(last_split.match_split_pos({1, {1, {0}}}));
+ test.split(make_ghobj(4, 4, 4, "ns3", "oid3", 3, 3), onode,
+ {2u, 2u, false, InsertType::BEGIN}).get0();
+ EXPECT_TRUE(last_split.match_split_pos({2, {0, {0}}}));
+ }
+ });
+}
+
+namespace crimson::os::seastore::onode {
+
+class DummyChildPool {
+ class DummyChildImpl final : public NodeImpl {
+ public:
+ using URef = std::unique_ptr<DummyChildImpl>;
+ DummyChildImpl(const std::set<ghobject_t>& keys, bool is_level_tail, laddr_t laddr)
+ : keys{keys}, _is_level_tail{is_level_tail}, _laddr{laddr} {
+ std::tie(key_view, p_mem_key_view) = build_key_view(*keys.crbegin());
+ }
+ ~DummyChildImpl() override {
+ std::free(p_mem_key_view);
+ }
+
+ const std::set<ghobject_t>& get_keys() const { return keys; }
+
+ void reset(const std::set<ghobject_t>& _keys, bool level_tail) {
+ keys = _keys;
+ _is_level_tail = level_tail;
+ std::free(p_mem_key_view);
+ std::tie(key_view, p_mem_key_view) = build_key_view(*keys.crbegin());
+ }
+
+ public:
+ laddr_t laddr() const override { return _laddr; }
+ bool is_level_tail() const override { return _is_level_tail; }
+
+ protected:
+ field_type_t field_type() const override { return field_type_t::N0; }
+ level_t level() const override { return 0u; }
+ key_view_t get_largest_key_view() const override { return key_view; }
+ void prepare_mutate(context_t) override {
+ ceph_abort("impossible path"); }
+ bool is_empty() const override {
+ ceph_abort("impossible path"); }
+ node_offset_t free_size() const override {
+ ceph_abort("impossible path"); }
+ key_view_t get_key_view(const search_position_t&) const override {
+ ceph_abort("impossible path"); }
+ void next_position(search_position_t&) const override {
+ ceph_abort("impossible path"); }
+ node_stats_t get_stats() const override {
+ ceph_abort("impossible path"); }
+ std::ostream& dump(std::ostream&) const override {
+ ceph_abort("impossible path"); }
+ std::ostream& dump_brief(std::ostream&) const override {
+ ceph_abort("impossible path"); }
+ void validate_layout() const override {
+ ceph_abort("impossible path"); }
+ void test_copy_to(NodeExtentMutable&) const override {
+ ceph_abort("impossible path"); }
+ void test_set_tail(NodeExtentMutable&) override {
+ ceph_abort("impossible path"); }
+
+ private:
+ std::set<ghobject_t> keys;
+ bool _is_level_tail;
+ laddr_t _laddr;
+
+ key_view_t key_view;
+ void* p_mem_key_view;
+ };
+
+ class DummyChild final : public Node {
+ public:
+ ~DummyChild() override = default;
+
+ node_future<> populate_split(
+ context_t c, std::set<Ref<DummyChild>>& splitable_nodes) {
+ ceph_assert(can_split());
+ ceph_assert(splitable_nodes.find(this) != splitable_nodes.end());
+
+ size_t index;
+ const auto& keys = impl->get_keys();
+ if (keys.size() == 2) {
+ index = 1;
+ } else {
+ index = rd() % (keys.size() - 2) + 1;
+ }
+ auto iter = keys.begin();
+ std::advance(iter, index);
+
+ std::set<ghobject_t> left_keys(keys.begin(), iter);
+ std::set<ghobject_t> right_keys(iter, keys.end());
+ bool right_is_tail = impl->is_level_tail();
+ impl->reset(left_keys, false);
+ auto right_child = DummyChild::create_new(right_keys, right_is_tail, pool);
+ if (!can_split()) {
+ splitable_nodes.erase(this);
+ }
+ if (right_child->can_split()) {
+ splitable_nodes.insert(right_child);
+ }
+ return insert_parent(c, right_child);
+ }
+
+ node_future<> insert_and_split(
+ context_t c, const ghobject_t& insert_key,
+ std::set<Ref<DummyChild>>& splitable_nodes) {
+ const auto& keys = impl->get_keys();
+ ceph_assert(keys.size() == 1);
+ auto& key = *keys.begin();
+ ceph_assert(insert_key < key);
+
+ std::set<ghobject_t> new_keys;
+ new_keys.insert(insert_key);
+ new_keys.insert(key);
+ impl->reset(new_keys, impl->is_level_tail());
+
+ splitable_nodes.clear();
+ splitable_nodes.insert(this);
+ auto fut = populate_split(c, splitable_nodes);
+ ceph_assert(splitable_nodes.size() == 0);
+ return fut;
+ }
+
+ bool match_pos(const search_position_t& pos) const {
+ ceph_assert(!is_root());
+ return pos == parent_info().position;
+ }
+
+ static Ref<DummyChild> create(
+ const std::set<ghobject_t>& keys, bool is_level_tail,
+ laddr_t addr, DummyChildPool& pool) {
+ auto ref_impl = std::make_unique<DummyChildImpl>(keys, is_level_tail, addr);
+ return new DummyChild(ref_impl.get(), std::move(ref_impl), pool);
+ }
+
+ static Ref<DummyChild> create_new(
+ const std::set<ghobject_t>& keys, bool is_level_tail, DummyChildPool& pool) {
+ static laddr_t seed = 0;
+ return create(keys, is_level_tail, seed++, pool);
+ }
+
+ static node_future<Ref<DummyChild>> create_initial(
+ context_t c, const std::set<ghobject_t>& keys,
+ DummyChildPool& pool, RootNodeTracker& root_tracker) {
+ auto initial = create_new(keys, true, pool);
+ return c.nm.get_super(c.t, root_tracker
+ ).safe_then([c, &pool, initial](auto super) {
+ initial->make_root_new(c, std::move(super));
+ return initial->upgrade_root(c).safe_then([initial] {
+ return initial;
+ });
+ });
+ }
+
+ protected:
+ node_future<> test_clone_non_root(
+ context_t, Ref<InternalNode> new_parent) const override {
+ ceph_assert(!is_root());
+ auto p_pool_clone = pool.pool_clone_in_progress;
+ ceph_assert(p_pool_clone != nullptr);
+ auto clone = create(
+ impl->get_keys(), impl->is_level_tail(), impl->laddr(), *p_pool_clone);
+ clone->as_child(parent_info().position, new_parent);
+ return node_ertr::now();
+ }
+ node_future<Ref<tree_cursor_t>> lookup_smallest(context_t) override {
+ ceph_abort("impossible path"); }
+ node_future<Ref<tree_cursor_t>> lookup_largest(context_t) override {
+ ceph_abort("impossible path"); }
+ node_future<> test_clone_root(context_t, RootNodeTracker&) const override {
+ ceph_abort("impossible path"); }
+ node_future<search_result_t> lower_bound_tracked(
+ context_t, const key_hobj_t&, MatchHistory&) override {
+ ceph_abort("impossible path"); }
+ node_future<> do_get_tree_stats(context_t, tree_stats_t&) override {
+ ceph_abort("impossible path"); }
+
+ private:
+ DummyChild(DummyChildImpl* impl, DummyChildImpl::URef&& ref, DummyChildPool& pool)
+ : Node(std::move(ref)), impl{impl}, pool{pool} {
+ pool.track_node(this);
+ }
+
+ bool can_split() const { return impl->get_keys().size() > 1; }
+
+ DummyChildImpl* impl;
+ DummyChildPool& pool;
+ mutable std::random_device rd;
+ };
+
+ public:
+ using node_ertr = Node::node_ertr;
+ template <class ValueT=void>
+ using node_future = Node::node_future<ValueT>;
+
+ DummyChildPool() = default;
+ ~DummyChildPool() { reset(); }
+
+ node_future<> build_tree(const std::set<ghobject_t>& keys) {
+ reset();
+
+ // create tree
+ auto ref_nm = NodeExtentManager::create_dummy(IS_DUMMY_SYNC);
+ p_nm = ref_nm.get();
+ p_btree.emplace(std::move(ref_nm));
+ return DummyChild::create_initial(get_context(), keys, *this, *p_btree->root_tracker
+ ).safe_then([this](auto initial_child) {
+ // split
+ splitable_nodes.insert(initial_child);
+ return crimson::do_until([this] {
+ if (splitable_nodes.empty()) {
+ return node_ertr::make_ready_future<bool>(true);
+ }
+ auto index = rd() % splitable_nodes.size();
+ auto iter = splitable_nodes.begin();
+ std::advance(iter, index);
+ Ref<DummyChild> child = *iter;
+ return child->populate_split(get_context(), splitable_nodes
+ ).safe_then([] {
+ return node_ertr::make_ready_future<bool>(false);
+ });
+ });
+ }).safe_then([this] {
+ //std::ostringstream oss;
+ //p_btree->dump(t(), oss);
+ //logger().info("\n{}\n", oss.str());
+ return p_btree->height(t());
+ }).safe_then([](auto height) {
+ ceph_assert(height == 2);
+ });
+ }
+
+ seastar::future<> test_split(ghobject_t key, search_position_t pos,
+ const split_expectation_t& expected) {
+ return seastar::async([this, key, pos, expected] {
+ logger().info("insert {} at {}:", key_hobj_t(key), pos);
+ DummyChildPool pool_clone;
+ pool_clone_in_progress = &pool_clone;
+ auto ref_nm = NodeExtentManager::create_dummy(IS_DUMMY_SYNC);
+ pool_clone.p_nm = ref_nm.get();
+ pool_clone.p_btree.emplace(std::move(ref_nm));
+ pool_clone.p_btree->test_clone_from(
+ pool_clone.t(), t(), *p_btree).unsafe_get0();
+ pool_clone_in_progress = nullptr;
+ auto node_to_split = pool_clone.get_node_by_pos(pos);
+ node_to_split->insert_and_split(
+ pool_clone.get_context(), key, pool_clone.splitable_nodes).unsafe_get0();
+ std::ostringstream oss;
+ pool_clone.p_btree->dump(pool_clone.t(), oss);
+ logger().info("dump new root:\n{}", oss.str());
+ EXPECT_EQ(pool_clone.p_btree->height(pool_clone.t()).unsafe_get0(), 3);
+ EXPECT_TRUE(last_split.match(expected));
+ });
+ }
+
+ private:
+ void reset() {
+ ceph_assert(pool_clone_in_progress == nullptr);
+ if (tracked_children.size()) {
+ ceph_assert(!p_btree->test_is_clean());
+ tracked_children.clear();
+ ceph_assert(p_btree->test_is_clean());
+ p_nm = nullptr;
+ p_btree.reset();
+ } else {
+ ceph_assert(!p_btree.has_value());
+ }
+ splitable_nodes.clear();
+ }
+
+ void track_node(Ref<DummyChild> node) {
+ ceph_assert(tracked_children.find(node) == tracked_children.end());
+ tracked_children.insert(node);
+ }
+
+ Ref<DummyChild> get_node_by_pos(const search_position_t& pos) const {
+ auto iter = std::find_if(
+ tracked_children.begin(), tracked_children.end(), [&pos](auto& child) {
+ return child->match_pos(pos);
+ });
+ ceph_assert(iter != tracked_children.end());
+ return *iter;
+ }
+
+ context_t get_context() {
+ ceph_assert(p_nm != nullptr);
+ return {*p_nm, t()};
+ }
+
+ Transaction& t() const { return *ref_t; }
+
+ std::set<Ref<DummyChild>> tracked_children;
+ std::optional<Btree> p_btree;
+ NodeExtentManager* p_nm = nullptr;
+ TransactionRef ref_t = make_transaction();
+
+ std::random_device rd;
+ std::set<Ref<DummyChild>> splitable_nodes;
+
+ DummyChildPool* pool_clone_in_progress = nullptr;
+};
+
+}
+
+TEST_F(c_dummy_test_t, 5_split_internal_node)
+{
+ run_async([this] {
+ DummyChildPool pool;
+ {
+ logger().info("\n---------------------------------------------"
+ "\nbefore internal node insert:\n");
+ auto padding = std::string(250, '_');
+ auto keys = build_key_set({2, 6}, {2, 5}, {2, 5}, padding, true);
+ keys.erase(make_ghobj(2, 2, 2, "ns2", "oid2" + padding, 2, 2));
+ keys.erase(make_ghobj(2, 2, 2, "ns2", "oid2" + padding, 3, 3));
+ keys.erase(make_ghobj(2, 2, 2, "ns2", "oid2" + padding, 4, 4));
+ keys.erase(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 2, 2));
+ keys.erase(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 3, 3));
+ keys.erase(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 4, 4));
+ auto padding_s = std::string(257, '_');
+ keys.insert(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 2, 2));
+ keys.insert(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 3, 3));
+ keys.insert(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 4, 4));
+ auto padding_e = std::string(248, '_');
+ keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding_e, 2, 2));
+ keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding_e, 3, 3));
+ keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding_e, 4, 4));
+ pool.build_tree(keys).unsafe_get0();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 2; insert to right front at stage 0, 1, 2, 1, 0\n");
+ pool.test_split(make_ghobj(3, 3, 3, "ns4", "oid4" + padding, 5, 5), {2, {0, {0}}},
+ {2u, 0u, false, InsertType::BEGIN}).get();
+ pool.test_split(make_ghobj(3, 3, 3, "ns5", "oid5", 3, 3), {2, {0, {0}}},
+ {2u, 1u, false, InsertType::BEGIN}).get();
+ pool.test_split(make_ghobj(3, 4, 4, "ns3", "oid3", 3, 3), {2, {0, {0}}},
+ {2u, 2u, false, InsertType::BEGIN}).get();
+ pool.test_split(make_ghobj(4, 4, 4, "ns1", "oid1", 3, 3), {2, {0, {0}}},
+ {2u, 1u, false, InsertType::BEGIN}).get();
+ pool.test_split(make_ghobj(4, 4, 4, "ns2", "oid2" + padding, 1, 1), {2, {0, {0}}},
+ {2u, 0u, false, InsertType::BEGIN}).get();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 2; insert to right middle at stage 0, 1, 2, 1, 0\n");
+ pool.test_split(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 5, 5), {3, {0, {0}}},
+ {2u, 0u, false, InsertType::MID}).get();
+ pool.test_split(make_ghobj(4, 4, 4, "ns5", "oid5", 3, 3), {3, {0, {0}}},
+ {2u, 1u, false, InsertType::MID}).get();
+ pool.test_split(make_ghobj(4, 4, 5, "ns3", "oid3", 3, 3), {3, {0, {0}}},
+ {2u, 2u, false, InsertType::MID}).get();
+ pool.test_split(make_ghobj(5, 5, 5, "ns1", "oid1", 3, 3), {3, {0, {0}}},
+ {2u, 1u, false, InsertType::MID}).get();
+ pool.test_split(make_ghobj(5, 5, 5, "ns2", "oid2" + padding, 1, 1), {3, {0, {0}}},
+ {2u, 0u, false, InsertType::MID}).get();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 2; insert to right back at stage 0, 1, 2\n");
+ pool.test_split(make_ghobj(5, 5, 5, "ns4", "oid4" + padding_e, 5, 5), search_position_t::end(),
+ {2u, 0u, false, InsertType::LAST}).get();
+ pool.test_split(make_ghobj(5, 5, 5, "ns5", "oid5", 3, 3), search_position_t::end(),
+ {2u, 1u, false, InsertType::LAST}).get();
+ pool.test_split(make_ghobj(6, 6, 6, "ns3", "oid3", 3, 3), search_position_t::end(),
+ {2u, 2u, false, InsertType::LAST}).get();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 0; insert to left front at stage 2, 1, 0\n");
+ pool.test_split(make_ghobj(1, 1, 1, "ns3", "oid3", 3, 3), {0, {0, {0}}},
+ {0u, 2u, true, InsertType::BEGIN}).get();
+ pool.test_split(make_ghobj(2, 2, 2, "ns1", "oid1", 3, 3), {0, {0, {0}}},
+ {0u, 1u, true, InsertType::BEGIN}).get();
+ pool.test_split(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 1, 1), {0, {0, {0}}},
+ {0u, 0u, true, InsertType::BEGIN}).get();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 0; insert to left middle at stage 0, 1, 2, 1, 0\n");
+ pool.test_split(make_ghobj(2, 2, 2, "ns4", "oid4" + padding, 5, 5), {1, {0, {0}}},
+ {0u, 0u, true, InsertType::MID}).get();
+ pool.test_split(make_ghobj(2, 2, 2, "ns5", "oid5", 3, 3), {1, {0, {0}}},
+ {0u, 1u, true, InsertType::MID}).get();
+ pool.test_split(make_ghobj(2, 2, 3, "ns3", "oid3" + std::string(80, '_'), 3, 3), {1, {0, {0}}},
+ {0u, 2u, true, InsertType::MID}).get();
+ pool.test_split(make_ghobj(3, 3, 3, "ns1", "oid1", 3, 3), {1, {0, {0}}},
+ {0u, 1u, true, InsertType::MID}).get();
+ pool.test_split(make_ghobj(3, 3, 3, "ns2", "oid2" + padding, 1, 1), {1, {0, {0}}},
+ {0u, 0u, true, InsertType::MID}).get();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 0; insert to left back at stage 0\n");
+ pool.test_split(make_ghobj(3, 3, 3, "ns4", "oid4" + padding, 3, 4), {1, {2, {2}}},
+ {0u, 0u, true, InsertType::LAST}).get();
+ }
+
+ {
+ logger().info("\n---------------------------------------------"
+ "\nbefore internal node insert (1):\n");
+ auto padding = std::string(244, '_');
+ auto keys = build_key_set({2, 6}, {2, 5}, {2, 5}, padding, true);
+ keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 5, 5));
+ keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 6, 6));
+ keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 7, 7));
+ pool.build_tree(keys).unsafe_get0();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 2; insert to left back at stage 0, 1, 2, 1\n");
+ pool.test_split(make_ghobj(3, 3, 3, "ns4", "oid4" + padding, 5, 5), {2, {0, {0}}},
+ {2u, 0u, true, InsertType::LAST}).get();
+ pool.test_split(make_ghobj(3, 3, 3, "ns5", "oid5", 3, 3), {2, {0, {0}}},
+ {2u, 1u, true, InsertType::LAST}).get();
+ pool.test_split(make_ghobj(3, 4, 4, "n", "o", 3, 3), {2, {0, {0}}},
+ {2u, 2u, true, InsertType::LAST}).get();
+ pool.test_split(make_ghobj(4, 4, 4, "n", "o", 3, 3), {2, {0, {0}}},
+ {2u, 1u, true, InsertType::LAST}).get();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 2; insert to left middle at stage 2\n");
+ pool.test_split(make_ghobj(2, 3, 3, "n", "o", 3, 3), {1, {0, {0}}},
+ {2u, 2u, true, InsertType::MID}).get();
+ }
+
+ {
+ logger().info("\n---------------------------------------------"
+ "\nbefore internal node insert (2):\n");
+ auto padding = std::string(243, '_');
+ auto keys = build_key_set({2, 6}, {2, 5}, {2, 5}, padding, true);
+ keys.insert(make_ghobj(4, 4, 4, "n", "o", 3, 3));
+ keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 5, 5));
+ keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 6, 6));
+ pool.build_tree(keys).unsafe_get0();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 2; insert to left back at stage (0, 1, 2, 1,) 0\n");
+ pool.test_split(make_ghobj(4, 4, 4, "n", "o", 2, 2), {2, {0, {0}}},
+ {2u, 0u, true, InsertType::LAST}).get();
+ }
+
+ {
+ logger().info("\n---------------------------------------------"
+ "\nbefore internal node insert (3):\n");
+ auto padding = std::string(420, '_');
+ auto keys = build_key_set({2, 5}, {2, 5}, {2, 5}, padding, true);
+ keys.erase(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 2, 2));
+ keys.erase(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 3, 3));
+ keys.erase(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 4, 4));
+ pool.build_tree(keys).unsafe_get0();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 1; insert to right front at stage 0, 1, 0\n");
+ pool.test_split(make_ghobj(3, 3, 3, "ns2", "oid2" + padding, 5, 5), {1, {1, {0}}},
+ {1u, 0u, false, InsertType::BEGIN}).get();
+ pool.test_split(make_ghobj(3, 3, 3, "ns2", "oid3", 3, 3), {1, {1, {0}}},
+ {1u, 1u, false, InsertType::BEGIN}).get();
+ pool.test_split(make_ghobj(3, 3, 3, "ns3", "oid3" + padding, 1, 1), {1, {1, {0}}},
+ {1u, 0u, false, InsertType::BEGIN}).get();
+ }
+
+ {
+ logger().info("\n---------------------------------------------"
+ "\nbefore internal node insert (4):\n");
+ auto padding = std::string(361, '_');
+ auto keys = build_key_set({2, 5}, {2, 5}, {2, 5}, padding, true);
+ keys.erase(make_ghobj(2, 2, 2, "ns2", "oid2" + padding, 2, 2));
+ keys.erase(make_ghobj(2, 2, 2, "ns2", "oid2" + padding, 3, 3));
+ keys.erase(make_ghobj(2, 2, 2, "ns2", "oid2" + padding, 4, 4));
+ auto padding_s = std::string(387, '_');
+ keys.insert(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 2, 2));
+ keys.insert(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 3, 3));
+ keys.insert(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 4, 4));
+ pool.build_tree(keys).unsafe_get0();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 1; insert to left back at stage 0, 1\n");
+ pool.test_split(make_ghobj(3, 3, 3, "ns2", "oid2" + padding, 5, 5), {1, {1, {0}}},
+ {1u, 0u, true, InsertType::LAST}).get();
+ pool.test_split(make_ghobj(3, 3, 3, "ns2", "oid3", 3, 3), {1, {1, {0}}},
+ {1u, 1u, true, InsertType::LAST}).get();
+ }
+
+ {
+ logger().info("\n---------------------------------------------"
+ "\nbefore internal node insert (5):\n");
+ auto padding = std::string(412, '_');
+ auto keys = build_key_set({2, 5}, {2, 5}, {2, 5}, padding);
+ keys.insert(make_ghobj(3, 3, 3, "ns2", "oid3", 3, 3));
+ keys.insert(make_ghobj(4, 4, 4, "ns3", "oid3" + padding, 5, 5));
+ keys.insert(make_ghobj(9, 9, 9, "ns~last", "oid~last", 9, 9));
+ keys.erase(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 2, 2));
+ keys.erase(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 3, 3));
+ keys.erase(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 4, 4));
+ pool.build_tree(keys).unsafe_get0();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 1; insert to left back at stage (0, 1,) 0\n");
+ pool.test_split(make_ghobj(3, 3, 3, "ns2", "oid3", 2, 2), {1, {1, {0}}},
+ {1u, 0u, true, InsertType::LAST}).get();
+ }
+
+ {
+ logger().info("\n---------------------------------------------"
+ "\nbefore internal node insert (6):\n");
+ auto padding = std::string(328, '_');
+ auto keys = build_key_set({2, 5}, {2, 5}, {2, 5}, padding);
+ keys.insert(make_ghobj(5, 5, 5, "ns3", "oid3" + std::string(271, '_'), 3, 3));
+ keys.insert(make_ghobj(9, 9, 9, "ns~last", "oid~last", 9, 9));
+ pool.build_tree(keys).unsafe_get0();
+
+ logger().info("\n---------------------------------------------"
+ "\nsplit at stage 0; insert to right front at stage 0\n");
+ pool.test_split(make_ghobj(3, 3, 3, "ns3", "oid3" + padding, 2, 3), {1, {1, {1}}},
+ {0u, 0u, false, InsertType::BEGIN}).get();
+ }
+
+ // Impossible to split at {0, 0, 0}
+ // Impossible to split at [END, END, END]
+ });
+}
+
+struct d_seastore_tm_test_t :
+ public seastar_test_suite_t, TMTestState {
+ seastar::future<> set_up_fut() override final {
+ return tm_setup();
+ }
+ seastar::future<> tear_down_fut() override final {
+ return tm_teardown();
+ }
+};
+
+TEST_F(d_seastore_tm_test_t, 6_random_insert_leaf_node)
+{
+ run_async([this] {
+ constexpr bool TEST_SEASTORE = true;
+ constexpr bool TRACK_CURSORS = true;
+ KVPool kvs{{8, 11, 64, 256, 301, 320},
+ {8, 16, 128, 512, 576, 640},
+ {0, 32}, {0, 10}, {0, 4}};
+ auto tree = std::make_unique<TreeBuilder<TRACK_CURSORS>>(kvs,
+ (TEST_SEASTORE ? NodeExtentManager::create_seastore(*tm)
+ : NodeExtentManager::create_dummy(IS_DUMMY_SYNC)));
+ {
+ auto t = tm->create_transaction();
+ tree->bootstrap(*t).unsafe_get();
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ }
+ {
+ auto t = tm->create_transaction();
+ tree->insert(*t).unsafe_get();
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ }
+ {
+ auto t = tm->create_transaction();
+ tree->get_stats(*t).unsafe_get();
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ }
+ if constexpr (TEST_SEASTORE) {
+ logger().info("seastore replay begin");
+ restart();
+ tree->reload(NodeExtentManager::create_seastore(*tm));
+ logger().info("seastore replay end");
+ }
+ {
+ // Note: tm->create_weak_transaction() can also work, but too slow.
+ auto t = tm->create_transaction();
+ tree->validate(*t).unsafe_get();
+ }
+ tree.reset();
+ });
+}
diff --git a/src/test/crimson/seastore/test_block.cc b/src/test/crimson/seastore/test_block.cc
new file mode 100644
index 000000000..f3d6531bd
--- /dev/null
+++ b/src/test/crimson/seastore/test_block.cc
@@ -0,0 +1,25 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/crimson/seastore/test_block.h"
+
+namespace crimson::os::seastore {
+
+
+ceph::bufferlist TestBlock::get_delta() {
+ ceph::bufferlist bl;
+ encode(delta, bl);
+ return bl;
+}
+
+
+void TestBlock::apply_delta(const ceph::bufferlist &bl) {
+ auto biter = bl.begin();
+ decltype(delta) deltas;
+ decode(deltas, biter);
+ for (auto &&d : deltas) {
+ set_contents(d.val, d.offset, d.len);
+ }
+}
+
+}
diff --git a/src/test/crimson/seastore/test_block.h b/src/test/crimson/seastore/test_block.h
new file mode 100644
index 000000000..44ec65a23
--- /dev/null
+++ b/src/test/crimson/seastore/test_block.h
@@ -0,0 +1,147 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#include <random>
+
+#include "crimson/os/seastore/transaction_manager.h"
+
+namespace crimson::os::seastore {
+
+struct test_extent_desc_t {
+ size_t len = 0;
+ unsigned checksum = 0;
+
+ bool operator==(const test_extent_desc_t &rhs) const {
+ return (len == rhs.len &&
+ checksum == rhs.checksum);
+ }
+ bool operator!=(const test_extent_desc_t &rhs) const {
+ return !(*this == rhs);
+ }
+};
+
+struct test_block_delta_t {
+ int8_t val = 0;
+ uint16_t offset = 0;
+ uint16_t len = 0;
+
+
+ DENC(test_block_delta_t, v, p) {
+ DENC_START(1, 1, p);
+ denc(v.val, p);
+ denc(v.offset, p);
+ denc(v.len, p);
+ DENC_FINISH(p);
+ }
+};
+
+inline std::ostream &operator<<(
+ std::ostream &lhs, const test_extent_desc_t &rhs) {
+ return lhs << "test_extent_desc_t(len=" << rhs.len
+ << ", checksum=" << rhs.checksum << ")";
+}
+
+struct TestBlock : crimson::os::seastore::LogicalCachedExtent {
+ constexpr static segment_off_t SIZE = 4<<10;
+ using Ref = TCachedExtentRef<TestBlock>;
+
+ std::vector<test_block_delta_t> delta = {};
+
+ TestBlock(ceph::bufferptr &&ptr)
+ : LogicalCachedExtent(std::move(ptr)) {}
+ TestBlock(const TestBlock &other)
+ : LogicalCachedExtent(other) {}
+
+ CachedExtentRef duplicate_for_write() final {
+ return CachedExtentRef(new TestBlock(*this));
+ };
+
+ static constexpr extent_types_t TYPE = extent_types_t::TEST_BLOCK;
+ extent_types_t get_type() const final {
+ return TYPE;
+ }
+
+ ceph::bufferlist get_delta() final;
+
+ void set_contents(char c, uint16_t offset, uint16_t len) {
+ ::memset(get_bptr().c_str() + offset, c, len);
+ delta.push_back({c, offset, len});
+ }
+
+ void set_contents(char c) {
+ set_contents(c, 0, get_length());
+ }
+
+ test_extent_desc_t get_desc() {
+ return { get_length(), get_crc32c() };
+ }
+
+ void apply_delta(const ceph::bufferlist &bl) final;
+};
+using TestBlockRef = TCachedExtentRef<TestBlock>;
+
+struct TestBlockPhysical : crimson::os::seastore::CachedExtent{
+ constexpr static segment_off_t SIZE = 4<<10;
+ using Ref = TCachedExtentRef<TestBlockPhysical>;
+
+ std::vector<test_block_delta_t> delta = {};
+
+ TestBlockPhysical(ceph::bufferptr &&ptr)
+ : CachedExtent(std::move(ptr)) {}
+ TestBlockPhysical(const TestBlock &other)
+ : CachedExtent(other) {}
+
+ CachedExtentRef duplicate_for_write() final {
+ return CachedExtentRef(new TestBlockPhysical(*this));
+ };
+
+ static constexpr extent_types_t TYPE = extent_types_t::TEST_BLOCK_PHYSICAL;
+ extent_types_t get_type() const final {
+ return TYPE;
+ }
+
+ void set_contents(char c, uint16_t offset, uint16_t len) {
+ ::memset(get_bptr().c_str() + offset, c, len);
+ }
+
+ void set_contents(char c) {
+ set_contents(c, 0, get_length());
+ }
+
+ ceph::bufferlist get_delta() final { return ceph::bufferlist(); }
+
+ void apply_delta_and_adjust_crc(paddr_t, const ceph::bufferlist &bl) final {}
+};
+using TestBlockPhysicalRef = TCachedExtentRef<TestBlockPhysical>;
+
+struct test_block_mutator_t {
+ std::uniform_int_distribution<int8_t>
+ contents_distribution = std::uniform_int_distribution<int8_t>(
+ std::numeric_limits<int8_t>::min(),
+ std::numeric_limits<int8_t>::max());
+
+ std::uniform_int_distribution<uint16_t>
+ offset_distribution = std::uniform_int_distribution<uint16_t>(
+ 0, TestBlock::SIZE - 1);
+
+ std::uniform_int_distribution<uint16_t> length_distribution(uint16_t offset) {
+ return std::uniform_int_distribution<uint16_t>(
+ 0, TestBlock::SIZE - offset - 1);
+ }
+
+
+ template <typename generator_t>
+ void mutate(TestBlock &block, generator_t &gen) {
+ auto offset = offset_distribution(gen);
+ block.set_contents(
+ contents_distribution(gen),
+ offset,
+ length_distribution(offset)(gen));
+ }
+};
+
+}
+
+WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::test_block_delta_t)
diff --git a/src/test/crimson/seastore/test_btree_lba_manager.cc b/src/test/crimson/seastore/test_btree_lba_manager.cc
new file mode 100644
index 000000000..60d5c3497
--- /dev/null
+++ b/src/test/crimson/seastore/test_btree_lba_manager.cc
@@ -0,0 +1,429 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/crimson/gtest_seastar.h"
+
+#include "crimson/common/log.h"
+
+#include "crimson/os/seastore/journal.h"
+#include "crimson/os/seastore/cache.h"
+#include "crimson/os/seastore/segment_manager/ephemeral.h"
+#include "crimson/os/seastore/lba_manager/btree/btree_lba_manager.h"
+
+#include "test/crimson/seastore/test_block.h"
+
+namespace {
+ [[maybe_unused]] seastar::logger& logger() {
+ return crimson::get_logger(ceph_subsys_test);
+ }
+}
+
+using namespace crimson;
+using namespace crimson::os;
+using namespace crimson::os::seastore;
+using namespace crimson::os::seastore::lba_manager;
+using namespace crimson::os::seastore::lba_manager::btree;
+
+struct btree_lba_manager_test :
+ public seastar_test_suite_t, JournalSegmentProvider {
+ segment_manager::EphemeralSegmentManagerRef segment_manager;
+ Journal journal;
+ Cache cache;
+ BtreeLBAManagerRef lba_manager;
+
+ const size_t block_size;
+
+ btree_lba_manager_test()
+ : segment_manager(segment_manager::create_test_ephemeral()),
+ journal(*segment_manager),
+ cache(*segment_manager),
+ lba_manager(new BtreeLBAManager(*segment_manager, cache)),
+ block_size(segment_manager->get_block_size())
+ {
+ journal.set_segment_provider(this);
+ }
+
+ segment_id_t next = 0;
+ get_segment_ret get_segment() final {
+ return get_segment_ret(
+ get_segment_ertr::ready_future_marker{},
+ next++);
+ }
+
+ journal_seq_t get_journal_tail_target() const final { return journal_seq_t{}; }
+ void update_journal_tail_committed(journal_seq_t committed) final {}
+
+ auto submit_transaction(TransactionRef t)
+ {
+ auto record = cache.try_construct_record(*t);
+ if (!record) {
+ ceph_assert(0 == "cannot fail");
+ }
+
+ return journal.submit_record(std::move(*record)).safe_then(
+ [this, t=std::move(t)](auto p) mutable {
+ auto [addr, seq] = p;
+ cache.complete_commit(*t, addr, seq);
+ lba_manager->complete_transaction(*t);
+ },
+ crimson::ct_error::assert_all{});
+ }
+
+ seastar::future<> set_up_fut() final {
+ return segment_manager->init(
+ ).safe_then([this] {
+ return journal.open_for_write();
+ }).safe_then([this](auto addr) {
+ return seastar::do_with(
+ make_transaction(),
+ [this](auto &transaction) {
+ cache.init();
+ return cache.mkfs(*transaction
+ ).safe_then([this, &transaction] {
+ return lba_manager->mkfs(*transaction);
+ }).safe_then([this, &transaction] {
+ return submit_transaction(std::move(transaction));
+ });
+ });
+ }).handle_error(
+ crimson::ct_error::all_same_way([] {
+ ceph_assert(0 == "error");
+ })
+ );
+ }
+
+ seastar::future<> tear_down_fut() final {
+ return cache.close(
+ ).safe_then([this] {
+ return journal.close();
+ }).handle_error(
+ crimson::ct_error::all_same_way([] {
+ ASSERT_FALSE("Unable to close");
+ })
+ );
+ }
+
+
+ struct test_extent_t {
+ paddr_t addr;
+ size_t len = 0;
+ unsigned refcount = 0;
+ };
+ using test_lba_mapping_t = std::map<laddr_t, test_extent_t>;
+ test_lba_mapping_t test_lba_mappings;
+ struct test_transaction_t {
+ TransactionRef t;
+ test_lba_mapping_t mappings;
+ };
+
+ auto create_transaction() {
+ auto t = test_transaction_t{
+ make_transaction(),
+ test_lba_mappings
+ };
+ cache.alloc_new_extent<TestBlockPhysical>(*t.t, TestBlockPhysical::SIZE);
+ return t;
+ }
+
+ auto create_weak_transaction() {
+ auto t = test_transaction_t{
+ make_weak_transaction(),
+ test_lba_mappings
+ };
+ return t;
+ }
+
+ void submit_test_transaction(test_transaction_t t) {
+ submit_transaction(std::move(t.t)).get0();
+ test_lba_mappings.swap(t.mappings);
+ }
+
+ auto get_overlap(test_transaction_t &t, laddr_t addr, size_t len) {
+ auto bottom = t.mappings.upper_bound(addr);
+ if (bottom != t.mappings.begin())
+ --bottom;
+ if (bottom != t.mappings.end() &&
+ bottom->first + bottom->second.len <= addr)
+ ++bottom;
+
+ auto top = t.mappings.lower_bound(addr + len);
+ return std::make_pair(
+ bottom,
+ top
+ );
+ }
+
+ segment_off_t next_off = 0;
+ paddr_t get_paddr() {
+ next_off += block_size;
+ return make_fake_paddr(next_off);
+ }
+
+ auto alloc_mapping(
+ test_transaction_t &t,
+ laddr_t hint,
+ size_t len,
+ paddr_t paddr) {
+ auto ret = lba_manager->alloc_extent(*t.t, hint, len, paddr).unsafe_get0();
+ logger().debug("alloc'd: {}", *ret);
+ EXPECT_EQ(len, ret->get_length());
+ auto [b, e] = get_overlap(t, ret->get_laddr(), len);
+ EXPECT_EQ(b, e);
+ t.mappings.emplace(
+ std::make_pair(
+ ret->get_laddr(),
+ test_extent_t{
+ ret->get_paddr(),
+ ret->get_length(),
+ 1
+ }
+ ));
+ return ret;
+ }
+
+ auto set_mapping(
+ test_transaction_t &t,
+ laddr_t addr,
+ size_t len,
+ paddr_t paddr) {
+ auto [b, e] = get_overlap(t, addr, len);
+ EXPECT_EQ(b, e);
+
+ auto ret = lba_manager->set_extent(*t.t, addr, len, paddr).unsafe_get0();
+ EXPECT_EQ(addr, ret->get_laddr());
+ EXPECT_EQ(len, ret->get_length());
+ EXPECT_EQ(paddr, ret->get_paddr());
+ t.mappings.emplace(
+ std::make_pair(
+ ret->get_laddr(),
+ test_extent_t{
+ ret->get_paddr(),
+ ret->get_length(),
+ 1
+ }
+ ));
+ return ret;
+ }
+
+ auto decref_mapping(
+ test_transaction_t &t,
+ laddr_t addr) {
+ return decref_mapping(t, t.mappings.find(addr));
+ }
+
+ void decref_mapping(
+ test_transaction_t &t,
+ test_lba_mapping_t::iterator target) {
+ ceph_assert(target != t.mappings.end());
+ ceph_assert(target->second.refcount > 0);
+ target->second.refcount--;
+
+ auto refcnt = lba_manager->decref_extent(
+ *t.t,
+ target->first).unsafe_get0().refcount;
+ EXPECT_EQ(refcnt, target->second.refcount);
+ if (target->second.refcount == 0) {
+ t.mappings.erase(target);
+ }
+ }
+
+ auto incref_mapping(
+ test_transaction_t &t,
+ laddr_t addr) {
+ return incref_mapping(t, t.mappings.find(addr));
+ }
+
+ void incref_mapping(
+ test_transaction_t &t,
+ test_lba_mapping_t::iterator target) {
+ ceph_assert(target->second.refcount > 0);
+ target->second.refcount++;
+ auto refcnt = lba_manager->incref_extent(
+ *t.t,
+ target->first).unsafe_get0().refcount;
+ EXPECT_EQ(refcnt, target->second.refcount);
+ }
+
+ std::vector<laddr_t> get_mapped_addresses() {
+ std::vector<laddr_t> addresses;
+ addresses.reserve(test_lba_mappings.size());
+ for (auto &i: test_lba_mappings) {
+ addresses.push_back(i.first);
+ }
+ return addresses;
+ }
+
+ std::vector<laddr_t> get_mapped_addresses(test_transaction_t &t) {
+ std::vector<laddr_t> addresses;
+ addresses.reserve(t.mappings.size());
+ for (auto &i: t.mappings) {
+ addresses.push_back(i.first);
+ }
+ return addresses;
+ }
+
+ void check_mappings() {
+ auto t = create_transaction();
+ check_mappings(t);
+ }
+
+ void check_mappings(test_transaction_t &t) {
+ for (auto &&i: t.mappings) {
+ auto ret_list = lba_manager->get_mapping(
+ *t.t, i.first, i.second.len
+ ).unsafe_get0();
+ EXPECT_EQ(ret_list.size(), 1);
+ auto &ret = *ret_list.begin();
+ EXPECT_EQ(i.second.addr, ret->get_paddr());
+ EXPECT_EQ(i.first, ret->get_laddr());
+ EXPECT_EQ(i.second.len, ret->get_length());
+ }
+ lba_manager->scan_mappings(
+ *t.t,
+ 0,
+ L_ADDR_MAX,
+ [iter=t.mappings.begin(), &t](auto l, auto p, auto len) mutable {
+ EXPECT_NE(iter, t.mappings.end());
+ EXPECT_EQ(l, iter->first);
+ EXPECT_EQ(p, iter->second.addr);
+ EXPECT_EQ(len, iter->second.len);
+ ++iter;
+ }).unsafe_get();
+ }
+};
+
+TEST_F(btree_lba_manager_test, basic)
+{
+ run_async([this] {
+ laddr_t laddr = 0x12345678 * block_size;
+ {
+ // write initial mapping
+ auto t = create_transaction();
+ check_mappings(t); // check in progress transaction sees mapping
+ check_mappings(); // check concurrent does not
+ auto ret = alloc_mapping(t, laddr, block_size, get_paddr());
+ submit_test_transaction(std::move(t));
+ }
+ check_mappings(); // check new transaction post commit sees it
+ });
+}
+
+TEST_F(btree_lba_manager_test, force_split)
+{
+ run_async([this] {
+ for (unsigned i = 0; i < 40; ++i) {
+ auto t = create_transaction();
+ logger().debug("opened transaction");
+ for (unsigned j = 0; j < 5; ++j) {
+ auto ret = alloc_mapping(t, 0, block_size, get_paddr());
+ if ((i % 10 == 0) && (j == 3)) {
+ check_mappings(t);
+ check_mappings();
+ }
+ }
+ logger().debug("submitting transaction");
+ submit_test_transaction(std::move(t));
+ check_mappings();
+ }
+ });
+}
+
+TEST_F(btree_lba_manager_test, force_split_merge)
+{
+ run_async([this] {
+ for (unsigned i = 0; i < 80; ++i) {
+ auto t = create_transaction();
+ logger().debug("opened transaction");
+ for (unsigned j = 0; j < 5; ++j) {
+ auto ret = alloc_mapping(t, 0, block_size, get_paddr());
+ // just to speed things up a bit
+ if ((i % 100 == 0) && (j == 3)) {
+ check_mappings(t);
+ check_mappings();
+ }
+ incref_mapping(t, ret->get_laddr());
+ decref_mapping(t, ret->get_laddr());
+ }
+ logger().debug("submitting transaction");
+ submit_test_transaction(std::move(t));
+ if (i % 50 == 0) {
+ check_mappings();
+ }
+ }
+ {
+ auto addresses = get_mapped_addresses();
+ auto t = create_transaction();
+ for (unsigned i = 0; i != addresses.size(); ++i) {
+ if (i % 2 == 0) {
+ incref_mapping(t, addresses[i]);
+ decref_mapping(t, addresses[i]);
+ decref_mapping(t, addresses[i]);
+ }
+ logger().debug("submitting transaction");
+ if (i % 7 == 0) {
+ submit_test_transaction(std::move(t));
+ t = create_transaction();
+ }
+ if (i % 13 == 0) {
+ check_mappings();
+ check_mappings(t);
+ }
+ }
+ submit_test_transaction(std::move(t));
+ }
+ {
+ auto addresses = get_mapped_addresses();
+ auto t = create_transaction();
+ for (unsigned i = 0; i != addresses.size(); ++i) {
+ incref_mapping(t, addresses[i]);
+ decref_mapping(t, addresses[i]);
+ decref_mapping(t, addresses[i]);
+ }
+ check_mappings(t);
+ submit_test_transaction(std::move(t));
+ check_mappings();
+ }
+ });
+}
+
+TEST_F(btree_lba_manager_test, single_transaction_split_merge)
+{
+ run_async([this] {
+ {
+ auto t = create_transaction();
+ for (unsigned i = 0; i < 600; ++i) {
+ alloc_mapping(t, 0, block_size, get_paddr());
+ }
+ check_mappings(t);
+ submit_test_transaction(std::move(t));
+ }
+ check_mappings();
+
+ {
+ auto addresses = get_mapped_addresses();
+ auto t = create_transaction();
+ for (unsigned i = 0; i != addresses.size(); ++i) {
+ if (i % 4 != 0) {
+ decref_mapping(t, addresses[i]);
+ }
+ }
+ check_mappings(t);
+ submit_test_transaction(std::move(t));
+ }
+ check_mappings();
+
+ {
+ auto t = create_transaction();
+ for (unsigned i = 0; i < 600; ++i) {
+ alloc_mapping(t, 0, block_size, get_paddr());
+ }
+ auto addresses = get_mapped_addresses(t);
+ for (unsigned i = 0; i != addresses.size(); ++i) {
+ decref_mapping(t, addresses[i]);
+ }
+ check_mappings(t);
+ submit_test_transaction(std::move(t));
+ }
+ check_mappings();
+ });
+}
diff --git a/src/test/crimson/seastore/test_extmap_manager.cc b/src/test/crimson/seastore/test_extmap_manager.cc
new file mode 100644
index 000000000..8b2588011
--- /dev/null
+++ b/src/test/crimson/seastore/test_extmap_manager.cc
@@ -0,0 +1,283 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/crimson/gtest_seastar.h"
+#include "test/crimson/seastore/transaction_manager_test_state.h"
+
+#include "crimson/os/seastore/cache.h"
+#include "crimson/os/seastore/transaction_manager.h"
+#include "crimson/os/seastore/segment_manager.h"
+#include "crimson/os/seastore/extentmap_manager.h"
+
+#include "test/crimson/seastore/test_block.h"
+
+using namespace crimson;
+using namespace crimson::os;
+using namespace crimson::os::seastore;
+
+namespace {
+ [[maybe_unused]] seastar::logger& logger() {
+ return crimson::get_logger(ceph_subsys_test);
+ }
+}
+
+
+struct extentmap_manager_test_t :
+ public seastar_test_suite_t,
+ TMTestState {
+
+ ExtentMapManagerRef extmap_manager;
+
+ extentmap_manager_test_t() {}
+
+ seastar::future<> set_up_fut() final {
+ return tm_setup().then([this] {
+ extmap_manager = extentmap_manager::create_extentmap_manager(*tm);
+ return seastar::now();
+ });
+ }
+
+ seastar::future<> tear_down_fut() final {
+ return tm_teardown().then([this] {
+ extmap_manager.reset();
+ return seastar::now();
+ });
+ }
+
+ using test_extmap_t = std::map<uint32_t, lext_map_val_t>;
+ test_extmap_t test_ext_mappings;
+
+ extent_mapping_t insert_extent(
+ extmap_root_t &extmap_root,
+ Transaction &t,
+ uint32_t lo,
+ lext_map_val_t val) {
+ auto extent = extmap_manager->add_lextent(extmap_root, t, lo, val).unsafe_get0();
+ EXPECT_EQ(lo, extent.logical_offset);
+ EXPECT_EQ(val.laddr, extent.laddr);
+ EXPECT_EQ(val.length, extent.length);
+ test_ext_mappings.emplace(extent.logical_offset,
+ lext_map_val_t{extent.laddr, extent.length});
+ return extent;
+ }
+
+ extent_map_list_t find_extent(
+ extmap_root_t &extmap_root,
+ Transaction &t,
+ uint32_t lo,
+ uint32_t len) {
+ auto extent = extmap_manager->find_lextent(extmap_root, t, lo, len).unsafe_get0();
+ EXPECT_EQ(lo, extent.front().logical_offset);
+ EXPECT_EQ(len, extent.front().length);
+ return extent;
+ }
+
+ extent_map_list_t findno_extent(
+ extmap_root_t &extmap_root,
+ Transaction &t,
+ uint32_t lo,
+ uint32_t len) {
+ auto extent = extmap_manager->find_lextent(extmap_root, t, lo, len).unsafe_get0();
+ EXPECT_EQ(extent.empty(), true);
+ return extent;
+ }
+
+ void rm_extent(
+ extmap_root_t &extmap_root,
+ Transaction &t,
+ uint32_t lo,
+ lext_map_val_t val ) {
+ auto ret = extmap_manager->rm_lextent(extmap_root, t, lo, val).unsafe_get0();
+ EXPECT_TRUE(ret);
+ test_ext_mappings.erase(lo);
+ }
+
+ void check_mappings(extmap_root_t &extmap_root, Transaction &t) {
+ for (const auto& [lo, ext]: test_ext_mappings){
+ const auto ext_list = find_extent(extmap_root, t, lo, ext.length);
+ ASSERT_EQ(ext_list.size(), 1);
+ const auto& ext_map = ext_list.front();
+ EXPECT_EQ(ext.laddr, ext_map.laddr);
+ EXPECT_EQ(ext.length, ext_map.length);
+ }
+ }
+
+ void check_mappings(extmap_root_t &extmap_root) {
+ auto t = tm->create_transaction();
+ check_mappings(extmap_root, *t);
+ }
+
+ void replay() {
+ logger().debug("{}: begin", __func__);
+ restart();
+ extmap_manager = extentmap_manager::create_extentmap_manager(*tm);
+ logger().debug("{}: end", __func__);
+ }
+
+
+};
+
+TEST_F(extentmap_manager_test_t, basic)
+{
+ run_async([this] {
+ extmap_root_t extmap_root(0, L_ADDR_NULL);
+ {
+ auto t = tm->create_transaction();
+ extmap_root = extmap_manager->initialize_extmap(*t).unsafe_get0();
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ }
+
+ uint32_t len = 4096;
+ uint32_t lo = 0x1 * len;
+ {
+ auto t = tm->create_transaction();
+ logger().debug("first transaction");
+ [[maybe_unused]] auto addref = insert_extent(extmap_root, *t, lo, {lo, len});
+ [[maybe_unused]] auto seekref = find_extent(extmap_root, *t, lo, len);
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ }
+ {
+ auto t = tm->create_transaction();
+ logger().debug("second transaction");
+ auto seekref = find_extent(extmap_root, *t, lo, len);
+ rm_extent(extmap_root, *t, lo, {seekref.front().laddr, len});
+ [[maybe_unused]] auto seekref2 = findno_extent(extmap_root, *t, lo, len);
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ }
+ {
+ auto t = tm->create_transaction();
+ logger().debug("third transaction");
+ [[maybe_unused]] auto seekref = findno_extent(extmap_root, *t, lo, len);
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ }
+ });
+}
+
+TEST_F(extentmap_manager_test_t, force_leafnode_split)
+{
+ run_async([this] {
+ extmap_root_t extmap_root(0, L_ADDR_NULL);
+ {
+ auto t = tm->create_transaction();
+ extmap_root = extmap_manager->initialize_extmap(*t).unsafe_get0();
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ }
+ uint32_t len = 4096;
+ uint32_t lo = 0;
+ for (unsigned i = 0; i < 40; i++) {
+ auto t = tm->create_transaction();
+ logger().debug("opened transaction");
+ for (unsigned j = 0; j < 10; ++j) {
+ [[maybe_unused]] auto addref = insert_extent(extmap_root, *t, lo, {lo, len});
+ lo += len;
+ if ((i % 20 == 0) && (j == 5)) {
+ check_mappings(extmap_root, *t);
+ }
+ }
+ logger().debug("force split submit transaction i = {}", i);
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ check_mappings(extmap_root);
+ }
+ });
+
+}
+
+TEST_F(extentmap_manager_test_t, force_leafnode_split_merge)
+{
+ run_async([this] {
+ extmap_root_t extmap_root(0, L_ADDR_NULL);
+ {
+ auto t = tm->create_transaction();
+ extmap_root = extmap_manager->initialize_extmap(*t).unsafe_get0();
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ }
+ uint32_t len = 4096;
+ uint32_t lo = 0;
+ for (unsigned i = 0; i < 80; i++) {
+ auto t = tm->create_transaction();
+ logger().debug("opened split_merge transaction");
+ for (unsigned j = 0; j < 5; ++j) {
+ [[maybe_unused]] auto addref = insert_extent(extmap_root, *t, lo, {lo, len});
+ lo += len;
+ if ((i % 10 == 0) && (j == 3)) {
+ check_mappings(extmap_root, *t);
+ }
+ }
+ logger().debug("submitting transaction");
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ if (i % 50 == 0) {
+ check_mappings(extmap_root);
+ }
+ }
+ auto t = tm->create_transaction();
+ int i = 0;
+ for (auto iter = test_ext_mappings.begin(); iter != test_ext_mappings.end();) {
+ auto [lo, ext] = *iter;
+ ++iter;
+ if (i % 3 != 0) {
+ rm_extent(extmap_root, *t, lo, ext);
+ }
+ i++;
+
+ if (i % 10 == 0) {
+ logger().debug("submitting transaction i= {}", i);
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ t = tm->create_transaction();
+ }
+ if (i % 100 == 0) {
+ logger().debug("check_mappings i= {}", i);
+ check_mappings(extmap_root, *t);
+ check_mappings(extmap_root);
+ }
+ }
+ logger().debug("finally submitting transaction ");
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ });
+}
+
+TEST_F(extentmap_manager_test_t, force_leafnode_split_merge_replay)
+{
+ run_async([this] {
+ extmap_root_t extmap_root(0, L_ADDR_NULL);
+ {
+ auto t = tm->create_transaction();
+ extmap_root = extmap_manager->initialize_extmap(*t).unsafe_get0();
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ replay();
+ }
+ uint32_t len = 4096;
+ uint32_t lo = 0;
+ for (unsigned i = 0; i < 50; i++) {
+ auto t = tm->create_transaction();
+ logger().debug("opened split_merge transaction");
+ for (unsigned j = 0; j < 5; ++j) {
+ [[maybe_unused]] auto addref = insert_extent(extmap_root, *t, lo, {lo, len});
+ lo += len;
+ }
+ logger().debug("submitting transaction");
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ }
+ replay();
+ auto t = tm->create_transaction();
+ int i = 0;
+ for (auto iter = test_ext_mappings.begin(); iter != test_ext_mappings.end();) {
+ auto [lo, ext] = *iter;
+ ++iter;
+ rm_extent(extmap_root, *t, lo, ext);
+ i++;
+
+ if (i % 10 == 0) {
+ logger().debug("submitting transaction i= {}", i);
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ t = tm->create_transaction();
+ }
+ if (i% 100 == 0){
+ check_mappings(extmap_root);
+ }
+ }
+ logger().debug("finally submitting transaction ");
+ tm->submit_transaction(std::move(t)).unsafe_get();
+ replay();
+ check_mappings(extmap_root);
+ });
+}
diff --git a/src/test/crimson/seastore/test_seastore_cache.cc b/src/test/crimson/seastore/test_seastore_cache.cc
new file mode 100644
index 000000000..913668b08
--- /dev/null
+++ b/src/test/crimson/seastore/test_seastore_cache.cc
@@ -0,0 +1,235 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/crimson/gtest_seastar.h"
+
+#include "crimson/common/log.h"
+#include "crimson/os/seastore/cache.h"
+#include "crimson/os/seastore/segment_manager/ephemeral.h"
+
+#include "test/crimson/seastore/test_block.h"
+
+using namespace crimson;
+using namespace crimson::os;
+using namespace crimson::os::seastore;
+
+namespace {
+ [[maybe_unused]] seastar::logger& logger() {
+ return crimson::get_logger(ceph_subsys_test);
+ }
+}
+
+struct cache_test_t : public seastar_test_suite_t {
+ segment_manager::EphemeralSegmentManagerRef segment_manager;
+ Cache cache;
+ paddr_t current{0, 0};
+ journal_seq_t seq;
+
+ cache_test_t()
+ : segment_manager(segment_manager::create_test_ephemeral()),
+ cache(*segment_manager) {}
+
+ seastar::future<std::optional<paddr_t>> submit_transaction(
+ TransactionRef t) {
+ auto record = cache.try_construct_record(*t);
+ if (!record) {
+ return seastar::make_ready_future<std::optional<paddr_t>>(
+ std::nullopt);
+ }
+
+ bufferlist bl;
+ for (auto &&block : record->extents) {
+ bl.append(block.bl);
+ }
+
+ ceph_assert((segment_off_t)bl.length() <
+ segment_manager->get_segment_size());
+ if (current.offset + (segment_off_t)bl.length() >
+ segment_manager->get_segment_size())
+ current = paddr_t{current.segment + 1, 0};
+
+ auto prev = current;
+ current.offset += bl.length();
+ return segment_manager->segment_write(
+ prev,
+ std::move(bl),
+ true
+ ).safe_then(
+ [this, prev, t=std::move(t)]() mutable {
+ cache.complete_commit(*t, prev, seq /* TODO */);
+ return seastar::make_ready_future<std::optional<paddr_t>>(prev);
+ },
+ crimson::ct_error::all_same_way([](auto e) {
+ ASSERT_FALSE("failed to submit");
+ })
+ );
+ }
+
+ auto get_transaction() {
+ return make_transaction();
+ }
+
+ seastar::future<> set_up_fut() final {
+ return segment_manager->init(
+ ).safe_then(
+ [this] {
+ return seastar::do_with(
+ make_transaction(),
+ [this](auto &transaction) {
+ cache.init();
+ return cache.mkfs(*transaction).safe_then(
+ [this, &transaction] {
+ return submit_transaction(std::move(transaction)).then(
+ [](auto p) {
+ ASSERT_TRUE(p);
+ });
+ });
+ });
+ }).handle_error(
+ crimson::ct_error::all_same_way([](auto e) {
+ ASSERT_FALSE("failed to submit");
+ })
+ );
+ }
+
+ seastar::future<> tear_down_fut() final {
+ return cache.close().handle_error(
+ Cache::close_ertr::assert_all{});
+ }
+};
+
+TEST_F(cache_test_t, test_addr_fixup)
+{
+ run_async([this] {
+ paddr_t addr;
+ int csum = 0;
+ {
+ auto t = get_transaction();
+ auto extent = cache.alloc_new_extent<TestBlockPhysical>(
+ *t,
+ TestBlockPhysical::SIZE);
+ extent->set_contents('c');
+ csum = extent->get_crc32c();
+ auto ret = submit_transaction(std::move(t)).get0();
+ ASSERT_TRUE(ret);
+ addr = extent->get_paddr();
+ }
+ {
+ auto t = get_transaction();
+ auto extent = cache.get_extent<TestBlockPhysical>(
+ *t,
+ addr,
+ TestBlockPhysical::SIZE).unsafe_get0();
+ ASSERT_EQ(extent->get_paddr(), addr);
+ ASSERT_EQ(extent->get_crc32c(), csum);
+ }
+ });
+}
+
+TEST_F(cache_test_t, test_dirty_extent)
+{
+ run_async([this] {
+ paddr_t addr;
+ int csum = 0;
+ int csum2 = 0;
+ {
+ // write out initial test block
+ auto t = get_transaction();
+ auto extent = cache.alloc_new_extent<TestBlockPhysical>(
+ *t,
+ TestBlockPhysical::SIZE);
+ extent->set_contents('c');
+ csum = extent->get_crc32c();
+ auto reladdr = extent->get_paddr();
+ ASSERT_TRUE(reladdr.is_relative());
+ {
+ // test that read with same transaction sees new block though
+ // uncommitted
+ auto extent = cache.get_extent<TestBlockPhysical>(
+ *t,
+ reladdr,
+ TestBlockPhysical::SIZE).unsafe_get0();
+ ASSERT_TRUE(extent->is_clean());
+ ASSERT_TRUE(extent->is_pending());
+ ASSERT_TRUE(extent->get_paddr().is_relative());
+ ASSERT_EQ(extent->get_version(), 0);
+ ASSERT_EQ(csum, extent->get_crc32c());
+ }
+ auto ret = submit_transaction(std::move(t)).get0();
+ ASSERT_TRUE(ret);
+ addr = extent->get_paddr();
+ }
+ {
+ // test that consecutive reads on the same extent get the same ref
+ auto t = get_transaction();
+ auto extent = cache.get_extent<TestBlockPhysical>(
+ *t,
+ addr,
+ TestBlockPhysical::SIZE).unsafe_get0();
+ auto t2 = get_transaction();
+ auto extent2 = cache.get_extent<TestBlockPhysical>(
+ *t2,
+ addr,
+ TestBlockPhysical::SIZE).unsafe_get0();
+ ASSERT_EQ(&*extent, &*extent2);
+ }
+ {
+ // read back test block
+ auto t = get_transaction();
+ auto extent = cache.get_extent<TestBlockPhysical>(
+ *t,
+ addr,
+ TestBlockPhysical::SIZE).unsafe_get0();
+ // duplicate and reset contents
+ extent = cache.duplicate_for_write(*t, extent)->cast<TestBlockPhysical>();
+ extent->set_contents('c');
+ csum2 = extent->get_crc32c();
+ ASSERT_EQ(extent->get_paddr(), addr);
+ {
+ // test that concurrent read with fresh transaction sees old
+ // block
+ auto t2 = get_transaction();
+ auto extent = cache.get_extent<TestBlockPhysical>(
+ *t2,
+ addr,
+ TestBlockPhysical::SIZE).unsafe_get0();
+ ASSERT_TRUE(extent->is_clean());
+ ASSERT_FALSE(extent->is_pending());
+ ASSERT_EQ(addr, extent->get_paddr());
+ ASSERT_EQ(extent->get_version(), 0);
+ ASSERT_EQ(csum, extent->get_crc32c());
+ }
+ {
+ // test that read with same transaction sees new block
+ auto extent = cache.get_extent<TestBlockPhysical>(
+ *t,
+ addr,
+ TestBlockPhysical::SIZE).unsafe_get0();
+ ASSERT_TRUE(extent->is_dirty());
+ ASSERT_TRUE(extent->is_pending());
+ ASSERT_EQ(addr, extent->get_paddr());
+ ASSERT_EQ(extent->get_version(), 1);
+ ASSERT_EQ(csum2, extent->get_crc32c());
+ }
+ // submit transaction
+ auto ret = submit_transaction(std::move(t)).get0();
+ ASSERT_TRUE(ret);
+ ASSERT_TRUE(extent->is_dirty());
+ ASSERT_EQ(addr, extent->get_paddr());
+ ASSERT_EQ(extent->get_version(), 1);
+ ASSERT_EQ(extent->get_crc32c(), csum2);
+ }
+ {
+ // test that fresh transaction now sees newly dirty block
+ auto t = get_transaction();
+ auto extent = cache.get_extent<TestBlockPhysical>(
+ *t,
+ addr,
+ TestBlockPhysical::SIZE).unsafe_get0();
+ ASSERT_TRUE(extent->is_dirty());
+ ASSERT_EQ(addr, extent->get_paddr());
+ ASSERT_EQ(extent->get_version(), 1);
+ ASSERT_EQ(csum2, extent->get_crc32c());
+ }
+ });
+}
diff --git a/src/test/crimson/seastore/test_seastore_journal.cc b/src/test/crimson/seastore/test_seastore_journal.cc
new file mode 100644
index 000000000..0bed505ff
--- /dev/null
+++ b/src/test/crimson/seastore/test_seastore_journal.cc
@@ -0,0 +1,260 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/crimson/gtest_seastar.h"
+
+#include <random>
+
+#include "crimson/common/log.h"
+#include "crimson/os/seastore/journal.h"
+#include "crimson/os/seastore/segment_manager/ephemeral.h"
+
+using namespace crimson;
+using namespace crimson::os;
+using namespace crimson::os::seastore;
+
+namespace {
+ [[maybe_unused]] seastar::logger& logger() {
+ return crimson::get_logger(ceph_subsys_test);
+ }
+}
+
+struct record_validator_t {
+ record_t record;
+ paddr_t record_final_offset;
+
+ template <typename... T>
+ record_validator_t(T&&... record) : record(std::forward<T>(record)...) {}
+
+ void validate(SegmentManager &manager) {
+ paddr_t addr = make_record_relative_paddr(0);
+ for (auto &&block : record.extents) {
+ auto test = manager.read(
+ record_final_offset.add_relative(addr),
+ block.bl.length()).unsafe_get0();
+ addr.offset += block.bl.length();
+ bufferlist bl;
+ bl.push_back(test);
+ ASSERT_EQ(
+ bl.length(),
+ block.bl.length());
+ ASSERT_EQ(
+ bl.begin().crc32c(bl.length(), 1),
+ block.bl.begin().crc32c(block.bl.length(), 1));
+ }
+ }
+
+ auto get_replay_handler() {
+ auto checker = [this, iter=record.deltas.begin()] (
+ paddr_t base,
+ const delta_info_t &di) mutable {
+ EXPECT_EQ(base, record_final_offset);
+ ceph_assert(iter != record.deltas.end());
+ EXPECT_EQ(di, *iter++);
+ EXPECT_EQ(base, record_final_offset);
+ return iter != record.deltas.end();
+ };
+ if (record.deltas.size()) {
+ return std::make_optional(std::move(checker));
+ } else {
+ return std::optional<decltype(checker)>();
+ }
+ }
+};
+
+struct journal_test_t : seastar_test_suite_t, JournalSegmentProvider {
+ segment_manager::EphemeralSegmentManagerRef segment_manager;
+ std::unique_ptr<Journal> journal;
+
+ std::vector<record_validator_t> records;
+
+ std::default_random_engine generator;
+
+ const segment_off_t block_size;
+
+ journal_test_t()
+ : segment_manager(segment_manager::create_test_ephemeral()),
+ block_size(segment_manager->get_block_size())
+ {
+ }
+
+ segment_id_t next = 0;
+ get_segment_ret get_segment() final {
+ return get_segment_ret(
+ get_segment_ertr::ready_future_marker{},
+ next++);
+ }
+
+ journal_seq_t get_journal_tail_target() const final { return journal_seq_t{}; }
+ void update_journal_tail_committed(journal_seq_t paddr) final {}
+
+ seastar::future<> set_up_fut() final {
+ journal.reset(new Journal(*segment_manager));
+ journal->set_segment_provider(this);
+ return segment_manager->init(
+ ).safe_then([this] {
+ return journal->open_for_write();
+ }).safe_then(
+ [](auto){},
+ crimson::ct_error::all_same_way([] {
+ ASSERT_FALSE("Unable to mount");
+ }));
+ }
+
+ template <typename T>
+ auto replay(T &&f) {
+ return journal->close(
+ ).safe_then([this, f=std::move(f)]() mutable {
+ journal.reset(new Journal(*segment_manager));
+ journal->set_segment_provider(this);
+ return journal->replay(std::forward<T>(std::move(f)));
+ }).safe_then([this] {
+ return journal->open_for_write();
+ });
+ }
+
+ auto replay_and_check() {
+ auto record_iter = records.begin();
+ decltype(record_iter->get_replay_handler()) delta_checker = std::nullopt;
+ auto advance = [this, &record_iter, &delta_checker] {
+ ceph_assert(!delta_checker);
+ while (record_iter != records.end()) {
+ auto checker = record_iter->get_replay_handler();
+ record_iter++;
+ if (checker) {
+ delta_checker.emplace(std::move(*checker));
+ break;
+ }
+ }
+ };
+ advance();
+ replay(
+ [&advance,
+ &delta_checker]
+ (auto seq, auto base, const auto &di) mutable {
+ if (!delta_checker) {
+ EXPECT_FALSE("No Deltas Left");
+ }
+ if (!(*delta_checker)(base, di)) {
+ delta_checker = std::nullopt;
+ advance();
+ }
+ return Journal::replay_ertr::now();
+ }).unsafe_get0();
+ ASSERT_EQ(record_iter, records.end());
+ for (auto &i : records) {
+ i.validate(*segment_manager);
+ }
+ }
+
+ template <typename... T>
+ auto submit_record(T&&... _record) {
+ auto record{std::forward<T>(_record)...};
+ records.push_back(record);
+ auto [addr, _] = journal->submit_record(std::move(record)).unsafe_get0();
+ records.back().record_final_offset = addr;
+ return addr;
+ }
+
+ seastar::future<> tear_down_fut() final {
+ return seastar::now();
+ }
+
+ extent_t generate_extent(size_t blocks) {
+ std::uniform_int_distribution<char> distribution(
+ std::numeric_limits<char>::min(),
+ std::numeric_limits<char>::max()
+ );
+ char contents = distribution(generator);
+ bufferlist bl;
+ bl.append(buffer::ptr(buffer::create(blocks * block_size, contents)));
+ return extent_t{extent_types_t::TEST_BLOCK, L_ADDR_NULL, bl};
+ }
+
+ delta_info_t generate_delta(size_t bytes) {
+ std::uniform_int_distribution<char> distribution(
+ std::numeric_limits<char>::min(),
+ std::numeric_limits<char>::max()
+ );
+ char contents = distribution(generator);
+ bufferlist bl;
+ bl.append(buffer::ptr(buffer::create(bytes, contents)));
+ return delta_info_t{
+ extent_types_t::TEST_BLOCK,
+ paddr_t{},
+ L_ADDR_NULL,
+ 0, 0,
+ block_size,
+ 1,
+ bl
+ };
+ }
+};
+
+TEST_F(journal_test_t, replay_one_journal_segment)
+{
+ run_async([this] {
+ submit_record(record_t{
+ { generate_extent(1), generate_extent(2) },
+ { generate_delta(23), generate_delta(30) }
+ });
+ replay_and_check();
+ });
+}
+
+TEST_F(journal_test_t, replay_two_records)
+{
+ run_async([this] {
+ submit_record(record_t{
+ { generate_extent(1), generate_extent(2) },
+ { generate_delta(23), generate_delta(30) }
+ });
+ submit_record(record_t{
+ { generate_extent(4), generate_extent(1) },
+ { generate_delta(23), generate_delta(400) }
+ });
+ replay_and_check();
+ });
+}
+
+TEST_F(journal_test_t, replay_twice)
+{
+ run_async([this] {
+ submit_record(record_t{
+ { generate_extent(1), generate_extent(2) },
+ { generate_delta(23), generate_delta(30) }
+ });
+ submit_record(record_t{
+ { generate_extent(4), generate_extent(1) },
+ { generate_delta(23), generate_delta(400) }
+ });
+ replay_and_check();
+ submit_record(record_t{
+ { generate_extent(2), generate_extent(5) },
+ { generate_delta(230), generate_delta(40) }
+ });
+ replay_and_check();
+ });
+}
+
+TEST_F(journal_test_t, roll_journal_and_replay)
+{
+ run_async([this] {
+ paddr_t current = submit_record(
+ record_t{
+ { generate_extent(1), generate_extent(2) },
+ { generate_delta(23), generate_delta(30) }
+ });
+ auto starting_segment = current.segment;
+ unsigned so_far = 0;
+ while (current.segment == starting_segment) {
+ current = submit_record(record_t{
+ { generate_extent(512), generate_extent(512) },
+ { generate_delta(23), generate_delta(400) }
+ });
+ ++so_far;
+ ASSERT_FALSE(so_far > 10);
+ }
+ replay_and_check();
+ });
+}
diff --git a/src/test/crimson/seastore/test_transaction_manager.cc b/src/test/crimson/seastore/test_transaction_manager.cc
new file mode 100644
index 000000000..9906f938a
--- /dev/null
+++ b/src/test/crimson/seastore/test_transaction_manager.cc
@@ -0,0 +1,495 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <random>
+
+#include "test/crimson/gtest_seastar.h"
+#include "test/crimson/seastore/transaction_manager_test_state.h"
+
+#include "crimson/os/seastore/segment_cleaner.h"
+#include "crimson/os/seastore/cache.h"
+#include "crimson/os/seastore/transaction_manager.h"
+#include "crimson/os/seastore/segment_manager/ephemeral.h"
+#include "crimson/os/seastore/segment_manager.h"
+
+#include "test/crimson/seastore/test_block.h"
+
+using namespace crimson;
+using namespace crimson::os;
+using namespace crimson::os::seastore;
+
+namespace {
+ [[maybe_unused]] seastar::logger& logger() {
+ return crimson::get_logger(ceph_subsys_test);
+ }
+}
+
+struct test_extent_record_t {
+ test_extent_desc_t desc;
+ unsigned refcount = 0;
+ test_extent_record_t() = default;
+ test_extent_record_t(
+ const test_extent_desc_t &desc,
+ unsigned refcount) : desc(desc), refcount(refcount) {}
+
+ void update(const test_extent_desc_t &to) {
+ desc = to;
+ }
+
+ bool operator==(const test_extent_desc_t &rhs) const {
+ return desc == rhs;
+ }
+ bool operator!=(const test_extent_desc_t &rhs) const {
+ return desc != rhs;
+ }
+};
+
+std::ostream &operator<<(std::ostream &lhs, const test_extent_record_t &rhs) {
+ return lhs << "test_extent_record_t(" << rhs.desc
+ << ", refcount=" << rhs.refcount << ")";
+}
+
+struct transaction_manager_test_t :
+ public seastar_test_suite_t,
+ TMTestState {
+
+ std::random_device rd;
+ std::mt19937 gen;
+
+ transaction_manager_test_t()
+ : gen(rd()) {
+ init();
+ }
+
+ laddr_t get_random_laddr(size_t block_size, laddr_t limit) {
+ return block_size *
+ std::uniform_int_distribution<>(0, (limit / block_size) - 1)(gen);
+ }
+
+ char get_random_contents() {
+ return static_cast<char>(std::uniform_int_distribution<>(0, 255)(gen));
+ }
+
+ seastar::future<> set_up_fut() final {
+ return tm_setup();
+ }
+
+ seastar::future<> tear_down_fut() final {
+ return tm_teardown();
+ }
+
+ struct test_extents_t : std::map<laddr_t, test_extent_record_t> {
+ private:
+ void check_available(laddr_t addr, extent_len_t len) {
+ auto iter = upper_bound(addr);
+ if (iter != begin()) {
+ auto liter = iter;
+ liter--;
+ EXPECT_FALSE(liter->first + liter->second.desc.len > addr);
+ }
+ if (iter != end()) {
+ EXPECT_FALSE(iter->first < addr + len);
+ }
+ }
+ void check_hint(laddr_t hint, laddr_t addr, extent_len_t len) {
+ auto iter = lower_bound(hint);
+ laddr_t last = hint;
+ while (true) {
+ if (iter == end() || iter->first > addr) {
+ EXPECT_EQ(addr, last);
+ break;
+ }
+ EXPECT_FALSE(iter->first - last > len);
+ last = iter->first + iter->second.desc.len;
+ ++iter;
+ }
+ }
+ public:
+ void insert(TestBlock &extent) {
+ check_available(extent.get_laddr(), extent.get_length());
+ emplace(
+ std::make_pair(
+ extent.get_laddr(),
+ test_extent_record_t{extent.get_desc(), 1}
+ ));
+ }
+ void alloced(laddr_t hint, TestBlock &extent) {
+ check_hint(hint, extent.get_laddr(), extent.get_length());
+ insert(extent);
+ }
+ } test_mappings;
+
+ struct test_transaction_t {
+ TransactionRef t;
+ test_extents_t mappings;
+ };
+
+ test_transaction_t create_transaction() {
+ return { tm->create_transaction(), test_mappings };
+ }
+
+ test_transaction_t create_weak_transaction() {
+ return { tm->create_weak_transaction(), test_mappings };
+ }
+
+ TestBlockRef alloc_extent(
+ test_transaction_t &t,
+ laddr_t hint,
+ extent_len_t len,
+ char contents) {
+ auto extent = tm->alloc_extent<TestBlock>(
+ *(t.t),
+ hint,
+ len).unsafe_get0();
+ extent->set_contents(contents);
+ EXPECT_FALSE(t.mappings.count(extent->get_laddr()));
+ EXPECT_EQ(len, extent->get_length());
+ t.mappings.alloced(hint, *extent);
+ return extent;
+ }
+
+ TestBlockRef alloc_extent(
+ test_transaction_t &t,
+ laddr_t hint,
+ extent_len_t len) {
+ return alloc_extent(
+ t,
+ hint,
+ len,
+ get_random_contents());
+ }
+
+ bool check_usage() {
+ auto t = create_weak_transaction();
+ SpaceTrackerIRef tracker(segment_cleaner->get_empty_space_tracker());
+ lba_manager->scan_mapped_space(
+ *t.t,
+ [&tracker](auto offset, auto len) {
+ tracker->allocate(
+ offset.segment,
+ offset.offset,
+ len);
+ }).unsafe_get0();
+ return segment_cleaner->debug_check_space(*tracker);
+ }
+
+ void replay() {
+ logger().debug("{}: begin", __func__);
+ EXPECT_TRUE(check_usage());
+ restart();
+ logger().debug("{}: end", __func__);
+ }
+
+ void check() {
+ check_mappings();
+ check_usage();
+ }
+
+ void check_mappings() {
+ auto t = create_weak_transaction();
+ check_mappings(t);
+ }
+
+ TestBlockRef get_extent(
+ test_transaction_t &t,
+ laddr_t addr,
+ extent_len_t len) {
+ ceph_assert(t.mappings.count(addr));
+ ceph_assert(t.mappings[addr].desc.len == len);
+
+ auto ret_list = tm->read_extents<TestBlock>(
+ *t.t, addr, len
+ ).unsafe_get0();
+ EXPECT_EQ(ret_list.size(), 1);
+ auto &ext = ret_list.begin()->second;
+ auto &laddr = ret_list.begin()->first;
+ EXPECT_EQ(addr, laddr);
+ EXPECT_EQ(addr, ext->get_laddr());
+ return ext;
+ }
+
+ test_block_mutator_t mutator;
+ TestBlockRef mutate_extent(
+ test_transaction_t &t,
+ TestBlockRef ref) {
+ ceph_assert(t.mappings.count(ref->get_laddr()));
+ ceph_assert(t.mappings[ref->get_laddr()].desc.len == ref->get_length());
+ auto ext = tm->get_mutable_extent(*t.t, ref)->cast<TestBlock>();
+ EXPECT_EQ(ext->get_laddr(), ref->get_laddr());
+ EXPECT_EQ(ext->get_desc(), ref->get_desc());
+ mutator.mutate(*ext, gen);
+ t.mappings[ext->get_laddr()].update(ext->get_desc());
+ return ext;
+ }
+
+ void inc_ref(test_transaction_t &t, laddr_t offset) {
+ ceph_assert(t.mappings.count(offset));
+ ceph_assert(t.mappings[offset].refcount > 0);
+ auto refcnt = tm->inc_ref(*t.t, offset).unsafe_get0();
+ t.mappings[offset].refcount++;
+ EXPECT_EQ(refcnt, t.mappings[offset].refcount);
+ }
+
+ void dec_ref(test_transaction_t &t, laddr_t offset) {
+ ceph_assert(t.mappings.count(offset));
+ ceph_assert(t.mappings[offset].refcount > 0);
+ auto refcnt = tm->dec_ref(*t.t, offset).unsafe_get0();
+ t.mappings[offset].refcount--;
+ EXPECT_EQ(refcnt, t.mappings[offset].refcount);
+ if (t.mappings[offset].refcount == 0) {
+ t.mappings.erase(offset);
+ }
+ }
+
+ void check_mappings(test_transaction_t &t) {
+ for (auto &i: t.mappings) {
+ logger().debug("check_mappings: {}->{}", i.first, i.second);
+ auto ext = get_extent(t, i.first, i.second.desc.len);
+ EXPECT_EQ(i.second, ext->get_desc());
+ }
+ auto lt = create_weak_transaction();
+ lba_manager->scan_mappings(
+ *lt.t,
+ 0,
+ L_ADDR_MAX,
+ [iter=lt.mappings.begin(), &lt](auto l, auto p, auto len) mutable {
+ EXPECT_NE(iter, lt.mappings.end());
+ EXPECT_EQ(l, iter->first);
+ ++iter;
+ }).unsafe_get0();
+ }
+
+ void submit_transaction(test_transaction_t t) {
+ tm->submit_transaction(std::move(t.t)).unsafe_get();
+ test_mappings = t.mappings;
+ }
+};
+
+TEST_F(transaction_manager_test_t, basic)
+{
+ constexpr laddr_t SIZE = 4096;
+ run_async([this] {
+ constexpr laddr_t ADDR = 0xFF * SIZE;
+ {
+ auto t = create_transaction();
+ auto extent = alloc_extent(
+ t,
+ ADDR,
+ SIZE,
+ 'a');
+ ASSERT_EQ(ADDR, extent->get_laddr());
+ check_mappings(t);
+ check();
+ submit_transaction(std::move(t));
+ check();
+ }
+ });
+}
+
+TEST_F(transaction_manager_test_t, mutate)
+{
+ constexpr laddr_t SIZE = 4096;
+ run_async([this] {
+ constexpr laddr_t ADDR = 0xFF * SIZE;
+ {
+ auto t = create_transaction();
+ auto extent = alloc_extent(
+ t,
+ ADDR,
+ SIZE,
+ 'a');
+ ASSERT_EQ(ADDR, extent->get_laddr());
+ check_mappings(t);
+ check();
+ submit_transaction(std::move(t));
+ check();
+ }
+ ASSERT_TRUE(check_usage());
+ replay();
+ {
+ auto t = create_transaction();
+ auto ext = get_extent(
+ t,
+ ADDR,
+ SIZE);
+ auto mut = mutate_extent(t, ext);
+ check_mappings(t);
+ check();
+ submit_transaction(std::move(t));
+ check();
+ }
+ ASSERT_TRUE(check_usage());
+ replay();
+ check();
+ });
+}
+
+TEST_F(transaction_manager_test_t, create_remove_same_transaction)
+{
+ constexpr laddr_t SIZE = 4096;
+ run_async([this] {
+ constexpr laddr_t ADDR = 0xFF * SIZE;
+ {
+ auto t = create_transaction();
+ auto extent = alloc_extent(
+ t,
+ ADDR,
+ SIZE,
+ 'a');
+ ASSERT_EQ(ADDR, extent->get_laddr());
+ check_mappings(t);
+ dec_ref(t, ADDR);
+ check_mappings(t);
+
+ extent = alloc_extent(
+ t,
+ ADDR,
+ SIZE,
+ 'a');
+
+ submit_transaction(std::move(t));
+ check();
+ }
+ replay();
+ check();
+ });
+}
+
+TEST_F(transaction_manager_test_t, split_merge_read_same_transaction)
+{
+ constexpr laddr_t SIZE = 4096;
+ run_async([this] {
+ {
+ auto t = create_transaction();
+ for (unsigned i = 0; i < 300; ++i) {
+ auto extent = alloc_extent(
+ t,
+ laddr_t(i * SIZE),
+ SIZE);
+ }
+ check_mappings(t);
+ submit_transaction(std::move(t));
+ check();
+ }
+ {
+ auto t = create_transaction();
+ for (unsigned i = 0; i < 240; ++i) {
+ dec_ref(
+ t,
+ laddr_t(i * SIZE));
+ }
+ check_mappings(t);
+ submit_transaction(std::move(t));
+ check();
+ }
+ });
+}
+
+
+TEST_F(transaction_manager_test_t, inc_dec_ref)
+{
+ constexpr laddr_t SIZE = 4096;
+ run_async([this] {
+ constexpr laddr_t ADDR = 0xFF * SIZE;
+ {
+ auto t = create_transaction();
+ auto extent = alloc_extent(
+ t,
+ ADDR,
+ SIZE,
+ 'a');
+ ASSERT_EQ(ADDR, extent->get_laddr());
+ check_mappings(t);
+ check();
+ submit_transaction(std::move(t));
+ check();
+ }
+ replay();
+ {
+ auto t = create_transaction();
+ inc_ref(t, ADDR);
+ check_mappings(t);
+ check();
+ submit_transaction(std::move(t));
+ check();
+ }
+ {
+ auto t = create_transaction();
+ dec_ref(t, ADDR);
+ check_mappings(t);
+ check();
+ submit_transaction(std::move(t));
+ check();
+ }
+ replay();
+ {
+ auto t = create_transaction();
+ dec_ref(t, ADDR);
+ check_mappings(t);
+ check();
+ submit_transaction(std::move(t));
+ check();
+ }
+ });
+}
+
+TEST_F(transaction_manager_test_t, cause_lba_split)
+{
+ constexpr laddr_t SIZE = 4096;
+ run_async([this] {
+ for (unsigned i = 0; i < 200; ++i) {
+ auto t = create_transaction();
+ auto extent = alloc_extent(
+ t,
+ i * SIZE,
+ SIZE,
+ (char)(i & 0xFF));
+ ASSERT_EQ(i * SIZE, extent->get_laddr());
+ submit_transaction(std::move(t));
+ }
+ check();
+ });
+}
+
+TEST_F(transaction_manager_test_t, random_writes)
+{
+ constexpr size_t TOTAL = 4<<20;
+ constexpr size_t BSIZE = 4<<10;
+ constexpr size_t PADDING_SIZE = 256<<10;
+ constexpr size_t BLOCKS = TOTAL / BSIZE;
+ run_async([this] {
+ for (unsigned i = 0; i < BLOCKS; ++i) {
+ auto t = create_transaction();
+ auto extent = alloc_extent(
+ t,
+ i * BSIZE,
+ BSIZE);
+ ASSERT_EQ(i * BSIZE, extent->get_laddr());
+ submit_transaction(std::move(t));
+ }
+
+ for (unsigned i = 0; i < 4; ++i) {
+ for (unsigned j = 0; j < 65; ++j) {
+ auto t = create_transaction();
+ for (unsigned k = 0; k < 2; ++k) {
+ auto ext = get_extent(
+ t,
+ get_random_laddr(BSIZE, TOTAL),
+ BSIZE);
+ auto mut = mutate_extent(t, ext);
+ // pad out transaction
+ auto padding = alloc_extent(
+ t,
+ TOTAL + (k * PADDING_SIZE),
+ PADDING_SIZE);
+ dec_ref(t, padding->get_laddr());
+ }
+ submit_transaction(std::move(t));
+ }
+ replay();
+ logger().debug("random_writes: checking");
+ check();
+ logger().debug("random_writes: done replaying/checking");
+ }
+ });
+}
diff --git a/src/test/crimson/seastore/transaction_manager_test_state.h b/src/test/crimson/seastore/transaction_manager_test_state.h
new file mode 100644
index 000000000..bf2440923
--- /dev/null
+++ b/src/test/crimson/seastore/transaction_manager_test_state.h
@@ -0,0 +1,82 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#include <random>
+
+#include "crimson/os/seastore/segment_cleaner.h"
+#include "crimson/os/seastore/cache.h"
+#include "crimson/os/seastore/transaction_manager.h"
+#include "crimson/os/seastore/segment_manager/ephemeral.h"
+#include "crimson/os/seastore/segment_manager.h"
+
+using namespace crimson;
+using namespace crimson::os;
+using namespace crimson::os::seastore;
+
+class TMTestState {
+protected:
+ std::unique_ptr<segment_manager::EphemeralSegmentManager> segment_manager;
+ std::unique_ptr<SegmentCleaner> segment_cleaner;
+ std::unique_ptr<Journal> journal;
+ std::unique_ptr<Cache> cache;
+ LBAManagerRef lba_manager;
+ std::unique_ptr<TransactionManager> tm;
+
+ TMTestState()
+ : segment_manager(segment_manager::create_test_ephemeral()) {
+ init();
+ }
+
+ void init() {
+ segment_cleaner = std::make_unique<SegmentCleaner>(
+ SegmentCleaner::config_t::default_from_segment_manager(
+ *segment_manager),
+ true);
+ journal = std::make_unique<Journal>(*segment_manager);
+ cache = std::make_unique<Cache>(*segment_manager);
+ lba_manager = lba_manager::create_lba_manager(*segment_manager, *cache);
+ tm = std::make_unique<TransactionManager>(
+ *segment_manager, *segment_cleaner, *journal, *cache, *lba_manager);
+
+ journal->set_segment_provider(&*segment_cleaner);
+ segment_cleaner->set_extent_callback(&*tm);
+ }
+
+ void destroy() {
+ tm.reset();
+ lba_manager.reset();
+ cache.reset();
+ journal.reset();
+ segment_cleaner.reset();
+ }
+
+ void restart() {
+ tm->close().unsafe_get();
+ destroy();
+ static_cast<segment_manager::EphemeralSegmentManager*>(&*segment_manager)->remount();
+ init();
+ tm->mount().unsafe_get();
+ }
+
+ seastar::future<> tm_setup() {
+ return segment_manager->init(
+ ).safe_then([this] {
+ return tm->mkfs();
+ }).safe_then([this] {
+ return tm->close();
+ }).safe_then([this] {
+ destroy();
+ static_cast<segment_manager::EphemeralSegmentManager*>(
+ &*segment_manager)->remount();
+ init();
+ return tm->mount();
+ }).handle_error(crimson::ct_error::assert_all{});
+ }
+
+ seastar::future<> tm_teardown() {
+ return tm->close(
+ ).handle_error(crimson::ct_error::assert_all{});
+ }
+};
diff --git a/src/test/crimson/test_alien_echo.cc b/src/test/crimson/test_alien_echo.cc
new file mode 100644
index 000000000..4434b522b
--- /dev/null
+++ b/src/test/crimson/test_alien_echo.cc
@@ -0,0 +1,306 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
+
+#include "auth/Auth.h"
+#include "messages/MPing.h"
+#include "common/ceph_argparse.h"
+#include "crimson/auth/DummyAuth.h"
+#include "crimson/common/throttle.h"
+#include "crimson/net/Connection.h"
+#include "crimson/net/Dispatcher.h"
+#include "crimson/net/Messenger.h"
+
+#include <seastar/core/alien.hh>
+#include <seastar/core/app-template.hh>
+#include <seastar/core/future-util.hh>
+#include <seastar/core/internal/pollable_fd.hh>
+#include <seastar/core/posix.hh>
+#include <seastar/core/reactor.hh>
+
+using crimson::common::local_conf;
+
+enum class echo_role {
+ as_server,
+ as_client,
+};
+
+namespace seastar_pingpong {
+struct DummyAuthAuthorizer : public AuthAuthorizer {
+ DummyAuthAuthorizer()
+ : AuthAuthorizer(CEPH_AUTH_CEPHX)
+ {}
+ bool verify_reply(bufferlist::const_iterator&,
+ std::string *connection_secret) override {
+ return true;
+ }
+ bool add_challenge(CephContext*, const bufferlist&) override {
+ return true;
+ }
+};
+
+struct Server {
+ crimson::common::Throttle byte_throttler;
+ crimson::net::MessengerRef msgr;
+ crimson::auth::DummyAuthClientServer dummy_auth;
+ struct ServerDispatcher final : crimson::net::Dispatcher {
+ unsigned count = 0;
+ seastar::condition_variable on_reply;
+ std::optional<seastar::future<>> ms_dispatch(crimson::net::ConnectionRef c,
+ MessageRef m) final
+ {
+ std::cout << "server got ping " << *m << std::endl;
+ // reply with a pong
+ return c->send(make_message<MPing>()).then([this] {
+ ++count;
+ on_reply.signal();
+ return seastar::now();
+ });
+ }
+ } dispatcher;
+ Server(crimson::net::MessengerRef msgr)
+ : byte_throttler(local_conf()->osd_client_message_size_cap),
+ msgr{msgr}
+ {
+ msgr->set_crc_header();
+ msgr->set_crc_data();
+ }
+};
+
+struct Client {
+ crimson::common::Throttle byte_throttler;
+ crimson::net::MessengerRef msgr;
+ crimson::auth::DummyAuthClientServer dummy_auth;
+ struct ClientDispatcher final : crimson::net::Dispatcher {
+ unsigned count = 0;
+ seastar::condition_variable on_reply;
+ std::optional<seastar::future<>> ms_dispatch(crimson::net::ConnectionRef c,
+ MessageRef m) final
+ {
+ std::cout << "client got pong " << *m << std::endl;
+ ++count;
+ on_reply.signal();
+ return seastar::now();
+ }
+ } dispatcher;
+ Client(crimson::net::MessengerRef msgr)
+ : byte_throttler(local_conf()->osd_client_message_size_cap),
+ msgr{msgr}
+ {
+ msgr->set_crc_header();
+ msgr->set_crc_data();
+ }
+};
+} // namespace seastar_pingpong
+
+class SeastarContext {
+ int begin_fd;
+ seastar::file_desc on_end;
+
+public:
+ SeastarContext()
+ : begin_fd{eventfd(0, 0)},
+ on_end{seastar::file_desc::eventfd(0, 0)}
+ {}
+
+ template<class Func>
+ std::thread with_seastar(Func&& func) {
+ return std::thread{[this, on_end = on_end.get(),
+ func = std::forward<Func>(func)] {
+ // alien: are you ready?
+ wait_for_seastar();
+ // alien: could you help me apply(func)?
+ func();
+ // alien: i've sent my request. have you replied it?
+ // wait_for_seastar();
+ // alien: you are free to go!
+ ::eventfd_write(on_end, 1);
+ }};
+ }
+
+ void run(seastar::app_template& app, int argc, char** argv) {
+ app.run(argc, argv, [this] {
+ std::vector<const char*> args;
+ std::string cluster;
+ std::string conf_file_list;
+ auto init_params = ceph_argparse_early_args(args,
+ CEPH_ENTITY_TYPE_CLIENT,
+ &cluster,
+ &conf_file_list);
+ return crimson::common::sharded_conf().start(init_params.name, cluster)
+ .then([conf_file_list] {
+ return local_conf().parse_config_files(conf_file_list);
+ }).then([this] {
+ return set_seastar_ready();
+ }).then([on_end = std::move(on_end)] () mutable {
+ // seastar: let me know once i am free to leave.
+ return seastar::do_with(seastar::pollable_fd(std::move(on_end)), []
+ (seastar::pollable_fd& on_end_fds) {
+ return on_end_fds.readable().then([&on_end_fds] {
+ eventfd_t result = 0;
+ on_end_fds.get_file_desc().read(&result, sizeof(result));
+ return seastar::make_ready_future<>();
+ });
+ });
+ }).then([]() {
+ return crimson::common::sharded_conf().stop();
+ }).handle_exception([](auto ep) {
+ std::cerr << "Error: " << ep << std::endl;
+ }).finally([] {
+ seastar::engine().exit(0);
+ });
+ });
+ }
+
+ seastar::future<> set_seastar_ready() {
+ // seastar: i am ready to serve!
+ ::eventfd_write(begin_fd, 1);
+ return seastar::now();
+ }
+
+private:
+ void wait_for_seastar() {
+ eventfd_t result = 0;
+ if (int r = ::eventfd_read(begin_fd, &result); r < 0) {
+ std::cerr << "unable to eventfd_read():" << errno << std::endl;
+ }
+ }
+};
+
+static seastar::future<>
+seastar_echo(const entity_addr_t addr, echo_role role, unsigned count)
+{
+ std::cout << "seastar/";
+ if (role == echo_role::as_server) {
+ return seastar::do_with(
+ seastar_pingpong::Server{crimson::net::Messenger::create(
+ entity_name_t::OSD(0), "server", addr.get_nonce())},
+ [addr, count](auto& server) mutable {
+ std::cout << "server listening at " << addr << std::endl;
+ // bind the server
+ server.msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0));
+ server.msgr->set_policy_throttler(entity_name_t::TYPE_OSD,
+ &server.byte_throttler);
+ server.msgr->set_require_authorizer(false);
+ server.msgr->set_auth_client(&server.dummy_auth);
+ server.msgr->set_auth_server(&server.dummy_auth);
+ return server.msgr->bind(entity_addrvec_t{addr}
+ ).safe_then([&server] {
+ return server.msgr->start({&server.dispatcher});
+ }, crimson::net::Messenger::bind_ertr::all_same_way([](auto& e) {
+ ceph_abort_msg("bind failed");
+ })).then([&dispatcher=server.dispatcher, count] {
+ return dispatcher.on_reply.wait([&dispatcher, count] {
+ return dispatcher.count >= count;
+ });
+ }).finally([&server] {
+ std::cout << "server shutting down" << std::endl;
+ return server.msgr->shutdown();
+ });
+ });
+ } else {
+ return seastar::do_with(
+ seastar_pingpong::Client{crimson::net::Messenger::create(
+ entity_name_t::OSD(1), "client", addr.get_nonce())},
+ [addr, count](auto& client) {
+ std::cout << "client sending to " << addr << std::endl;
+ client.msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0));
+ client.msgr->set_policy_throttler(entity_name_t::TYPE_OSD,
+ &client.byte_throttler);
+ client.msgr->set_require_authorizer(false);
+ client.msgr->set_auth_client(&client.dummy_auth);
+ client.msgr->set_auth_server(&client.dummy_auth);
+ return client.msgr->start({&client.dispatcher}).then(
+ [addr, &client, &disp=client.dispatcher, count] {
+ auto conn = client.msgr->connect(addr, entity_name_t::TYPE_OSD);
+ return seastar::do_until(
+ [&disp,count] { return disp.count >= count; },
+ [&disp,conn] {
+ return conn->send(make_message<MPing>()).then([&] {
+ return disp.on_reply.wait();
+ });
+ }
+ );
+ }).finally([&client] {
+ std::cout << "client shutting down" << std::endl;
+ return client.msgr->shutdown();
+ });
+ });
+ }
+}
+
+int main(int argc, char** argv)
+{
+ namespace po = boost::program_options;
+ po::options_description desc{"Allowed options"};
+ desc.add_options()
+ ("help,h", "show help message")
+ ("role", po::value<std::string>()->default_value("pong"),
+ "role to play (ping | pong)")
+ ("port", po::value<uint16_t>()->default_value(9010),
+ "port #")
+ ("nonce", po::value<uint32_t>()->default_value(42),
+ "a unique number to identify the pong server")
+ ("count", po::value<unsigned>()->default_value(10),
+ "stop after sending/echoing <count> MPing messages")
+ ("v2", po::value<bool>()->default_value(false),
+ "using msgr v2 protocol");
+ po::variables_map vm;
+ std::vector<std::string> unrecognized_options;
+ try {
+ auto parsed = po::command_line_parser(argc, argv)
+ .options(desc)
+ .allow_unregistered()
+ .run();
+ po::store(parsed, vm);
+ if (vm.count("help")) {
+ std::cout << desc << std::endl;
+ return 0;
+ }
+ po::notify(vm);
+ unrecognized_options = po::collect_unrecognized(parsed.options, po::include_positional);
+ } catch(const po::error& e) {
+ std::cerr << "error: " << e.what() << std::endl;
+ return 1;
+ }
+
+ entity_addr_t addr;
+ if (vm["v2"].as<bool>()) {
+ addr.set_type(entity_addr_t::TYPE_MSGR2);
+ } else {
+ addr.set_type(entity_addr_t::TYPE_LEGACY);
+ }
+ addr.set_family(AF_INET);
+ addr.set_port(vm["port"].as<std::uint16_t>());
+ addr.set_nonce(vm["nonce"].as<std::uint32_t>());
+
+ echo_role role = echo_role::as_server;
+ if (vm["role"].as<std::string>() == "ping") {
+ role = echo_role::as_client;
+ }
+
+ auto count = vm["count"].as<unsigned>();
+ seastar::app_template app;
+ SeastarContext sc;
+ auto job = sc.with_seastar([&] {
+ auto fut = seastar::alien::submit_to(0, [addr, role, count] {
+ return seastar_echo(addr, role, count);
+ });
+ fut.wait();
+ });
+ std::vector<char*> av{argv[0]};
+ std::transform(begin(unrecognized_options),
+ end(unrecognized_options),
+ std::back_inserter(av),
+ [](auto& s) {
+ return const_cast<char*>(s.c_str());
+ });
+ sc.run(app, av.size(), av.data());
+ job.join();
+}
+
+/*
+ * Local Variables:
+ * compile-command: "make -j4 \
+ * -C ../../../build \
+ * unittest_seastar_echo"
+ * End:
+ */
diff --git a/src/test/crimson/test_alienstore_thread_pool.cc b/src/test/crimson/test_alienstore_thread_pool.cc
new file mode 100644
index 000000000..82b98abbb
--- /dev/null
+++ b/src/test/crimson/test_alienstore_thread_pool.cc
@@ -0,0 +1,78 @@
+#include <chrono>
+#include <iostream>
+#include <numeric>
+#include <seastar/core/app-template.hh>
+#include "common/ceph_argparse.h"
+#include "crimson/common/config_proxy.h"
+#include "crimson/os/alienstore/thread_pool.h"
+#include "include/msgr.h"
+
+using namespace std::chrono_literals;
+using ThreadPool = crimson::os::ThreadPool;
+using crimson::common::local_conf;
+
+seastar::future<> test_accumulate(ThreadPool& tp) {
+ static constexpr auto N = 5;
+ static constexpr auto M = 1;
+ auto slow_plus = [&tp](int i) {
+ return tp.submit([=] {
+ std::this_thread::sleep_for(10ns);
+ return i + M;
+ });
+ };
+ return seastar::map_reduce(
+ boost::irange(0, N), slow_plus, 0, std::plus{}).then([] (int sum) {
+ auto r = boost::irange(0 + M, N + M);
+ if (sum != std::accumulate(r.begin(), r.end(), 0)) {
+ throw std::runtime_error("test_accumulate failed");
+ }
+ });
+}
+
+seastar::future<> test_void_return(ThreadPool& tp) {
+ return tp.submit([=] {
+ std::this_thread::sleep_for(10ns);
+ });
+}
+
+int main(int argc, char** argv)
+{
+ seastar::app_template app;
+ return app.run(argc, argv, [] {
+ std::vector<const char*> args;
+ std::string cluster;
+ std::string conf_file_list;
+ auto init_params = ceph_argparse_early_args(args,
+ CEPH_ENTITY_TYPE_CLIENT,
+ &cluster,
+ &conf_file_list);
+ return crimson::common::sharded_conf().start(init_params.name, cluster)
+ .then([conf_file_list] {
+ return local_conf().parse_config_files(conf_file_list);
+ }).then([] {
+ return seastar::do_with(std::make_unique<crimson::os::ThreadPool>(2, 128, 0),
+ [](auto& tp) {
+ return tp->start().then([&tp] {
+ return test_accumulate(*tp);
+ }).then([&tp] {
+ return test_void_return(*tp);
+ }).finally([&tp] {
+ return tp->stop();
+ });
+ });
+ }).finally([] {
+ return crimson::common::sharded_conf().stop();
+ }).handle_exception([](auto e) {
+ std::cerr << "Error: " << e << std::endl;
+ seastar::engine().exit(1);
+ });
+ });
+}
+
+/*
+ * Local Variables:
+ * compile-command: "make -j4 \
+ * -C ../../../build \
+ * unittest_seastar_thread_pool"
+ * End:
+ */
diff --git a/src/test/crimson/test_async_echo.cc b/src/test/crimson/test_async_echo.cc
new file mode 100644
index 000000000..4f2d43d9d
--- /dev/null
+++ b/src/test/crimson/test_async_echo.cc
@@ -0,0 +1,235 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
+
+#include <boost/program_options/variables_map.hpp>
+#include <boost/program_options/parsers.hpp>
+
+#include "auth/Auth.h"
+#include "global/global_init.h"
+#include "messages/MPing.h"
+#include "msg/Dispatcher.h"
+#include "msg/Messenger.h"
+
+#include "auth/DummyAuth.h"
+
+enum class echo_role {
+ as_server,
+ as_client,
+};
+
+namespace native_pingpong {
+
+constexpr int CEPH_OSD_PROTOCOL = 10;
+
+struct Server {
+ Server(CephContext* cct, const entity_inst_t& entity)
+ : dummy_auth(cct), dispatcher(cct)
+ {
+ msgr.reset(Messenger::create(cct, "async", entity.name, "pong", entity.addr.get_nonce()));
+ dummy_auth.auth_registry.refresh_config();
+ msgr->set_cluster_protocol(CEPH_OSD_PROTOCOL);
+ msgr->set_default_policy(Messenger::Policy::stateless_server(0));
+ msgr->set_auth_client(&dummy_auth);
+ msgr->set_auth_server(&dummy_auth);
+ msgr->set_require_authorizer(false);
+ }
+ DummyAuthClientServer dummy_auth;
+ unique_ptr<Messenger> msgr;
+ struct ServerDispatcher : Dispatcher {
+ std::mutex mutex;
+ std::condition_variable on_reply;
+ bool replied = false;
+ ServerDispatcher(CephContext* cct)
+ : Dispatcher(cct)
+ {}
+ bool ms_can_fast_dispatch_any() const override {
+ return true;
+ }
+ bool ms_can_fast_dispatch(const Message* m) const override {
+ return m->get_type() == CEPH_MSG_PING;
+ }
+ void ms_fast_dispatch(Message* m) override {
+ m->get_connection()->send_message(new MPing);
+ m->put();
+ {
+ std::lock_guard lock{mutex};
+ replied = true;
+ }
+ on_reply.notify_one();
+ }
+ bool ms_dispatch(Message*) override {
+ ceph_abort();
+ }
+ bool ms_handle_reset(Connection*) override {
+ return true;
+ }
+ void ms_handle_remote_reset(Connection*) override {
+ }
+ bool ms_handle_refused(Connection*) override {
+ return true;
+ }
+ void echo() {
+ replied = false;
+ std::unique_lock lock{mutex};
+ return on_reply.wait(lock, [this] { return replied; });
+ }
+ } dispatcher;
+ void echo() {
+ dispatcher.echo();
+ }
+};
+
+struct Client {
+ unique_ptr<Messenger> msgr;
+ Client(CephContext *cct)
+ : dummy_auth(cct), dispatcher(cct)
+ {
+ msgr.reset(Messenger::create(cct, "async", entity_name_t::CLIENT(-1), "ping", getpid()));
+ dummy_auth.auth_registry.refresh_config();
+ msgr->set_cluster_protocol(CEPH_OSD_PROTOCOL);
+ msgr->set_default_policy(Messenger::Policy::lossy_client(0));
+ msgr->set_auth_client(&dummy_auth);
+ msgr->set_auth_server(&dummy_auth);
+ msgr->set_require_authorizer(false);
+ }
+ DummyAuthClientServer dummy_auth;
+ struct ClientDispatcher : Dispatcher {
+ std::mutex mutex;
+ std::condition_variable on_reply;
+ bool replied = false;
+
+ ClientDispatcher(CephContext* cct)
+ : Dispatcher(cct)
+ {}
+ bool ms_can_fast_dispatch_any() const override {
+ return true;
+ }
+ bool ms_can_fast_dispatch(const Message* m) const override {
+ return m->get_type() == CEPH_MSG_PING;
+ }
+ void ms_fast_dispatch(Message* m) override {
+ m->put();
+ {
+ std::lock_guard lock{mutex};
+ replied = true;
+ }
+ on_reply.notify_one();
+ }
+ bool ms_dispatch(Message*) override {
+ ceph_abort();
+ }
+ bool ms_handle_reset(Connection *) override {
+ return true;
+ }
+ void ms_handle_remote_reset(Connection*) override {
+ }
+ bool ms_handle_refused(Connection*) override {
+ return true;
+ }
+ bool ping(Messenger* msgr, const entity_inst_t& peer) {
+ auto conn = msgr->connect_to(peer.name.type(),
+ entity_addrvec_t{peer.addr});
+ replied = false;
+ conn->send_message(new MPing);
+ std::unique_lock lock{mutex};
+ return on_reply.wait_for(lock, 500ms, [&] {
+ return replied;
+ });
+ }
+ } dispatcher;
+ void ping(const entity_inst_t& peer) {
+ dispatcher.ping(msgr.get(), peer);
+ }
+};
+} // namespace native_pingpong
+
+static void ceph_echo(CephContext* cct,
+ entity_addr_t addr, echo_role role, unsigned count)
+{
+ std::cout << "ceph/";
+ entity_inst_t entity{entity_name_t::OSD(0), addr};
+ if (role == echo_role::as_server) {
+ std::cout << "server listening at " << addr << std::endl;
+ native_pingpong::Server server{cct, entity};
+ server.msgr->bind(addr);
+ server.msgr->add_dispatcher_head(&server.dispatcher);
+ server.msgr->start();
+ for (unsigned i = 0; i < count; i++) {
+ server.echo();
+ }
+ server.msgr->shutdown();
+ server.msgr->wait();
+ } else {
+ std::cout << "client sending to " << addr << std::endl;
+ native_pingpong::Client client{cct};
+ client.msgr->add_dispatcher_head(&client.dispatcher);
+ client.msgr->start();
+ auto conn = client.msgr->connect_to(entity.name.type(),
+ entity_addrvec_t{entity.addr});
+ for (unsigned i = 0; i < count; i++) {
+ std::cout << "seq=" << i << std::endl;
+ client.ping(entity);
+ }
+ client.msgr->shutdown();
+ client.msgr->wait();
+ }
+}
+
+int main(int argc, char** argv)
+{
+ namespace po = boost::program_options;
+ po::options_description desc{"Allowed options"};
+ desc.add_options()
+ ("help,h", "show help message")
+ ("role", po::value<std::string>()->default_value("pong"),
+ "role to play (ping | pong)")
+ ("port", po::value<uint16_t>()->default_value(9010),
+ "port #")
+ ("nonce", po::value<uint32_t>()->default_value(42),
+ "a unique number to identify the pong server")
+ ("count", po::value<unsigned>()->default_value(10),
+ "stop after sending/echoing <count> MPing messages")
+ ("v2", po::value<bool>()->default_value(false),
+ "using msgr v2 protocol");
+ po::variables_map vm;
+ std::vector<std::string> unrecognized_options;
+ try {
+ auto parsed = po::command_line_parser(argc, argv)
+ .options(desc)
+ .allow_unregistered()
+ .run();
+ po::store(parsed, vm);
+ if (vm.count("help")) {
+ std::cout << desc << std::endl;
+ return 0;
+ }
+ po::notify(vm);
+ unrecognized_options = po::collect_unrecognized(parsed.options, po::include_positional);
+ } catch(const po::error& e) {
+ std::cerr << "error: " << e.what() << std::endl;
+ return 1;
+ }
+
+ entity_addr_t addr;
+ if (vm["v2"].as<bool>()) {
+ addr.set_type(entity_addr_t::TYPE_MSGR2);
+ } else {
+ addr.set_type(entity_addr_t::TYPE_LEGACY);
+ }
+ addr.set_family(AF_INET);
+ addr.set_port(vm["port"].as<std::uint16_t>());
+ addr.set_nonce(vm["nonce"].as<std::uint32_t>());
+
+ echo_role role = echo_role::as_server;
+ if (vm["role"].as<std::string>() == "ping") {
+ role = echo_role::as_client;
+ }
+
+ auto count = vm["count"].as<unsigned>();
+ std::vector<const char*> args(argv, argv + argc);
+ auto cct = global_init(nullptr, args,
+ CEPH_ENTITY_TYPE_CLIENT,
+ CODE_ENVIRONMENT_UTILITY,
+ CINIT_FLAG_NO_MON_CONFIG);
+ common_init_finish(cct.get());
+ ceph_echo(cct.get(), addr, role, count);
+}
diff --git a/src/test/crimson/test_backfill.cc b/src/test/crimson/test_backfill.cc
new file mode 100644
index 000000000..8f3bc0d9b
--- /dev/null
+++ b/src/test/crimson/test_backfill.cc
@@ -0,0 +1,500 @@
+#include <algorithm>
+#include <cstdlib>
+#include <deque>
+#include <functional>
+#include <initializer_list>
+#include <iostream>
+#include <iterator>
+#include <limits>
+#include <map>
+#include <set>
+#include <string>
+
+#include <boost/statechart/event_base.hpp>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "common/hobject.h"
+#include "crimson/osd/backfill_state.h"
+#include "osd/recovery_types.h"
+
+
+// The sole purpose is to convert from the string representation.
+// An alternative approach could use boost::range in FakeStore's
+// constructor.
+struct improved_hobject_t : hobject_t {
+ improved_hobject_t(const char parsable_name[]) {
+ this->parse(parsable_name);
+ }
+ improved_hobject_t(const hobject_t& obj)
+ : hobject_t(obj) {
+ }
+ bool operator==(const improved_hobject_t& rhs) const {
+ return static_cast<const hobject_t&>(*this) == \
+ static_cast<const hobject_t&>(rhs);
+ }
+};
+
+
+struct FakeStore {
+ using objs_t = std::map<improved_hobject_t, eversion_t>;
+
+ objs_t objs;
+
+ void push(const hobject_t& obj, eversion_t version) {
+ objs[obj] = version;
+ }
+
+ void drop(const hobject_t& obj, const eversion_t version) {
+ auto it = objs.find(obj);
+ ceph_assert(it != std::end(objs));
+ ceph_assert(it->second == version);
+ objs.erase(it);
+ }
+
+ template <class Func>
+ hobject_t list(const hobject_t& start, Func&& per_entry) const {
+ auto it = objs.lower_bound(start);
+ for (auto max = std::numeric_limits<std::uint64_t>::max();
+ it != std::end(objs) && max > 0;
+ ++it, --max) {
+ per_entry(*it);
+ }
+ return it != std::end(objs) ? static_cast<const hobject_t&>(it->first)
+ : hobject_t::get_max();
+ }
+
+ bool operator==(const FakeStore& rhs) const {
+ return std::size(objs) == std::size(rhs.objs) && \
+ std::equal(std::begin(objs), std::end(objs), std::begin(rhs.objs));
+ }
+ bool operator!=(const FakeStore& rhs) const {
+ return !(*this == rhs);
+ }
+};
+
+
+struct FakeReplica {
+ FakeStore store;
+ hobject_t last_backfill;
+
+ FakeReplica(FakeStore&& store)
+ : store(std::move(store)) {
+ }
+};
+
+struct FakePrimary {
+ FakeStore store;
+ eversion_t last_update;
+ eversion_t projected_last_update;
+ eversion_t log_tail;
+
+ FakePrimary(FakeStore&& store)
+ : store(std::move(store)) {
+ }
+};
+
+class BackfillFixture : public crimson::osd::BackfillState::BackfillListener {
+ friend class BackfillFixtureBuilder;
+
+ FakePrimary backfill_source;
+ std::map<pg_shard_t, FakeReplica> backfill_targets;
+ std::map<pg_shard_t,
+ std::vector<std::pair<hobject_t, eversion_t>>> enqueued_drops;
+ std::deque<
+ boost::intrusive_ptr<
+ const boost::statechart::event_base>> events_to_dispatch;
+ crimson::osd::BackfillState backfill_state;
+
+ BackfillFixture(FakePrimary&& backfill_source,
+ std::map<pg_shard_t, FakeReplica>&& backfill_targets);
+
+ template <class EventT>
+ void schedule_event(const EventT& event) {
+ events_to_dispatch.emplace_back(event.intrusive_from_this());
+ }
+
+ // BackfillListener {
+ void request_replica_scan(
+ const pg_shard_t& target,
+ const hobject_t& begin,
+ const hobject_t& end) override;
+
+ void request_primary_scan(
+ const hobject_t& begin) override;
+
+ void enqueue_push(
+ const hobject_t& obj,
+ const eversion_t& v) override;
+
+ void enqueue_drop(
+ const pg_shard_t& target,
+ const hobject_t& obj,
+ const eversion_t& v) override;
+
+ void maybe_flush() override;
+
+ void update_peers_last_backfill(
+ const hobject_t& new_last_backfill) override;
+
+ bool budget_available() const override;
+
+public:
+ MOCK_METHOD(void, backfilled, (), (override));
+ // }
+
+ void next_round(std::size_t how_many=1) {
+ ceph_assert(events_to_dispatch.size() >= how_many);
+ while (how_many-- > 0) {
+ backfill_state.process_event(std::move(events_to_dispatch.front()));
+ events_to_dispatch.pop_front();
+ }
+ }
+
+ void next_till_done() {
+ while (!events_to_dispatch.empty()) {
+ next_round();
+ }
+ }
+
+ bool all_stores_look_like(const FakeStore& reference) const {
+ const bool all_replica_match = std::all_of(
+ std::begin(backfill_targets), std::end(backfill_targets),
+ [&reference] (const auto kv) {
+ return kv.second.store == reference;
+ });
+ return backfill_source.store == reference && all_replica_match;
+ }
+
+ struct PeeringFacade;
+ struct PGFacade;
+};
+
+struct BackfillFixture::PeeringFacade
+ : public crimson::osd::BackfillState::PeeringFacade {
+ FakePrimary& backfill_source;
+ std::map<pg_shard_t, FakeReplica>& backfill_targets;
+ // sorry, this is duplicative but that's the interface
+ std::set<pg_shard_t> backfill_targets_as_set;
+
+ PeeringFacade(FakePrimary& backfill_source,
+ std::map<pg_shard_t, FakeReplica>& backfill_targets)
+ : backfill_source(backfill_source),
+ backfill_targets(backfill_targets) {
+ std::transform(
+ std::begin(backfill_targets), std::end(backfill_targets),
+ std::inserter(backfill_targets_as_set, std::end(backfill_targets_as_set)),
+ [](auto pair) {
+ return pair.first;
+ });
+ }
+
+ hobject_t earliest_backfill() const override {
+ hobject_t e = hobject_t::get_max();
+ for (const auto& kv : backfill_targets) {
+ e = std::min(kv.second.last_backfill, e);
+ }
+ return e;
+ }
+ const std::set<pg_shard_t>& get_backfill_targets() const override {
+ return backfill_targets_as_set;
+ }
+ const hobject_t& get_peer_last_backfill(pg_shard_t peer) const override {
+ return backfill_targets.at(peer).last_backfill;
+ }
+ const eversion_t& get_last_update() const override {
+ return backfill_source.last_update;
+ }
+ const eversion_t& get_log_tail() const override {
+ return backfill_source.log_tail;
+ }
+
+ void scan_log_after(eversion_t, scan_log_func_t) const override {
+ /* NOP */
+ }
+
+ bool is_backfill_target(pg_shard_t peer) const override {
+ return backfill_targets.count(peer) == 1;
+ }
+ void update_complete_backfill_object_stats(const hobject_t &hoid,
+ const pg_stat_t &stats) override {
+ }
+ bool is_backfilling() const override {
+ return true;
+ }
+};
+
+struct BackfillFixture::PGFacade : public crimson::osd::BackfillState::PGFacade {
+ FakePrimary& backfill_source;
+
+ PGFacade(FakePrimary& backfill_source)
+ : backfill_source(backfill_source) {
+ }
+
+ const eversion_t& get_projected_last_update() const override {
+ return backfill_source.projected_last_update;
+ }
+};
+
+BackfillFixture::BackfillFixture(
+ FakePrimary&& backfill_source,
+ std::map<pg_shard_t, FakeReplica>&& backfill_targets)
+ : backfill_source(std::move(backfill_source)),
+ backfill_targets(std::move(backfill_targets)),
+ backfill_state(*this,
+ std::make_unique<PeeringFacade>(this->backfill_source,
+ this->backfill_targets),
+ std::make_unique<PGFacade>(this->backfill_source))
+{
+ backfill_state.process_event(crimson::osd::BackfillState::Triggered{}.intrusive_from_this());
+}
+
+void BackfillFixture::request_replica_scan(
+ const pg_shard_t& target,
+ const hobject_t& begin,
+ const hobject_t& end)
+{
+ BackfillInterval bi;
+ bi.end = backfill_targets.at(target).store.list(begin, [&bi](auto kv) {
+ bi.objects.insert(std::move(kv));
+ });
+ bi.begin = begin;
+ bi.version = backfill_source.last_update;
+
+ schedule_event(crimson::osd::BackfillState::ReplicaScanned{ target, std::move(bi) });
+}
+
+void BackfillFixture::request_primary_scan(
+ const hobject_t& begin)
+{
+ BackfillInterval bi;
+ bi.end = backfill_source.store.list(begin, [&bi](auto kv) {
+ bi.objects.insert(std::move(kv));
+ });
+ bi.begin = begin;
+ bi.version = backfill_source.last_update;
+
+ schedule_event(crimson::osd::BackfillState::PrimaryScanned{ std::move(bi) });
+}
+
+void BackfillFixture::enqueue_push(
+ const hobject_t& obj,
+ const eversion_t& v)
+{
+ for (auto& [ _, bt ] : backfill_targets) {
+ bt.store.push(obj, v);
+ }
+ schedule_event(crimson::osd::BackfillState::ObjectPushed{ obj });
+}
+
+void BackfillFixture::enqueue_drop(
+ const pg_shard_t& target,
+ const hobject_t& obj,
+ const eversion_t& v)
+{
+ enqueued_drops[target].emplace_back(obj, v);
+}
+
+void BackfillFixture::maybe_flush()
+{
+ for (const auto& [target, versioned_objs] : enqueued_drops) {
+ for (const auto& [obj, v] : versioned_objs) {
+ backfill_targets.at(target).store.drop(obj, v);
+ }
+ }
+ enqueued_drops.clear();
+}
+
+void BackfillFixture::update_peers_last_backfill(
+ const hobject_t& new_last_backfill)
+{
+}
+
+bool BackfillFixture::budget_available() const
+{
+ return true;
+}
+
+struct BackfillFixtureBuilder {
+ FakeStore backfill_source;
+ std::map<pg_shard_t, FakeReplica> backfill_targets;
+
+ static BackfillFixtureBuilder add_source(FakeStore::objs_t objs) {
+ BackfillFixtureBuilder bfb;
+ bfb.backfill_source = FakeStore{ std::move(objs) };
+ return bfb;
+ }
+
+ BackfillFixtureBuilder&& add_target(FakeStore::objs_t objs) && {
+ const auto new_osd_num = std::size(backfill_targets);
+ const auto [ _, inserted ] = backfill_targets.emplace(
+ new_osd_num, FakeReplica{ FakeStore{std::move(objs)} });
+ ceph_assert(inserted);
+ return std::move(*this);
+ }
+
+ BackfillFixture get_result() && {
+ return BackfillFixture{ std::move(backfill_source),
+ std::move(backfill_targets) };
+ }
+};
+
+// The straightest case: single primary, single replica. All have the same
+// content in their object stores, so the entire backfill boils into just
+// `request_primary_scan()` and `request_replica_scan()`.
+TEST(backfill, same_primary_same_replica)
+{
+ const auto reference_store = FakeStore{ {
+ { "1:00058bcc:::rbd_data.1018ac3e755.00000000000000d5:head", {10, 234} },
+ { "1:00ed7f8e:::rbd_data.1018ac3e755.00000000000000af:head", {10, 196} },
+ { "1:01483aea:::rbd_data.1018ac3e755.0000000000000095:head", {10, 169} },
+ }};
+ auto cluster_fixture = BackfillFixtureBuilder::add_source(
+ reference_store.objs
+ ).add_target(
+ reference_store.objs
+ ).get_result();
+
+ cluster_fixture.next_round();
+ EXPECT_CALL(cluster_fixture, backfilled);
+ cluster_fixture.next_round();
+ EXPECT_TRUE(cluster_fixture.all_stores_look_like(reference_store));
+}
+
+TEST(backfill, one_empty_replica)
+{
+ const auto reference_store = FakeStore{ {
+ { "1:00058bcc:::rbd_data.1018ac3e755.00000000000000d5:head", {10, 234} },
+ { "1:00ed7f8e:::rbd_data.1018ac3e755.00000000000000af:head", {10, 196} },
+ { "1:01483aea:::rbd_data.1018ac3e755.0000000000000095:head", {10, 169} },
+ }};
+ auto cluster_fixture = BackfillFixtureBuilder::add_source(
+ reference_store.objs
+ ).add_target(
+ { /* nothing */ }
+ ).get_result();
+
+ cluster_fixture.next_round();
+ cluster_fixture.next_round();
+ cluster_fixture.next_round(2);
+ EXPECT_CALL(cluster_fixture, backfilled);
+ cluster_fixture.next_round();
+ EXPECT_TRUE(cluster_fixture.all_stores_look_like(reference_store));
+}
+
+TEST(backfill, two_empty_replicas)
+{
+ const auto reference_store = FakeStore{ {
+ { "1:00058bcc:::rbd_data.1018ac3e755.00000000000000d5:head", {10, 234} },
+ { "1:00ed7f8e:::rbd_data.1018ac3e755.00000000000000af:head", {10, 196} },
+ { "1:01483aea:::rbd_data.1018ac3e755.0000000000000095:head", {10, 169} },
+ }};
+ auto cluster_fixture = BackfillFixtureBuilder::add_source(
+ reference_store.objs
+ ).add_target(
+ { /* nothing 1 */ }
+ ).add_target(
+ { /* nothing 2 */ }
+ ).get_result();
+
+ EXPECT_CALL(cluster_fixture, backfilled);
+ cluster_fixture.next_till_done();
+
+ EXPECT_TRUE(cluster_fixture.all_stores_look_like(reference_store));
+}
+
+namespace StoreRandomizer {
+ // FIXME: copied & pasted from test/test_snap_mapper.cc. We need to
+ // find a way to avoid code duplication in test. A static library?
+ std::string random_string(std::size_t size) {
+ std::string name;
+ for (size_t j = 0; j < size; ++j) {
+ name.push_back('a' + (std::rand() % 26));
+ }
+ return name;
+ }
+
+ hobject_t random_hobject() {
+ uint32_t mask{0};
+ uint32_t bits{0};
+ return hobject_t(
+ random_string(1+(std::rand() % 16)),
+ random_string(1+(std::rand() % 16)),
+ snapid_t(std::rand() % 1000),
+ (std::rand() & ((~0)<<bits)) | (mask & ~((~0)<<bits)),
+ 0, random_string(std::rand() % 16));
+ }
+
+ eversion_t random_eversion() {
+ return eversion_t{ std::rand() % 512U, std::rand() % 256UL };
+ }
+
+ FakeStore create() {
+ FakeStore store;
+ for (std::size_t i = std::rand() % 2048; i > 0; --i) {
+ store.push(random_hobject(), random_eversion());
+ }
+ return store;
+ }
+
+ template <class... Args>
+ void execute_random(Args&&... args) {
+ std::array<std::function<void()>, sizeof...(Args)> funcs = {
+ std::forward<Args>(args)...
+ };
+ return std::move(funcs[std::rand() % std::size(funcs)])();
+ }
+
+ FakeStore mutate(const FakeStore& source_store) {
+ FakeStore mutated_store;
+ source_store.list(hobject_t{}, [&] (const auto& kv) {
+ const auto& [ oid, version ] = kv;
+ execute_random(
+ [] { /* just drop the entry */ },
+ [&] { mutated_store.push(oid, version); },
+ [&] { mutated_store.push(oid, random_eversion()); },
+ [&] { mutated_store.push(random_hobject(), version); },
+ [&] {
+ for (auto how_many = std::rand() % 8; how_many > 0; --how_many) {
+ mutated_store.push(random_hobject(), random_eversion());
+ }
+ }
+ );
+ });
+ return mutated_store;
+ }
+}
+
+// The name might suggest randomness is involved here. Well, that's true
+// but till we know the seed the test still is repeatable.
+TEST(backfill, one_pseudorandomized_replica)
+{
+ const auto reference_store = StoreRandomizer::create();
+ auto cluster_fixture = BackfillFixtureBuilder::add_source(
+ reference_store.objs
+ ).add_target(
+ StoreRandomizer::mutate(reference_store).objs
+ ).get_result();
+
+ EXPECT_CALL(cluster_fixture, backfilled);
+ cluster_fixture.next_till_done();
+
+ EXPECT_TRUE(cluster_fixture.all_stores_look_like(reference_store));
+}
+
+TEST(backfill, two_pseudorandomized_replicas)
+{
+ const auto reference_store = StoreRandomizer::create();
+ auto cluster_fixture = BackfillFixtureBuilder::add_source(
+ reference_store.objs
+ ).add_target(
+ StoreRandomizer::mutate(reference_store).objs
+ ).add_target(
+ StoreRandomizer::mutate(reference_store).objs
+ ).get_result();
+
+ EXPECT_CALL(cluster_fixture, backfilled);
+ cluster_fixture.next_till_done();
+
+ EXPECT_TRUE(cluster_fixture.all_stores_look_like(reference_store));
+}
diff --git a/src/test/crimson/test_buffer.cc b/src/test/crimson/test_buffer.cc
new file mode 100644
index 000000000..64a815bd2
--- /dev/null
+++ b/src/test/crimson/test_buffer.cc
@@ -0,0 +1,50 @@
+#include <iostream>
+#include <seastar/core/app-template.hh>
+#include <seastar/core/future-util.hh>
+#include <seastar/core/reactor.hh>
+#include "include/buffer.h"
+
+// allocate a foreign buffer on each cpu, collect them all into a bufferlist,
+// and destruct it on this cpu
+seastar::future<> test_foreign_bufferlist()
+{
+ auto make_foreign_buffer = [] (unsigned cpu) {
+ return seastar::smp::submit_to(cpu, [=] {
+ bufferlist bl;
+ seastar::temporary_buffer<char> buf("abcd", 4);
+ bl.append(buffer::create(std::move(buf)));
+ return bl;
+ });
+ };
+ auto reduce = [] (bufferlist&& lhs, bufferlist&& rhs) {
+ bufferlist bl;
+ bl.claim_append(lhs);
+ bl.claim_append(rhs);
+ return bl;
+ };
+ return seastar::map_reduce(seastar::smp::all_cpus(), make_foreign_buffer,
+ bufferlist(), reduce).then(
+ [] (bufferlist&& bl) {
+ if (bl.length() != 4 * seastar::smp::count) {
+ auto e = std::make_exception_ptr(std::runtime_error("wrong buffer size"));
+ return seastar::make_exception_future<>(e);
+ }
+ bl.clear();
+ return seastar::make_ready_future<>();
+ });
+}
+
+int main(int argc, char** argv)
+{
+ seastar::app_template app;
+ return app.run(argc, argv, [] {
+ return seastar::now().then(
+ &test_foreign_bufferlist
+ ).then([] {
+ std::cout << "All tests succeeded" << std::endl;
+ }).handle_exception([] (auto eptr) {
+ std::cout << "Test failure" << std::endl;
+ return seastar::make_exception_future<>(eptr);
+ });
+ });
+}
diff --git a/src/test/crimson/test_cmds.h b/src/test/crimson/test_cmds.h
new file mode 100644
index 000000000..2320d30a0
--- /dev/null
+++ b/src/test/crimson/test_cmds.h
@@ -0,0 +1,76 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+namespace ceph::net::test {
+
+enum class cmd_t : char {
+ none = '\0',
+ shutdown,
+ suite_start,
+ suite_stop,
+ suite_connect_me,
+ suite_send_me,
+ suite_keepalive_me,
+ suite_markdown,
+ suite_recv_op
+};
+
+enum class policy_t : char {
+ none = '\0',
+ stateful_server,
+ stateless_server,
+ lossless_peer,
+ lossless_peer_reuse,
+ lossy_client,
+ lossless_client
+};
+
+inline std::ostream& operator<<(std::ostream& out, const cmd_t& cmd) {
+ switch(cmd) {
+ case cmd_t::none:
+ return out << "none";
+ case cmd_t::shutdown:
+ return out << "shutdown";
+ case cmd_t::suite_start:
+ return out << "suite_start";
+ case cmd_t::suite_stop:
+ return out << "suite_stop";
+ case cmd_t::suite_connect_me:
+ return out << "suite_connect_me";
+ case cmd_t::suite_send_me:
+ return out << "suite_send_me";
+ case cmd_t::suite_keepalive_me:
+ return out << "suite_keepalive_me";
+ case cmd_t::suite_markdown:
+ return out << "suite_markdown";
+ case cmd_t::suite_recv_op:
+ return out << "suite_recv_op";
+ default:
+ ceph_abort();
+ }
+}
+
+inline std::ostream& operator<<(std::ostream& out, const policy_t& policy) {
+ switch(policy) {
+ case policy_t::none:
+ return out << "none";
+ case policy_t::stateful_server:
+ return out << "stateful_server";
+ case policy_t::stateless_server:
+ return out << "stateless_server";
+ case policy_t::lossless_peer:
+ return out << "lossless_peer";
+ case policy_t::lossless_peer_reuse:
+ return out << "lossless_peer_reuse";
+ case policy_t::lossy_client:
+ return out << "lossy_client";
+ case policy_t::lossless_client:
+ return out << "lossless_client";
+ default:
+ ceph_abort();
+ }
+}
+
+} // namespace ceph::net::test
diff --git a/src/test/crimson/test_config.cc b/src/test/crimson/test_config.cc
new file mode 100644
index 000000000..608aa2694
--- /dev/null
+++ b/src/test/crimson/test_config.cc
@@ -0,0 +1,108 @@
+#include <chrono>
+#include <string>
+#include <numeric>
+#include <seastar/core/app-template.hh>
+#include <seastar/core/sharded.hh>
+#include "common/ceph_argparse.h"
+#include "common/config_obs.h"
+#include "crimson/common/config_proxy.h"
+
+using Config = crimson::common::ConfigProxy;
+const std::string test_uint_option = "osd_max_pgls";
+const uint64_t INVALID_VALUE = (uint64_t)(-1);
+const uint64_t EXPECTED_VALUE = 42;
+
+class ConfigObs : public ceph::md_config_obs_impl<Config> {
+ uint64_t last_change = INVALID_VALUE;
+ uint64_t num_changes = 0;
+
+ const char** get_tracked_conf_keys() const override {
+ static const char* keys[] = {
+ test_uint_option.c_str(),
+ nullptr,
+ };
+ return keys;
+ }
+ void handle_conf_change(const Config& conf,
+ const std::set <std::string> &changes) override{
+ if (changes.count(test_uint_option)) {
+ last_change = conf.get_val<uint64_t>(test_uint_option);
+ num_changes += 1;
+ }
+ }
+public:
+ ConfigObs() {
+ crimson::common::local_conf().add_observer(this);
+ }
+
+ uint64_t get_last_change() const { return last_change; }
+ uint64_t get_num_changes() const { return num_changes; }
+ seastar::future<> stop() {
+ crimson::common::local_conf().remove_observer(this);
+ return seastar::make_ready_future<>();
+ }
+};
+
+seastar::sharded<ConfigObs> sharded_cobs;
+
+static seastar::future<> test_config()
+{
+ return crimson::common::sharded_conf().start(EntityName{}, string_view{"ceph"}).then([] {
+ std::vector<const char*> args;
+ std::string cluster;
+ std::string conf_file_list;
+ auto init_params = ceph_argparse_early_args(args,
+ CEPH_ENTITY_TYPE_CLIENT,
+ &cluster,
+ &conf_file_list);
+ auto& conf = crimson::common::local_conf();
+ conf->name = init_params.name;
+ conf->cluster = cluster;
+ return conf.parse_config_files(conf_file_list);
+ }).then([] {
+ return crimson::common::sharded_conf().invoke_on(0, &Config::start);
+ }).then([] {
+ return sharded_cobs.start();
+ }).then([] {
+ auto& conf = crimson::common::local_conf();
+ return conf.set_val(test_uint_option, std::to_string(EXPECTED_VALUE));
+ }).then([] {
+ return crimson::common::sharded_conf().invoke_on_all([](Config& config) {
+ if (config.get_val<uint64_t>(test_uint_option) != EXPECTED_VALUE) {
+ throw std::runtime_error("configurations don't match");
+ }
+ if (sharded_cobs.local().get_last_change() != EXPECTED_VALUE) {
+ throw std::runtime_error("last applied changes don't match the latest config");
+ }
+ if (sharded_cobs.local().get_num_changes() != 1) {
+ throw std::runtime_error("num changes don't match actual changes");
+ }
+ });
+ }).finally([] {
+ return sharded_cobs.stop();
+ }).finally([] {
+ return crimson::common::sharded_conf().stop();
+ });
+}
+
+int main(int argc, char** argv)
+{
+ seastar::app_template app;
+ return app.run(argc, argv, [&] {
+ return test_config().then([] {
+ std::cout << "All tests succeeded" << std::endl;
+ }).handle_exception([] (auto eptr) {
+ std::cout << "Test failure" << std::endl;
+ return seastar::make_exception_future<>(eptr);
+ });
+ });
+}
+
+
+/*
+ * Local Variables:
+ * compile-command: "make -j4 \
+ * -C ../../../build \
+ * unittest_seastar_config"
+ * End:
+ */
diff --git a/src/test/crimson/test_denc.cc b/src/test/crimson/test_denc.cc
new file mode 100644
index 000000000..10ebd6dce
--- /dev/null
+++ b/src/test/crimson/test_denc.cc
@@ -0,0 +1,53 @@
+#include <string>
+#include <seastar/core/temporary_buffer.hh>
+#include <gtest/gtest.h>
+#include "include/denc.h"
+#include "common/buffer_seastar.h"
+
+using temporary_buffer = seastar::temporary_buffer<char>;
+using buffer_iterator = seastar_buffer_iterator;
+using const_buffer_iterator = const_seastar_buffer_iterator;
+
+template<typename T>
+void test_denc(T v) {
+ // estimate
+ size_t s = 0;
+ denc(v, s);
+ ASSERT_NE(s, 0u);
+
+ // encode
+ temporary_buffer buf{s};
+ buffer_iterator enc{buf};
+ denc(v, enc);
+ size_t len = enc.get() - buf.begin();
+ ASSERT_LE(len, s);
+
+ // decode
+ T out;
+ temporary_buffer encoded = buf.share();
+ encoded.trim(len);
+ const_buffer_iterator dec{encoded};
+ denc(out, dec);
+ ASSERT_EQ(v, out);
+ ASSERT_EQ(dec.get(), enc.get());
+}
+
+TEST(denc, simple)
+{
+ test_denc((uint8_t)4);
+ test_denc((int8_t)-5);
+ test_denc((uint16_t)6);
+ test_denc((int16_t)-7);
+ test_denc((uint32_t)8);
+ test_denc((int32_t)-9);
+ test_denc((uint64_t)10);
+ test_denc((int64_t)-11);
+}
+
+TEST(denc, string)
+{
+ std::string a, b("hi"), c("multi\nline\n");
+ test_denc(a);
+ test_denc(b);
+ test_denc(c);
+}
diff --git a/src/test/crimson/test_errorator.cc b/src/test/crimson/test_errorator.cc
new file mode 100644
index 000000000..57dbc78cf
--- /dev/null
+++ b/src/test/crimson/test_errorator.cc
@@ -0,0 +1,52 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/crimson/gtest_seastar.h"
+
+#include "crimson/common/errorator.h"
+#include "crimson/common/log.h"
+
+struct errorator_test_t : public seastar_test_suite_t {
+ using ertr = crimson::errorator<crimson::ct_error::invarg>;
+ ertr::future<> test_do_until() {
+ return crimson::do_until([i=0]() mutable {
+ if (i < 5) {
+ ++i;
+ return ertr::make_ready_future<bool>(false);
+ } else {
+ return ertr::make_ready_future<bool>(true);
+ }
+ });
+ }
+ struct noncopyable_t {
+ constexpr noncopyable_t() = default;
+ ~noncopyable_t() = default;
+ noncopyable_t(noncopyable_t&&) = default;
+ private:
+ noncopyable_t(const noncopyable_t&) = delete;
+ noncopyable_t& operator=(const noncopyable_t&) = delete;
+ };
+ ertr::future<> test_non_copy_then() {
+ return create_noncopyable().safe_then([](auto t) {
+ return ertr::now();
+ });
+ }
+private:
+ ertr::future<noncopyable_t> create_noncopyable() {
+ return ertr::make_ready_future<noncopyable_t>();
+ }
+};
+
+TEST_F(errorator_test_t, basic)
+{
+ run_async([this] {
+ test_do_until().unsafe_get0();
+ });
+}
+
+TEST_F(errorator_test_t, non_copy_then)
+{
+ run_async([this] {
+ test_non_copy_then().unsafe_get0();
+ });
+}
diff --git a/src/test/crimson/test_fixed_kv_node_layout.cc b/src/test/crimson/test_fixed_kv_node_layout.cc
new file mode 100644
index 000000000..5f59c4e5e
--- /dev/null
+++ b/src/test/crimson/test_fixed_kv_node_layout.cc
@@ -0,0 +1,376 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <stdio.h>
+#include <iostream>
+
+#include "gtest/gtest.h"
+
+#include "crimson/common/fixed_kv_node_layout.h"
+
+using namespace crimson;
+using namespace crimson::common;
+
+struct test_val_t {
+ uint32_t t1 = 0;
+ int32_t t2 = 0;
+
+ bool operator==(const test_val_t &rhs) const {
+ return rhs.t1 == t1 && rhs.t2 == t2;
+ }
+ bool operator!=(const test_val_t &rhs) const {
+ return !(*this == rhs);
+ }
+};
+
+struct test_val_le_t {
+ ceph_le32 t1 = init_le32(0);
+ ceph_les32 t2 = init_les32(0);
+
+ test_val_le_t() = default;
+ test_val_le_t(const test_val_le_t &) = default;
+ test_val_le_t(const test_val_t &nv)
+ : t1(init_le32(nv.t1)), t2(init_les32(nv.t2)) {}
+
+ operator test_val_t() const {
+ return test_val_t{t1, t2};
+ }
+};
+
+struct test_meta_t {
+ uint32_t t1 = 0;
+ uint32_t t2 = 0;
+
+ bool operator==(const test_meta_t &rhs) const {
+ return rhs.t1 == t1 && rhs.t2 == t2;
+ }
+ bool operator!=(const test_meta_t &rhs) const {
+ return !(*this == rhs);
+ }
+
+ std::pair<test_meta_t, test_meta_t> split_into(uint32_t pivot) const {
+ return std::make_pair(
+ test_meta_t{t1, pivot},
+ test_meta_t{pivot, t2});
+ }
+
+ static test_meta_t merge_from(const test_meta_t &lhs, const test_meta_t &rhs) {
+ return test_meta_t{lhs.t1, rhs.t2};
+ }
+
+ static std::pair<test_meta_t, test_meta_t>
+ rebalance(const test_meta_t &lhs, const test_meta_t &rhs, uint32_t pivot) {
+ return std::make_pair(
+ test_meta_t{lhs.t1, pivot},
+ test_meta_t{pivot, rhs.t2});
+ }
+};
+
+struct test_meta_le_t {
+ ceph_le32 t1 = init_le32(0);
+ ceph_le32 t2 = init_le32(0);
+
+ test_meta_le_t() = default;
+ test_meta_le_t(const test_meta_le_t &) = default;
+ test_meta_le_t(const test_meta_t &nv)
+ : t1(init_le32(nv.t1)), t2(init_le32(nv.t2)) {}
+
+ operator test_meta_t() const {
+ return test_meta_t{t1, t2};
+ }
+};
+
+constexpr size_t CAPACITY = 339;
+
+struct TestNode : FixedKVNodeLayout<
+ CAPACITY,
+ test_meta_t, test_meta_le_t,
+ uint32_t, ceph_le32,
+ test_val_t, test_val_le_t> {
+ char buf[4096];
+ TestNode() : FixedKVNodeLayout(buf) {
+ memset(buf, 0, sizeof(buf));
+ set_meta({0, std::numeric_limits<uint32_t>::max()});
+ }
+ TestNode(const TestNode &rhs)
+ : FixedKVNodeLayout(buf) {
+ ::memcpy(buf, rhs.buf, sizeof(buf));
+ }
+
+ TestNode &operator=(const TestNode &rhs) {
+ memcpy(buf, rhs.buf, sizeof(buf));
+ return *this;
+ }
+};
+
+TEST(FixedKVNodeTest, basic) {
+ auto node = TestNode();
+ ASSERT_EQ(node.get_size(), 0);
+
+ auto val = test_val_t{ 1, 1 };
+ node.journal_insert(node.begin(), 1, val, nullptr);
+ ASSERT_EQ(node.get_size(), 1);
+
+ auto iter = node.begin();
+ ASSERT_EQ(iter.get_key(), 1);
+ ASSERT_EQ(val, iter.get_val());
+
+ ASSERT_EQ(std::numeric_limits<uint32_t>::max(), iter.get_next_key_or_max());
+}
+
+TEST(FixedKVNodeTest, at_capacity) {
+ auto node = TestNode();
+ ASSERT_EQ(CAPACITY, node.get_capacity());
+
+ ASSERT_EQ(node.get_size(), 0);
+
+ unsigned short num = 0;
+ auto iter = node.begin();
+ while (num < CAPACITY) {
+ node.journal_insert(iter, num, test_val_t{num, num}, nullptr);
+ ++num;
+ ++iter;
+ }
+ ASSERT_EQ(node.get_size(), CAPACITY);
+
+ num = 0;
+ for (auto &i : node) {
+ ASSERT_EQ(i.get_key(), num);
+ ASSERT_EQ(i.get_val(), (test_val_t{num, num}));
+ if (num < (CAPACITY - 1)) {
+ ASSERT_EQ(i.get_next_key_or_max(), num + 1);
+ } else {
+ ASSERT_EQ(std::numeric_limits<uint32_t>::max(), i.get_next_key_or_max());
+ }
+ ++num;
+ }
+}
+
+TEST(FixedKVNodeTest, split) {
+ auto node = TestNode();
+
+ ASSERT_EQ(node.get_size(), 0);
+
+ unsigned short num = 0;
+ auto iter = node.begin();
+ while (num < CAPACITY) {
+ node.journal_insert(iter, num, test_val_t{num, num}, nullptr);
+ ++num;
+ ++iter;
+ }
+ ASSERT_EQ(node.get_size(), CAPACITY);
+
+ auto split_left = TestNode();
+ auto split_right = TestNode();
+ node.split_into(split_left, split_right);
+
+ ASSERT_EQ(split_left.get_size() + split_right.get_size(), CAPACITY);
+ ASSERT_EQ(split_left.get_meta().t1, split_left.begin()->get_key());
+ ASSERT_EQ(split_left.get_meta().t2, split_right.get_meta().t1);
+ ASSERT_EQ(split_right.get_meta().t2, std::numeric_limits<uint32_t>::max());
+
+ num = 0;
+ for (auto &i : split_left) {
+ ASSERT_EQ(i.get_key(), num);
+ ASSERT_EQ(i.get_val(), (test_val_t{num, num}));
+ if (num < split_left.get_size() - 1) {
+ ASSERT_EQ(i.get_next_key_or_max(), num + 1);
+ } else {
+ ASSERT_EQ(std::numeric_limits<uint32_t>::max(), i.get_next_key_or_max());
+ }
+ ++num;
+ }
+ for (auto &i : split_right) {
+ ASSERT_EQ(i.get_key(), num);
+ ASSERT_EQ(i.get_val(), (test_val_t{num, num}));
+ if (num < CAPACITY - 1) {
+ ASSERT_EQ(i.get_next_key_or_max(), num + 1);
+ } else {
+ ASSERT_EQ(std::numeric_limits<uint32_t>::max(), i.get_next_key_or_max());
+ }
+ ++num;
+ }
+ ASSERT_EQ(num, CAPACITY);
+}
+
+TEST(FixedKVNodeTest, merge) {
+ auto node = TestNode();
+ auto node2 = TestNode();
+
+ ASSERT_EQ(node.get_size(), 0);
+ ASSERT_EQ(node2.get_size(), 0);
+
+ unsigned short num = 0;
+ auto iter = node.begin();
+ while (num < CAPACITY/2) {
+ node.journal_insert(iter, num, test_val_t{num, num}, nullptr);
+ ++num;
+ ++iter;
+ }
+ node.set_meta({0, num});
+ node2.set_meta({num, std::numeric_limits<uint32_t>::max()});
+ iter = node2.begin();
+ while (num < (2 * (CAPACITY / 2))) {
+ node2.journal_insert(iter, num, test_val_t{num, num}, nullptr);
+ ++num;
+ ++iter;
+ }
+
+ ASSERT_EQ(node.get_size(), CAPACITY / 2);
+ ASSERT_EQ(node2.get_size(), CAPACITY / 2);
+
+ auto total = node.get_size() + node2.get_size();
+
+ auto node_merged = TestNode();
+ node_merged.merge_from(node, node2);
+
+ ASSERT_EQ(
+ node_merged.get_meta(),
+ (test_meta_t{0, std::numeric_limits<uint32_t>::max()}));
+
+ ASSERT_EQ(node_merged.get_size(), total);
+ num = 0;
+ for (auto &i : node_merged) {
+ ASSERT_EQ(i.get_key(), num);
+ ASSERT_EQ(i.get_val(), (test_val_t{num, num}));
+ if (num < node_merged.get_size() - 1) {
+ ASSERT_EQ(i.get_next_key_or_max(), num + 1);
+ } else {
+ ASSERT_EQ(std::numeric_limits<uint32_t>::max(), i.get_next_key_or_max());
+ }
+ ++num;
+ }
+ ASSERT_EQ(num, total);
+}
+
+void run_balance_test(unsigned left, unsigned right, bool prefer_left)
+{
+ auto node = TestNode();
+ auto node2 = TestNode();
+
+ ASSERT_EQ(node.get_size(), 0);
+ ASSERT_EQ(node2.get_size(), 0);
+
+ unsigned short num = 0;
+ auto iter = node.begin();
+ while (num < left) {
+ node.journal_insert(iter, num, test_val_t{num, num}, nullptr);
+ ++num;
+ ++iter;
+ }
+ node.set_meta({0, num});
+ node2.set_meta({num, std::numeric_limits<uint32_t>::max()});
+ iter = node2.begin();
+ while (num < (left + right)) {
+ node2.journal_insert(iter, num, test_val_t{num, num}, nullptr);
+ ++num;
+ ++iter;
+ }
+
+ ASSERT_EQ(node.get_size(), left);
+ ASSERT_EQ(node2.get_size(), right);
+
+ auto total = node.get_size() + node2.get_size();
+
+ auto node_balanced = TestNode();
+ auto node_balanced2 = TestNode();
+ auto pivot = TestNode::balance_into_new_nodes(
+ node,
+ node2,
+ prefer_left,
+ node_balanced,
+ node_balanced2);
+
+ ASSERT_EQ(total, node_balanced.get_size() + node_balanced2.get_size());
+
+ unsigned left_size, right_size;
+ if (total % 2) {
+ if (prefer_left) {
+ left_size = (total/2) + 1;
+ right_size = total/2;
+ } else {
+ left_size = total/2;
+ right_size = (total/2) + 1;
+ }
+ } else {
+ left_size = right_size = total/2;
+ }
+ ASSERT_EQ(pivot, left_size);
+ ASSERT_EQ(left_size, node_balanced.get_size());
+ ASSERT_EQ(right_size, node_balanced2.get_size());
+
+ ASSERT_EQ(
+ node_balanced.get_meta(),
+ (test_meta_t{0, left_size}));
+ ASSERT_EQ(
+ node_balanced2.get_meta(),
+ (test_meta_t{left_size, std::numeric_limits<uint32_t>::max()}));
+
+ num = 0;
+ for (auto &i: node_balanced) {
+ ASSERT_EQ(i.get_key(), num);
+ ASSERT_EQ(i.get_val(), (test_val_t{num, num}));
+ if (num < node_balanced.get_size() - 1) {
+ ASSERT_EQ(i.get_next_key_or_max(), num + 1);
+ } else {
+ ASSERT_EQ(std::numeric_limits<uint32_t>::max(), i.get_next_key_or_max());
+ }
+ ++num;
+ }
+ for (auto &i: node_balanced2) {
+ ASSERT_EQ(i.get_key(), num);
+ ASSERT_EQ(i.get_val(), (test_val_t{num, num}));
+ if (num < total - 1) {
+ ASSERT_EQ(i.get_next_key_or_max(), num + 1);
+ } else {
+ ASSERT_EQ(std::numeric_limits<uint32_t>::max(), i.get_next_key_or_max());
+ }
+ ++num;
+ }
+}
+
+TEST(FixedKVNodeTest, balanced) {
+ run_balance_test(CAPACITY / 2, CAPACITY, true);
+ run_balance_test(CAPACITY / 2, CAPACITY, false);
+ run_balance_test(CAPACITY, CAPACITY / 2, true);
+ run_balance_test(CAPACITY, CAPACITY / 2, false);
+ run_balance_test(CAPACITY - 1, CAPACITY / 2, true);
+ run_balance_test(CAPACITY / 2, CAPACITY - 1, false);
+ run_balance_test(CAPACITY / 2, CAPACITY / 2, false);
+}
+
+void run_replay_test(
+ std::vector<std::function<void(TestNode&, TestNode::delta_buffer_t&)>> &&f
+) {
+ TestNode node;
+ for (unsigned i = 0; i < f.size(); ++i) {
+ TestNode::delta_buffer_t buf;
+ TestNode replayed = node;
+ f[i](node, buf);
+ buf.replay(replayed);
+ ASSERT_EQ(node.get_size(), replayed.get_size());
+ ASSERT_EQ(node, replayed);
+ }
+}
+
+TEST(FixedKVNodeTest, replay) {
+ run_replay_test({
+ [](auto &n, auto &b) {
+ n.journal_insert(n.lower_bound(1), 1, test_val_t{1, 1}, &b);
+ ASSERT_EQ(1, n.get_size());
+ },
+ [](auto &n, auto &b) {
+ n.journal_insert(n.lower_bound(3), 3, test_val_t{1, 2}, &b);
+ ASSERT_EQ(2, n.get_size());
+ },
+ [](auto &n, auto &b) {
+ n.journal_remove(n.find(3), &b);
+ ASSERT_EQ(1, n.get_size());
+ },
+ [](auto &n, auto &b) {
+ n.journal_insert(n.lower_bound(2), 2, test_val_t{5, 1}, &b);
+ ASSERT_EQ(2, n.get_size());
+ }
+ });
+
+}
diff --git a/src/test/crimson/test_lru.cc b/src/test/crimson/test_lru.cc
new file mode 100644
index 000000000..40ab41539
--- /dev/null
+++ b/src/test/crimson/test_lru.cc
@@ -0,0 +1,213 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com>
+ *
+ * Author: Loic Dachary <loic@dachary.org>
+ * Cheng Cheng <ccheng.leo@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Library Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Library Public License for more details.
+ *
+ */
+
+#include <stdio.h>
+#include "gtest/gtest.h"
+#include "crimson/common/shared_lru.h"
+
+class LRUTest : public SharedLRU<unsigned int, int> {
+public:
+ auto add(unsigned int key, int value, bool* existed = nullptr) {
+ auto pv = new int{value};
+ auto ptr = insert(key, std::unique_ptr<int>{pv});
+ if (existed) {
+ *existed = (ptr.get() != pv);
+ }
+ return ptr;
+ }
+};
+
+TEST(LRU, add) {
+ LRUTest cache;
+ unsigned int key = 1;
+ int value1 = 2;
+ bool existed = false;
+ {
+ auto ptr = cache.add(key, value1, &existed);
+ ASSERT_TRUE(ptr);
+ ASSERT_TRUE(ptr.get());
+ ASSERT_EQ(value1, *ptr);
+ ASSERT_FALSE(existed);
+ }
+ {
+ auto ptr = cache.add(key, 3, &existed);
+ ASSERT_EQ(value1, *ptr);
+ ASSERT_TRUE(existed);
+ }
+}
+
+TEST(LRU, empty) {
+ LRUTest cache;
+ unsigned int key = 1;
+ bool existed = false;
+
+ ASSERT_TRUE(cache.empty());
+ {
+ int value1 = 2;
+ auto ptr = cache.add(key, value1, &existed);
+ ASSERT_EQ(value1, *ptr);
+ ASSERT_FALSE(existed);
+ }
+ ASSERT_FALSE(cache.empty());
+
+ cache.clear();
+ ASSERT_TRUE(cache.empty());
+}
+
+TEST(LRU, lookup) {
+ LRUTest cache;
+ unsigned int key = 1;
+ {
+ int value = 2;
+ auto ptr = cache.add(key, value);
+ ASSERT_TRUE(ptr);
+ ASSERT_TRUE(ptr.get());
+ ASSERT_TRUE(cache.find(key).get());
+ ASSERT_EQ(value, *cache.find(key));
+ }
+ ASSERT_TRUE(cache.find(key).get());
+}
+
+TEST(LRU, lookup_or_create) {
+ LRUTest cache;
+ {
+ int value = 2;
+ unsigned int key = 1;
+ ASSERT_TRUE(cache.add(key, value).get());
+ ASSERT_TRUE(cache[key].get());
+ ASSERT_EQ(value, *cache.find(key));
+ }
+ {
+ unsigned int key = 2;
+ ASSERT_TRUE(cache[key].get());
+ ASSERT_EQ(0, *cache.find(key));
+ }
+ ASSERT_TRUE(cache.find(1).get());
+ ASSERT_TRUE(cache.find(2).get());
+}
+
+TEST(LRU, lower_bound) {
+ LRUTest cache;
+
+ {
+ unsigned int key = 1;
+ ASSERT_FALSE(cache.lower_bound(key));
+ int value = 2;
+
+ ASSERT_TRUE(cache.add(key, value).get());
+ ASSERT_TRUE(cache.lower_bound(key).get());
+ EXPECT_EQ(value, *cache.lower_bound(key));
+ }
+}
+
+TEST(LRU, get_next) {
+
+ {
+ LRUTest cache;
+ const unsigned int key = 0;
+ EXPECT_FALSE(cache.upper_bound(key));
+ }
+ {
+ LRUTest cache;
+ const unsigned int key1 = 111;
+ auto ptr1 = cache[key1];
+ const unsigned int key2 = 222;
+ auto ptr2 = cache[key2];
+
+ auto i = cache.upper_bound(0);
+ ASSERT_TRUE(i);
+ EXPECT_EQ(i->first, key1);
+ auto j = cache.upper_bound(i->first);
+ ASSERT_TRUE(j);
+ EXPECT_EQ(j->first, key2);
+ }
+}
+
+TEST(LRU, clear) {
+ LRUTest cache;
+ unsigned int key = 1;
+ int value = 2;
+ cache.add(key, value);
+ {
+ auto found = cache.find(key);
+ ASSERT_TRUE(found);
+ ASSERT_EQ(value, *found);
+ }
+ ASSERT_TRUE(cache.find(key).get());
+ cache.clear();
+ ASSERT_FALSE(cache.find(key));
+ ASSERT_TRUE(cache.empty());
+}
+
+TEST(LRU, eviction) {
+ LRUTest cache{5};
+ bool existed;
+ // add a bunch of elements, some of them will be evicted
+ for (size_t i = 0; i < 2 * cache.capacity(); ++i) {
+ cache.add(i, i, &existed);
+ ASSERT_FALSE(existed);
+ }
+ size_t i = 0;
+ for (; i < cache.capacity(); ++i) {
+ ASSERT_FALSE(cache.find(i));
+ }
+ for (; i < 2 * cache.capacity(); ++i) {
+ ASSERT_TRUE(cache.find(i));
+ }
+}
+
+TEST(LRU, track_weak) {
+ constexpr int SIZE = 5;
+ LRUTest cache{SIZE};
+
+ bool existed = false;
+ // strong reference to keep 0 alive
+ auto ptr = cache.add(0, 0, &existed);
+ ASSERT_FALSE(existed);
+
+ // add a bunch of elements to get 0 evicted
+ for (size_t i = 1; i < 2 * cache.capacity(); ++i) {
+ cache.add(i, i, &existed);
+ ASSERT_FALSE(existed);
+ }
+ // 0 is still reachable via the cache
+ ASSERT_TRUE(cache.find(0));
+ ASSERT_TRUE(cache.find(0).get());
+ ASSERT_EQ(0, *cache.find(0));
+
+ // [0..SIZE) are evicted when adding [SIZE..2*SIZE)
+ // [SIZE..SIZE * 2) were still in the cache before accessing 0,
+ // but SIZE got evicted when accessing 0
+ ASSERT_FALSE(cache.find(SIZE-1));
+ ASSERT_FALSE(cache.find(SIZE));
+ ASSERT_TRUE(cache.find(SIZE+1));
+ ASSERT_TRUE(cache.find(SIZE+1).get());
+ ASSERT_EQ((int)SIZE+1, *cache.find(SIZE+1));
+
+ ptr.reset();
+ // 0 is still reachable, as it is now put back into LRU cache
+ ASSERT_TRUE(cache.find(0));
+}
+
+// Local Variables:
+// compile-command: "cmake --build ../../../build -j 8 --target unittest_seastar_lru && ctest -R unittest_seastar_lru # --gtest_filter=*.* --log-to-stderr=true"
+// End:
diff --git a/src/test/crimson/test_messenger.cc b/src/test/crimson/test_messenger.cc
new file mode 100644
index 000000000..3877ec570
--- /dev/null
+++ b/src/test/crimson/test_messenger.cc
@@ -0,0 +1,3668 @@
+#include "common/ceph_argparse.h"
+#include "common/ceph_time.h"
+#include "messages/MPing.h"
+#include "messages/MCommand.h"
+#include "messages/MCommandReply.h"
+#include "messages/MOSDOp.h"
+#include "messages/MOSDOpReply.h"
+#include "crimson/auth/DummyAuth.h"
+#include "crimson/common/log.h"
+#include "crimson/net/Connection.h"
+#include "crimson/net/Dispatcher.h"
+#include "crimson/net/Messenger.h"
+#include "crimson/net/Interceptor.h"
+
+#include <map>
+#include <random>
+#include <boost/program_options.hpp>
+#include <fmt/format.h>
+#include <fmt/ostream.h>
+#include <seastar/core/app-template.hh>
+#include <seastar/core/do_with.hh>
+#include <seastar/core/future-util.hh>
+#include <seastar/core/reactor.hh>
+#include <seastar/core/sleep.hh>
+#include <seastar/core/with_timeout.hh>
+
+#include "test_cmds.h"
+
+namespace bpo = boost::program_options;
+using crimson::common::local_conf;
+
+namespace {
+
+seastar::logger& logger() {
+ return crimson::get_logger(ceph_subsys_ms);
+}
+
+static std::random_device rd;
+static std::default_random_engine rng{rd()};
+static bool verbose = false;
+
+static entity_addr_t get_server_addr() {
+ static int port = 9030;
+ ++port;
+ entity_addr_t saddr;
+ saddr.parse("127.0.0.1", nullptr);
+ saddr.set_port(port);
+ return saddr;
+}
+
+static seastar::future<> test_echo(unsigned rounds,
+ double keepalive_ratio,
+ bool v2)
+{
+ struct test_state {
+ struct Server final
+ : public crimson::net::Dispatcher {
+ crimson::net::MessengerRef msgr;
+ crimson::auth::DummyAuthClientServer dummy_auth;
+
+ std::optional<seastar::future<>> ms_dispatch(
+ crimson::net::ConnectionRef c, MessageRef m) override {
+ if (verbose) {
+ logger().info("server got {}", *m);
+ }
+ // reply with a pong
+ std::ignore = c->send(make_message<MPing>());
+ return {seastar::now()};
+ }
+
+ seastar::future<> init(const entity_name_t& name,
+ const std::string& lname,
+ const uint64_t nonce,
+ const entity_addr_t& addr) {
+ msgr = crimson::net::Messenger::create(name, lname, nonce);
+ msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0));
+ msgr->set_require_authorizer(false);
+ msgr->set_auth_client(&dummy_auth);
+ msgr->set_auth_server(&dummy_auth);
+ return msgr->bind(entity_addrvec_t{addr}).safe_then([this] {
+ return msgr->start({this});
+ }, crimson::net::Messenger::bind_ertr::all_same_way(
+ [addr] (const std::error_code& e) {
+ logger().error("test_echo(): "
+ "there is another instance running at {}", addr);
+ ceph_abort();
+ }));
+ }
+ seastar::future<> shutdown() {
+ ceph_assert(msgr);
+ msgr->stop();
+ return msgr->shutdown();
+ }
+ };
+
+ struct Client final
+ : public crimson::net::Dispatcher {
+ struct PingSession : public seastar::enable_shared_from_this<PingSession> {
+ unsigned count = 0u;
+ mono_time connected_time;
+ mono_time finish_time;
+ };
+ using PingSessionRef = seastar::shared_ptr<PingSession>;
+
+ unsigned rounds;
+ std::bernoulli_distribution keepalive_dist;
+ crimson::net::MessengerRef msgr;
+ std::map<crimson::net::ConnectionRef, seastar::promise<>> pending_conns;
+ std::map<crimson::net::ConnectionRef, PingSessionRef> sessions;
+ crimson::auth::DummyAuthClientServer dummy_auth;
+
+ Client(unsigned rounds, double keepalive_ratio)
+ : rounds(rounds),
+ keepalive_dist(std::bernoulli_distribution{keepalive_ratio}) {}
+
+ PingSessionRef find_session(crimson::net::ConnectionRef c) {
+ auto found = sessions.find(c);
+ if (found == sessions.end()) {
+ ceph_assert(false);
+ }
+ return found->second;
+ }
+
+ void ms_handle_connect(crimson::net::ConnectionRef conn) override {
+ auto session = seastar::make_shared<PingSession>();
+ auto [i, added] = sessions.emplace(conn, session);
+ std::ignore = i;
+ ceph_assert(added);
+ session->connected_time = mono_clock::now();
+ }
+ std::optional<seastar::future<>> ms_dispatch(
+ crimson::net::ConnectionRef c, MessageRef m) override {
+ auto session = find_session(c);
+ ++(session->count);
+ if (verbose) {
+ logger().info("client ms_dispatch {}", session->count);
+ }
+
+ if (session->count == rounds) {
+ logger().info("{}: finished receiving {} pongs", *c, session->count);
+ session->finish_time = mono_clock::now();
+ auto found = pending_conns.find(c);
+ ceph_assert(found != pending_conns.end());
+ found->second.set_value();
+ }
+ return {seastar::now()};
+ }
+
+ seastar::future<> init(const entity_name_t& name,
+ const std::string& lname,
+ const uint64_t nonce) {
+ msgr = crimson::net::Messenger::create(name, lname, nonce);
+ msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0));
+ msgr->set_auth_client(&dummy_auth);
+ msgr->set_auth_server(&dummy_auth);
+ return msgr->start({this});
+ }
+
+ seastar::future<> shutdown() {
+ ceph_assert(msgr);
+ msgr->stop();
+ return msgr->shutdown();
+ }
+
+ seastar::future<> dispatch_pingpong(const entity_addr_t& peer_addr) {
+ mono_time start_time = mono_clock::now();
+ auto conn = msgr->connect(peer_addr, entity_name_t::TYPE_OSD);
+ return seastar::futurize_invoke([this, conn] {
+ return do_dispatch_pingpong(conn);
+ }).then([this, conn, start_time] {
+ auto session = find_session(conn);
+ std::chrono::duration<double> dur_handshake = session->connected_time - start_time;
+ std::chrono::duration<double> dur_pingpong = session->finish_time - session->connected_time;
+ logger().info("{}: handshake {}, pingpong {}",
+ *conn, dur_handshake.count(), dur_pingpong.count());
+ });
+ }
+
+ private:
+ seastar::future<> do_dispatch_pingpong(crimson::net::ConnectionRef conn) {
+ auto [i, added] = pending_conns.emplace(conn, seastar::promise<>());
+ std::ignore = i;
+ ceph_assert(added);
+ return seastar::do_with(0u, 0u,
+ [this, conn](auto &count_ping, auto &count_keepalive) {
+ return seastar::do_until(
+ [this, conn, &count_ping, &count_keepalive] {
+ bool stop = (count_ping == rounds);
+ if (stop) {
+ logger().info("{}: finished sending {} pings with {} keepalives",
+ *conn, count_ping, count_keepalive);
+ }
+ return stop;
+ },
+ [this, conn, &count_ping, &count_keepalive] {
+ return seastar::repeat([this, conn, &count_ping, &count_keepalive] {
+ if (keepalive_dist(rng)) {
+ return conn->keepalive()
+ .then([&count_keepalive] {
+ count_keepalive += 1;
+ return seastar::make_ready_future<seastar::stop_iteration>(
+ seastar::stop_iteration::no);
+ });
+ } else {
+ return conn->send(make_message<MPing>())
+ .then([&count_ping] {
+ count_ping += 1;
+ return seastar::make_ready_future<seastar::stop_iteration>(
+ seastar::stop_iteration::yes);
+ });
+ }
+ });
+ }).then([this, conn] {
+ auto found = pending_conns.find(conn);
+ return found->second.get_future();
+ }
+ );
+ });
+ }
+ };
+ };
+
+ logger().info("test_echo(rounds={}, keepalive_ratio={}, v2={}):",
+ rounds, keepalive_ratio, v2);
+ auto server1 = seastar::make_shared<test_state::Server>();
+ auto server2 = seastar::make_shared<test_state::Server>();
+ auto client1 = seastar::make_shared<test_state::Client>(rounds, keepalive_ratio);
+ auto client2 = seastar::make_shared<test_state::Client>(rounds, keepalive_ratio);
+ // start servers and clients
+ auto addr1 = get_server_addr();
+ auto addr2 = get_server_addr();
+ if (v2) {
+ addr1.set_type(entity_addr_t::TYPE_MSGR2);
+ addr2.set_type(entity_addr_t::TYPE_MSGR2);
+ } else {
+ addr1.set_type(entity_addr_t::TYPE_LEGACY);
+ addr2.set_type(entity_addr_t::TYPE_LEGACY);
+ }
+ return seastar::when_all_succeed(
+ server1->init(entity_name_t::OSD(0), "server1", 1, addr1),
+ server2->init(entity_name_t::OSD(1), "server2", 2, addr2),
+ client1->init(entity_name_t::OSD(2), "client1", 3),
+ client2->init(entity_name_t::OSD(3), "client2", 4)
+ // dispatch pingpoing
+ ).then_unpack([client1, client2, server1, server2] {
+ return seastar::when_all_succeed(
+ // test connecting in parallel, accepting in parallel
+ client1->dispatch_pingpong(server2->msgr->get_myaddr()),
+ client2->dispatch_pingpong(server1->msgr->get_myaddr()));
+ // shutdown
+ }).then_unpack([] {
+ return seastar::now();
+ }).then([client1] {
+ logger().info("client1 shutdown...");
+ return client1->shutdown();
+ }).then([client2] {
+ logger().info("client2 shutdown...");
+ return client2->shutdown();
+ }).then([server1] {
+ logger().info("server1 shutdown...");
+ return server1->shutdown();
+ }).then([server2] {
+ logger().info("server2 shutdown...");
+ return server2->shutdown();
+ }).then([] {
+ logger().info("test_echo() done!\n");
+ }).handle_exception([server1, server2, client1, client2] (auto eptr) {
+ logger().error("test_echo() failed: got exception {}", eptr);
+ throw;
+ });
+}
+
+static seastar::future<> test_concurrent_dispatch(bool v2)
+{
+ struct test_state {
+ struct Server final
+ : public crimson::net::Dispatcher {
+ crimson::net::MessengerRef msgr;
+ int count = 0;
+ seastar::promise<> on_second; // satisfied on second dispatch
+ seastar::promise<> on_done; // satisfied when first dispatch unblocks
+ crimson::auth::DummyAuthClientServer dummy_auth;
+
+ std::optional<seastar::future<>> ms_dispatch(
+ crimson::net::ConnectionRef, MessageRef m) override {
+ switch (++count) {
+ case 1:
+ // block on the first request until we reenter with the second
+ std::ignore = on_second.get_future().then([this] { on_done.set_value(); });
+ break;
+ case 2:
+ on_second.set_value();
+ break;
+ default:
+ throw std::runtime_error("unexpected count");
+ }
+ return {seastar::now()};
+ }
+
+ seastar::future<> wait() { return on_done.get_future(); }
+
+ seastar::future<> init(const entity_name_t& name,
+ const std::string& lname,
+ const uint64_t nonce,
+ const entity_addr_t& addr) {
+ msgr = crimson::net::Messenger::create(name, lname, nonce);
+ msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0));
+ msgr->set_auth_client(&dummy_auth);
+ msgr->set_auth_server(&dummy_auth);
+ return msgr->bind(entity_addrvec_t{addr}).safe_then([this] {
+ return msgr->start({this});
+ }, crimson::net::Messenger::bind_ertr::all_same_way(
+ [addr] (const std::error_code& e) {
+ logger().error("test_concurrent_dispatch(): "
+ "there is another instance running at {}", addr);
+ ceph_abort();
+ }));
+ }
+ };
+
+ struct Client final
+ : public crimson::net::Dispatcher {
+ crimson::net::MessengerRef msgr;
+ crimson::auth::DummyAuthClientServer dummy_auth;
+
+ std::optional<seastar::future<>> ms_dispatch(
+ crimson::net::ConnectionRef, MessageRef m) override {
+ return {seastar::now()};
+ }
+
+ seastar::future<> init(const entity_name_t& name,
+ const std::string& lname,
+ const uint64_t nonce) {
+ msgr = crimson::net::Messenger::create(name, lname, nonce);
+ msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0));
+ msgr->set_auth_client(&dummy_auth);
+ msgr->set_auth_server(&dummy_auth);
+ return msgr->start({this});
+ }
+ };
+ };
+
+ logger().info("test_concurrent_dispatch(v2={}):", v2);
+ auto server = seastar::make_shared<test_state::Server>();
+ auto client = seastar::make_shared<test_state::Client>();
+ auto addr = get_server_addr();
+ if (v2) {
+ addr.set_type(entity_addr_t::TYPE_MSGR2);
+ } else {
+ addr.set_type(entity_addr_t::TYPE_LEGACY);
+ }
+ addr.set_family(AF_INET);
+ return seastar::when_all_succeed(
+ server->init(entity_name_t::OSD(4), "server3", 5, addr),
+ client->init(entity_name_t::OSD(5), "client3", 6)
+ ).then_unpack([server, client] {
+ auto conn = client->msgr->connect(server->msgr->get_myaddr(),
+ entity_name_t::TYPE_OSD);
+ // send two messages
+ return conn->send(make_message<MPing>()).then([conn] {
+ return conn->send(make_message<MPing>());
+ });
+ }).then([server] {
+ return server->wait();
+ }).then([client] {
+ logger().info("client shutdown...");
+ client->msgr->stop();
+ return client->msgr->shutdown();
+ }).then([server] {
+ logger().info("server shutdown...");
+ server->msgr->stop();
+ return server->msgr->shutdown();
+ }).then([] {
+ logger().info("test_concurrent_dispatch() done!\n");
+ }).handle_exception([server, client] (auto eptr) {
+ logger().error("test_concurrent_dispatch() failed: got exception {}", eptr);
+ throw;
+ });
+}
+
+seastar::future<> test_preemptive_shutdown(bool v2) {
+ struct test_state {
+ class Server final
+ : public crimson::net::Dispatcher {
+ crimson::net::MessengerRef msgr;
+ crimson::auth::DummyAuthClientServer dummy_auth;
+
+ std::optional<seastar::future<>> ms_dispatch(
+ crimson::net::ConnectionRef c, MessageRef m) override {
+ std::ignore = c->send(make_message<MPing>());
+ return {seastar::now()};
+ }
+
+ public:
+ seastar::future<> init(const entity_name_t& name,
+ const std::string& lname,
+ const uint64_t nonce,
+ const entity_addr_t& addr) {
+ msgr = crimson::net::Messenger::create(name, lname, nonce);
+ msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0));
+ msgr->set_auth_client(&dummy_auth);
+ msgr->set_auth_server(&dummy_auth);
+ return msgr->bind(entity_addrvec_t{addr}).safe_then([this] {
+ return msgr->start({this});
+ }, crimson::net::Messenger::bind_ertr::all_same_way(
+ [addr] (const std::error_code& e) {
+ logger().error("test_preemptive_shutdown(): "
+ "there is another instance running at {}", addr);
+ ceph_abort();
+ }));
+ }
+ entity_addr_t get_addr() const {
+ return msgr->get_myaddr();
+ }
+ seastar::future<> shutdown() {
+ msgr->stop();
+ return msgr->shutdown();
+ }
+ };
+
+ class Client final
+ : public crimson::net::Dispatcher {
+ crimson::net::MessengerRef msgr;
+ crimson::auth::DummyAuthClientServer dummy_auth;
+
+ bool stop_send = false;
+ seastar::promise<> stopped_send_promise;
+
+ std::optional<seastar::future<>> ms_dispatch(
+ crimson::net::ConnectionRef, MessageRef m) override {
+ return {seastar::now()};
+ }
+
+ public:
+ seastar::future<> init(const entity_name_t& name,
+ const std::string& lname,
+ const uint64_t nonce) {
+ msgr = crimson::net::Messenger::create(name, lname, nonce);
+ msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0));
+ msgr->set_auth_client(&dummy_auth);
+ msgr->set_auth_server(&dummy_auth);
+ return msgr->start({this});
+ }
+ void send_pings(const entity_addr_t& addr) {
+ auto conn = msgr->connect(addr, entity_name_t::TYPE_OSD);
+ // forwarded to stopped_send_promise
+ (void) seastar::do_until(
+ [this] { return stop_send; },
+ [conn] {
+ return conn->send(make_message<MPing>()).then([] {
+ return seastar::sleep(0ms);
+ });
+ }
+ ).then_wrapped([this, conn] (auto fut) {
+ fut.forward_to(std::move(stopped_send_promise));
+ });
+ }
+ seastar::future<> shutdown() {
+ msgr->stop();
+ return msgr->shutdown().then([this] {
+ stop_send = true;
+ return stopped_send_promise.get_future();
+ });
+ }
+ };
+ };
+
+ logger().info("test_preemptive_shutdown(v2={}):", v2);
+ auto server = seastar::make_shared<test_state::Server>();
+ auto client = seastar::make_shared<test_state::Client>();
+ auto addr = get_server_addr();
+ if (v2) {
+ addr.set_type(entity_addr_t::TYPE_MSGR2);
+ } else {
+ addr.set_type(entity_addr_t::TYPE_LEGACY);
+ }
+ addr.set_family(AF_INET);
+ return seastar::when_all_succeed(
+ server->init(entity_name_t::OSD(6), "server4", 7, addr),
+ client->init(entity_name_t::OSD(7), "client4", 8)
+ ).then_unpack([server, client] {
+ client->send_pings(server->get_addr());
+ return seastar::sleep(100ms);
+ }).then([client] {
+ logger().info("client shutdown...");
+ return client->shutdown();
+ }).then([server] {
+ logger().info("server shutdown...");
+ return server->shutdown();
+ }).then([] {
+ logger().info("test_preemptive_shutdown() done!\n");
+ }).handle_exception([server, client] (auto eptr) {
+ logger().error("test_preemptive_shutdown() failed: got exception {}", eptr);
+ throw;
+ });
+}
+
+using ceph::msgr::v2::Tag;
+using crimson::net::bp_action_t;
+using crimson::net::bp_type_t;
+using crimson::net::Breakpoint;
+using crimson::net::Connection;
+using crimson::net::ConnectionRef;
+using crimson::net::custom_bp_t;
+using crimson::net::Dispatcher;
+using crimson::net::Interceptor;
+using crimson::net::Messenger;
+using crimson::net::MessengerRef;
+using crimson::net::SocketPolicy;
+using crimson::net::tag_bp_t;
+using ceph::net::test::cmd_t;
+using ceph::net::test::policy_t;
+
+struct counter_t { unsigned counter = 0; };
+
+enum class conn_state_t {
+ unknown = 0,
+ established,
+ closed,
+ replaced,
+};
+
+std::ostream& operator<<(std::ostream& out, const conn_state_t& state) {
+ switch(state) {
+ case conn_state_t::unknown:
+ return out << "unknown";
+ case conn_state_t::established:
+ return out << "established";
+ case conn_state_t::closed:
+ return out << "closed";
+ case conn_state_t::replaced:
+ return out << "replaced";
+ default:
+ ceph_abort();
+ }
+}
+
+struct ConnResult {
+ ConnectionRef conn;
+ unsigned index;
+ conn_state_t state = conn_state_t::unknown;
+
+ unsigned connect_attempts = 0;
+ unsigned client_connect_attempts = 0;
+ unsigned client_reconnect_attempts = 0;
+ unsigned cnt_connect_dispatched = 0;
+
+ unsigned accept_attempts = 0;
+ unsigned server_connect_attempts = 0;
+ unsigned server_reconnect_attempts = 0;
+ unsigned cnt_accept_dispatched = 0;
+
+ unsigned cnt_reset_dispatched = 0;
+ unsigned cnt_remote_reset_dispatched = 0;
+
+ ConnResult(Connection& conn, unsigned index)
+ : conn(conn.shared_from_this()), index(index) {}
+
+ template <typename T>
+ void _assert_eq(const char* expr_actual, T actual,
+ const char* expr_expected, T expected) const {
+ if (actual != expected) {
+ throw std::runtime_error(fmt::format(
+ "[{}] {} '{}' is actually {}, not the expected '{}' {}",
+ index, *conn, expr_actual, actual, expr_expected, expected));
+ }
+ }
+
+#define ASSERT_EQUAL(actual, expected) \
+ _assert_eq(#actual, actual, #expected, expected)
+
+ void assert_state_at(conn_state_t expected) const {
+ ASSERT_EQUAL(state, expected);
+ }
+
+ void assert_connect(unsigned attempts,
+ unsigned connects,
+ unsigned reconnects,
+ unsigned dispatched) const {
+ ASSERT_EQUAL(connect_attempts, attempts);
+ ASSERT_EQUAL(client_connect_attempts, connects);
+ ASSERT_EQUAL(client_reconnect_attempts, reconnects);
+ ASSERT_EQUAL(cnt_connect_dispatched, dispatched);
+ }
+
+ void assert_connect(unsigned attempts,
+ unsigned dispatched) const {
+ ASSERT_EQUAL(connect_attempts, attempts);
+ ASSERT_EQUAL(cnt_connect_dispatched, dispatched);
+ }
+
+ void assert_accept(unsigned attempts,
+ unsigned accepts,
+ unsigned reaccepts,
+ unsigned dispatched) const {
+ ASSERT_EQUAL(accept_attempts, attempts);
+ ASSERT_EQUAL(server_connect_attempts, accepts);
+ ASSERT_EQUAL(server_reconnect_attempts, reaccepts);
+ ASSERT_EQUAL(cnt_accept_dispatched, dispatched);
+ }
+
+ void assert_accept(unsigned attempts,
+ unsigned dispatched) const {
+ ASSERT_EQUAL(accept_attempts, attempts);
+ ASSERT_EQUAL(cnt_accept_dispatched, dispatched);
+ }
+
+ void assert_reset(unsigned local, unsigned remote) const {
+ ASSERT_EQUAL(cnt_reset_dispatched, local);
+ ASSERT_EQUAL(cnt_remote_reset_dispatched, remote);
+ }
+
+ void dump() const {
+ logger().info("\nResult({}):\n"
+ " conn: [{}] {}:\n"
+ " state: {}\n"
+ " connect_attempts: {}\n"
+ " client_connect_attempts: {}\n"
+ " client_reconnect_attempts: {}\n"
+ " cnt_connect_dispatched: {}\n"
+ " accept_attempts: {}\n"
+ " server_connect_attempts: {}\n"
+ " server_reconnect_attempts: {}\n"
+ " cnt_accept_dispatched: {}\n"
+ " cnt_reset_dispatched: {}\n"
+ " cnt_remote_reset_dispatched: {}\n",
+ static_cast<const void*>(this),
+ index, *conn,
+ state,
+ connect_attempts,
+ client_connect_attempts,
+ client_reconnect_attempts,
+ cnt_connect_dispatched,
+ accept_attempts,
+ server_connect_attempts,
+ server_reconnect_attempts,
+ cnt_accept_dispatched,
+ cnt_reset_dispatched,
+ cnt_remote_reset_dispatched);
+ }
+};
+using ConnResults = std::vector<ConnResult>;
+
+struct TestInterceptor : public Interceptor {
+ std::map<Breakpoint, std::map<unsigned, bp_action_t>> breakpoints;
+ std::map<Breakpoint, counter_t> breakpoints_counter;
+ std::map<ConnectionRef, unsigned> conns;
+ ConnResults results;
+ std::optional<seastar::abort_source> signal;
+
+ TestInterceptor() = default;
+ // only used for copy breakpoint configurations
+ TestInterceptor(const TestInterceptor& other) {
+ assert(other.breakpoints_counter.empty());
+ assert(other.conns.empty());
+ assert(other.results.empty());
+ breakpoints = other.breakpoints;
+ assert(!other.signal);
+ }
+
+ void make_fault(Breakpoint bp, unsigned round = 1) {
+ assert(round >= 1);
+ breakpoints[bp][round] = bp_action_t::FAULT;
+ }
+
+ void make_block(Breakpoint bp, unsigned round = 1) {
+ assert(round >= 1);
+ breakpoints[bp][round] = bp_action_t::BLOCK;
+ }
+
+ void make_stall(Breakpoint bp, unsigned round = 1) {
+ assert(round >= 1);
+ breakpoints[bp][round] = bp_action_t::STALL;
+ }
+
+ ConnResult* find_result(ConnectionRef conn) {
+ auto it = conns.find(conn);
+ if (it == conns.end()) {
+ return nullptr;
+ } else {
+ return &results[it->second];
+ }
+ }
+
+ seastar::future<> wait() {
+ assert(!signal);
+ signal = seastar::abort_source();
+ return seastar::sleep_abortable(10s, *signal).then([] {
+ throw std::runtime_error("Timeout (10s) in TestInterceptor::wait()");
+ }).handle_exception_type([] (const seastar::sleep_aborted& e) {
+ // wait done!
+ });
+ }
+
+ void notify() {
+ if (signal) {
+ signal->request_abort();
+ signal = std::nullopt;
+ }
+ }
+
+ private:
+ void register_conn(Connection& conn) override {
+ unsigned index = results.size();
+ results.emplace_back(conn, index);
+ conns[conn.shared_from_this()] = index;
+ notify();
+ logger().info("[{}] {} new connection registered", index, conn);
+ }
+
+ void register_conn_closed(Connection& conn) override {
+ auto result = find_result(conn.shared_from_this());
+ if (result == nullptr) {
+ logger().error("Untracked closed connection: {}", conn);
+ ceph_abort();
+ }
+
+ if (result->state != conn_state_t::replaced) {
+ result->state = conn_state_t::closed;
+ }
+ notify();
+ logger().info("[{}] {} closed({})", result->index, conn, result->state);
+ }
+
+ void register_conn_ready(Connection& conn) override {
+ auto result = find_result(conn.shared_from_this());
+ if (result == nullptr) {
+ logger().error("Untracked ready connection: {}", conn);
+ ceph_abort();
+ }
+
+ ceph_assert(conn.is_connected());
+ notify();
+ logger().info("[{}] {} ready", result->index, conn);
+ }
+
+ void register_conn_replaced(Connection& conn) override {
+ auto result = find_result(conn.shared_from_this());
+ if (result == nullptr) {
+ logger().error("Untracked replaced connection: {}", conn);
+ ceph_abort();
+ }
+
+ result->state = conn_state_t::replaced;
+ logger().info("[{}] {} {}", result->index, conn, result->state);
+ }
+
+ bp_action_t intercept(Connection& conn, Breakpoint bp) override {
+ ++breakpoints_counter[bp].counter;
+
+ auto result = find_result(conn.shared_from_this());
+ if (result == nullptr) {
+ logger().error("Untracked intercepted connection: {}, at breakpoint {}({})",
+ conn, bp, breakpoints_counter[bp].counter);
+ ceph_abort();
+ }
+
+ if (bp == custom_bp_t::SOCKET_CONNECTING) {
+ ++result->connect_attempts;
+ logger().info("[Test] connect_attempts={}", result->connect_attempts);
+ } else if (bp == tag_bp_t{Tag::CLIENT_IDENT, bp_type_t::WRITE}) {
+ ++result->client_connect_attempts;
+ logger().info("[Test] client_connect_attempts={}", result->client_connect_attempts);
+ } else if (bp == tag_bp_t{Tag::SESSION_RECONNECT, bp_type_t::WRITE}) {
+ ++result->client_reconnect_attempts;
+ logger().info("[Test] client_reconnect_attempts={}", result->client_reconnect_attempts);
+ } else if (bp == custom_bp_t::SOCKET_ACCEPTED) {
+ ++result->accept_attempts;
+ logger().info("[Test] accept_attempts={}", result->accept_attempts);
+ } else if (bp == tag_bp_t{Tag::CLIENT_IDENT, bp_type_t::READ}) {
+ ++result->server_connect_attempts;
+ logger().info("[Test] server_connect_attemps={}", result->server_connect_attempts);
+ } else if (bp == tag_bp_t{Tag::SESSION_RECONNECT, bp_type_t::READ}) {
+ ++result->server_reconnect_attempts;
+ logger().info("[Test] server_reconnect_attempts={}", result->server_reconnect_attempts);
+ }
+
+ auto it_bp = breakpoints.find(bp);
+ if (it_bp != breakpoints.end()) {
+ auto it_cnt = it_bp->second.find(breakpoints_counter[bp].counter);
+ if (it_cnt != it_bp->second.end()) {
+ logger().info("[{}] {} intercepted {}({}) => {}",
+ result->index, conn, bp,
+ breakpoints_counter[bp].counter, it_cnt->second);
+ return it_cnt->second;
+ }
+ }
+ logger().info("[{}] {} intercepted {}({})",
+ result->index, conn, bp, breakpoints_counter[bp].counter);
+ return bp_action_t::CONTINUE;
+ }
+};
+
+SocketPolicy to_socket_policy(policy_t policy) {
+ switch (policy) {
+ case policy_t::stateful_server:
+ return SocketPolicy::stateful_server(0);
+ case policy_t::stateless_server:
+ return SocketPolicy::stateless_server(0);
+ case policy_t::lossless_peer:
+ return SocketPolicy::lossless_peer(0);
+ case policy_t::lossless_peer_reuse:
+ return SocketPolicy::lossless_peer_reuse(0);
+ case policy_t::lossy_client:
+ return SocketPolicy::lossy_client(0);
+ case policy_t::lossless_client:
+ return SocketPolicy::lossless_client(0);
+ default:
+ logger().error("unexpected policy type");
+ ceph_abort();
+ }
+}
+
+class FailoverSuite : public Dispatcher {
+ crimson::auth::DummyAuthClientServer dummy_auth;
+ MessengerRef test_msgr;
+ const entity_addr_t test_peer_addr;
+ TestInterceptor interceptor;
+
+ unsigned tracked_index = 0;
+ ConnectionRef tracked_conn;
+ unsigned pending_send = 0;
+ unsigned pending_peer_receive = 0;
+ unsigned pending_receive = 0;
+
+ std::optional<seastar::future<>> ms_dispatch(ConnectionRef c, MessageRef m) override {
+ auto result = interceptor.find_result(c);
+ if (result == nullptr) {
+ logger().error("Untracked ms dispatched connection: {}", *c);
+ ceph_abort();
+ }
+
+ if (tracked_conn != c) {
+ logger().error("[{}] {} got op, but doesn't match tracked_conn [{}] {}",
+ result->index, *c, tracked_index, *tracked_conn);
+ ceph_abort();
+ }
+ ceph_assert(result->index == tracked_index);
+
+ ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
+ ceph_assert(pending_receive > 0);
+ --pending_receive;
+ if (pending_receive == 0) {
+ interceptor.notify();
+ }
+ logger().info("[Test] got op, left {} ops -- [{}] {}",
+ pending_receive, result->index, *c);
+ return {seastar::now()};
+ }
+
+ void ms_handle_accept(ConnectionRef conn) override {
+ auto result = interceptor.find_result(conn);
+ if (result == nullptr) {
+ logger().error("Untracked accepted connection: {}", *conn);
+ ceph_abort();
+ }
+
+ if (tracked_conn &&
+ !tracked_conn->is_closed() &&
+ tracked_conn != conn) {
+ logger().error("[{}] {} got accepted, but there's already traced_conn [{}] {}",
+ result->index, *conn, tracked_index, *tracked_conn);
+ ceph_abort();
+ }
+
+ tracked_index = result->index;
+ tracked_conn = conn;
+ ++result->cnt_accept_dispatched;
+ logger().info("[Test] got accept (cnt_accept_dispatched={}), track [{}] {}",
+ result->cnt_accept_dispatched, result->index, *conn);
+ std::ignore = flush_pending_send();
+ }
+
+ void ms_handle_connect(ConnectionRef conn) override {
+ auto result = interceptor.find_result(conn);
+ if (result == nullptr) {
+ logger().error("Untracked connected connection: {}", *conn);
+ ceph_abort();
+ }
+
+ if (tracked_conn != conn) {
+ logger().error("[{}] {} got connected, but doesn't match tracked_conn [{}] {}",
+ result->index, *conn, tracked_index, *tracked_conn);
+ ceph_abort();
+ }
+ ceph_assert(result->index == tracked_index);
+
+ ++result->cnt_connect_dispatched;
+ logger().info("[Test] got connected (cnt_connect_dispatched={}) -- [{}] {}",
+ result->cnt_connect_dispatched, result->index, *conn);
+ }
+
+ void ms_handle_reset(ConnectionRef conn, bool is_replace) override {
+ auto result = interceptor.find_result(conn);
+ if (result == nullptr) {
+ logger().error("Untracked reset connection: {}", *conn);
+ ceph_abort();
+ }
+
+ if (tracked_conn != conn) {
+ logger().error("[{}] {} got reset, but doesn't match tracked_conn [{}] {}",
+ result->index, *conn, tracked_index, *tracked_conn);
+ ceph_abort();
+ }
+ ceph_assert(result->index == tracked_index);
+
+ tracked_index = 0;
+ tracked_conn = nullptr;
+ ++result->cnt_reset_dispatched;
+ logger().info("[Test] got reset (cnt_reset_dispatched={}), untrack [{}] {}",
+ result->cnt_reset_dispatched, result->index, *conn);
+ }
+
+ void ms_handle_remote_reset(ConnectionRef conn) override {
+ auto result = interceptor.find_result(conn);
+ if (result == nullptr) {
+ logger().error("Untracked remotely reset connection: {}", *conn);
+ ceph_abort();
+ }
+
+ if (tracked_conn != conn) {
+ logger().error("[{}] {} got remotely reset, but doesn't match tracked_conn [{}] {}",
+ result->index, *conn, tracked_index, *tracked_conn);
+ ceph_abort();
+ }
+ ceph_assert(result->index == tracked_index);
+
+ ++result->cnt_remote_reset_dispatched;
+ logger().info("[Test] got remote reset (cnt_remote_reset_dispatched={}) -- [{}] {}",
+ result->cnt_remote_reset_dispatched, result->index, *conn);
+ }
+
+ private:
+ seastar::future<> init(entity_addr_t addr, SocketPolicy policy) {
+ test_msgr->set_default_policy(policy);
+ test_msgr->set_auth_client(&dummy_auth);
+ test_msgr->set_auth_server(&dummy_auth);
+ test_msgr->interceptor = &interceptor;
+ return test_msgr->bind(entity_addrvec_t{addr}).safe_then([this] {
+ return test_msgr->start({this});
+ }, Messenger::bind_ertr::all_same_way([addr] (const std::error_code& e) {
+ logger().error("FailoverSuite: "
+ "there is another instance running at {}", addr);
+ ceph_abort();
+ }));
+ }
+
+ seastar::future<> send_op(bool expect_reply=true) {
+ ceph_assert(tracked_conn);
+ if (expect_reply) {
+ ++pending_peer_receive;
+ }
+ pg_t pgid;
+ object_locator_t oloc;
+ hobject_t hobj(object_t(), oloc.key, CEPH_NOSNAP, pgid.ps(),
+ pgid.pool(), oloc.nspace);
+ spg_t spgid(pgid);
+ return tracked_conn->send(make_message<MOSDOp>(0, 0, hobj, spgid, 0, 0, 0));
+ }
+
+ seastar::future<> flush_pending_send() {
+ if (pending_send != 0) {
+ logger().info("[Test] flush sending {} ops", pending_send);
+ }
+ ceph_assert(tracked_conn);
+ return seastar::do_until(
+ [this] { return pending_send == 0; },
+ [this] {
+ --pending_send;
+ return send_op();
+ });
+ }
+
+ seastar::future<> wait_ready(unsigned num_ready_conns,
+ unsigned num_replaced,
+ bool wait_received) {
+ unsigned pending_conns = 0;
+ unsigned pending_establish = 0;
+ unsigned replaced_conns = 0;
+ for (auto& result : interceptor.results) {
+ if (result.conn->is_closed_clean()) {
+ if (result.state == conn_state_t::replaced) {
+ ++replaced_conns;
+ }
+ } else if (result.conn->is_connected()) {
+ if (tracked_conn != result.conn || tracked_index != result.index) {
+ throw std::runtime_error(fmt::format(
+ "The connected connection [{}] {} doesn't"
+ " match the tracked connection [{}] {}",
+ result.index, *result.conn, tracked_index, tracked_conn));
+ }
+ if (pending_send == 0 && pending_peer_receive == 0 && pending_receive == 0) {
+ result.state = conn_state_t::established;
+ } else {
+ ++pending_establish;
+ }
+ } else {
+ ++pending_conns;
+ }
+ }
+
+ bool do_wait = false;
+ if (num_ready_conns > 0) {
+ if (interceptor.results.size() > num_ready_conns) {
+ throw std::runtime_error(fmt::format(
+ "{} connections, more than expected: {}",
+ interceptor.results.size(), num_ready_conns));
+ } else if (interceptor.results.size() < num_ready_conns || pending_conns > 0) {
+ logger().info("[Test] wait_ready(): wait for connections,"
+ " currently {} out of {}, pending {} ready ...",
+ interceptor.results.size(), num_ready_conns, pending_conns);
+ do_wait = true;
+ }
+ }
+ if (wait_received &&
+ (pending_send || pending_peer_receive || pending_receive)) {
+ if (pending_conns || pending_establish) {
+ logger().info("[Test] wait_ready(): wait for pending_send={},"
+ " pending_peer_receive={}, pending_receive={},"
+ " pending {}/{} ready/establish connections ...",
+ pending_send, pending_peer_receive, pending_receive,
+ pending_conns, pending_establish);
+ do_wait = true;
+ }
+ }
+ if (num_replaced > 0) {
+ if (replaced_conns > num_replaced) {
+ throw std::runtime_error(fmt::format(
+ "{} replaced connections, more than expected: {}",
+ replaced_conns, num_replaced));
+ }
+ if (replaced_conns < num_replaced) {
+ logger().info("[Test] wait_ready(): wait for {} replaced connections,"
+ " currently {} ...",
+ num_replaced, replaced_conns);
+ do_wait = true;
+ }
+ }
+
+ if (do_wait) {
+ return interceptor.wait(
+ ).then([this, num_ready_conns, num_replaced, wait_received] {
+ return wait_ready(num_ready_conns, num_replaced, wait_received);
+ });
+ } else {
+ logger().info("[Test] wait_ready(): wait done!");
+ return seastar::now();
+ }
+ }
+
+ // called by FailoverTest
+ public:
+ FailoverSuite(MessengerRef test_msgr,
+ entity_addr_t test_peer_addr,
+ const TestInterceptor& interceptor)
+ : test_msgr(test_msgr),
+ test_peer_addr(test_peer_addr),
+ interceptor(interceptor) { }
+
+ entity_addr_t get_addr() const {
+ return test_msgr->get_myaddr();
+ }
+
+ seastar::future<> shutdown() {
+ test_msgr->stop();
+ return test_msgr->shutdown();
+ }
+
+ void needs_receive() {
+ ++pending_receive;
+ }
+
+ void notify_peer_reply() {
+ ceph_assert(pending_peer_receive > 0);
+ --pending_peer_receive;
+ logger().info("[Test] TestPeer said got op, left {} ops",
+ pending_peer_receive);
+ if (pending_peer_receive == 0) {
+ interceptor.notify();
+ }
+ }
+
+ void post_check() const {
+ // make sure all breakpoints were hit
+ for (auto& kv : interceptor.breakpoints) {
+ auto it = interceptor.breakpoints_counter.find(kv.first);
+ if (it == interceptor.breakpoints_counter.end()) {
+ throw std::runtime_error(fmt::format("{} was missed", kv.first));
+ }
+ auto expected = kv.second.rbegin()->first;
+ if (expected > it->second.counter) {
+ throw std::runtime_error(fmt::format(
+ "{} only triggered {} times, not the expected {}",
+ kv.first, it->second.counter, expected));
+ }
+ }
+ }
+
+ void dump_results() const {
+ for (auto& result : interceptor.results) {
+ result.dump();
+ }
+ }
+
+ static seastar::future<std::unique_ptr<FailoverSuite>>
+ create(entity_addr_t test_addr,
+ SocketPolicy test_policy,
+ entity_addr_t test_peer_addr,
+ const TestInterceptor& interceptor) {
+ auto suite = std::make_unique<FailoverSuite>(
+ Messenger::create(entity_name_t::OSD(2), "Test", 2),
+ test_peer_addr, interceptor);
+ return suite->init(test_addr, test_policy
+ ).then([suite = std::move(suite)] () mutable {
+ return std::move(suite);
+ });
+ }
+
+ // called by tests
+ public:
+ seastar::future<> connect_peer() {
+ logger().info("[Test] connect_peer({})", test_peer_addr);
+ auto conn = test_msgr->connect(test_peer_addr, entity_name_t::TYPE_OSD);
+ auto result = interceptor.find_result(conn);
+ ceph_assert(result != nullptr);
+
+ if (tracked_conn) {
+ if (tracked_conn->is_closed()) {
+ ceph_assert(tracked_conn != conn);
+ logger().info("[Test] this is a new session replacing an closed one");
+ } else {
+ ceph_assert(tracked_index == result->index);
+ ceph_assert(tracked_conn == conn);
+ logger().info("[Test] this is not a new session");
+ }
+ } else {
+ logger().info("[Test] this is a new session");
+ }
+ tracked_index = result->index;
+ tracked_conn = conn;
+
+ return flush_pending_send();
+ }
+
+ seastar::future<> send_peer() {
+ if (tracked_conn) {
+ logger().info("[Test] send_peer()");
+ ceph_assert(!pending_send);
+ return send_op();
+ } else {
+ ++pending_send;
+ logger().info("[Test] send_peer() (pending {})", pending_send);
+ return seastar::now();
+ }
+ }
+
+ seastar::future<> keepalive_peer() {
+ logger().info("[Test] keepalive_peer()");
+ ceph_assert(tracked_conn);
+ return tracked_conn->keepalive();
+ }
+
+ seastar::future<> try_send_peer() {
+ logger().info("[Test] try_send_peer()");
+ ceph_assert(tracked_conn);
+ return send_op(false);
+ }
+
+ seastar::future<> markdown() {
+ logger().info("[Test] markdown()");
+ ceph_assert(tracked_conn);
+ tracked_conn->mark_down();
+ return seastar::now();
+ }
+
+ seastar::future<> wait_blocked() {
+ logger().info("[Test] wait_blocked() ...");
+ return interceptor.blocker.wait_blocked();
+ }
+
+ void unblock() {
+ logger().info("[Test] unblock()");
+ return interceptor.blocker.unblock();
+ }
+
+ seastar::future<> wait_replaced(unsigned count) {
+ logger().info("[Test] wait_replaced({}) ...", count);
+ return wait_ready(0, count, false);
+ }
+
+ seastar::future<> wait_established() {
+ logger().info("[Test] wait_established() ...");
+ return wait_ready(0, 0, true);
+ }
+
+ seastar::future<std::reference_wrapper<ConnResults>>
+ wait_results(unsigned count) {
+ logger().info("[Test] wait_result({}) ...", count);
+ return wait_ready(count, 0, true).then([this] {
+ return std::reference_wrapper<ConnResults>(interceptor.results);
+ });
+ }
+
+ bool is_standby() {
+ ceph_assert(tracked_conn);
+ return !(tracked_conn->is_connected() || tracked_conn->is_closed());
+ }
+};
+
+class FailoverTest : public Dispatcher {
+ crimson::auth::DummyAuthClientServer dummy_auth;
+ MessengerRef cmd_msgr;
+ ConnectionRef cmd_conn;
+ const entity_addr_t test_addr;
+ const entity_addr_t test_peer_addr;
+
+ std::optional<seastar::promise<>> recv_pong;
+ std::optional<seastar::promise<>> recv_cmdreply;
+
+ std::unique_ptr<FailoverSuite> test_suite;
+
+ std::optional<seastar::future<>> ms_dispatch(ConnectionRef c, MessageRef m) override {
+ switch (m->get_type()) {
+ case CEPH_MSG_PING:
+ ceph_assert(recv_pong);
+ recv_pong->set_value();
+ recv_pong = std::nullopt;
+ break;
+ case MSG_COMMAND_REPLY:
+ ceph_assert(recv_cmdreply);
+ recv_cmdreply->set_value();
+ recv_cmdreply = std::nullopt;
+ break;
+ case MSG_COMMAND: {
+ auto m_cmd = boost::static_pointer_cast<MCommand>(m);
+ ceph_assert(static_cast<cmd_t>(m_cmd->cmd[0][0]) == cmd_t::suite_recv_op);
+ ceph_assert(test_suite);
+ test_suite->notify_peer_reply();
+ break;
+ }
+ default:
+ logger().error("{} got unexpected msg from cmd server: {}", *c, *m);
+ ceph_abort();
+ }
+ return {seastar::now()};
+ }
+
+ private:
+ seastar::future<> prepare_cmd(
+ cmd_t cmd,
+ std::function<void(ceph::ref_t<MCommand>)>
+ f_prepare = [] (auto m) { return; }) {
+ assert(!recv_cmdreply);
+ recv_cmdreply = seastar::promise<>();
+ auto fut = recv_cmdreply->get_future();
+ auto m = make_message<MCommand>();
+ m->cmd.emplace_back(1, static_cast<char>(cmd));
+ f_prepare(m);
+ return cmd_conn->send(m).then([fut = std::move(fut)] () mutable {
+ return std::move(fut);
+ });
+ }
+
+ seastar::future<> start_peer(policy_t peer_policy) {
+ return prepare_cmd(cmd_t::suite_start,
+ [peer_policy] (auto m) {
+ m->cmd.emplace_back(1, static_cast<char>(peer_policy));
+ });
+ }
+
+ seastar::future<> stop_peer() {
+ return prepare_cmd(cmd_t::suite_stop);
+ }
+
+ seastar::future<> pingpong() {
+ assert(!recv_pong);
+ recv_pong = seastar::promise<>();
+ auto fut = recv_pong->get_future();
+ return cmd_conn->send(make_message<MPing>()
+ ).then([fut = std::move(fut)] () mutable {
+ return std::move(fut);
+ });
+ }
+
+ seastar::future<> init(entity_addr_t cmd_peer_addr) {
+ cmd_msgr->set_default_policy(SocketPolicy::lossy_client(0));
+ cmd_msgr->set_auth_client(&dummy_auth);
+ cmd_msgr->set_auth_server(&dummy_auth);
+ return cmd_msgr->start({this}).then([this, cmd_peer_addr] {
+ logger().info("CmdCli connect to CmdSrv({}) ...", cmd_peer_addr);
+ cmd_conn = cmd_msgr->connect(cmd_peer_addr, entity_name_t::TYPE_OSD);
+ return pingpong();
+ });
+ }
+
+ public:
+ FailoverTest(MessengerRef cmd_msgr,
+ entity_addr_t test_addr,
+ entity_addr_t test_peer_addr)
+ : cmd_msgr(cmd_msgr),
+ test_addr(test_addr),
+ test_peer_addr(test_peer_addr) { }
+
+ seastar::future<> shutdown() {
+ logger().info("CmdCli shutdown...");
+ assert(!recv_cmdreply);
+ auto m = make_message<MCommand>();
+ m->cmd.emplace_back(1, static_cast<char>(cmd_t::shutdown));
+ return cmd_conn->send(m).then([] {
+ return seastar::sleep(200ms);
+ }).then([this] {
+ cmd_msgr->stop();
+ return cmd_msgr->shutdown();
+ });
+ }
+
+ static seastar::future<seastar::lw_shared_ptr<FailoverTest>>
+ create(entity_addr_t cmd_peer_addr, entity_addr_t test_addr) {
+ test_addr.set_nonce(2);
+ cmd_peer_addr.set_nonce(3);
+ entity_addr_t test_peer_addr = cmd_peer_addr;
+ test_peer_addr.set_port(cmd_peer_addr.get_port() + 1);
+ test_peer_addr.set_nonce(4);
+ auto test = seastar::make_lw_shared<FailoverTest>(
+ Messenger::create(entity_name_t::OSD(1), "CmdCli", 1),
+ test_addr, test_peer_addr);
+ return test->init(cmd_peer_addr).then([test] {
+ logger().info("CmdCli ready");
+ return test;
+ });
+ }
+
+ // called by tests
+ public:
+ seastar::future<> run_suite(
+ std::string name,
+ const TestInterceptor& interceptor,
+ policy_t test_policy,
+ policy_t peer_policy,
+ std::function<seastar::future<>(FailoverSuite&)>&& f) {
+ logger().info("\n\n[{}]", name);
+ ceph_assert(!test_suite);
+ SocketPolicy test_policy_ = to_socket_policy(test_policy);
+ return FailoverSuite::create(
+ test_addr, test_policy_, test_peer_addr, interceptor
+ ).then([this, peer_policy, f = std::move(f)] (auto suite) mutable {
+ ceph_assert(suite->get_addr() == test_addr);
+ test_suite.swap(suite);
+ return start_peer(peer_policy).then([this, f = std::move(f)] {
+ return f(*test_suite);
+ }).then([this] {
+ test_suite->post_check();
+ logger().info("\n[SUCCESS]");
+ }).handle_exception([this] (auto eptr) {
+ logger().info("\n[FAIL: {}]", eptr);
+ test_suite->dump_results();
+ throw;
+ }).then([this] {
+ return stop_peer();
+ }).then([this] {
+ return test_suite->shutdown().then([this] {
+ test_suite.reset();
+ });
+ });
+ });
+ }
+
+ seastar::future<> peer_connect_me() {
+ logger().info("[Test] peer_connect_me({})", test_addr);
+ return prepare_cmd(cmd_t::suite_connect_me,
+ [this] (auto m) {
+ m->cmd.emplace_back(fmt::format("{}", test_addr));
+ });
+ }
+
+ seastar::future<> peer_send_me() {
+ logger().info("[Test] peer_send_me()");
+ ceph_assert(test_suite);
+ test_suite->needs_receive();
+ return prepare_cmd(cmd_t::suite_send_me);
+ }
+
+ seastar::future<> try_peer_send_me() {
+ logger().info("[Test] try_peer_send_me()");
+ ceph_assert(test_suite);
+ return prepare_cmd(cmd_t::suite_send_me);
+ }
+
+ seastar::future<> send_bidirectional() {
+ ceph_assert(test_suite);
+ return test_suite->send_peer().then([this] {
+ return peer_send_me();
+ });
+ }
+
+ seastar::future<> peer_keepalive_me() {
+ logger().info("[Test] peer_keepalive_me()");
+ ceph_assert(test_suite);
+ return prepare_cmd(cmd_t::suite_keepalive_me);
+ }
+
+ seastar::future<> markdown_peer() {
+ logger().info("[Test] markdown_peer()");
+ return prepare_cmd(cmd_t::suite_markdown).then([] {
+ // sleep awhile for peer markdown propagated
+ return seastar::sleep(100ms);
+ });
+ }
+};
+
+class FailoverSuitePeer : public Dispatcher {
+ using cb_t = std::function<seastar::future<>()>;
+ crimson::auth::DummyAuthClientServer dummy_auth;
+ MessengerRef peer_msgr;
+ cb_t op_callback;
+
+ ConnectionRef tracked_conn;
+ unsigned pending_send = 0;
+
+ std::optional<seastar::future<>> ms_dispatch(ConnectionRef c, MessageRef m) override {
+ logger().info("[TestPeer] got op from Test");
+ ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
+ ceph_assert(tracked_conn == c);
+ std::ignore = op_callback();
+ return {seastar::now()};
+ }
+
+ void ms_handle_accept(ConnectionRef conn) override {
+ logger().info("[TestPeer] got accept from Test");
+ ceph_assert(!tracked_conn ||
+ tracked_conn->is_closed() ||
+ tracked_conn == conn);
+ tracked_conn = conn;
+ std::ignore = flush_pending_send();
+ }
+
+ void ms_handle_reset(ConnectionRef conn, bool is_replace) override {
+ logger().info("[TestPeer] got reset from Test");
+ ceph_assert(tracked_conn == conn);
+ tracked_conn = nullptr;
+ }
+
+ private:
+ seastar::future<> init(entity_addr_t addr, SocketPolicy policy) {
+ peer_msgr->set_default_policy(policy);
+ peer_msgr->set_auth_client(&dummy_auth);
+ peer_msgr->set_auth_server(&dummy_auth);
+ return peer_msgr->bind(entity_addrvec_t{addr}).safe_then([this] {
+ return peer_msgr->start({this});
+ }, Messenger::bind_ertr::all_same_way([addr] (const std::error_code& e) {
+ logger().error("FailoverSuitePeer: "
+ "there is another instance running at {}", addr);
+ ceph_abort();
+ }));
+ }
+
+ seastar::future<> send_op() {
+ ceph_assert(tracked_conn);
+ pg_t pgid;
+ object_locator_t oloc;
+ hobject_t hobj(object_t(), oloc.key, CEPH_NOSNAP, pgid.ps(),
+ pgid.pool(), oloc.nspace);
+ spg_t spgid(pgid);
+ return tracked_conn->send(make_message<MOSDOp>(0, 0, hobj, spgid, 0, 0, 0));
+ }
+
+ seastar::future<> flush_pending_send() {
+ if (pending_send != 0) {
+ logger().info("[TestPeer] flush sending {} ops", pending_send);
+ }
+ ceph_assert(tracked_conn);
+ return seastar::do_until(
+ [this] { return pending_send == 0; },
+ [this] {
+ --pending_send;
+ return send_op();
+ });
+ }
+
+ public:
+ FailoverSuitePeer(MessengerRef peer_msgr, cb_t op_callback)
+ : peer_msgr(peer_msgr), op_callback(op_callback) { }
+
+ seastar::future<> shutdown() {
+ peer_msgr->stop();
+ return peer_msgr->shutdown();
+ }
+
+ seastar::future<> connect_peer(entity_addr_t addr) {
+ logger().info("[TestPeer] connect_peer({})", addr);
+ auto new_tracked_conn = peer_msgr->connect(addr, entity_name_t::TYPE_OSD);
+ if (tracked_conn) {
+ if (tracked_conn->is_closed()) {
+ ceph_assert(tracked_conn != new_tracked_conn);
+ logger().info("[TestPeer] this is a new session"
+ " replacing an closed one");
+ } else {
+ ceph_assert(tracked_conn == new_tracked_conn);
+ logger().info("[TestPeer] this is not a new session");
+ }
+ } else {
+ logger().info("[TestPeer] this is a new session");
+ }
+ tracked_conn = new_tracked_conn;
+ return flush_pending_send();
+ }
+
+ seastar::future<> send_peer() {
+ if (tracked_conn) {
+ logger().info("[TestPeer] send_peer()");
+ return send_op();
+ } else {
+ ++pending_send;
+ logger().info("[TestPeer] send_peer() (pending {})", pending_send);
+ return seastar::now();
+ }
+ }
+
+ seastar::future<> keepalive_peer() {
+ logger().info("[TestPeer] keepalive_peer()");
+ ceph_assert(tracked_conn);
+ return tracked_conn->keepalive();
+ }
+
+ seastar::future<> markdown() {
+ logger().info("[TestPeer] markdown()");
+ ceph_assert(tracked_conn);
+ tracked_conn->mark_down();
+ return seastar::now();
+ }
+
+ static seastar::future<std::unique_ptr<FailoverSuitePeer>>
+ create(entity_addr_t addr, const SocketPolicy& policy, cb_t op_callback) {
+ auto suite = std::make_unique<FailoverSuitePeer>(
+ Messenger::create(entity_name_t::OSD(4), "TestPeer", 4), op_callback);
+ return suite->init(addr, policy
+ ).then([suite = std::move(suite)] () mutable {
+ return std::move(suite);
+ });
+ }
+};
+
+class FailoverTestPeer : public Dispatcher {
+ crimson::auth::DummyAuthClientServer dummy_auth;
+ MessengerRef cmd_msgr;
+ ConnectionRef cmd_conn;
+ const entity_addr_t test_peer_addr;
+ std::unique_ptr<FailoverSuitePeer> test_suite;
+
+ std::optional<seastar::future<>> ms_dispatch(ConnectionRef c, MessageRef m) override {
+ ceph_assert(cmd_conn == c);
+ switch (m->get_type()) {
+ case CEPH_MSG_PING:
+ std::ignore = c->send(make_message<MPing>());
+ break;
+ case MSG_COMMAND: {
+ auto m_cmd = boost::static_pointer_cast<MCommand>(m);
+ auto cmd = static_cast<cmd_t>(m_cmd->cmd[0][0]);
+ if (cmd == cmd_t::shutdown) {
+ logger().info("CmdSrv shutdown...");
+ // forwarded to FailoverTestPeer::wait()
+ cmd_msgr->stop();
+ std::ignore = cmd_msgr->shutdown();
+ } else {
+ std::ignore = handle_cmd(cmd, m_cmd).then([c] {
+ return c->send(make_message<MCommandReply>());
+ });
+ }
+ break;
+ }
+ default:
+ logger().error("{} got unexpected msg from cmd client: {}", *c, m);
+ ceph_abort();
+ }
+ return {seastar::now()};
+ }
+
+ void ms_handle_accept(ConnectionRef conn) override {
+ cmd_conn = conn;
+ }
+
+ private:
+ seastar::future<> notify_recv_op() {
+ ceph_assert(cmd_conn);
+ auto m = make_message<MCommand>();
+ m->cmd.emplace_back(1, static_cast<char>(cmd_t::suite_recv_op));
+ return cmd_conn->send(m);
+ }
+
+ seastar::future<> handle_cmd(cmd_t cmd, MRef<MCommand> m_cmd) {
+ switch (cmd) {
+ case cmd_t::suite_start: {
+ ceph_assert(!test_suite);
+ auto policy = to_socket_policy(static_cast<policy_t>(m_cmd->cmd[1][0]));
+ return FailoverSuitePeer::create(test_peer_addr, policy,
+ [this] { return notify_recv_op(); }
+ ).then([this] (auto suite) {
+ test_suite.swap(suite);
+ });
+ }
+ case cmd_t::suite_stop:
+ ceph_assert(test_suite);
+ return test_suite->shutdown().then([this] {
+ test_suite.reset();
+ });
+ case cmd_t::suite_connect_me: {
+ ceph_assert(test_suite);
+ entity_addr_t test_addr = entity_addr_t();
+ test_addr.parse(m_cmd->cmd[1].c_str(), nullptr);
+ return test_suite->connect_peer(test_addr);
+ }
+ case cmd_t::suite_send_me:
+ ceph_assert(test_suite);
+ return test_suite->send_peer();
+ case cmd_t::suite_keepalive_me:
+ ceph_assert(test_suite);
+ return test_suite->keepalive_peer();
+ case cmd_t::suite_markdown:
+ ceph_assert(test_suite);
+ return test_suite->markdown();
+ default:
+ logger().error("TestPeer got unexpected command {} from Test", m_cmd);
+ ceph_abort();
+ return seastar::now();
+ }
+ }
+
+ seastar::future<> init(entity_addr_t cmd_peer_addr) {
+ cmd_msgr->set_default_policy(SocketPolicy::stateless_server(0));
+ cmd_msgr->set_auth_client(&dummy_auth);
+ cmd_msgr->set_auth_server(&dummy_auth);
+ return cmd_msgr->bind(entity_addrvec_t{cmd_peer_addr}).safe_then([this] {
+ return cmd_msgr->start({this});
+ }, Messenger::bind_ertr::all_same_way([cmd_peer_addr] (const std::error_code& e) {
+ logger().error("FailoverTestPeer: "
+ "there is another instance running at {}", cmd_peer_addr);
+ ceph_abort();
+ }));
+ }
+
+ public:
+ FailoverTestPeer(MessengerRef cmd_msgr,
+ entity_addr_t test_peer_addr)
+ : cmd_msgr(cmd_msgr),
+ test_peer_addr(test_peer_addr) { }
+
+ seastar::future<> wait() {
+ return cmd_msgr->wait();
+ }
+
+ static seastar::future<std::unique_ptr<FailoverTestPeer>>
+ create(entity_addr_t cmd_peer_addr) {
+ // suite bind to cmd_peer_addr, with port + 1
+ entity_addr_t test_peer_addr = cmd_peer_addr;
+ test_peer_addr.set_port(cmd_peer_addr.get_port() + 1);
+ auto test_peer = std::make_unique<FailoverTestPeer>(
+ Messenger::create(entity_name_t::OSD(3), "CmdSrv", 3), test_peer_addr);
+ return test_peer->init(cmd_peer_addr
+ ).then([test_peer = std::move(test_peer)] () mutable {
+ logger().info("CmdSrv ready");
+ return std::move(test_peer);
+ });
+ }
+};
+
+seastar::future<>
+test_v2_lossy_early_connect_fault(FailoverTest& test) {
+ return seastar::do_with(std::vector<Breakpoint>{
+ {custom_bp_t::BANNER_WRITE},
+ {custom_bp_t::BANNER_READ},
+ {custom_bp_t::BANNER_PAYLOAD_READ},
+ {custom_bp_t::SOCKET_CONNECTING},
+ {Tag::HELLO, bp_type_t::WRITE},
+ {Tag::HELLO, bp_type_t::READ},
+ {Tag::AUTH_REQUEST, bp_type_t::WRITE},
+ {Tag::AUTH_DONE, bp_type_t::READ},
+ {Tag::AUTH_SIGNATURE, bp_type_t::WRITE},
+ {Tag::AUTH_SIGNATURE, bp_type_t::READ},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_lossy_early_connect_fault -- {}", bp),
+ interceptor,
+ policy_t::lossy_client,
+ policy_t::stateless_server,
+ [] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(2, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossy_connect_fault(FailoverTest& test) {
+ return seastar::do_with(std::vector<Breakpoint>{
+ {Tag::CLIENT_IDENT, bp_type_t::WRITE},
+ {Tag::SERVER_IDENT, bp_type_t::READ},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_lossy_connect_fault -- {}", bp),
+ interceptor,
+ policy_t::lossy_client,
+ policy_t::stateless_server,
+ [] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(2, 2, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossy_connected_fault(FailoverTest& test) {
+ return seastar::do_with(std::vector<Breakpoint>{
+ {Tag::MESSAGE, bp_type_t::WRITE},
+ {Tag::MESSAGE, bp_type_t::READ},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_lossy_connected_fault -- {}", bp),
+ interceptor,
+ policy_t::lossy_client,
+ policy_t::stateless_server,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.send_bidirectional();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(1, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossy_early_accept_fault(FailoverTest& test) {
+ return seastar::do_with(std::vector<Breakpoint>{
+ {custom_bp_t::BANNER_WRITE},
+ {custom_bp_t::BANNER_READ},
+ {custom_bp_t::BANNER_PAYLOAD_READ},
+ {Tag::HELLO, bp_type_t::WRITE},
+ {Tag::HELLO, bp_type_t::READ},
+ {Tag::AUTH_REQUEST, bp_type_t::READ},
+ {Tag::AUTH_DONE, bp_type_t::WRITE},
+ {Tag::AUTH_SIGNATURE, bp_type_t::WRITE},
+ {Tag::AUTH_SIGNATURE, bp_type_t::READ},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_lossy_early_accept_fault -- {}", bp),
+ interceptor,
+ policy_t::stateless_server,
+ policy_t::lossy_client,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.peer_send_me();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 1);
+ results[1].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossy_accept_fault(FailoverTest& test) {
+ auto bp = Breakpoint{Tag::CLIENT_IDENT, bp_type_t::READ};
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_lossy_accept_fault -- {}", bp),
+ interceptor,
+ policy_t::stateless_server,
+ policy_t::lossy_client,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.peer_send_me();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 1);
+ results[1].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossy_establishing_fault(FailoverTest& test) {
+ auto bp = Breakpoint{Tag::SERVER_IDENT, bp_type_t::WRITE};
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_lossy_establishing_fault -- {}", bp),
+ interceptor,
+ policy_t::stateless_server,
+ policy_t::lossy_client,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.peer_send_me();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 1);
+ results[0].assert_reset(1, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 1);
+ results[1].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossy_accepted_fault(FailoverTest& test) {
+ return seastar::do_with(std::vector<Breakpoint>{
+ {Tag::MESSAGE, bp_type_t::WRITE},
+ {Tag::MESSAGE, bp_type_t::READ},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_lossy_accepted_fault -- {}", bp),
+ interceptor,
+ policy_t::stateless_server,
+ policy_t::lossy_client,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.send_bidirectional();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 1);
+ results[0].assert_reset(1, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossless_connect_fault(FailoverTest& test) {
+ return seastar::do_with(std::vector<Breakpoint>{
+ {Tag::CLIENT_IDENT, bp_type_t::WRITE},
+ {Tag::SERVER_IDENT, bp_type_t::READ},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_lossless_connect_fault -- {}", bp),
+ interceptor,
+ policy_t::lossless_client,
+ policy_t::stateful_server,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.send_bidirectional();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(2, 2, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossless_connected_fault(FailoverTest& test) {
+ return seastar::do_with(std::vector<Breakpoint>{
+ {Tag::MESSAGE, bp_type_t::WRITE},
+ {Tag::MESSAGE, bp_type_t::READ},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_lossless_connected_fault -- {}", bp),
+ interceptor,
+ policy_t::lossless_client,
+ policy_t::stateful_server,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.send_bidirectional();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(2, 1, 1, 2);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossless_connected_fault2(FailoverTest& test) {
+ return seastar::do_with(std::vector<Breakpoint>{
+ {Tag::ACK, bp_type_t::READ},
+ {Tag::ACK, bp_type_t::WRITE},
+ {Tag::KEEPALIVE2, bp_type_t::READ},
+ {Tag::KEEPALIVE2, bp_type_t::WRITE},
+ {Tag::KEEPALIVE2_ACK, bp_type_t::READ},
+ {Tag::KEEPALIVE2_ACK, bp_type_t::WRITE},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_lossless_connected_fault2 -- {}", bp),
+ interceptor,
+ policy_t::lossless_client,
+ policy_t::stateful_server,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_established();
+ }).then([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.keepalive_peer();
+ }).then([&suite] {
+ return suite.wait_established();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&test] {
+ return test.peer_keepalive_me();
+ }).then([&suite] {
+ return suite.wait_established();
+ }).then([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.wait_established();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.wait_established();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(2, 1, 1, 2);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossless_reconnect_fault(FailoverTest& test) {
+ return seastar::do_with(std::vector<std::pair<Breakpoint, Breakpoint>>{
+ {{Tag::MESSAGE, bp_type_t::WRITE},
+ {Tag::SESSION_RECONNECT, bp_type_t::WRITE}},
+ {{Tag::MESSAGE, bp_type_t::WRITE},
+ {Tag::SESSION_RECONNECT_OK, bp_type_t::READ}},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp_pair) {
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp_pair.first);
+ interceptor.make_fault(bp_pair.second);
+ return test.run_suite(
+ fmt::format("test_v2_lossless_reconnect_fault -- {}, {}",
+ bp_pair.first, bp_pair.second),
+ interceptor,
+ policy_t::lossless_client,
+ policy_t::stateful_server,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.send_bidirectional();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(3, 1, 2, 2);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossless_accept_fault(FailoverTest& test) {
+ auto bp = Breakpoint{Tag::CLIENT_IDENT, bp_type_t::READ};
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_lossless_accept_fault -- {}", bp),
+ interceptor,
+ policy_t::stateful_server,
+ policy_t::lossless_client,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.send_bidirectional();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 1);
+ results[1].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossless_establishing_fault(FailoverTest& test) {
+ auto bp = Breakpoint{Tag::SERVER_IDENT, bp_type_t::WRITE};
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_lossless_establishing_fault -- {}", bp),
+ interceptor,
+ policy_t::stateful_server,
+ policy_t::lossless_client,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.send_bidirectional();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 2);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 0);
+ results[1].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossless_accepted_fault(FailoverTest& test) {
+ return seastar::do_with(std::vector<Breakpoint>{
+ {Tag::MESSAGE, bp_type_t::WRITE},
+ {Tag::MESSAGE, bp_type_t::READ},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_lossless_accepted_fault -- {}", bp),
+ interceptor,
+ policy_t::stateful_server,
+ policy_t::lossless_client,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.send_bidirectional();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 2);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 0);
+ results[1].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossless_reaccept_fault(FailoverTest& test) {
+ return seastar::do_with(std::vector<std::pair<Breakpoint, Breakpoint>>{
+ {{Tag::MESSAGE, bp_type_t::READ},
+ {Tag::SESSION_RECONNECT, bp_type_t::READ}},
+ {{Tag::MESSAGE, bp_type_t::READ},
+ {Tag::SESSION_RECONNECT_OK, bp_type_t::WRITE}},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp_pair) {
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp_pair.first);
+ interceptor.make_fault(bp_pair.second);
+ return test.run_suite(
+ fmt::format("test_v2_lossless_reaccept_fault -- {}, {}",
+ bp_pair.first, bp_pair.second),
+ interceptor,
+ policy_t::stateful_server,
+ policy_t::lossless_client,
+ [&test, bp = bp_pair.second] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.send_bidirectional();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_results(3);
+ }).then([bp] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(0, 0, 0, 0);
+ if (bp == Breakpoint{Tag::SESSION_RECONNECT, bp_type_t::READ}) {
+ results[0].assert_accept(1, 1, 0, 2);
+ } else {
+ results[0].assert_accept(1, 1, 0, 3);
+ }
+ results[0].assert_reset(0, 0);
+ if (bp == Breakpoint{Tag::SESSION_RECONNECT, bp_type_t::READ}) {
+ results[1].assert_state_at(conn_state_t::closed);
+ } else {
+ results[1].assert_state_at(conn_state_t::replaced);
+ }
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 0, 1, 0);
+ results[1].assert_reset(0, 0);
+ results[2].assert_state_at(conn_state_t::replaced);
+ results[2].assert_connect(0, 0, 0, 0);
+ results[2].assert_accept(1, 0, 1, 0);
+ results[2].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_peer_connect_fault(FailoverTest& test) {
+ return seastar::do_with(std::vector<Breakpoint>{
+ {Tag::CLIENT_IDENT, bp_type_t::WRITE},
+ {Tag::SERVER_IDENT, bp_type_t::READ},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_peer_connect_fault -- {}", bp),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(2, 2, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_peer_accept_fault(FailoverTest& test) {
+ auto bp = Breakpoint{Tag::CLIENT_IDENT, bp_type_t::READ};
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_peer_accept_fault -- {}", bp),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.peer_send_me();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 1);
+ results[1].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_peer_establishing_fault(FailoverTest& test) {
+ auto bp = Breakpoint{Tag::SERVER_IDENT, bp_type_t::WRITE};
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_peer_establishing_fault -- {}", bp),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.peer_send_me();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 2);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 0);
+ results[1].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_peer_connected_fault_reconnect(FailoverTest& test) {
+ auto bp = Breakpoint{Tag::MESSAGE, bp_type_t::WRITE};
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_peer_connected_fault_reconnect -- {}", bp),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(2, 1, 1, 2);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_peer_connected_fault_reaccept(FailoverTest& test) {
+ auto bp = Breakpoint{Tag::MESSAGE, bp_type_t::READ};
+ TestInterceptor interceptor;
+ interceptor.make_fault(bp);
+ return test.run_suite(
+ fmt::format("test_v2_peer_connected_fault_reaccept -- {}", bp),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 1);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 0, 1, 0);
+ results[1].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<bool>
+peer_wins(FailoverTest& test) {
+ return seastar::do_with(bool(), [&test] (auto& ret) {
+ return test.run_suite("peer_wins",
+ TestInterceptor(),
+ policy_t::lossy_client,
+ policy_t::stateless_server,
+ [&ret] (FailoverSuite& suite) {
+ return suite.connect_peer().then([&suite] {
+ return suite.wait_results(1);
+ }).then([&ret] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ ret = results[0].conn->peer_wins();
+ logger().info("peer_wins: {}", ret);
+ });
+ }).then([&ret] {
+ return ret;
+ });
+ });
+}
+
+seastar::future<>
+test_v2_racing_reconnect_win(FailoverTest& test) {
+ return seastar::do_with(std::vector<std::pair<unsigned, Breakpoint>>{
+ {2, {custom_bp_t::BANNER_WRITE}},
+ {2, {custom_bp_t::BANNER_READ}},
+ {2, {custom_bp_t::BANNER_PAYLOAD_READ}},
+ {2, {Tag::HELLO, bp_type_t::WRITE}},
+ {2, {Tag::HELLO, bp_type_t::READ}},
+ {2, {Tag::AUTH_REQUEST, bp_type_t::READ}},
+ {2, {Tag::AUTH_DONE, bp_type_t::WRITE}},
+ {2, {Tag::AUTH_SIGNATURE, bp_type_t::WRITE}},
+ {2, {Tag::AUTH_SIGNATURE, bp_type_t::READ}},
+ {1, {Tag::SESSION_RECONNECT, bp_type_t::READ}},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_fault({Tag::MESSAGE, bp_type_t::READ});
+ interceptor.make_block(bp.second, bp.first);
+ return test.run_suite(
+ fmt::format("test_v2_racing_reconnect_win -- {}({})",
+ bp.second, bp.first),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.peer_send_me();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_blocked();
+ }).then([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.wait_established();
+ }).then([&suite] {
+ suite.unblock();
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(1, 0, 1, 1);
+ results[0].assert_accept(1, 1, 0, 1);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::closed);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 0);
+ results[1].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_racing_reconnect_lose(FailoverTest& test) {
+ return seastar::do_with(std::vector<std::pair<unsigned, Breakpoint>>{
+ {2, {custom_bp_t::BANNER_WRITE}},
+ {2, {custom_bp_t::BANNER_READ}},
+ {2, {custom_bp_t::BANNER_PAYLOAD_READ}},
+ {2, {Tag::HELLO, bp_type_t::WRITE}},
+ {2, {Tag::HELLO, bp_type_t::READ}},
+ {2, {Tag::AUTH_REQUEST, bp_type_t::WRITE}},
+ {2, {Tag::AUTH_DONE, bp_type_t::READ}},
+ {2, {Tag::AUTH_SIGNATURE, bp_type_t::WRITE}},
+ {2, {Tag::AUTH_SIGNATURE, bp_type_t::READ}},
+ {1, {Tag::SESSION_RECONNECT, bp_type_t::WRITE}},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_fault({Tag::MESSAGE, bp_type_t::WRITE});
+ interceptor.make_block(bp.second, bp.first);
+ return test.run_suite(
+ fmt::format("test_v2_racing_reconnect_lose -- {}({})",
+ bp.second, bp.first),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_blocked();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.wait_replaced(1);
+ }).then([&suite] {
+ suite.unblock();
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(2, 1);
+ results[0].assert_accept(0, 0, 0, 1);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 0, 1, 0);
+ results[1].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_racing_connect_win(FailoverTest& test) {
+ return seastar::do_with(std::vector<Breakpoint>{
+ {custom_bp_t::BANNER_WRITE},
+ {custom_bp_t::BANNER_READ},
+ {custom_bp_t::BANNER_PAYLOAD_READ},
+ {Tag::HELLO, bp_type_t::WRITE},
+ {Tag::HELLO, bp_type_t::READ},
+ {Tag::AUTH_REQUEST, bp_type_t::READ},
+ {Tag::AUTH_DONE, bp_type_t::WRITE},
+ {Tag::AUTH_SIGNATURE, bp_type_t::WRITE},
+ {Tag::AUTH_SIGNATURE, bp_type_t::READ},
+ {Tag::CLIENT_IDENT, bp_type_t::READ},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_block(bp);
+ return test.run_suite(
+ fmt::format("test_v2_racing_connect_win -- {}", bp),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.peer_send_me();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_blocked();
+ }).then([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_established();
+ }).then([&suite] {
+ suite.unblock();
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(1, 1, 0, 1);
+ results[1].assert_accept(0, 0, 0, 0);
+ results[1].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_racing_connect_lose(FailoverTest& test) {
+ return seastar::do_with(std::vector<Breakpoint>{
+ {custom_bp_t::BANNER_WRITE},
+ {custom_bp_t::BANNER_READ},
+ {custom_bp_t::BANNER_PAYLOAD_READ},
+ {Tag::HELLO, bp_type_t::WRITE},
+ {Tag::HELLO, bp_type_t::READ},
+ {Tag::AUTH_REQUEST, bp_type_t::WRITE},
+ {Tag::AUTH_DONE, bp_type_t::READ},
+ {Tag::AUTH_SIGNATURE, bp_type_t::WRITE},
+ {Tag::AUTH_SIGNATURE, bp_type_t::READ},
+ {Tag::CLIENT_IDENT, bp_type_t::WRITE},
+ }, [&test] (auto& failure_cases) {
+ return seastar::do_for_each(failure_cases, [&test] (auto bp) {
+ TestInterceptor interceptor;
+ interceptor.make_block(bp);
+ return test.run_suite(
+ fmt::format("test_v2_racing_connect_lose -- {}", bp),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_blocked();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_replaced(1);
+ }).then([&suite] {
+ suite.unblock();
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(1, 0);
+ results[0].assert_accept(0, 0, 0, 1);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 0);
+ results[1].assert_reset(0, 0);
+ });
+ });
+ });
+ });
+}
+
+seastar::future<>
+test_v2_racing_connect_reconnect_lose(FailoverTest& test) {
+ TestInterceptor interceptor;
+ interceptor.make_fault({Tag::SERVER_IDENT, bp_type_t::READ});
+ interceptor.make_block({Tag::CLIENT_IDENT, bp_type_t::WRITE}, 2);
+ return test.run_suite("test_v2_racing_connect_reconnect_lose",
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_blocked();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.wait_replaced(1);
+ }).then([&suite] {
+ suite.unblock();
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(2, 2, 0, 0);
+ results[0].assert_accept(0, 0, 0, 1);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 1, 0);
+ results[1].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_racing_connect_reconnect_win(FailoverTest& test) {
+ TestInterceptor interceptor;
+ interceptor.make_fault({Tag::SERVER_IDENT, bp_type_t::READ});
+ interceptor.make_block({Tag::SESSION_RECONNECT, bp_type_t::READ});
+ return test.run_suite("test_v2_racing_connect_reconnect_win",
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_blocked();
+ }).then([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.wait_established();
+ }).then([&suite] {
+ suite.unblock();
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(2, 2, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::closed);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 0, 1, 0);
+ results[1].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_stale_connect(FailoverTest& test) {
+ auto bp = Breakpoint{Tag::SERVER_IDENT, bp_type_t::READ};
+ TestInterceptor interceptor;
+ interceptor.make_stall(bp);
+ return test.run_suite(
+ fmt::format("test_v2_stale_connect -- {}", bp),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_blocked();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.wait_replaced(1);
+ }).then([&suite] {
+ suite.unblock();
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(1, 1, 0, 0);
+ results[0].assert_accept(0, 0, 0, 1);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 1, 0);
+ results[1].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_stale_reconnect(FailoverTest& test) {
+ auto bp = Breakpoint{Tag::SESSION_RECONNECT_OK, bp_type_t::READ};
+ TestInterceptor interceptor;
+ interceptor.make_fault({Tag::MESSAGE, bp_type_t::WRITE});
+ interceptor.make_stall(bp);
+ return test.run_suite(
+ fmt::format("test_v2_stale_reconnect -- {}", bp),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.wait_blocked();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.wait_replaced(1);
+ }).then([&suite] {
+ suite.unblock();
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(2, 1, 1, 1);
+ results[0].assert_accept(0, 0, 0, 1);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 0, 1, 0);
+ results[1].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_stale_accept(FailoverTest& test) {
+ auto bp = Breakpoint{Tag::CLIENT_IDENT, bp_type_t::READ};
+ TestInterceptor interceptor;
+ interceptor.make_stall(bp);
+ return test.run_suite(
+ fmt::format("test_v2_stale_accept -- {}", bp),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_blocked();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.wait_established();
+ }).then([&suite] {
+ suite.unblock();
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 1);
+ results[1].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_stale_establishing(FailoverTest& test) {
+ auto bp = Breakpoint{Tag::SERVER_IDENT, bp_type_t::WRITE};
+ TestInterceptor interceptor;
+ interceptor.make_stall(bp);
+ return test.run_suite(
+ fmt::format("test_v2_stale_establishing -- {}", bp),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_blocked();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.wait_replaced(1);
+ }).then([&suite] {
+ suite.unblock();
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 2);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 0);
+ results[1].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_stale_reaccept(FailoverTest& test) {
+ auto bp = Breakpoint{Tag::SESSION_RECONNECT_OK, bp_type_t::WRITE};
+ TestInterceptor interceptor;
+ interceptor.make_fault({Tag::MESSAGE, bp_type_t::READ});
+ interceptor.make_stall(bp);
+ return test.run_suite(
+ fmt::format("test_v2_stale_reaccept -- {}", bp),
+ interceptor,
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ return test.peer_send_me();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&suite] {
+ return suite.wait_blocked();
+ }).then([] {
+ logger().info("[Test] block the broken REPLACING for 210ms...");
+ return seastar::sleep(210ms);
+ }).then([&suite] {
+ suite.unblock();
+ return suite.wait_results(3);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 3);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 0, 1, 0);
+ results[1].assert_reset(0, 0);
+ results[2].assert_state_at(conn_state_t::replaced);
+ results[2].assert_connect(0, 0, 0, 0);
+ results[2].assert_accept(1, 0);
+ results[2].assert_reset(0, 0);
+ ceph_assert(results[2].server_reconnect_attempts >= 1);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossy_client(FailoverTest& test) {
+ return test.run_suite(
+ "test_v2_lossy_client",
+ TestInterceptor(),
+ policy_t::lossy_client,
+ policy_t::stateless_server,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ logger().info("-- 0 --");
+ logger().info("[Test] setup connection...");
+ return suite.connect_peer();
+ }).then([&test] {
+ return test.send_bidirectional();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ }).then([&suite] {
+ logger().info("-- 1 --");
+ logger().info("[Test] client markdown...");
+ return suite.markdown();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(1, 1, 0, 1);
+ results[1].assert_accept(0, 0, 0, 0);
+ results[1].assert_reset(0, 0);
+ }).then([&test] {
+ logger().info("-- 2 --");
+ logger().info("[Test] server markdown...");
+ return test.markdown_peer();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::closed);
+ results[1].assert_connect(1, 1, 0, 1);
+ results[1].assert_accept(0, 0, 0, 0);
+ results[1].assert_reset(1, 0);
+ }).then([&suite] {
+ logger().info("-- 3 --");
+ logger().info("[Test] client reconnect...");
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.wait_results(3);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::closed);
+ results[1].assert_connect(1, 1, 0, 1);
+ results[1].assert_accept(0, 0, 0, 0);
+ results[1].assert_reset(1, 0);
+ results[2].assert_state_at(conn_state_t::established);
+ results[2].assert_connect(1, 1, 0, 1);
+ results[2].assert_accept(0, 0, 0, 0);
+ results[2].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_stateless_server(FailoverTest& test) {
+ return test.run_suite(
+ "test_v2_stateless_server",
+ TestInterceptor(),
+ policy_t::stateless_server,
+ policy_t::lossy_client,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ logger().info("-- 0 --");
+ logger().info("[Test] setup connection...");
+ return test.peer_connect_me();
+ }).then([&test] {
+ return test.send_bidirectional();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 1);
+ results[0].assert_reset(0, 0);
+ }).then([&test] {
+ logger().info("-- 1 --");
+ logger().info("[Test] client markdown...");
+ return test.markdown_peer();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 1);
+ results[0].assert_reset(1, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 1);
+ results[1].assert_reset(0, 0);
+ }).then([&suite] {
+ logger().info("-- 2 --");
+ logger().info("[Test] server markdown...");
+ return suite.markdown();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 1);
+ results[0].assert_reset(1, 0);
+ results[1].assert_state_at(conn_state_t::closed);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 1);
+ results[1].assert_reset(0, 0);
+ }).then([&test] {
+ logger().info("-- 3 --");
+ logger().info("[Test] client reconnect...");
+ return test.peer_connect_me();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.wait_results(3);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 1);
+ results[0].assert_reset(1, 0);
+ results[1].assert_state_at(conn_state_t::closed);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 1);
+ results[1].assert_reset(0, 0);
+ results[2].assert_state_at(conn_state_t::established);
+ results[2].assert_connect(0, 0, 0, 0);
+ results[2].assert_accept(1, 1, 0, 1);
+ results[2].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossless_client(FailoverTest& test) {
+ return test.run_suite(
+ "test_v2_lossless_client",
+ TestInterceptor(),
+ policy_t::lossless_client,
+ policy_t::stateful_server,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ logger().info("-- 0 --");
+ logger().info("[Test] setup connection...");
+ return suite.connect_peer();
+ }).then([&test] {
+ return test.send_bidirectional();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ }).then([&suite] {
+ logger().info("-- 1 --");
+ logger().info("[Test] client markdown...");
+ return suite.markdown();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(1, 1, 0, 1);
+ results[1].assert_accept(0, 0, 0, 0);
+ results[1].assert_reset(0, 0);
+ }).then([&test] {
+ logger().info("-- 2 --");
+ logger().info("[Test] server markdown...");
+ return test.markdown_peer();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(2, 2, 1, 2);
+ results[1].assert_accept(0, 0, 0, 0);
+ results[1].assert_reset(0, 1);
+ }).then([&suite] {
+ logger().info("-- 3 --");
+ logger().info("[Test] client reconnect...");
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(2, 2, 1, 2);
+ results[1].assert_accept(0, 0, 0, 0);
+ results[1].assert_reset(0, 1);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_stateful_server(FailoverTest& test) {
+ return test.run_suite(
+ "test_v2_stateful_server",
+ TestInterceptor(),
+ policy_t::stateful_server,
+ policy_t::lossless_client,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ logger().info("-- 0 --");
+ logger().info("[Test] setup connection...");
+ return test.peer_connect_me();
+ }).then([&test] {
+ return test.send_bidirectional();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 1);
+ results[0].assert_reset(0, 0);
+ }).then([&test] {
+ logger().info("-- 1 --");
+ logger().info("[Test] client markdown...");
+ return test.markdown_peer();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 2);
+ results[0].assert_reset(0, 1);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 0);
+ results[1].assert_reset(0, 0);
+ }).then([&suite] {
+ logger().info("-- 2 --");
+ logger().info("[Test] server markdown...");
+ return suite.markdown();
+ }).then([&suite] {
+ return suite.wait_results(3);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 2);
+ results[0].assert_reset(0, 1);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 0);
+ results[1].assert_reset(0, 0);
+ results[2].assert_state_at(conn_state_t::established);
+ results[2].assert_connect(0, 0, 0, 0);
+ results[2].assert_accept(1, 1, 1, 1);
+ results[2].assert_reset(0, 0);
+ }).then([&test] {
+ logger().info("-- 3 --");
+ logger().info("[Test] client reconnect...");
+ return test.peer_connect_me();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.wait_results(3);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 2);
+ results[0].assert_reset(0, 1);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 0);
+ results[1].assert_reset(0, 0);
+ results[2].assert_state_at(conn_state_t::established);
+ results[2].assert_connect(0, 0, 0, 0);
+ results[2].assert_accept(1, 1, 1, 1);
+ results[2].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_peer_reuse_connector(FailoverTest& test) {
+ return test.run_suite(
+ "test_v2_peer_reuse_connector",
+ TestInterceptor(),
+ policy_t::lossless_peer_reuse,
+ policy_t::lossless_peer_reuse,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ logger().info("-- 0 --");
+ logger().info("[Test] setup connection...");
+ return suite.connect_peer();
+ }).then([&test] {
+ return test.send_bidirectional();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ }).then([&suite] {
+ logger().info("-- 1 --");
+ logger().info("[Test] connector markdown...");
+ return suite.markdown();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(1, 1, 0, 1);
+ results[1].assert_accept(0, 0, 0, 0);
+ results[1].assert_reset(0, 0);
+ }).then([&test] {
+ logger().info("-- 2 --");
+ logger().info("[Test] acceptor markdown...");
+ return test.markdown_peer();
+ }).then([] {
+ return seastar::sleep(100ms);
+ }).then([&suite] {
+ ceph_assert(suite.is_standby());
+ logger().info("-- 3 --");
+ logger().info("[Test] connector reconnect...");
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.try_send_peer();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(2, 2, 1, 2);
+ results[1].assert_accept(0, 0, 0, 0);
+ results[1].assert_reset(0, 1);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_peer_reuse_acceptor(FailoverTest& test) {
+ return test.run_suite(
+ "test_v2_peer_reuse_acceptor",
+ TestInterceptor(),
+ policy_t::lossless_peer_reuse,
+ policy_t::lossless_peer_reuse,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ logger().info("-- 0 --");
+ logger().info("[Test] setup connection...");
+ return test.peer_connect_me();
+ }).then([&test] {
+ return test.send_bidirectional();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 1);
+ results[0].assert_reset(0, 0);
+ }).then([&test] {
+ logger().info("-- 1 --");
+ logger().info("[Test] connector markdown...");
+ return test.markdown_peer();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 2);
+ results[0].assert_reset(0, 1);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 0);
+ results[1].assert_reset(0, 0);
+ }).then([] {
+ logger().info("-- 2 --");
+ logger().info("[Test] acceptor markdown...");
+ return seastar::sleep(100ms);
+ }).then([&suite] {
+ return suite.markdown();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 2);
+ results[0].assert_reset(0, 1);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 0);
+ results[1].assert_reset(0, 0);
+ }).then([&test] {
+ logger().info("-- 3 --");
+ logger().info("[Test] connector reconnect...");
+ return test.peer_connect_me();
+ }).then([&test] {
+ return test.try_peer_send_me();
+ }).then([&suite] {
+ return suite.wait_results(3);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 2);
+ results[0].assert_reset(0, 1);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 0);
+ results[1].assert_reset(0, 0);
+ results[2].assert_state_at(conn_state_t::established);
+ results[2].assert_connect(0, 0, 0, 0);
+ results[2].assert_accept(1, 1, 1, 1);
+ results[2].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossless_peer_connector(FailoverTest& test) {
+ return test.run_suite(
+ "test_v2_lossless_peer_connector",
+ TestInterceptor(),
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&suite] {
+ logger().info("-- 0 --");
+ logger().info("[Test] setup connection...");
+ return suite.connect_peer();
+ }).then([&test] {
+ return test.send_bidirectional();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ }).then([&suite] {
+ logger().info("-- 1 --");
+ logger().info("[Test] connector markdown...");
+ return suite.markdown();
+ }).then([&suite] {
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.send_peer();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(1, 1, 0, 1);
+ results[1].assert_accept(0, 0, 0, 0);
+ results[1].assert_reset(0, 0);
+ }).then([&test] {
+ logger().info("-- 2 --");
+ logger().info("[Test] acceptor markdown...");
+ return test.markdown_peer();
+ }).then([] {
+ return seastar::sleep(100ms);
+ }).then([&suite] {
+ ceph_assert(suite.is_standby());
+ logger().info("-- 3 --");
+ logger().info("[Test] connector reconnect...");
+ return suite.connect_peer();
+ }).then([&suite] {
+ return suite.try_send_peer();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(1, 1, 0, 1);
+ results[0].assert_accept(0, 0, 0, 0);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::established);
+ results[1].assert_connect(2, 2, 1, 2);
+ results[1].assert_accept(0, 0, 0, 0);
+ results[1].assert_reset(0, 1);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_lossless_peer_acceptor(FailoverTest& test) {
+ return test.run_suite(
+ "test_v2_lossless_peer_acceptor",
+ TestInterceptor(),
+ policy_t::lossless_peer,
+ policy_t::lossless_peer,
+ [&test] (FailoverSuite& suite) {
+ return seastar::futurize_invoke([&test] {
+ logger().info("-- 0 --");
+ logger().info("[Test] setup connection...");
+ return test.peer_connect_me();
+ }).then([&test] {
+ return test.send_bidirectional();
+ }).then([&suite] {
+ return suite.wait_results(1);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 1);
+ results[0].assert_reset(0, 0);
+ }).then([&test] {
+ logger().info("-- 1 --");
+ logger().info("[Test] connector markdown...");
+ return test.markdown_peer();
+ }).then([&test] {
+ return test.peer_connect_me();
+ }).then([&test] {
+ return test.peer_send_me();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::established);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 2);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 0);
+ results[1].assert_reset(0, 0);
+ }).then([] {
+ logger().info("-- 2 --");
+ logger().info("[Test] acceptor markdown...");
+ return seastar::sleep(100ms);
+ }).then([&suite] {
+ return suite.markdown();
+ }).then([&suite] {
+ return suite.wait_results(2);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 2);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 0);
+ results[1].assert_reset(0, 0);
+ }).then([&test] {
+ logger().info("-- 3 --");
+ logger().info("[Test] connector reconnect...");
+ return test.peer_connect_me();
+ }).then([&test] {
+ return test.try_peer_send_me();
+ }).then([&suite] {
+ return suite.wait_results(3);
+ }).then([] (ConnResults& results) {
+ results[0].assert_state_at(conn_state_t::closed);
+ results[0].assert_connect(0, 0, 0, 0);
+ results[0].assert_accept(1, 1, 0, 2);
+ results[0].assert_reset(0, 0);
+ results[1].assert_state_at(conn_state_t::replaced);
+ results[1].assert_connect(0, 0, 0, 0);
+ results[1].assert_accept(1, 1, 0, 0);
+ results[1].assert_reset(0, 0);
+ results[2].assert_state_at(conn_state_t::established);
+ results[2].assert_connect(0, 0, 0, 0);
+ results[2].assert_accept(1, 1, 1, 1);
+ results[2].assert_reset(0, 0);
+ });
+ });
+}
+
+seastar::future<>
+test_v2_protocol(entity_addr_t test_addr,
+ entity_addr_t test_peer_addr,
+ bool test_peer_islocal) {
+ ceph_assert(test_addr.is_msgr2());
+ ceph_assert(test_peer_addr.is_msgr2());
+
+ if (test_peer_islocal) {
+ // initiate crimson test peer locally
+ logger().info("test_v2_protocol: start local TestPeer at {}...", test_peer_addr);
+ return FailoverTestPeer::create(test_peer_addr
+ ).then([test_addr, test_peer_addr] (auto peer) {
+ return test_v2_protocol(test_addr, test_peer_addr, false
+ ).then([peer = std::move(peer)] () mutable {
+ return peer->wait().then([peer = std::move(peer)] {});
+ });
+ }).handle_exception([] (auto eptr) {
+ logger().error("FailoverTestPeer failed: got exception {}", eptr);
+ throw;
+ });
+ }
+
+ return FailoverTest::create(test_peer_addr, test_addr).then([] (auto test) {
+ return seastar::futurize_invoke([test] {
+ return test_v2_lossy_early_connect_fault(*test);
+ }).then([test] {
+ return test_v2_lossy_connect_fault(*test);
+ }).then([test] {
+ return test_v2_lossy_connected_fault(*test);
+ }).then([test] {
+ return test_v2_lossy_early_accept_fault(*test);
+ }).then([test] {
+ return test_v2_lossy_accept_fault(*test);
+ }).then([test] {
+ return test_v2_lossy_establishing_fault(*test);
+ }).then([test] {
+ return test_v2_lossy_accepted_fault(*test);
+ }).then([test] {
+ return test_v2_lossless_connect_fault(*test);
+ }).then([test] {
+ return test_v2_lossless_connected_fault(*test);
+ }).then([test] {
+ return test_v2_lossless_connected_fault2(*test);
+ }).then([test] {
+ return test_v2_lossless_reconnect_fault(*test);
+ }).then([test] {
+ return test_v2_lossless_accept_fault(*test);
+ }).then([test] {
+ return test_v2_lossless_establishing_fault(*test);
+ }).then([test] {
+ return test_v2_lossless_accepted_fault(*test);
+ }).then([test] {
+ return test_v2_lossless_reaccept_fault(*test);
+ }).then([test] {
+ return test_v2_peer_connect_fault(*test);
+ }).then([test] {
+ return test_v2_peer_accept_fault(*test);
+ }).then([test] {
+ return test_v2_peer_establishing_fault(*test);
+ }).then([test] {
+ return test_v2_peer_connected_fault_reconnect(*test);
+ }).then([test] {
+ return test_v2_peer_connected_fault_reaccept(*test);
+ }).then([test] {
+ return peer_wins(*test);
+ }).then([test] (bool peer_wins) {
+ if (peer_wins) {
+ return seastar::futurize_invoke([test] {
+ return test_v2_racing_connect_lose(*test);
+ }).then([test] {
+ return test_v2_racing_reconnect_lose(*test);
+ });
+ } else {
+ return seastar::futurize_invoke([test] {
+ return test_v2_racing_connect_win(*test);
+ }).then([test] {
+ return test_v2_racing_reconnect_win(*test);
+ });
+ }
+ }).then([test] {
+ return test_v2_racing_connect_reconnect_win(*test);
+ }).then([test] {
+ return test_v2_racing_connect_reconnect_lose(*test);
+ }).then([test] {
+ return test_v2_stale_connect(*test);
+ }).then([test] {
+ return test_v2_stale_reconnect(*test);
+ }).then([test] {
+ return test_v2_stale_accept(*test);
+ }).then([test] {
+ return test_v2_stale_establishing(*test);
+ }).then([test] {
+ return test_v2_stale_reaccept(*test);
+ }).then([test] {
+ return test_v2_lossy_client(*test);
+ }).then([test] {
+ return test_v2_stateless_server(*test);
+ }).then([test] {
+ return test_v2_lossless_client(*test);
+ }).then([test] {
+ return test_v2_stateful_server(*test);
+ }).then([test] {
+ return test_v2_peer_reuse_connector(*test);
+ }).then([test] {
+ return test_v2_peer_reuse_acceptor(*test);
+ }).then([test] {
+ return test_v2_lossless_peer_connector(*test);
+ }).then([test] {
+ return test_v2_lossless_peer_acceptor(*test);
+ }).then([test] {
+ return test->shutdown().then([test] {});
+ });
+ }).handle_exception([] (auto eptr) {
+ logger().error("FailoverTest failed: got exception {}", eptr);
+ throw;
+ });
+}
+
+}
+
+seastar::future<int> do_test(seastar::app_template& app)
+{
+ std::vector<const char*> args;
+ std::string cluster;
+ std::string conf_file_list;
+ auto init_params = ceph_argparse_early_args(args,
+ CEPH_ENTITY_TYPE_CLIENT,
+ &cluster,
+ &conf_file_list);
+ return crimson::common::sharded_conf().start(init_params.name, cluster)
+ .then([conf_file_list] {
+ return local_conf().parse_config_files(conf_file_list);
+ }).then([&app] {
+ auto&& config = app.configuration();
+ verbose = config["verbose"].as<bool>();
+ auto rounds = config["rounds"].as<unsigned>();
+ auto keepalive_ratio = config["keepalive-ratio"].as<double>();
+ entity_addr_t v2_test_addr;
+ ceph_assert(v2_test_addr.parse(
+ config["v2-test-addr"].as<std::string>().c_str(), nullptr));
+ entity_addr_t v2_testpeer_addr;
+ ceph_assert(v2_testpeer_addr.parse(
+ config["v2-testpeer-addr"].as<std::string>().c_str(), nullptr));
+ auto v2_testpeer_islocal = config["v2-testpeer-islocal"].as<bool>();
+ return test_echo(rounds, keepalive_ratio, false)
+ .then([rounds, keepalive_ratio] {
+ return test_echo(rounds, keepalive_ratio, true);
+ }).then([] {
+ return test_concurrent_dispatch(false);
+ }).then([] {
+ return test_concurrent_dispatch(true);
+ }).then([] {
+ return test_preemptive_shutdown(false);
+ }).then([] {
+ return test_preemptive_shutdown(true);
+ }).then([v2_test_addr, v2_testpeer_addr, v2_testpeer_islocal] {
+ return test_v2_protocol(v2_test_addr, v2_testpeer_addr, v2_testpeer_islocal);
+ }).then([] {
+ logger().info("All tests succeeded");
+ // Seastar has bugs to have events undispatched during shutdown,
+ // which will result in memory leak and thus fail LeakSanitizer.
+ return seastar::sleep(100ms);
+ });
+ }).then([] {
+ return crimson::common::sharded_conf().stop();
+ }).then([] {
+ return 0;
+ }).handle_exception([] (auto eptr) {
+ logger().error("Test failed: got exception {}", eptr);
+ return 1;
+ });
+}
+
+int main(int argc, char** argv)
+{
+ seastar::app_template app;
+ app.add_options()
+ ("verbose,v", bpo::value<bool>()->default_value(false),
+ "chatty if true")
+ ("rounds", bpo::value<unsigned>()->default_value(512),
+ "number of pingpong rounds")
+ ("keepalive-ratio", bpo::value<double>()->default_value(0.1),
+ "ratio of keepalive in ping messages")
+ ("v2-test-addr", bpo::value<std::string>()->default_value("v2:127.0.0.1:9012"),
+ "address of v2 failover tests")
+ ("v2-testpeer-addr", bpo::value<std::string>()->default_value("v2:127.0.0.1:9013"),
+ "addresses of v2 failover testpeer"
+ " (CmdSrv address and TestPeer address with port+=1)")
+ ("v2-testpeer-islocal", bpo::value<bool>()->default_value(true),
+ "create a local crimson testpeer, or connect to a remote testpeer");
+ return app.run(argc, argv, [&app] {
+ // This test normally succeeds within 60 seconds, so kill it after 120
+ // seconds in case it is blocked forever due to unaddressed bugs.
+ return seastar::with_timeout(seastar::lowres_clock::now() + 120s, do_test(app))
+ .handle_exception_type([](seastar::timed_out_error&) {
+ logger().error("test_messenger timeout after 120s, abort! "
+ "Consider to extend the period if the test is still running.");
+ // use the retcode of timeout(1)
+ return 124;
+ });
+ });
+}
diff --git a/src/test/crimson/test_messenger_peer.cc b/src/test/crimson/test_messenger_peer.cc
new file mode 100644
index 000000000..7b669675b
--- /dev/null
+++ b/src/test/crimson/test_messenger_peer.cc
@@ -0,0 +1,447 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
+
+#include <boost/pointer_cast.hpp>
+#include <boost/program_options/variables_map.hpp>
+#include <boost/program_options/parsers.hpp>
+
+#include "auth/DummyAuth.h"
+#include "common/dout.h"
+#include "global/global_init.h"
+#include "messages/MPing.h"
+#include "messages/MCommand.h"
+#include "messages/MCommandReply.h"
+#include "messages/MOSDOp.h"
+#include "msg/Dispatcher.h"
+#include "msg/Messenger.h"
+
+#include "test_cmds.h"
+
+namespace {
+
+#define dout_subsys ceph_subsys_test
+
+using ceph::net::test::cmd_t;
+using ceph::net::test::policy_t;
+using SocketPolicy = Messenger::Policy;
+
+constexpr int CEPH_OSD_PROTOCOL = 10;
+
+class FailoverSuitePeer : public Dispatcher {
+ using cb_t = std::function<void()>;
+ DummyAuthClientServer dummy_auth;
+ std::unique_ptr<Messenger> peer_msgr;
+ cb_t op_callback;
+
+ Connection* tracked_conn = nullptr;
+ unsigned pending_send = 0;
+
+ bool ms_can_fast_dispatch_any() const override { return true; }
+ bool ms_can_fast_dispatch(const Message* m) const override { return true; }
+ void ms_fast_dispatch(Message* m) override {
+ auto conn = m->get_connection().get();
+ if (tracked_conn == nullptr) {
+ ldout(cct, 0) << "[!TestPeer] got op from Test(conn "
+ << conn << "not tracked yet)" << dendl;
+ tracked_conn = conn;
+ } else if (tracked_conn != conn) {
+ lderr(cct) << "[TestPeer] got op from Test: conn(" << conn
+ << ") != tracked_conn(" << tracked_conn
+ << ")" << dendl;
+ ceph_abort();
+ } else {
+ ldout(cct, 0) << "[TestPeer] got op from Test" << dendl;
+ }
+ op_callback();
+ }
+ bool ms_dispatch(Message* m) override { ceph_abort(); }
+ void ms_handle_fast_connect(Connection* conn) override {
+ if (tracked_conn == conn) {
+ ldout(cct, 0) << "[TestPeer] connected: " << conn << dendl;
+ } else {
+ lderr(cct) << "[TestPeer] connected: conn(" << conn
+ << ") != tracked_conn(" << tracked_conn
+ << ")" << dendl;
+ ceph_abort();
+ }
+ }
+ void ms_handle_fast_accept(Connection* conn) override {
+ if (tracked_conn == nullptr) {
+ ldout(cct, 0) << "[TestPeer] accepted: " << conn << dendl;
+ tracked_conn = conn;
+ } else if (tracked_conn != conn) {
+ lderr(cct) << "[TestPeer] accepted: conn(" << conn
+ << ") != tracked_conn(" << tracked_conn
+ << ")" << dendl;
+ ceph_abort();
+ } else {
+ ldout(cct, 0) << "[!TestPeer] accepted(stale event): " << conn << dendl;
+ }
+ flush_pending_send();
+ }
+ bool ms_handle_reset(Connection* conn) override {
+ if (tracked_conn == conn) {
+ ldout(cct, 0) << "[TestPeer] reset: " << conn << dendl;
+ tracked_conn = nullptr;
+ } else {
+ ldout(cct, 0) << "[!TestPeer] reset(invalid event): conn(" << conn
+ << ") != tracked_conn(" << tracked_conn
+ << ")" << dendl;
+ }
+ return true;
+ }
+ void ms_handle_remote_reset(Connection* conn) override {
+ if (tracked_conn == conn) {
+ ldout(cct, 0) << "[TestPeer] remote reset: " << conn << dendl;
+ } else {
+ ldout(cct, 0) << "[!TestPeer] reset(invalid event): conn(" << conn
+ << ") != tracked_conn(" << tracked_conn
+ << ")" << dendl;
+ }
+ }
+ bool ms_handle_refused(Connection* conn) override {
+ ldout(cct, 0) << "[!TestPeer] refused: " << conn << dendl;
+ return true;
+ }
+
+ private:
+ void init(entity_addr_t test_peer_addr, SocketPolicy policy) {
+ peer_msgr.reset(Messenger::create(cct, "async", entity_name_t::OSD(4), "TestPeer", 4));
+ dummy_auth.auth_registry.refresh_config();
+ peer_msgr->set_cluster_protocol(CEPH_OSD_PROTOCOL);
+ peer_msgr->set_default_policy(policy);
+ peer_msgr->set_auth_client(&dummy_auth);
+ peer_msgr->set_auth_server(&dummy_auth);
+ peer_msgr->set_require_authorizer(false);
+ peer_msgr->bind(test_peer_addr);
+ peer_msgr->add_dispatcher_head(this);
+ peer_msgr->start();
+ }
+
+ void send_op() {
+ ceph_assert(tracked_conn);
+ pg_t pgid;
+ object_locator_t oloc;
+ hobject_t hobj(object_t(), oloc.key, CEPH_NOSNAP, pgid.ps(),
+ pgid.pool(), oloc.nspace);
+ spg_t spgid(pgid);
+ tracked_conn->send_message2(make_message<MOSDOp>(0, 0, hobj, spgid, 0, 0, 0));
+ }
+
+ void flush_pending_send() {
+ if (pending_send != 0) {
+ ldout(cct, 0) << "[TestPeer] flush sending "
+ << pending_send << " ops" << dendl;
+ }
+ ceph_assert(tracked_conn);
+ while (pending_send) {
+ send_op();
+ --pending_send;
+ }
+ }
+
+ public:
+ FailoverSuitePeer(CephContext* cct, cb_t op_callback)
+ : Dispatcher(cct), dummy_auth(cct), op_callback(op_callback) { }
+
+ void shutdown() {
+ peer_msgr->shutdown();
+ peer_msgr->wait();
+ }
+
+ void connect_peer(entity_addr_t test_addr) {
+ ldout(cct, 0) << "[TestPeer] connect_peer(" << test_addr << ")" << dendl;
+ auto conn = peer_msgr->connect_to_osd(entity_addrvec_t{test_addr});
+ if (tracked_conn) {
+ if (tracked_conn == conn.get()) {
+ ldout(cct, 0) << "[TestPeer] this is not a new session " << conn.get() << dendl;
+ } else {
+ ldout(cct, 0) << "[TestPeer] this is a new session " << conn.get()
+ << ", replacing old one " << tracked_conn << dendl;
+ }
+ } else {
+ ldout(cct, 0) << "[TestPeer] this is a new session " << conn.get() << dendl;
+ }
+ tracked_conn = conn.get();
+ flush_pending_send();
+ }
+
+ void send_peer() {
+ if (tracked_conn) {
+ ldout(cct, 0) << "[TestPeer] send_peer()" << dendl;
+ send_op();
+ } else {
+ ++pending_send;
+ ldout(cct, 0) << "[TestPeer] send_peer() (pending " << pending_send << ")" << dendl;
+ }
+ }
+
+ void keepalive_peer() {
+ ldout(cct, 0) << "[TestPeer] keepalive_peer()" << dendl;
+ ceph_assert(tracked_conn);
+ tracked_conn->send_keepalive();
+ }
+
+ void markdown() {
+ ldout(cct, 0) << "[TestPeer] markdown()" << dendl;
+ ceph_assert(tracked_conn);
+ tracked_conn->mark_down();
+ tracked_conn = nullptr;
+ }
+
+ static std::unique_ptr<FailoverSuitePeer>
+ create(CephContext* cct, entity_addr_t test_peer_addr,
+ SocketPolicy policy, cb_t op_callback) {
+ auto suite = std::make_unique<FailoverSuitePeer>(cct, op_callback);
+ suite->init(test_peer_addr, policy);
+ return suite;
+ }
+};
+
+SocketPolicy to_socket_policy(CephContext* cct, policy_t policy) {
+ switch (policy) {
+ case policy_t::stateful_server:
+ return SocketPolicy::stateful_server(0);
+ case policy_t::stateless_server:
+ return SocketPolicy::stateless_server(0);
+ case policy_t::lossless_peer:
+ return SocketPolicy::lossless_peer(0);
+ case policy_t::lossless_peer_reuse:
+ return SocketPolicy::lossless_peer_reuse(0);
+ case policy_t::lossy_client:
+ return SocketPolicy::lossy_client(0);
+ case policy_t::lossless_client:
+ return SocketPolicy::lossless_client(0);
+ default:
+ lderr(cct) << "[CmdSrv] unexpected policy type" << dendl;
+ ceph_abort();
+ }
+}
+
+class FailoverTestPeer : public Dispatcher {
+ DummyAuthClientServer dummy_auth;
+ std::unique_ptr<Messenger> cmd_msgr;
+ Connection *cmd_conn = nullptr;
+ const entity_addr_t test_peer_addr;
+ std::unique_ptr<FailoverSuitePeer> test_suite;
+ const bool nonstop;
+
+ bool ms_can_fast_dispatch_any() const override { return false; }
+ bool ms_can_fast_dispatch(const Message* m) const override { return false; }
+ void ms_fast_dispatch(Message* m) override { ceph_abort(); }
+ bool ms_dispatch(Message* m) override {
+ auto conn = m->get_connection().get();
+ if (cmd_conn == nullptr) {
+ ldout(cct, 0) << "[!CmdSrv] got msg from CmdCli(conn "
+ << conn << "not tracked yet)" << dendl;
+ cmd_conn = conn;
+ } else if (cmd_conn != conn) {
+ lderr(cct) << "[CmdSrv] got msg from CmdCli: conn(" << conn
+ << ") != cmd_conn(" << cmd_conn
+ << ")" << dendl;
+ ceph_abort();
+ } else {
+ // good!
+ }
+ switch (m->get_type()) {
+ case CEPH_MSG_PING: {
+ ldout(cct, 0) << "[CmdSrv] got PING, sending PONG ..." << dendl;
+ cmd_conn->send_message2(make_message<MPing>());
+ break;
+ }
+ case MSG_COMMAND: {
+ auto m_cmd = boost::static_pointer_cast<MCommand>(m);
+ auto cmd = static_cast<cmd_t>(m_cmd->cmd[0][0]);
+ if (cmd == cmd_t::shutdown) {
+ ldout(cct, 0) << "All tests succeeded" << dendl;
+ if (!nonstop) {
+ ldout(cct, 0) << "[CmdSrv] shutdown ..." << dendl;
+ cmd_msgr->shutdown();
+ } else {
+ ldout(cct, 0) << "[CmdSrv] nonstop set ..." << dendl;
+ }
+ } else {
+ ldout(cct, 0) << "[CmdSrv] got cmd " << cmd << dendl;
+ handle_cmd(cmd, m_cmd);
+ ldout(cct, 0) << "[CmdSrv] done, send cmd reply ..." << dendl;
+ cmd_conn->send_message2(make_message<MCommandReply>());
+ }
+ break;
+ }
+ default:
+ lderr(cct) << "[CmdSrv] " << __func__ << " " << cmd_conn
+ << " got unexpected msg from CmdCli: "
+ << m << dendl;
+ ceph_abort();
+ }
+ m->put();
+ return true;
+ }
+ void ms_handle_fast_connect(Connection*) override { ceph_abort(); }
+ void ms_handle_fast_accept(Connection *conn) override {
+ if (cmd_conn == nullptr) {
+ ldout(cct, 0) << "[CmdSrv] accepted: " << conn << dendl;
+ cmd_conn = conn;
+ } else if (cmd_conn != conn) {
+ lderr(cct) << "[CmdSrv] accepted: conn(" << conn
+ << ") != cmd_conn(" << cmd_conn
+ << ")" << dendl;
+ ceph_abort();
+ } else {
+ ldout(cct, 0) << "[!CmdSrv] accepted(stale event): " << conn << dendl;
+ }
+ }
+ bool ms_handle_reset(Connection* conn) override {
+ if (cmd_conn == conn) {
+ ldout(cct, 0) << "[CmdSrv] reset: " << conn << dendl;
+ cmd_conn = nullptr;
+ } else {
+ ldout(cct, 0) << "[!CmdSrv] reset(invalid event): conn(" << conn
+ << ") != cmd_conn(" << cmd_conn
+ << ")" << dendl;
+ }
+ return true;
+ }
+ void ms_handle_remote_reset(Connection*) override { ceph_abort(); }
+ bool ms_handle_refused(Connection*) override { ceph_abort(); }
+
+ private:
+ void notify_recv_op() {
+ ceph_assert(cmd_conn);
+ auto m = make_message<MCommand>();
+ m->cmd.emplace_back(1, static_cast<char>(cmd_t::suite_recv_op));
+ cmd_conn->send_message2(m);
+ }
+
+ void handle_cmd(cmd_t cmd, MRef<MCommand> m_cmd) {
+ switch (cmd) {
+ case cmd_t::suite_start: {
+ if (test_suite) {
+ test_suite->shutdown();
+ test_suite.reset();
+ ldout(cct, 0) << "-------- suite stopped (force) --------\n\n" << dendl;
+ }
+ auto p = static_cast<policy_t>(m_cmd->cmd[1][0]);
+ ldout(cct, 0) << "[CmdSrv] suite starting (" << p
+ <<", " << test_peer_addr << ") ..." << dendl;
+ auto policy = to_socket_policy(cct, p);
+ auto suite = FailoverSuitePeer::create(cct, test_peer_addr, policy,
+ [this] { notify_recv_op(); });
+ test_suite.swap(suite);
+ return;
+ }
+ case cmd_t::suite_stop:
+ ceph_assert(test_suite);
+ test_suite->shutdown();
+ test_suite.reset();
+ ldout(cct, 0) << "-------- suite stopped --------\n\n" << dendl;
+ return;
+ case cmd_t::suite_connect_me: {
+ ceph_assert(test_suite);
+ entity_addr_t test_addr = entity_addr_t();
+ test_addr.parse(m_cmd->cmd[1].c_str(), nullptr);
+ test_suite->connect_peer(test_addr);
+ return;
+ }
+ case cmd_t::suite_send_me:
+ ceph_assert(test_suite);
+ test_suite->send_peer();
+ return;
+ case cmd_t::suite_keepalive_me:
+ ceph_assert(test_suite);
+ test_suite->keepalive_peer();
+ return;
+ case cmd_t::suite_markdown:
+ ceph_assert(test_suite);
+ test_suite->markdown();
+ return;
+ default:
+ lderr(cct) << "[CmdSrv] got unexpected command " << m_cmd
+ << " from CmdCli" << dendl;
+ ceph_abort();
+ }
+ }
+
+ void init(entity_addr_t cmd_peer_addr) {
+ cmd_msgr.reset(Messenger::create(cct, "async", entity_name_t::OSD(3), "CmdSrv", 3));
+ dummy_auth.auth_registry.refresh_config();
+ cmd_msgr->set_cluster_protocol(CEPH_OSD_PROTOCOL);
+ cmd_msgr->set_default_policy(Messenger::Policy::stateless_server(0));
+ cmd_msgr->set_auth_client(&dummy_auth);
+ cmd_msgr->set_auth_server(&dummy_auth);
+ cmd_msgr->set_require_authorizer(false);
+ cmd_msgr->bind(cmd_peer_addr);
+ cmd_msgr->add_dispatcher_head(this);
+ cmd_msgr->start();
+ }
+
+ public:
+ FailoverTestPeer(CephContext* cct,
+ entity_addr_t test_peer_addr,
+ bool nonstop)
+ : Dispatcher(cct),
+ dummy_auth(cct),
+ test_peer_addr(test_peer_addr),
+ nonstop(nonstop) { }
+
+ void wait() { cmd_msgr->wait(); }
+
+ static std::unique_ptr<FailoverTestPeer>
+ create(CephContext* cct, entity_addr_t cmd_peer_addr, bool nonstop) {
+ // suite bind to cmd_peer_addr, with port + 1
+ entity_addr_t test_peer_addr = cmd_peer_addr;
+ test_peer_addr.set_port(cmd_peer_addr.get_port() + 1);
+ auto test_peer = std::make_unique<FailoverTestPeer>(cct, test_peer_addr, nonstop);
+ test_peer->init(cmd_peer_addr);
+ ldout(cct, 0) << "[CmdSrv] ready" << dendl;
+ return test_peer;
+ }
+};
+
+}
+
+int main(int argc, char** argv)
+{
+ namespace po = boost::program_options;
+ po::options_description desc{"Allowed options"};
+ desc.add_options()
+ ("help,h", "show help message")
+ ("addr", po::value<std::string>()->default_value("v2:127.0.0.1:9013"),
+ "CmdSrv address, and TestPeer address with port+=1")
+ ("nonstop", po::value<bool>()->default_value(false),
+ "Do not shutdown TestPeer when all tests are successful");
+ po::variables_map vm;
+ std::vector<std::string> unrecognized_options;
+ try {
+ auto parsed = po::command_line_parser(argc, argv)
+ .options(desc)
+ .allow_unregistered()
+ .run();
+ po::store(parsed, vm);
+ if (vm.count("help")) {
+ std::cout << desc << std::endl;
+ return 0;
+ }
+ po::notify(vm);
+ unrecognized_options = po::collect_unrecognized(parsed.options, po::include_positional);
+ } catch(const po::error& e) {
+ std::cerr << "error: " << e.what() << std::endl;
+ return 1;
+ }
+
+ auto addr = vm["addr"].as<std::string>();
+ entity_addr_t cmd_peer_addr;
+ cmd_peer_addr.parse(addr.c_str(), nullptr);
+ auto nonstop = vm["nonstop"].as<bool>();
+
+ std::vector<const char*> args(argv, argv + argc);
+ auto cct = global_init(nullptr, args,
+ CEPH_ENTITY_TYPE_CLIENT,
+ CODE_ENVIRONMENT_UTILITY,
+ CINIT_FLAG_NO_MON_CONFIG);
+ common_init_finish(cct.get());
+ cct->_conf.set_val("ms_crc_header", "false");
+ cct->_conf.set_val("ms_crc_data", "false");
+
+ auto test_peer = FailoverTestPeer::create(cct.get(), cmd_peer_addr, nonstop);
+ test_peer->wait();
+}
diff --git a/src/test/crimson/test_monc.cc b/src/test/crimson/test_monc.cc
new file mode 100644
index 000000000..f590ce73a
--- /dev/null
+++ b/src/test/crimson/test_monc.cc
@@ -0,0 +1,90 @@
+#include <seastar/core/app-template.hh>
+#include "common/ceph_argparse.h"
+#include "crimson/common/auth_handler.h"
+#include "crimson/common/config_proxy.h"
+#include "crimson/mon/MonClient.h"
+#include "crimson/net/Connection.h"
+#include "crimson/net/Messenger.h"
+
+using Config = crimson::common::ConfigProxy;
+using MonClient = crimson::mon::Client;
+
+namespace {
+
+class DummyAuthHandler : public crimson::common::AuthHandler {
+public:
+ void handle_authentication(const EntityName& name,
+ const AuthCapsInfo& caps) final
+ {}
+};
+
+DummyAuthHandler dummy_handler;
+
+}
+
+static seastar::future<> test_monc()
+{
+ return crimson::common::sharded_conf().start(EntityName{}, string_view{"ceph"}).then([] {
+ std::vector<const char*> args;
+ std::string cluster;
+ std::string conf_file_list;
+ auto init_params = ceph_argparse_early_args(args,
+ CEPH_ENTITY_TYPE_CLIENT,
+ &cluster,
+ &conf_file_list);
+ auto& conf = crimson::common::local_conf();
+ conf->name = init_params.name;
+ conf->cluster = cluster;
+ return conf.parse_config_files(conf_file_list);
+ }).then([] {
+ return crimson::common::sharded_perf_coll().start();
+ }).then([]() mutable {
+ auto msgr = crimson::net::Messenger::create(entity_name_t::OSD(0), "monc", 0);
+ auto& conf = crimson::common::local_conf();
+ if (conf->ms_crc_data) {
+ msgr->set_crc_data();
+ }
+ if (conf->ms_crc_header) {
+ msgr->set_crc_header();
+ }
+ msgr->set_require_authorizer(false);
+ return seastar::do_with(MonClient{*msgr, dummy_handler},
+ [msgr](auto& monc) mutable {
+ return msgr->start({&monc}).then([&monc] {
+ return seastar::with_timeout(
+ seastar::lowres_clock::now() + std::chrono::seconds{10},
+ monc.start());
+ }).then([&monc] {
+ return monc.stop();
+ });
+ }).finally([msgr] {
+ return msgr->shutdown();
+ });
+ }).finally([] {
+ return crimson::common::sharded_perf_coll().stop().then([] {
+ return crimson::common::sharded_conf().stop();
+ });
+ });
+}
+
+int main(int argc, char** argv)
+{
+ seastar::app_template app;
+ return app.run(argc, argv, [&] {
+ return test_monc().then([] {
+ std::cout << "All tests succeeded" << std::endl;
+ }).handle_exception([] (auto eptr) {
+ std::cout << "Test failure" << std::endl;
+ return seastar::make_exception_future<>(eptr);
+ });
+ });
+}
+
+
+/*
+ * Local Variables:
+ * compile-command: "make -j4 \
+ * -C ../../../build \
+ * unittest_seastar_monc"
+ * End:
+ */
diff --git a/src/test/crimson/test_perfcounters.cc b/src/test/crimson/test_perfcounters.cc
new file mode 100644
index 000000000..8aecbf911
--- /dev/null
+++ b/src/test/crimson/test_perfcounters.cc
@@ -0,0 +1,62 @@
+#include <pthread.h>
+#include <stdlib.h>
+#include <iostream>
+#include <fmt/format.h>
+
+#include "common/Formatter.h"
+#include "common/perf_counters.h"
+#include "crimson/common/perf_counters_collection.h"
+
+#include <seastar/core/app-template.hh>
+#include <seastar/core/sharded.hh>
+
+enum {
+ PERFTEST_FIRST = 1000000,
+ PERFTEST_INDEX,
+ PERFTEST_LAST,
+};
+
+static constexpr uint64_t PERF_VAL = 42;
+
+static seastar::future<> test_perfcounters(){
+ return crimson::common::sharded_perf_coll().start().then([] {
+ return crimson::common::sharded_perf_coll().invoke_on_all([] (auto& s){
+ std::string name =fmt::format("seastar-osd::shard-{}",seastar::this_shard_id());
+ PerfCountersBuilder plb(NULL, name, PERFTEST_FIRST,PERFTEST_LAST);
+ plb.add_u64_counter(PERFTEST_INDEX, "perftest_count", "count perftest");
+ auto perf_logger = plb.create_perf_counters();
+ perf_logger->inc(PERFTEST_INDEX,PERF_VAL);
+ s.get_perf_collection()->add(perf_logger);
+ });
+ }).then([]{
+ return crimson::common::sharded_perf_coll().invoke_on_all([] (auto& s){
+ auto pcc = s.get_perf_collection();
+ pcc->with_counters([](auto& by_path){
+ for (auto& perf_counter : by_path) {
+ if (PERF_VAL != perf_counter.second.perf_counters->get(PERFTEST_INDEX)) {
+ throw std::runtime_error("perf counter does not match");
+ }
+ }
+ });
+ });
+ }).finally([] {
+ return crimson::common::sharded_perf_coll().stop();
+ });
+
+}
+
+int main(int argc, char** argv)
+{
+ seastar::app_template app;
+ return app.run(argc, argv, [&] {
+ return test_perfcounters().then([] {
+ std::cout << "All tests succeeded" << std::endl;
+ }).handle_exception([] (auto eptr) {
+ std::cout << "Test failure" << std::endl;
+ return seastar::make_exception_future<>(eptr);
+ });
+ });
+
+}
+
+
diff --git a/src/test/crimson/test_socket.cc b/src/test/crimson/test_socket.cc
new file mode 100644
index 000000000..bfdeeea2a
--- /dev/null
+++ b/src/test/crimson/test_socket.cc
@@ -0,0 +1,490 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <seastar/core/app-template.hh>
+#include <seastar/core/gate.hh>
+#include <seastar/core/sharded.hh>
+#include <seastar/core/sleep.hh>
+#include <seastar/core/when_all.hh>
+#include <seastar/util/later.hh>
+
+#include "crimson/common/log.h"
+#include "crimson/net/Errors.h"
+#include "crimson/net/Fwd.h"
+#include "crimson/net/Socket.h"
+
+namespace {
+
+using seastar::engine;
+using seastar::future;
+using crimson::net::error;
+using crimson::net::FixedCPUServerSocket;
+using crimson::net::Socket;
+using crimson::net::SocketRef;
+using crimson::net::stop_t;
+
+using SocketFRef = seastar::foreign_ptr<SocketRef>;
+
+static seastar::logger logger{"crimsontest"};
+static entity_addr_t get_server_addr() {
+ static int port = 9020;
+ ++port;
+ ceph_assert(port < 9030 && "socket and messenger test ports should not overlap");
+ entity_addr_t saddr;
+ saddr.parse("127.0.0.1", nullptr);
+ saddr.set_port(port);
+ return saddr;
+}
+
+future<SocketRef> socket_connect(const entity_addr_t& saddr) {
+ logger.debug("socket_connect() to {} ...", saddr);
+ return Socket::connect(saddr).then([] (auto socket) {
+ logger.debug("socket_connect() connected");
+ return socket;
+ });
+}
+
+future<> test_refused() {
+ logger.info("test_refused()...");
+ auto saddr = get_server_addr();
+ return socket_connect(saddr).discard_result().then([saddr] {
+ logger.error("test_refused(): connection to {} is not refused", saddr);
+ ceph_abort();
+ }).handle_exception_type([] (const std::system_error& e) {
+ if (e.code() != std::errc::connection_refused) {
+ logger.error("test_refused() got unexpeted error {}", e);
+ ceph_abort();
+ } else {
+ logger.info("test_refused() ok\n");
+ }
+ }).handle_exception([] (auto eptr) {
+ logger.error("test_refused() got unexpeted exception {}", eptr);
+ ceph_abort();
+ });
+}
+
+future<> test_bind_same() {
+ logger.info("test_bind_same()...");
+ return FixedCPUServerSocket::create().then([] (auto pss1) {
+ auto saddr = get_server_addr();
+ return pss1->listen(saddr).safe_then([saddr] {
+ // try to bind the same address
+ return FixedCPUServerSocket::create().then([saddr] (auto pss2) {
+ return pss2->listen(saddr).safe_then([] {
+ logger.error("test_bind_same() should raise address_in_use");
+ ceph_abort();
+ }, FixedCPUServerSocket::listen_ertr::all_same_way(
+ [] (const std::error_code& e) {
+ if (e == std::errc::address_in_use) {
+ // successful!
+ logger.info("test_bind_same() ok\n");
+ } else {
+ logger.error("test_bind_same() got unexpected error {}", e);
+ ceph_abort();
+ }
+ // Note: need to return a explicit ready future, or there will be a
+ // runtime error: member access within null pointer of type 'struct promise_base'
+ return seastar::now();
+ })).then([pss2] {
+ return pss2->destroy();
+ });
+ });
+ }, FixedCPUServerSocket::listen_ertr::all_same_way(
+ [saddr] (const std::error_code& e) {
+ logger.error("test_bind_same(): there is another instance running at {}",
+ saddr);
+ ceph_abort();
+ })).then([pss1] {
+ return pss1->destroy();
+ }).handle_exception([] (auto eptr) {
+ logger.error("test_bind_same() got unexpeted exception {}", eptr);
+ ceph_abort();
+ });
+ });
+}
+
+future<> test_accept() {
+ logger.info("test_accept()");
+ return FixedCPUServerSocket::create().then([] (auto pss) {
+ auto saddr = get_server_addr();
+ return pss->listen(saddr).safe_then([pss] {
+ return pss->accept([] (auto socket, auto paddr) {
+ // simple accept
+ return seastar::sleep(100ms).then([socket = std::move(socket)] () mutable {
+ return socket->close().finally([cleanup = std::move(socket)] {});
+ });
+ });
+ }, FixedCPUServerSocket::listen_ertr::all_same_way(
+ [saddr] (const std::error_code& e) {
+ logger.error("test_accept(): there is another instance running at {}",
+ saddr);
+ ceph_abort();
+ })).then([saddr] {
+ return seastar::when_all(
+ socket_connect(saddr).then([] (auto socket) {
+ return socket->close().finally([cleanup = std::move(socket)] {}); }),
+ socket_connect(saddr).then([] (auto socket) {
+ return socket->close().finally([cleanup = std::move(socket)] {}); }),
+ socket_connect(saddr).then([] (auto socket) {
+ return socket->close().finally([cleanup = std::move(socket)] {}); })
+ ).discard_result();
+ }).then([] {
+ // should be enough to be connected locally
+ return seastar::sleep(50ms);
+ }).then([] {
+ logger.info("test_accept() ok\n");
+ }).then([pss] {
+ return pss->destroy();
+ }).handle_exception([] (auto eptr) {
+ logger.error("test_accept() got unexpeted exception {}", eptr);
+ ceph_abort();
+ });
+ });
+}
+
+class SocketFactory {
+ SocketRef client_socket;
+ SocketFRef server_socket;
+ FixedCPUServerSocket *pss = nullptr;
+ seastar::promise<> server_connected;
+
+ public:
+ // cb_client() on CPU#0, cb_server() on CPU#1
+ template <typename FuncC, typename FuncS>
+ static future<> dispatch_sockets(FuncC&& cb_client, FuncS&& cb_server) {
+ assert(seastar::this_shard_id() == 0u);
+ auto owner = std::make_unique<SocketFactory>();
+ auto psf = owner.get();
+ auto saddr = get_server_addr();
+ return seastar::smp::submit_to(1u, [psf, saddr] {
+ return FixedCPUServerSocket::create().then([psf, saddr] (auto pss) {
+ psf->pss = pss;
+ return pss->listen(saddr
+ ).safe_then([]{}, FixedCPUServerSocket::listen_ertr::all_same_way(
+ [saddr] (const std::error_code& e) {
+ logger.error("dispatch_sockets(): there is another instance running at {}",
+ saddr);
+ ceph_abort();
+ }));
+ });
+ }).then([psf, saddr] {
+ return seastar::when_all_succeed(
+ seastar::smp::submit_to(0u, [psf, saddr] {
+ return socket_connect(saddr).then([psf] (auto socket) {
+ psf->client_socket = std::move(socket);
+ });
+ }),
+ seastar::smp::submit_to(1u, [psf] {
+ return psf->pss->accept([psf] (auto socket, auto paddr) {
+ psf->server_socket = seastar::make_foreign(std::move(socket));
+ return seastar::smp::submit_to(0u, [psf] {
+ psf->server_connected.set_value();
+ });
+ });
+ })
+ );
+ }).then_unpack([] {
+ return seastar::now();
+ }).then([psf] {
+ return psf->server_connected.get_future();
+ }).then([psf] {
+ if (psf->pss) {
+ return seastar::smp::submit_to(1u, [psf] {
+ return psf->pss->destroy();
+ });
+ }
+ return seastar::now();
+ }).then([psf,
+ cb_client = std::move(cb_client),
+ cb_server = std::move(cb_server)] () mutable {
+ logger.debug("dispatch_sockets(): client/server socket are ready");
+ return seastar::when_all_succeed(
+ seastar::smp::submit_to(0u, [socket = psf->client_socket.get(),
+ cb_client = std::move(cb_client)] {
+ return cb_client(socket).then([socket] {
+ logger.debug("closing client socket...");
+ return socket->close();
+ }).handle_exception([] (auto eptr) {
+ logger.error("dispatch_sockets():"
+ " cb_client() got unexpeted exception {}", eptr);
+ ceph_abort();
+ });
+ }),
+ seastar::smp::submit_to(1u, [socket = psf->server_socket.get(),
+ cb_server = std::move(cb_server)] {
+ return cb_server(socket).then([socket] {
+ logger.debug("closing server socket...");
+ return socket->close();
+ }).handle_exception([] (auto eptr) {
+ logger.error("dispatch_sockets():"
+ " cb_server() got unexpeted exception {}", eptr);
+ ceph_abort();
+ });
+ })
+ );
+ }).then_unpack([] {
+ return seastar::now();
+ }).finally([cleanup = std::move(owner)] {});
+ }
+};
+
+class Connection {
+ static const uint64_t DATA_TAIL = 5327;
+ static const unsigned DATA_SIZE = 4096;
+ std::array<uint64_t, DATA_SIZE> data = {0};
+
+ void verify_data_read(const uint64_t read_data[]) {
+ ceph_assert(read_data[0] == read_count);
+ ceph_assert(data[DATA_SIZE - 1] = DATA_TAIL);
+ }
+
+ Socket* socket = nullptr;
+ uint64_t write_count = 0;
+ uint64_t read_count = 0;
+
+ Connection(Socket* socket) : socket{socket} {
+ assert(socket);
+ data[DATA_SIZE - 1] = DATA_TAIL;
+ }
+
+ future<> dispatch_write(unsigned round = 0, bool force_shut = false) {
+ logger.debug("dispatch_write(round={}, force_shut={})...", round, force_shut);
+ return seastar::repeat([this, round, force_shut] {
+ if (round != 0 && round <= write_count) {
+ return seastar::futurize_invoke([this, force_shut] {
+ if (force_shut) {
+ logger.debug("dispatch_write() done, force shutdown output");
+ socket->force_shutdown_out();
+ } else {
+ logger.debug("dispatch_write() done");
+ }
+ }).then([] {
+ return seastar::make_ready_future<stop_t>(stop_t::yes);
+ });
+ } else {
+ data[0] = write_count;
+ return socket->write(seastar::net::packet(
+ reinterpret_cast<const char*>(&data), sizeof(data))
+ ).then([this] {
+ return socket->flush();
+ }).then([this] {
+ write_count += 1;
+ return seastar::make_ready_future<stop_t>(stop_t::no);
+ });
+ }
+ });
+ }
+
+ future<> dispatch_write_unbounded() {
+ return dispatch_write(
+ ).then([] {
+ ceph_abort();
+ }).handle_exception_type([this] (const std::system_error& e) {
+ if (e.code() != std::errc::broken_pipe &&
+ e.code() != std::errc::connection_reset) {
+ logger.error("dispatch_write_unbounded(): "
+ "unexpected error {}", e);
+ throw;
+ }
+ // successful
+ logger.debug("dispatch_write_unbounded(): "
+ "expected error {}", e);
+ shutdown();
+ });
+ }
+
+ future<> dispatch_read(unsigned round = 0, bool force_shut = false) {
+ logger.debug("dispatch_read(round={}, force_shut={})...", round, force_shut);
+ return seastar::repeat([this, round, force_shut] {
+ if (round != 0 && round <= read_count) {
+ return seastar::futurize_invoke([this, force_shut] {
+ if (force_shut) {
+ logger.debug("dispatch_read() done, force shutdown input");
+ socket->force_shutdown_in();
+ } else {
+ logger.debug("dispatch_read() done");
+ }
+ }).then([] {
+ return seastar::make_ready_future<stop_t>(stop_t::yes);
+ });
+ } else {
+ return seastar::futurize_invoke([this] {
+ // we want to test both Socket::read() and Socket::read_exactly()
+ if (read_count % 2) {
+ return socket->read(DATA_SIZE * sizeof(uint64_t)
+ ).then([this] (ceph::bufferlist bl) {
+ uint64_t read_data[DATA_SIZE];
+ auto p = bl.cbegin();
+ ::ceph::decode_raw(read_data, p);
+ verify_data_read(read_data);
+ });
+ } else {
+ return socket->read_exactly(DATA_SIZE * sizeof(uint64_t)
+ ).then([this] (auto buf) {
+ auto read_data = reinterpret_cast<const uint64_t*>(buf.get());
+ verify_data_read(read_data);
+ });
+ }
+ }).then([this] {
+ ++read_count;
+ return seastar::make_ready_future<stop_t>(stop_t::no);
+ });
+ }
+ });
+ }
+
+ future<> dispatch_read_unbounded() {
+ return dispatch_read(
+ ).then([] {
+ ceph_abort();
+ }).handle_exception_type([this] (const std::system_error& e) {
+ if (e.code() != error::read_eof
+ && e.code() != std::errc::connection_reset) {
+ logger.error("dispatch_read_unbounded(): "
+ "unexpected error {}", e);
+ throw;
+ }
+ // successful
+ logger.debug("dispatch_read_unbounded(): "
+ "expected error {}", e);
+ shutdown();
+ });
+ }
+
+ void shutdown() {
+ socket->shutdown();
+ }
+
+ public:
+ static future<> dispatch_rw_bounded(Socket* socket, unsigned round,
+ bool force_shut = false) {
+ logger.debug("dispatch_rw_bounded(round={}, force_shut={})...",
+ round, force_shut);
+ return seastar::do_with(Connection{socket},
+ [round, force_shut] (auto& conn) {
+ ceph_assert(round != 0);
+ return seastar::when_all_succeed(
+ conn.dispatch_write(round, force_shut),
+ conn.dispatch_read(round, force_shut)
+ ).then_unpack([] {
+ return seastar::now();
+ });
+ });
+ }
+
+ static future<> dispatch_rw_unbounded(Socket* socket, bool preemptive_shut = false) {
+ logger.debug("dispatch_rw_unbounded(preemptive_shut={})...", preemptive_shut);
+ return seastar::do_with(Connection{socket}, [preemptive_shut] (auto& conn) {
+ return seastar::when_all_succeed(
+ conn.dispatch_write_unbounded(),
+ conn.dispatch_read_unbounded(),
+ seastar::futurize_invoke([&conn, preemptive_shut] {
+ if (preemptive_shut) {
+ return seastar::sleep(100ms).then([&conn] {
+ logger.debug("dispatch_rw_unbounded() shutdown socket preemptively(100ms)");
+ conn.shutdown();
+ });
+ } else {
+ return seastar::now();
+ }
+ })
+ ).then_unpack([] {
+ return seastar::now();
+ });
+ });
+ }
+};
+
+future<> test_read_write() {
+ logger.info("test_read_write()...");
+ return SocketFactory::dispatch_sockets(
+ [] (auto cs) { return Connection::dispatch_rw_bounded(cs, 128); },
+ [] (auto ss) { return Connection::dispatch_rw_bounded(ss, 128); }
+ ).then([] {
+ logger.info("test_read_write() ok\n");
+ }).handle_exception([] (auto eptr) {
+ logger.error("test_read_write() got unexpeted exception {}", eptr);
+ ceph_abort();
+ });
+}
+
+future<> test_unexpected_down() {
+ logger.info("test_unexpected_down()...");
+ return SocketFactory::dispatch_sockets(
+ [] (auto cs) {
+ return Connection::dispatch_rw_bounded(cs, 128, true
+ ).handle_exception_type([] (const std::system_error& e) {
+ logger.debug("test_unexpected_down(): client get error {}", e);
+ ceph_assert(e.code() == error::read_eof);
+ });
+ },
+ [] (auto ss) { return Connection::dispatch_rw_unbounded(ss); }
+ ).then([] {
+ logger.info("test_unexpected_down() ok\n");
+ }).handle_exception([] (auto eptr) {
+ logger.error("test_unexpected_down() got unexpeted exception {}", eptr);
+ ceph_abort();
+ });
+}
+
+future<> test_shutdown_propagated() {
+ logger.info("test_shutdown_propagated()...");
+ return SocketFactory::dispatch_sockets(
+ [] (auto cs) {
+ logger.debug("test_shutdown_propagated() shutdown client socket");
+ cs->shutdown();
+ return seastar::now();
+ },
+ [] (auto ss) { return Connection::dispatch_rw_unbounded(ss); }
+ ).then([] {
+ logger.info("test_shutdown_propagated() ok\n");
+ }).handle_exception([] (auto eptr) {
+ logger.error("test_shutdown_propagated() got unexpeted exception {}", eptr);
+ ceph_abort();
+ });
+}
+
+future<> test_preemptive_down() {
+ logger.info("test_preemptive_down()...");
+ return SocketFactory::dispatch_sockets(
+ [] (auto cs) { return Connection::dispatch_rw_unbounded(cs, true); },
+ [] (auto ss) { return Connection::dispatch_rw_unbounded(ss); }
+ ).then([] {
+ logger.info("test_preemptive_down() ok\n");
+ }).handle_exception([] (auto eptr) {
+ logger.error("test_preemptive_down() got unexpeted exception {}", eptr);
+ ceph_abort();
+ });
+}
+
+}
+
+int main(int argc, char** argv)
+{
+ seastar::app_template app;
+ return app.run(argc, argv, [] {
+ return seastar::futurize_invoke([] {
+ return test_refused();
+ }).then([] {
+ return test_bind_same();
+ }).then([] {
+ return test_accept();
+ }).then([] {
+ return test_read_write();
+ }).then([] {
+ return test_unexpected_down();
+ }).then([] {
+ return test_shutdown_propagated();
+ }).then([] {
+ return test_preemptive_down();
+ }).then([] {
+ logger.info("All tests succeeded");
+ // Seastar has bugs to have events undispatched during shutdown,
+ // which will result in memory leak and thus fail LeakSanitizer.
+ return seastar::sleep(100ms);
+ }).handle_exception([] (auto eptr) {
+ std::cout << "Test failure" << std::endl;
+ return seastar::make_exception_future<>(eptr);
+ });
+ });
+}