summary | refs | log | tree | commit | diff | stats
path: root/src/test/librados_test_stub
diff options
context:
space:
mode:
Diffstat (limited to 'src/test/librados_test_stub')
-rw-r--r--  src/test/librados_test_stub/CMakeLists.txt            12
-rw-r--r--  src/test/librados_test_stub/LibradosTestStub.cc     1568
-rw-r--r--  src/test/librados_test_stub/LibradosTestStub.h        42
-rw-r--r--  src/test/librados_test_stub/MockTestMemCluster.h      36
-rw-r--r--  src/test/librados_test_stub/MockTestMemIoCtxImpl.h   252
-rw-r--r--  src/test/librados_test_stub/MockTestMemRadosClient.h 103
-rw-r--r--  src/test/librados_test_stub/NeoradosTestStub.cc      601
-rw-r--r--  src/test/librados_test_stub/TestClassHandler.cc      159
-rw-r--r--  src/test/librados_test_stub/TestClassHandler.h        79
-rw-r--r--  src/test/librados_test_stub/TestCluster.h             64
-rw-r--r--  src/test/librados_test_stub/TestIoCtxImpl.cc         394
-rw-r--r--  src/test/librados_test_stub/TestIoCtxImpl.h          221
-rw-r--r--  src/test/librados_test_stub/TestMemCluster.cc        203
-rw-r--r--  src/test/librados_test_stub/TestMemCluster.h         124
-rw-r--r--  src/test/librados_test_stub/TestMemIoCtxImpl.cc      924
-rw-r--r--  src/test/librados_test_stub/TestMemIoCtxImpl.h       104
-rw-r--r--  src/test/librados_test_stub/TestMemRadosClient.cc    118
-rw-r--r--  src/test/librados_test_stub/TestMemRadosClient.h      88
-rw-r--r--  src/test/librados_test_stub/TestRadosClient.cc       311
-rw-r--r--  src/test/librados_test_stub/TestRadosClient.h        162
-rw-r--r--  src/test/librados_test_stub/TestWatchNotify.cc       459
-rw-r--r--  src/test/librados_test_stub/TestWatchNotify.h        148
22 files changed, 6172 insertions, 0 deletions
diff --git a/src/test/librados_test_stub/CMakeLists.txt b/src/test/librados_test_stub/CMakeLists.txt
new file mode 100644
index 000000000..f501d2da2
--- /dev/null
+++ b/src/test/librados_test_stub/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Source files making up the in-memory librados test stub.
+set(librados_test_stub_srcs
+ LibradosTestStub.cc
+ NeoradosTestStub.cc
+ TestClassHandler.cc
+ TestIoCtxImpl.cc
+ TestMemCluster.cc
+ TestMemIoCtxImpl.cc
+ TestMemRadosClient.cc
+ TestRadosClient.cc
+ TestWatchNotify.cc)
+# Static library linked into unit tests in place of the real librados.
+add_library(rados_test_stub STATIC ${librados_test_stub_srcs})
+
diff --git a/src/test/librados_test_stub/LibradosTestStub.cc b/src/test/librados_test_stub/LibradosTestStub.cc
new file mode 100644
index 000000000..238cffa19
--- /dev/null
+++ b/src/test/librados_test_stub/LibradosTestStub.cc
@@ -0,0 +1,1568 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/LibradosTestStub.h"
+#include "include/rados/librados.hpp"
+#include "include/stringify.h"
+#include "common/ceph_argparse.h"
+#include "common/ceph_context.h"
+#include "common/common_init.h"
+#include "common/config.h"
+#include "common/debug.h"
+#include "common/snap_types.h"
+#include "librados/AioCompletionImpl.h"
+#include "log/Log.h"
+#include "test/librados_test_stub/TestClassHandler.h"
+#include "test/librados_test_stub/TestIoCtxImpl.h"
+#include "test/librados_test_stub/TestRadosClient.h"
+#include "test/librados_test_stub/TestMemCluster.h"
+#include "test/librados_test_stub/TestMemRadosClient.h"
+#include "objclass/objclass.h"
+#include "osd/osd_types.h"
+#include <arpa/inet.h>
+#include <boost/shared_ptr.hpp>
+#include <deque>
+#include <functional>
+#include <list>
+#include <vector>
+#include "include/ceph_assert.h"
+#include "include/compat.h"
+
+#define dout_context g_ceph_context
+#define dout_subsys ceph_subsys_rados
+
+using namespace std;
+
+namespace librados {
+
+// Extract the mock impl backing a librados::IoCtx for use in test
+// expectations.  Reinterprets the IoCtx object itself as a pointer to its
+// impl pointer.
+// NOTE(review): relies on IoCtx's first member being the impl pointer —
+// confirm against librados.hpp if the IoCtx layout ever changes.
+MockTestMemIoCtxImpl &get_mock_io_ctx(IoCtx &ioctx) {
+ MockTestMemIoCtxImpl **mock =
+ reinterpret_cast<MockTestMemIoCtxImpl **>(&ioctx);
+ return **mock;
+}
+
+} // namespace librados
+
+namespace librados_test_stub {
+
+// Process-wide cluster slot; empty until first get_cluster()/set_cluster().
+TestClusterRef &cluster() {
+ static TestClusterRef s_cluster;
+ return s_cluster;
+}
+
+// Inject a specific (e.g. mock) cluster implementation for tests.
+// NOTE(review): not synchronized against concurrent get_cluster() callers.
+void set_cluster(TestClusterRef cluster_ref) {
+ cluster() = cluster_ref;
+}
+
+// Lazily create and return the default in-memory test cluster.
+TestClusterRef get_cluster() {
+ auto &cluster_ref = cluster();
+ if (cluster_ref.get() == nullptr) {
+ cluster_ref.reset(new librados::TestMemCluster());
+ }
+ return cluster_ref;
+}
+
+// Singleton class handler with all object classes pre-opened.
+// Fixed: the previous check-then-reset lazy init was racy under concurrent
+// first calls.  A function-local static initialized by a lambda performs
+// the create + open_all_classes() sequence exactly once, thread-safely
+// (C++11 guaranteed static-local initialization).
+librados::TestClassHandler *get_class_handler() {
+ static boost::shared_ptr<librados::TestClassHandler> s_class_handler = [] {
+ boost::shared_ptr<librados::TestClassHandler> handler(
+ new librados::TestClassHandler());
+ handler->open_all_classes();
+ return handler;
+ }();
+ return s_class_handler.get();
+}
+
+} // namespace librados_test_stub
+
+namespace {
+
+// Copy a buffer-like object (bufferlist or std::string) into a malloc()'d
+// C buffer for return through the C API.  A single template replaces the
+// two previously duplicated overloads; both types provide length() and
+// c_str().  Either output pointer may be NULL; an empty buffer yields a
+// NULL *outbuf.
+template <typename Buffer>
+void do_out_buffer(Buffer& outbl, char **outbuf, size_t *outbuflen) {
+ if (outbuf) {
+ if (outbl.length() > 0) {
+ // NOTE(review): malloc result is unchecked, matching prior behavior.
+ *outbuf = (char *)malloc(outbl.length());
+ memcpy(*outbuf, outbl.c_str(), outbl.length());
+ } else {
+ *outbuf = NULL;
+ }
+ }
+ if (outbuflen) {
+ *outbuflen = outbl.length();
+ }
+}
+
+// Build a TestRadosClient attached to the shared test cluster, with a
+// CephContext configured from the environment.
+librados::TestRadosClient *create_rados_client() {
+ CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
+ CephContext *cct = common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0);
+ cct->_conf.parse_env(cct->get_module_type());
+ cct->_conf.apply_changes(nullptr);
+ cct->_log->start();
+
+ auto rados_client =
+ librados_test_stub::get_cluster()->create_rados_client(cct);
+ cct->put(); // the client holds its own reference to the context
+ return rados_client;
+}
+
+} // anonymous namespace
+
+// C API: allocate an AIO completion that invokes cb_complete on completion.
+extern "C" int rados_aio_create_completion2(void *cb_arg,
+ rados_callback_t cb_complete,
+ rados_completion_t *pc)
+{
+ librados::AioCompletionImpl *c = new librados::AioCompletionImpl;
+ if (cb_complete) {
+ c->set_complete_callback(cb_arg, cb_complete);
+ }
+ *pc = c;
+ return 0;
+}
+
+// C API: fetch the operation return code stored in a completion handle.
+extern "C" int rados_aio_get_return_value(rados_completion_t c) {
+ return reinterpret_cast<librados::AioCompletionImpl*>(c)->get_return_value();
+}
+
+// C API: expose the CephContext backing a cluster handle.
+extern "C" rados_config_t rados_cct(rados_t cluster)
+{
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ return reinterpret_cast<rados_config_t>(client->cct());
+}
+
+// C API: set a single config option on the cluster's context.
+extern "C" int rados_conf_set(rados_t cluster, const char *option,
+ const char *value) {
+ librados::TestRadosClient *impl =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ CephContext *cct = impl->cct();
+ return cct->_conf.set_val(option, value);
+}
+
+// C API: re-read config overrides from the environment and apply them.
+extern "C" int rados_conf_parse_env(rados_t cluster, const char *var) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ auto& conf = client->cct()->_conf;
+ conf.parse_env(client->cct()->get_module_type(), var);
+ conf.apply_changes(NULL);
+ return 0;
+}
+
+// C API: load a config file, then layer environment overrides on top.
+// A missing file (-ENOENT) is deliberately treated as success so tests run
+// without a client config present.
+extern "C" int rados_conf_read_file(rados_t cluster, const char *path) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ auto& conf = client->cct()->_conf;
+ int ret = conf.parse_config_files(path, NULL, 0);
+ if (ret == 0) {
+ conf.parse_env(client->cct()->get_module_type());
+ conf.apply_changes(NULL);
+ conf.complain_about_parse_error(client->cct());
+ } else if (ret == -ENOENT) {
+ // ignore missing client config
+ return 0;
+ }
+ return ret;
+}
+
+// C API: "connect" the stub client (no network involved).
+extern "C" int rados_connect(rados_t cluster) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ return client->connect();
+}
+
+// C API: create a cluster handle with a fresh CephContext.
+// Note: the client id argument is ignored by the stub.
+extern "C" int rados_create(rados_t *cluster, const char * const id) {
+ *cluster = create_rados_client();
+ return 0;
+}
+
+// C API: create a cluster handle that shares an existing CephContext.
+extern "C" int rados_create_with_context(rados_t *cluster,
+ rados_config_t cct_) {
+ auto cct = reinterpret_cast<CephContext*>(cct_);
+ *cluster = librados_test_stub::get_cluster()->create_rados_client(cct);
+ return 0;
+}
+
+// C API: expose the CephContext behind an io context handle.
+extern "C" rados_config_t rados_ioctx_cct(rados_ioctx_t ioctx)
+{
+ librados::TestIoCtxImpl *ctx =
+ reinterpret_cast<librados::TestIoCtxImpl*>(ioctx);
+ return reinterpret_cast<rados_config_t>(ctx->get_rados_client()->cct());
+}
+
+// C API: open an io context by pool name; fails if the pool is unknown.
+extern "C" int rados_ioctx_create(rados_t cluster, const char *pool_name,
+ rados_ioctx_t *ioctx) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+
+ int64_t pool_id = client->pool_lookup(pool_name);
+ if (pool_id < 0) {
+ // pool_lookup returns a negative errno on failure
+ return static_cast<int>(pool_id);
+ }
+
+ *ioctx = reinterpret_cast<rados_ioctx_t>(
+ client->create_ioctx(pool_id, pool_name));
+ return 0;
+}
+
+// C API: open an io context by pool id.  The pool listing is scanned for a
+// matching id so the io context can be created with the pool's name.
+extern "C" int rados_ioctx_create2(rados_t cluster, int64_t pool_id,
+ rados_ioctx_t *ioctx)
+{
+ auto *client = reinterpret_cast<librados::TestRadosClient*>(cluster);
+
+ std::list<std::pair<int64_t, std::string> > pools;
+ int r = client->pool_list(pools);
+ if (r < 0) {
+ return r;
+ }
+
+ for (const auto& pool : pools) {
+ if (pool.first != pool_id) {
+ continue;
+ }
+ *ioctx = reinterpret_cast<rados_ioctx_t>(
+ client->create_ioctx(pool_id, pool.second));
+ return 0;
+ }
+ return -ENOENT;
+}
+
+// C API: release one reference on the io context (refcounted, not deleted
+// directly).
+extern "C" void rados_ioctx_destroy(rados_ioctx_t io) {
+ librados::TestIoCtxImpl *ctx =
+ reinterpret_cast<librados::TestIoCtxImpl*>(io);
+ ctx->put();
+}
+
+// C API: recover the owning cluster handle from an io context.
+extern "C" rados_t rados_ioctx_get_cluster(rados_ioctx_t io) {
+ librados::TestIoCtxImpl *ctx =
+ reinterpret_cast<librados::TestIoCtxImpl*>(io);
+ return reinterpret_cast<rados_t>(ctx->get_rados_client());
+}
+
+// C API: forward a monitor command to the stub client and marshal the
+// outputs back into malloc()'d C buffers.
+extern "C" int rados_mon_command(rados_t cluster, const char **cmd,
+ size_t cmdlen, const char *inbuf,
+ size_t inbuflen, char **outbuf,
+ size_t *outbuflen, char **outs,
+ size_t *outslen) {
+ auto *client = reinterpret_cast<librados::TestRadosClient*>(cluster);
+
+ // Gather the command strings directly from the C argument array.
+ vector<string> cmdvec(cmd, cmd + cmdlen);
+
+ bufferlist inbl;
+ inbl.append(inbuf, inbuflen);
+
+ bufferlist outbl;
+ string outstring;
+ int ret = client->mon_command(cmdvec, inbl, &outbl, &outstring);
+
+ // Copy results into caller-owned buffers (NULL outputs are skipped).
+ do_out_buffer(outbl, outbuf, outbuflen);
+ do_out_buffer(outstring, outs, outslen);
+ return ret;
+}
+
+// C API: start an object listing.  The listing is materialized eagerly into
+// a heap-allocated std::list which serves as the iteration handle.  A dummy
+// sentinel Object is pushed at the front so the first _next call can
+// uniformly pop before reading.
+extern "C" int rados_nobjects_list_open(rados_ioctx_t io,
+ rados_list_ctx_t *ctx) {
+ librados::TestIoCtxImpl *io_ctx =
+ reinterpret_cast<librados::TestIoCtxImpl*>(io);
+ librados::TestRadosClient *client = io_ctx->get_rados_client();
+
+ std::list<librados::TestRadosClient::Object> *list =
+ new std::list<librados::TestRadosClient::Object>();
+
+ client->object_list(io_ctx->get_id(), list);
+ list->push_front(librados::TestRadosClient::Object());
+ *ctx = reinterpret_cast<rados_list_ctx_t>(list);
+ return 0;
+}
+
+// C API: advance to the next object.  Pops the previously-returned entry
+// (or the sentinel) and exposes the new front; the returned C strings stay
+// valid until the next advance/close.  Returns -ENOENT at end of listing.
+extern "C" int rados_nobjects_list_next(rados_list_ctx_t ctx,
+ const char **entry,
+ const char **key,
+ const char **nspace) {
+ std::list<librados::TestRadosClient::Object> *list =
+ reinterpret_cast<std::list<librados::TestRadosClient::Object> *>(ctx);
+ if (!list->empty()) {
+ list->pop_front();
+ }
+ if (list->empty()) {
+ return -ENOENT;
+ }
+
+ librados::TestRadosClient::Object &obj = list->front();
+ if (entry != NULL) {
+ *entry = obj.oid.c_str();
+ }
+ if (key != NULL) {
+ *key = obj.locator.c_str();
+ }
+ if (nspace != NULL) {
+ *nspace = obj.nspace.c_str();
+ }
+ return 0;
+}
+
+// C API: free the listing handle allocated by _open.
+extern "C" void rados_nobjects_list_close(rados_list_ctx_t ctx) {
+ std::list<librados::TestRadosClient::Object> *list =
+ reinterpret_cast<std::list<librados::TestRadosClient::Object> *>(ctx);
+ delete list;
+}
+
+// C API: create a pool in the in-memory cluster.
+extern "C" int rados_pool_create(rados_t cluster, const char *pool_name) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ return client->pool_create(pool_name);
+}
+
+// C API: delete a pool from the in-memory cluster.
+extern "C" int rados_pool_delete(rados_t cluster, const char *pool_name) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ return client->pool_delete(pool_name);
+}
+
+// C API: drop the caller's reference to the cluster handle (refcounted).
+extern "C" void rados_shutdown(rados_t cluster) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ client->put();
+}
+
+// C API: no-op synchronization point in the stub.
+extern "C" int rados_wait_for_latest_osdmap(rados_t cluster) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ return client->wait_for_latest_osdmap();
+}
+
+using namespace std::placeholders;
+
+namespace librados {
+
+// Drop the wrapper's reference on the refcounted completion impl.
+AioCompletion::~AioCompletion()
+{
+ auto c = reinterpret_cast<AioCompletionImpl *>(pc);
+ c->release();
+}
+
+// Matches librados semantics: release() destroys the wrapper (and via the
+// destructor, the impl reference).
+void AioCompletion::release() {
+ delete this;
+}
+
+// Default-construct an unbound io context.
+IoCtx::IoCtx() : io_ctx_impl(NULL) {
+}
+
+IoCtx::~IoCtx() {
+ close();
+}
+
+// Copy shares the underlying refcounted impl.
+IoCtx::IoCtx(const IoCtx& rhs) {
+ io_ctx_impl = rhs.io_ctx_impl;
+ if (io_ctx_impl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->get();
+ }
+}
+
+// Move steals the impl pointer, leaving the source unbound.
+IoCtx::IoCtx(IoCtx&& rhs) noexcept : io_ctx_impl(std::exchange(rhs.io_ctx_impl, nullptr))
+{
+}
+
+// Copy-assignment: drop our reference, then share rhs's impl.
+IoCtx& IoCtx::operator=(const IoCtx& rhs) {
+ // Guard against self-assignment: without it, put() below could release
+ // the last reference and destroy the impl before get() re-acquires it.
+ if (this == &rhs) {
+ return *this;
+ }
+
+ if (io_ctx_impl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->put();
+ }
+
+ io_ctx_impl = rhs.io_ctx_impl;
+ if (io_ctx_impl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->get();
+ }
+ return *this;
+}
+
+// Move-assignment: drop our reference, then steal rhs's impl.
+librados::IoCtx& librados::IoCtx::operator=(IoCtx&& rhs) noexcept
+{
+ // Guard against self-move: otherwise put() releases our impl and the
+ // std::exchange below would reinstall the now-dangling pointer.
+ if (this == &rhs) {
+ return *this;
+ }
+
+ if (io_ctx_impl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->put();
+ }
+
+ io_ctx_impl = std::exchange(rhs.io_ctx_impl, nullptr);
+ return *this;
+}
+
+// Synchronously drain all pending AIO on this context.
+int IoCtx::aio_flush() {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->aio_flush();
+ return 0;
+}
+
+// Asynchronous flush; completion fires when pending AIO has drained.
+int IoCtx::aio_flush_async(AioCompletion *c) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->aio_flush_async(c->pc);
+ return 0;
+}
+
+// Asynchronous watch/notify broadcast on an object.
+int IoCtx::aio_notify(const std::string& oid, AioCompletion *c, bufferlist& bl,
+ uint64_t timeout_ms, bufferlist *pbl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->aio_notify(oid, c->pc, bl, timeout_ms, pbl);
+ return 0;
+}
+
+// Read-op aio_operate overloads funnel into aio_operate_read.
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectReadOperation *op, bufferlist *pbl) {
+ return aio_operate(oid, c, op, 0, pbl);
+}
+
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectReadOperation *op, int flags,
+ bufferlist *pbl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
+ return ctx->aio_operate_read(oid, *ops, c->pc, flags, pbl,
+ ctx->get_snap_read(), nullptr);
+}
+
+// Tracing variant: trace_info is ignored by the stub.
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectReadOperation *op, int flags,
+ bufferlist *pbl, const blkin_trace_info *trace_info) {
+ return aio_operate(oid, c, op, flags, pbl);
+}
+
+// Write-op aio_operate without an explicit snap context.
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectWriteOperation *op) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
+ return ctx->aio_operate(oid, *ops, c->pc, nullptr, nullptr, 0);
+}
+
+// Write-op aio_operate with a caller-supplied snapshot context.
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectWriteOperation *op, snap_t seq,
+ std::vector<snap_t>& snaps, int flags,
+ const blkin_trace_info *trace_info) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
+
+ // Convert the raw snap id vector into a SnapContext for the impl layer.
+ std::vector<snapid_t> snv;
+ snv.resize(snaps.size());
+ for (size_t i = 0; i < snaps.size(); ++i)
+ snv[i] = snaps[i];
+ SnapContext snapc(seq, snv);
+
+ return ctx->aio_operate(oid, *ops, c->pc, &snapc, nullptr, flags);
+}
+
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectWriteOperation *op, snap_t seq,
+ std::vector<snap_t>& snaps) {
+ return aio_operate(oid, c, op, seq, snaps, 0, nullptr);
+}
+
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectWriteOperation *op, snap_t seq,
+ std::vector<snap_t>& snaps,
+ const blkin_trace_info *trace_info) {
+ return aio_operate(oid, c, op, seq, snaps, 0, trace_info);
+}
+
+// Asynchronous object removal.
+int IoCtx::aio_remove(const std::string& oid, AioCompletion *c) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->aio_remove(oid, c->pc);
+}
+
+int IoCtx::aio_remove(const std::string& oid, AioCompletion *c, int flags) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->aio_remove(oid, c->pc, flags);
+}
+
+// Asynchronously register a watch on an object.
+int IoCtx::aio_watch(const std::string& o, AioCompletion *c, uint64_t *handle,
+ librados::WatchCtx2 *watch_ctx) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->aio_watch(o, c->pc, handle, watch_ctx);
+}
+
+// Asynchronously unregister a watch by handle.
+int IoCtx::aio_unwatch(uint64_t handle, AioCompletion *c) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->aio_unwatch(handle, c->pc);
+}
+
+// Expose the CephContext of the owning client.
+config_t IoCtx::cct() {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return reinterpret_cast<config_t>(ctx->get_rados_client()->cct());
+}
+
+// Release our impl reference and unbind; safe to call repeatedly.
+void IoCtx::close() {
+ if (io_ctx_impl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->put();
+ }
+ io_ctx_impl = NULL;
+}
+
+// Create an object (optionally failing if it already exists).
+int IoCtx::create(const std::string& oid, bool exclusive) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, std::bind(&TestIoCtxImpl::create, _1, _2, exclusive,
+ ctx->get_snap_context()));
+}
+
+// Rebind this wrapper to a clone of rhs's impl (own state, same pool).
+void IoCtx::dup(const IoCtx& rhs) {
+ close();
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(rhs.io_ctx_impl);
+ io_ctx_impl = reinterpret_cast<IoCtxImpl*>(ctx->clone());
+}
+
+// Invoke an object-class method through the shared class handler.
+int IoCtx::exec(const std::string& oid, const char *cls, const char *method,
+ bufferlist& inbl, bufferlist& outbl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, std::bind(&TestIoCtxImpl::exec, _1, _2,
+ librados_test_stub::get_class_handler(), cls,
+ method, inbl, &outbl, ctx->get_snap_read(),
+ ctx->get_snap_context()));
+}
+
+// Wrap a C io context handle in a C++ IoCtx, taking a new reference.
+void IoCtx::from_rados_ioctx_t(rados_ioctx_t p, IoCtx &io) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(p);
+ ctx->get();
+
+ io.close();
+ io.io_ctx_impl = reinterpret_cast<IoCtxImpl*>(ctx);
+}
+
+// Simple accessors delegating to the impl.
+uint64_t IoCtx::get_instance_id() const {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->get_instance_id();
+}
+
+int64_t IoCtx::get_id() {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->get_id();
+}
+
+uint64_t IoCtx::get_last_version() {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->get_last_version();
+}
+
+std::string IoCtx::get_pool_name() {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->get_pool_name();
+}
+
+// Enumerate snapshots of an object.
+int IoCtx::list_snaps(const std::string& o, snap_set_t *out_snaps) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ o, std::bind(&TestIoCtxImpl::list_snaps, _1, _2, out_snaps));
+}
+
+// Enumerate active watchers of an object.
+int IoCtx::list_watchers(const std::string& o,
+ std::list<obj_watch_t> *out_watchers) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ o, std::bind(&TestIoCtxImpl::list_watchers, _1, _2, out_watchers));
+}
+
+// Legacy notify: version argument is ignored; no timeout, no reply payload.
+int IoCtx::notify(const std::string& o, uint64_t ver, bufferlist& bl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->notify(o, bl, 0, NULL);
+}
+
+// Modern notify with timeout and optional aggregated reply.
+int IoCtx::notify2(const std::string& o, bufferlist& bl,
+ uint64_t timeout_ms, bufferlist *pbl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->notify(o, bl, timeout_ms, pbl);
+}
+
+// Acknowledge receipt of a notification.
+void IoCtx::notify_ack(const std::string& o, uint64_t notify_id,
+ uint64_t handle, bufferlist& bl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->notify_ack(o, notify_id, handle, bl);
+}
+
+// Fetch omap values after start_after (no key-filter prefix).
+int IoCtx::omap_get_vals(const std::string& oid,
+ const std::string& start_after,
+ uint64_t max_return,
+ std::map<std::string, bufferlist> *out_vals) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, std::bind(&TestIoCtxImpl::omap_get_vals, _1, _2, start_after, "",
+ max_return, out_vals));
+}
+
+// Synchronous compound write operation.
+int IoCtx::operate(const std::string& oid, ObjectWriteOperation *op) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
+ return ctx->operate(oid, *ops);
+}
+
+// Synchronous compound read operation.
+int IoCtx::operate(const std::string& oid, ObjectReadOperation *op,
+ bufferlist *pbl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
+ return ctx->operate_read(oid, *ops, pbl);
+}
+
+// Synchronous read of len bytes at off.
+int IoCtx::read(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, std::bind(&TestIoCtxImpl::read, _1, _2, len, off, &bl,
+ ctx->get_snap_read(), nullptr));
+}
+
+// Synchronous object removal under the current snap context.
+int IoCtx::remove(const std::string& oid) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, std::bind(&TestIoCtxImpl::remove, _1, _2, ctx->get_snap_context()));
+}
+
+// Self-managed snapshot lifecycle, sync and async variants.
+int IoCtx::selfmanaged_snap_create(uint64_t *snapid) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->selfmanaged_snap_create(snapid);
+}
+
+void IoCtx::aio_selfmanaged_snap_create(uint64_t *snapid, AioCompletion* c) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->aio_selfmanaged_snap_create(snapid, c->pc);
+}
+
+int IoCtx::selfmanaged_snap_remove(uint64_t snapid) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->selfmanaged_snap_remove(snapid);
+}
+
+void IoCtx::aio_selfmanaged_snap_remove(uint64_t snapid, AioCompletion* c) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->aio_selfmanaged_snap_remove(snapid, c->pc);
+}
+
+// Roll an object back to a self-managed snapshot.
+int IoCtx::selfmanaged_snap_rollback(const std::string& oid,
+ uint64_t snapid) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->selfmanaged_snap_rollback(oid, snapid);
+}
+
+// Install the snap context used for subsequent writes.
+int IoCtx::selfmanaged_snap_set_write_ctx(snap_t seq,
+ std::vector<snap_t>& snaps) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->selfmanaged_snap_set_write_ctx(seq, snaps);
+}
+
+// Select the snapshot that subsequent reads observe.
+void IoCtx::snap_set_read(snap_t seq) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->set_snap_read(seq);
+}
+
+// Sparse read: extent map plus data.
+int IoCtx::sparse_read(const std::string& oid, std::map<uint64_t,uint64_t>& m,
+ bufferlist& bl, size_t len, uint64_t off) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, std::bind(&TestIoCtxImpl::sparse_read, _1, _2, off, len, &m, &bl,
+ ctx->get_snap_read()));
+}
+
+// Object size and mtime.
+int IoCtx::stat(const std::string& oid, uint64_t *psize, time_t *pmtime) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, std::bind(&TestIoCtxImpl::stat, _1, _2, psize, pmtime));
+}
+
+// Legacy tmap update pass-through.
+int IoCtx::tmap_update(const std::string& oid, bufferlist& cmdbl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, std::bind(&TestIoCtxImpl::tmap_update, _1, _2, cmdbl));
+}
+
+// Truncate an object to off bytes.
+int IoCtx::trunc(const std::string& oid, uint64_t off) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, std::bind(&TestIoCtxImpl::truncate, _1, _2, off,
+ ctx->get_snap_context()));
+}
+
+// Both unwatch variants delegate to the same handle-based impl call.
+int IoCtx::unwatch2(uint64_t handle) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->unwatch(handle);
+}
+
+int IoCtx::unwatch(const std::string& o, uint64_t handle) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->unwatch(handle);
+}
+
+// Legacy watch (WatchCtx); version argument is ignored.
+int IoCtx::watch(const std::string& o, uint64_t ver, uint64_t *handle,
+ librados::WatchCtx *wctx) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->watch(o, handle, wctx, NULL);
+}
+
+// Modern watch (WatchCtx2).
+int IoCtx::watch2(const std::string& o, uint64_t *handle,
+ librados::WatchCtx2 *wctx) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->watch(o, handle, NULL, wctx);
+}
+
+// Write len bytes at off under the current snap context.
+int IoCtx::write(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, std::bind(&TestIoCtxImpl::write, _1, _2, bl, len, off,
+ ctx->get_snap_context()));
+}
+
+// Replace the entire object contents.
+int IoCtx::write_full(const std::string& oid, bufferlist& bl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, std::bind(&TestIoCtxImpl::write_full, _1, _2, bl,
+ ctx->get_snap_context()));
+}
+
+// Repeat bl across [off, off+len).
+int IoCtx::writesame(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, std::bind(&TestIoCtxImpl::writesame, _1, _2, bl, len, off,
+ ctx->get_snap_context()));
+}
+
+// Compare object extent at off against cmp_bl.
+int IoCtx::cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, std::bind(&TestIoCtxImpl::cmpext, _1, _2, off, cmp_bl,
+ ctx->get_snap_read()));
+}
+
+// Pool application API: enable is accepted as a no-op; all other calls are
+// unsupported in the stub and report -EOPNOTSUPP.
+int IoCtx::application_enable(const std::string& app_name, bool force) {
+ return 0;
+}
+
+int IoCtx::application_enable_async(const std::string& app_name,
+ bool force, PoolAsyncCompletion *c) {
+ return -EOPNOTSUPP;
+}
+
+int IoCtx::application_list(std::set<std::string> *app_names) {
+ return -EOPNOTSUPP;
+}
+
+int IoCtx::application_metadata_get(const std::string& app_name,
+ const std::string &key,
+ std::string *value) {
+ return -EOPNOTSUPP;
+}
+
+int IoCtx::application_metadata_set(const std::string& app_name,
+ const std::string &key,
+ const std::string& value) {
+ return -EOPNOTSUPP;
+}
+
+int IoCtx::application_metadata_remove(const std::string& app_name,
+ const std::string &key) {
+ return -EOPNOTSUPP;
+}
+
+int IoCtx::application_metadata_list(const std::string& app_name,
+ std::map<std::string, std::string> *values) {
+ return -EOPNOTSUPP;
+}
+
+// Namespace selection for subsequent operations.
+void IoCtx::set_namespace(const std::string& nspace) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->set_namespace(nspace);
+}
+
+std::string IoCtx::get_namespace() const {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->get_namespace();
+}
+
+// Pool-full-try flag is not modeled by the stub.
+void IoCtx::set_pool_full_try() {
+}
+
+bool IoCtx::get_pool_full_try() {
+ return false;
+}
+
+// Helper used by the op builders below: tee an operation's return code
+// into the caller-provided prval slot while passing it through.
+static int save_operation_result(int result, int *pval) {
+ if (pval != NULL) {
+ *pval = result;
+ }
+ return result;
+}
+
+// Compound operations accumulate bound callables in a refcounted
+// TestObjectOperationImpl; placeholders _1.._6 are filled in at execution
+// time by the io context (impl, oid, out-bl, snap id, snap context, ...).
+ObjectOperation::ObjectOperation() {
+ TestObjectOperationImpl *o = new TestObjectOperationImpl();
+ o->get();
+ impl = reinterpret_cast<ObjectOperationImpl*>(o);
+}
+
+ObjectOperation::~ObjectOperation() {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ if (o) {
+ o->put();
+ // nulling the local pointer has no effect on `impl`; kept as-is
+ o = NULL;
+ }
+}
+
+// Queue an existence assertion.
+void ObjectOperation::assert_exists() {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(std::bind(&TestIoCtxImpl::assert_exists, _1, _2, _4));
+}
+
+// Queue an object-version assertion.
+void ObjectOperation::assert_version(uint64_t ver) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(std::bind(&TestIoCtxImpl::assert_version, _1, _2, ver));
+}
+
+// Queue an object-class method invocation.
+void ObjectOperation::exec(const char *cls, const char *method,
+ bufferlist& inbl) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(std::bind(&TestIoCtxImpl::exec, _1, _2,
+ librados_test_stub::get_class_handler(), cls,
+ method, inbl, _3, _4, _5));
+}
+
+// Operation flags are not modeled by the stub.
+void ObjectOperation::set_op_flags2(int flags) {
+}
+
+size_t ObjectOperation::size() {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ return o->ops.size();
+}
+
+// Queue an extent comparison; result optionally mirrored into *prval.
+void ObjectOperation::cmpext(uint64_t off, const bufferlist& cmp_bl,
+ int *prval) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ ObjectOperationTestImpl op = std::bind(&TestIoCtxImpl::cmpext, _1, _2, off,
+ cmp_bl, _4);
+ if (prval != NULL) {
+ op = std::bind(save_operation_result,
+ std::bind(op, _1, _2, _3, _4, _5, _6), prval);
+ }
+ o->ops.push_back(op);
+}
+
+// Queue a snapshot listing; result optionally mirrored into *prval via
+// save_operation_result.
+void ObjectReadOperation::list_snaps(snap_set_t *out_snaps, int *prval) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+
+ ObjectOperationTestImpl op = std::bind(&TestIoCtxImpl::list_snaps, _1, _2,
+ out_snaps);
+ if (prval != NULL) {
+ op = std::bind(save_operation_result,
+ std::bind(op, _1, _2, _3, _4, _5, _6), prval);
+ }
+ o->ops.push_back(op);
+}
+
+// Queue a watcher listing.
+void ObjectReadOperation::list_watchers(std::list<obj_watch_t> *out_watchers,
+ int *prval) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+
+ ObjectOperationTestImpl op = std::bind(&TestIoCtxImpl::list_watchers, _1,
+ _2, out_watchers);
+ if (prval != NULL) {
+ op = std::bind(save_operation_result,
+ std::bind(op, _1, _2, _3, _4, _5, _6), prval);
+ }
+ o->ops.push_back(op);
+}
+
+// Queue a read.  If the caller supplied pbl the data lands there; otherwise
+// placeholder _3 routes it into the operation's shared out-bufferlist.
+void ObjectReadOperation::read(size_t off, uint64_t len, bufferlist *pbl,
+ int *prval) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+
+ ObjectOperationTestImpl op;
+ if (pbl != NULL) {
+ op = std::bind(&TestIoCtxImpl::read, _1, _2, len, off, pbl, _4, nullptr);
+ } else {
+ op = std::bind(&TestIoCtxImpl::read, _1, _2, len, off, _3, _4, nullptr);
+ }
+
+ if (prval != NULL) {
+ op = std::bind(save_operation_result,
+ std::bind(op, _1, _2, _3, _4, _5, _6), prval);
+ }
+ o->ops.push_back(op);
+}
+
+// Queue a sparse read; truncate_size/truncate_seq are ignored by the stub.
+void ObjectReadOperation::sparse_read(uint64_t off, uint64_t len,
+ std::map<uint64_t,uint64_t> *m,
+ bufferlist *pbl, int *prval,
+ uint64_t truncate_size,
+ uint32_t truncate_seq) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+
+ ObjectOperationTestImpl op;
+ if (pbl != NULL) {
+ op = std::bind(&TestIoCtxImpl::sparse_read, _1, _2, off, len, m, pbl, _4);
+ } else {
+ op = std::bind(&TestIoCtxImpl::sparse_read, _1, _2, off, len, m, _3, _4);
+ }
+
+ if (prval != NULL) {
+ op = std::bind(save_operation_result,
+ std::bind(op, _1, _2, _3, _4, _5, _6), prval);
+ }
+ o->ops.push_back(op);
+}
+
+// Queue a stat (size and mtime).
+void ObjectReadOperation::stat(uint64_t *psize, time_t *pmtime, int *prval) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+
+ ObjectOperationTestImpl op = std::bind(&TestIoCtxImpl::stat, _1, _2,
+ psize, pmtime);
+
+ if (prval != NULL) {
+ op = std::bind(save_operation_result,
+ std::bind(op, _1, _2, _3, _4, _5, _6), prval);
+ }
+ o->ops.push_back(op);
+}
+
+void ObjectWriteOperation::append(const bufferlist &bl) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(std::bind(&TestIoCtxImpl::append, _1, _2, bl, _5));
+}
+
+void ObjectWriteOperation::create(bool exclusive) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(std::bind(&TestIoCtxImpl::create, _1, _2, exclusive, _5));
+}
+
+void ObjectWriteOperation::omap_set(const std::map<std::string, bufferlist> &map) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(std::bind(&TestIoCtxImpl::omap_set, _1, _2, boost::ref(map)));
+}
+
+void ObjectWriteOperation::remove() {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(std::bind(&TestIoCtxImpl::remove, _1, _2, _5));
+}
+
+// Queue a rollback of the object to the given self-managed snapshot.
+void ObjectWriteOperation::selfmanaged_snap_rollback(uint64_t snapid) {
+  TestObjectOperationImpl *ops_impl =
+    reinterpret_cast<TestObjectOperationImpl*>(impl);
+  ops_impl->ops.push_back(
+    std::bind(&TestIoCtxImpl::selfmanaged_snap_rollback, _1, _2, snapid));
+}
+
+// Queue an allocation hint (flags-less variant; forwards flags = 0).
+void ObjectWriteOperation::set_alloc_hint(uint64_t expected_object_size,
+                                          uint64_t expected_write_size) {
+  TestObjectOperationImpl *ops_impl =
+    reinterpret_cast<TestObjectOperationImpl*>(impl);
+  ops_impl->ops.push_back(
+    std::bind(&TestIoCtxImpl::set_alloc_hint, _1, _2, expected_object_size,
+              expected_write_size, 0, _5));
+}
+
+// Queue an allocation hint with explicit flags.
+void ObjectWriteOperation::set_alloc_hint2(uint64_t expected_object_size,
+                                           uint64_t expected_write_size,
+                                           uint32_t flags) {
+  TestObjectOperationImpl *ops_impl =
+    reinterpret_cast<TestObjectOperationImpl*>(impl);
+  ops_impl->ops.push_back(
+    std::bind(&TestIoCtxImpl::set_alloc_hint, _1, _2, expected_object_size,
+              expected_write_size, flags, _5));
+}
+
+// Queue a tmap update with a copy of the command buffer.
+void ObjectWriteOperation::tmap_update(const bufferlist& cmdbl) {
+  TestObjectOperationImpl *ops_impl =
+    reinterpret_cast<TestObjectOperationImpl*>(impl);
+  ops_impl->ops.push_back(
+    std::bind(&TestIoCtxImpl::tmap_update, _1, _2, cmdbl));
+}
+
+// Queue a truncate to 'off' bytes.
+void ObjectWriteOperation::truncate(uint64_t off) {
+  TestObjectOperationImpl *ops_impl =
+    reinterpret_cast<TestObjectOperationImpl*>(impl);
+  ops_impl->ops.push_back(std::bind(&TestIoCtxImpl::truncate, _1, _2, off, _5));
+}
+
+// Queue a write of the whole buffer at 'off' (length captured at bind time).
+void ObjectWriteOperation::write(uint64_t off, const bufferlist& bl) {
+  TestObjectOperationImpl *ops_impl =
+    reinterpret_cast<TestObjectOperationImpl*>(impl);
+  ops_impl->ops.push_back(
+    std::bind(&TestIoCtxImpl::write, _1, _2, bl, bl.length(), off, _5));
+}
+
+// Queue a full-object overwrite with a copy of 'bl'.
+void ObjectWriteOperation::write_full(const bufferlist& bl) {
+  TestObjectOperationImpl *ops_impl =
+    reinterpret_cast<TestObjectOperationImpl*>(impl);
+  ops_impl->ops.push_back(std::bind(&TestIoCtxImpl::write_full, _1, _2, bl, _5));
+}
+
+// Queue a writesame: replicate 'bl' across 'len' bytes starting at 'off'.
+void ObjectWriteOperation::writesame(uint64_t off, uint64_t len,
+                                     const bufferlist& bl) {
+  TestObjectOperationImpl *ops_impl =
+    reinterpret_cast<TestObjectOperationImpl*>(impl);
+  ops_impl->ops.push_back(
+    std::bind(&TestIoCtxImpl::writesame, _1, _2, bl, len, off, _5));
+}
+
+// Queue a zero of 'len' bytes starting at 'off'.
+void ObjectWriteOperation::zero(uint64_t off, uint64_t len) {
+  TestObjectOperationImpl *ops_impl =
+    reinterpret_cast<TestObjectOperationImpl*>(impl);
+  ops_impl->ops.push_back(std::bind(&TestIoCtxImpl::zero, _1, _2, off, len, _5));
+}
+
+// Default-construct with no attached client; init()/connect() attach one.
+Rados::Rados() : client(NULL) {
+}
+
+// Construct from an existing IoCtx: share its TestRadosClient, taking an
+// extra reference (released by shutdown()/the destructor).
+Rados::Rados(IoCtx& ioctx) {
+  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(ioctx.io_ctx_impl);
+  TestRadosClient *impl = ctx->get_rados_client();
+  impl->get();
+
+  client = reinterpret_cast<RadosClient*>(impl);
+  ceph_assert(client != NULL);
+}
+
+// Releases the client reference (if any) via shutdown().
+Rados::~Rados() {
+  shutdown();
+}
+
+// Rebind 'rados' to the client behind the C handle 'p': drop any reference
+// it currently holds, then take a new reference on 'p' (which may be null).
+void Rados::from_rados_t(rados_t p, Rados &rados) {
+  if (rados.client != nullptr) {
+    reinterpret_cast<TestRadosClient*>(rados.client)->put();
+    rados.client = nullptr;
+  }
+
+  auto impl = reinterpret_cast<TestRadosClient*>(p);
+  if (impl) {
+    impl->get();
+    rados.client = reinterpret_cast<RadosClient*>(impl);
+  }
+}
+
+// Create an AIO completion via the C API; creation must not fail in the stub.
+AioCompletion *Rados::aio_create_completion(void *cb_arg,
+                                            callback_t cb_complete) {
+  AioCompletionImpl *completion_impl;
+  int r = rados_aio_create_completion2(
+    cb_arg, cb_complete, reinterpret_cast<void**>(&completion_impl));
+  ceph_assert(r == 0);
+  return new AioCompletion(completion_impl);
+}
+
+// Asynchronously flush watch/notify callbacks on the test client.
+int Rados::aio_watch_flush(AioCompletion* c) {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->aio_watch_flush(c->pc);
+}
+
+// Add a client address to the cluster blocklist.
+int Rados::blocklist_add(const std::string& client_address,
+                         uint32_t expire_seconds) {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->blocklist_add(client_address, expire_seconds);
+}
+
+// Expose the client's CephContext as an opaque config handle.
+config_t Rados::cct() {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return reinterpret_cast<config_t>(test_client->cct());
+}
+
+// Stub: always reports a fixed, fake cluster fsid.
+int Rados::cluster_fsid(std::string* fsid) {
+  *fsid = "00000000-1111-2222-3333-444444444444";
+  return 0;
+}
+
+// Delegate to the C API configuration setter.
+int Rados::conf_set(const char *option, const char *value) {
+  return rados_conf_set(reinterpret_cast<rados_t>(client), option, value);
+}
+
+// Read a configuration option into 'val'. get_val() allocates the returned
+// C string; it is freed on both the error and success paths.
+int Rados::conf_get(const char *option, std::string &val) {
+  TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+  CephContext *cct = impl->cct();
+
+  char *str = NULL;
+  int ret = cct->_conf.get_val(option, &str, -1);
+  if (ret != 0) {
+    free(str);
+    return ret;
+  }
+
+  val = str;
+  free(str);
+  return 0;
+}
+
+// Delegate environment-based configuration parsing to the C API.
+int Rados::conf_parse_env(const char *env) const {
+  return rados_conf_parse_env(reinterpret_cast<rados_t>(client), env);
+}
+
+// Delegate config-file parsing to the C API.
+int Rados::conf_read_file(const char * const path) const {
+  return rados_conf_read_file(reinterpret_cast<rados_t>(client), path);
+}
+
+// Delegate connection to the C API.
+int Rados::connect() {
+  return rados_connect(reinterpret_cast<rados_t>(client));
+}
+
+// Return the unique instance id of the underlying test client.
+uint64_t Rados::get_instance_id() {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->get_instance_id();
+}
+
+// Query the minimum required OSD release from the test client.
+int Rados::get_min_compatible_osd(int8_t* require_osd_release) {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->get_min_compatible_osd(require_osd_release);
+}
+
+// Query the minimum/required client compatibility levels.
+int Rados::get_min_compatible_client(int8_t* min_compat_client,
+                                     int8_t* require_min_compat_client) {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->get_min_compatible_client(min_compat_client,
+                                                require_min_compat_client);
+}
+
+// Create the underlying client via the C API; stores it in 'client'.
+int Rados::init(const char * const id) {
+  return rados_create(reinterpret_cast<rados_t *>(&client), id);
+}
+
+// Create the underlying client from an existing CephContext handle.
+int Rados::init_with_context(config_t cct_) {
+  return rados_create_with_context(reinterpret_cast<rados_t *>(&client), cct_);
+}
+
+// Open an IoCtx for the named pool; closes any context 'io' already holds.
+int Rados::ioctx_create(const char *name, IoCtx &io) {
+  rados_ioctx_t raw_ioctx;
+  int r = rados_ioctx_create(reinterpret_cast<rados_t>(client), name,
+                             &raw_ioctx);
+  if (r) {
+    return r;
+  }
+
+  io.close();
+  io.io_ctx_impl = reinterpret_cast<IoCtxImpl*>(raw_ioctx);
+  return 0;
+}
+
+// Open an IoCtx by pool id; closes any context 'io' already holds.
+int Rados::ioctx_create2(int64_t pool_id, IoCtx &io)
+{
+  rados_ioctx_t raw_ioctx;
+  int r = rados_ioctx_create2(reinterpret_cast<rados_t>(client), pool_id,
+                              &raw_ioctx);
+  if (r) {
+    return r;
+  }
+
+  io.close();
+  io.io_ctx_impl = reinterpret_cast<IoCtxImpl*>(raw_ioctx);
+  return 0;
+}
+
+// Issue a single monitor command (wrapped in a one-element vector).
+int Rados::mon_command(std::string cmd, const bufferlist& inbl,
+                       bufferlist *outbl, std::string *outs) {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+
+  std::vector<std::string> cmds{cmd};
+  return test_client->mon_command(cmds, inbl, outbl, outs);
+}
+
+// Register a service daemon with the test cluster.
+int Rados::service_daemon_register(const std::string& service,
+                                   const std::string& name,
+                                   const std::map<std::string,std::string>& metadata) {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->service_daemon_register(service, name, metadata);
+}
+
+// Update the registered daemon's status map (moved into the client).
+int Rados::service_daemon_update_status(std::map<std::string,std::string>&& status) {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->service_daemon_update_status(std::move(status));
+}
+
+// Create a pool in the in-memory cluster.
+int Rados::pool_create(const char *name) {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->pool_create(name);
+}
+
+// Delete a pool from the in-memory cluster.
+int Rados::pool_delete(const char *name) {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->pool_delete(name);
+}
+
+// Resolve a pool's base tier.
+int Rados::pool_get_base_tier(int64_t pool, int64_t* base_tier) {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->pool_get_base_tier(pool, base_tier);
+}
+
+// List pool names only, discarding the pool ids pool_list() also returns.
+int Rados::pool_list(std::list<std::string>& v) {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  std::list<std::pair<int64_t, std::string> > pools;
+  int r = test_client->pool_list(pools);
+  if (r < 0) {
+    return r;
+  }
+
+  v.clear();
+  for (auto &pool : pools) {
+    v.push_back(pool.second);
+  }
+  return 0;
+}
+
+// List pools as (id, name) pairs.
+int Rados::pool_list2(std::list<std::pair<int64_t, std::string> >& v)
+{
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->pool_list(v);
+}
+
+// Look up a pool id by name.
+int64_t Rados::pool_lookup(const char *name) {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->pool_lookup(name);
+}
+
+// Look up a pool name by id.
+int Rados::pool_reverse_lookup(int64_t id, std::string *name) {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->pool_reverse_lookup(id, name);
+}
+
+// Release our reference on the test client (idempotent; safe to call with
+// no client attached).
+void Rados::shutdown() {
+  if (client == NULL) {
+    return;
+  }
+  reinterpret_cast<TestRadosClient*>(client)->put();
+  client = NULL;
+}
+
+// No-op in the test stub.
+void Rados::test_blocklist_self(bool set) {
+}
+
+// Wait for the latest OSD map on the test client.
+int Rados::wait_for_latest_osdmap() {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->wait_for_latest_osdmap();
+}
+
+// Synchronously flush watch/notify callbacks.
+int Rados::watch_flush() {
+  auto *test_client = reinterpret_cast<TestRadosClient*>(client);
+  return test_client->watch_flush();
+}
+
+// Out-of-line empty destructor — presumably anchors the vtable in this TU.
+WatchCtx::~WatchCtx() {
+}
+
+// Out-of-line empty destructor — presumably anchors the vtable in this TU.
+WatchCtx2::~WatchCtx2() {
+}
+
+} // namespace librados
+
+// Class-method hook: create the object under the caller's snap context.
+int cls_cxx_create(cls_method_context_t hctx, bool exclusive) {
+  auto *mctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  return mctx->io_ctx_impl->create(mctx->oid, exclusive, mctx->snapc);
+}
+
+// Class-method hook: remove the object using the io ctx's snap context.
+int cls_cxx_remove(cls_method_context_t hctx) {
+  auto *mctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  return mctx->io_ctx_impl->remove(mctx->oid,
+                                   mctx->io_ctx_impl->get_snap_context());
+}
+
+// Fabricate a request origin for class methods: a loopback (127.0.0.1)
+// address carrying the test client's nonce and instance id.
+int cls_get_request_origin(cls_method_context_t hctx, entity_inst_t *origin) {
+  librados::TestClassHandler::MethodContext *ctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+
+  librados::TestRadosClient *rados_client =
+    ctx->io_ctx_impl->get_rados_client();
+
+  struct sockaddr_in sin;
+  memset(&sin, 0, sizeof(sin));
+  sin.sin_family = AF_INET;
+  sin.sin_port = 0;
+  inet_pton(AF_INET, "127.0.0.1", &sin.sin_addr);
+
+  entity_addr_t entity_addr(entity_addr_t::TYPE_DEFAULT,
+                            rados_client->get_nonce());
+  entity_addr.in4_addr() = sin;
+
+  *origin = entity_inst_t(
+    entity_name_t::CLIENT(rados_client->get_instance_id()),
+    entity_addr);
+  return 0;
+}
+
+// Fetch a single xattr by retrieving the full set and selecting 'name'.
+// Returns -ENODATA when the attribute is absent.
+int cls_cxx_getxattr(cls_method_context_t hctx, const char *name,
+                     bufferlist *outbl) {
+  std::map<string, bufferlist> attrs;
+  int r = cls_cxx_getxattrs(hctx, &attrs);
+  if (r < 0) {
+    return r;
+  }
+
+  auto it = attrs.find(name);
+  if (it == attrs.end()) {
+    return -ENODATA;
+  }
+  *outbl = it->second;
+  return 0;
+}
+
+// Fetch all xattrs of the object into 'attrset'.
+int cls_cxx_getxattrs(cls_method_context_t hctx, std::map<string, bufferlist> *attrset) {
+  auto *mctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  return mctx->io_ctx_impl->xattr_get(mctx->oid, attrset);
+}
+
+// List up to 'max_to_get' omap keys after 'start_obj'; derived from the
+// key/value listing. Returns the number of keys collected on success.
+int cls_cxx_map_get_keys(cls_method_context_t hctx, const string &start_obj,
+                         uint64_t max_to_get, std::set<string> *keys, bool *more) {
+  auto *mctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+
+  keys->clear();
+  std::map<string, bufferlist> vals;
+  int r = mctx->io_ctx_impl->omap_get_vals2(mctx->oid, start_obj, "",
+                                            max_to_get, &vals, more);
+  if (r < 0) {
+    return r;
+  }
+
+  for (auto &val : vals) {
+    keys->insert(val.first);
+  }
+  return keys->size();
+}
+
+// Fetch a single omap value. Lists up to 1024 entries whose keys start with
+// 'key' (filter_prefix == key) and then exact-matches; -ENOENT if absent.
+// NOTE(review): a prefix-sharing keyspace wider than 1024 entries could in
+// principle miss the key — confirm against TestIoCtxImpl::omap_get_vals.
+int cls_cxx_map_get_val(cls_method_context_t hctx, const string &key,
+                        bufferlist *outbl) {
+  librados::TestClassHandler::MethodContext *ctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+
+  std::map<string, bufferlist> vals;
+  int r = ctx->io_ctx_impl->omap_get_vals(ctx->oid, "", key, 1024, &vals);
+  if (r < 0) {
+    return r;
+  }
+
+  std::map<string, bufferlist>::iterator it = vals.find(key);
+  if (it == vals.end()) {
+    return -ENOENT;
+  }
+
+  *outbl = it->second;
+  return 0;
+}
+
+// List omap values; returns the number of values fetched on success.
+int cls_cxx_map_get_vals(cls_method_context_t hctx, const string &start_obj,
+                         const string &filter_prefix, uint64_t max_to_get,
+                         std::map<string, bufferlist> *vals, bool *more) {
+  auto *mctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  int r = mctx->io_ctx_impl->omap_get_vals2(mctx->oid, start_obj,
+                                            filter_prefix, max_to_get,
+                                            vals, more);
+  return r < 0 ? r : static_cast<int>(vals->size());
+}
+
+// Remove a single omap key (via the set-based removal API).
+int cls_cxx_map_remove_key(cls_method_context_t hctx, const string &key) {
+  auto *mctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  std::set<std::string> keys{key};
+  return mctx->io_ctx_impl->omap_rm_keys(mctx->oid, keys);
+}
+
+// Set a single omap key (via the map-based bulk API).
+int cls_cxx_map_set_val(cls_method_context_t hctx, const string &key,
+                        bufferlist *inbl) {
+  std::map<std::string, bufferlist> kv;
+  kv[key] = *inbl;
+  return cls_cxx_map_set_vals(hctx, &kv);
+}
+
+// Set multiple omap key/value pairs on the object.
+int cls_cxx_map_set_vals(cls_method_context_t hctx,
+                         const std::map<string, bufferlist> *map) {
+  auto *mctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  return mctx->io_ctx_impl->omap_set(mctx->oid, *map);
+}
+
+// Read with no operation flags.
+int cls_cxx_read(cls_method_context_t hctx, int ofs, int len,
+                 bufferlist *outbl) {
+  return cls_cxx_read2(hctx, ofs, len, outbl, 0);
+}
+
+// Read 'len' bytes at 'ofs' as of the method's snap id. 'op_flags' is
+// accepted but not used by the stub.
+int cls_cxx_read2(cls_method_context_t hctx, int ofs, int len,
+                  bufferlist *outbl, uint32_t op_flags) {
+  librados::TestClassHandler::MethodContext *ctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  return ctx->io_ctx_impl->read(
+    ctx->oid, len, ofs, outbl, ctx->snap_id, nullptr);
+}
+
+// Set a single xattr on the object.
+int cls_cxx_setxattr(cls_method_context_t hctx, const char *name,
+                     bufferlist *inbl) {
+  auto *mctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  return mctx->io_ctx_impl->xattr_set(mctx->oid, name, *inbl);
+}
+
+// Stat the object (size and mtime; either pointer may be consumed by impl).
+int cls_cxx_stat(cls_method_context_t hctx, uint64_t *size, time_t *mtime) {
+  auto *mctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  return mctx->io_ctx_impl->stat(mctx->oid, size, mtime);
+}
+
+// Write with no operation flags.
+int cls_cxx_write(cls_method_context_t hctx, int ofs, int len,
+                  bufferlist *inbl) {
+  return cls_cxx_write2(hctx, ofs, len, inbl, 0);
+}
+
+// Write 'len' bytes of *inbl at 'ofs' under the method's snap context.
+// 'op_flags' is accepted but not used by the stub.
+int cls_cxx_write2(cls_method_context_t hctx, int ofs, int len,
+                   bufferlist *inbl, uint32_t op_flags) {
+  librados::TestClassHandler::MethodContext *ctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  return ctx->io_ctx_impl->write(ctx->oid, *inbl, len, ofs, ctx->snapc);
+}
+
+// Replace the object's entire contents with *inbl.
+int cls_cxx_write_full(cls_method_context_t hctx, bufferlist *inbl) {
+  auto *mctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  return mctx->io_ctx_impl->write_full(mctx->oid, *inbl, mctx->snapc);
+}
+
+// Replace the object's data: truncate to zero first, then write *inbl at
+// 'ofs'. A failed truncate aborts without writing.
+int cls_cxx_replace(cls_method_context_t hctx, int ofs, int len,
+                    bufferlist *inbl) {
+  librados::TestClassHandler::MethodContext *ctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  int r = ctx->io_ctx_impl->truncate(ctx->oid, 0, ctx->snapc);
+  if (r < 0) {
+    return r;
+  }
+  return ctx->io_ctx_impl->write(ctx->oid, *inbl, len, ofs, ctx->snapc);
+}
+
+// Truncate the object to 'ofs' bytes.
+int cls_cxx_truncate(cls_method_context_t hctx, int ofs) {
+  auto *mctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  return mctx->io_ctx_impl->truncate(mctx->oid, ofs, mctx->snapc);
+}
+
+// Zero 'len' bytes of the object starting at byte offset 'ofs'.
+int cls_cxx_write_zero(cls_method_context_t hctx, int ofs, int len) {
+  librados::TestClassHandler::MethodContext *ctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  // Bug fix: TestIoCtxImpl::zero() takes (oid, offset, length, snapc) — see
+  // ObjectWriteOperation::zero() — but this call previously passed
+  // (len, ofs), zeroing the wrong byte range.
+  return ctx->io_ctx_impl->zero(ctx->oid, ofs, len, ctx->snapc);
+}
+
+// Translate the stub's obj_watch_t watcher list into the OSD's
+// obj_list_watch_response_t representation.
+int cls_cxx_list_watchers(cls_method_context_t hctx,
+                          obj_list_watch_response_t *watchers) {
+  librados::TestClassHandler::MethodContext *ctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+
+  std::list<obj_watch_t> obj_watchers;
+  int r = ctx->io_ctx_impl->list_watchers(ctx->oid, &obj_watchers);
+  if (r < 0) {
+    return r;
+  }
+
+  for (auto &w : obj_watchers) {
+    watch_item_t watcher;
+    watcher.name = entity_name_t::CLIENT(w.watcher_id);
+    watcher.cookie = w.cookie;
+    watcher.timeout_seconds = w.timeout_seconds;
+    // NOTE(review): parse() result is ignored; a malformed address yields a
+    // default-constructed addr — confirm this is acceptable for the stub.
+    watcher.addr.parse(w.addr);
+    watchers->entries.push_back(watcher);
+  }
+
+  return 0;
+}
+
+// Stub: report the full default supported feature set for any context.
+uint64_t cls_get_features(cls_method_context_t hctx) {
+  return CEPH_FEATURES_SUPPORTED_DEFAULT;
+}
+
+// Stub: report the full default supported feature set for the client.
+uint64_t cls_get_client_features(cls_method_context_t hctx) {
+  return CEPH_FEATURES_SUPPORTED_DEFAULT;
+}
+
+// Report the object's snapset sequence number via its snap listing.
+int cls_get_snapset_seq(cls_method_context_t hctx, uint64_t *snap_seq) {
+  auto *mctx =
+    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+  librados::snap_set_t snap_set;
+  int r = mctx->io_ctx_impl->list_snaps(mctx->oid, &snap_set);
+  if (r < 0) {
+    return r;
+  }
+
+  *snap_seq = snap_set.seq;
+  return 0;
+}
+
+// printf-style logging for class methods. Grows the buffer geometrically
+// until the formatted message fits (capped once size exceeds 8196 bytes —
+// kept as-is for compatibility, though it reads like a typo for 8192), then
+// emits the text via dout at the requested level and returns vsnprintf's
+// result.
+int cls_log(int level, const char *format, ...) {
+  int size = 256;
+  va_list ap;
+  while (1) {
+    // Heap buffer instead of the original variable-length array: VLAs are
+    // not standard C++ and risk stack overflow as 'size' doubles.
+    std::vector<char> buf(size);
+    va_start(ap, format);
+    int n = vsnprintf(buf.data(), size, format, ap);
+    va_end(ap);
+    if ((n > -1 && n < size) || size > 8196) {
+      dout(ceph::dout::need_dynamic(level)) << buf.data() << dendl;
+      return n;
+    }
+    size *= 2;
+  }
+  // not reached (the unreachable trailing 'return 0;' was removed)
+}
+
+// Register a new object class with the shared test class handler.
+int cls_register(const char *name, cls_handle_t *handle) {
+  auto *handler = librados_test_stub::get_class_handler();
+  return handler->create(name, handle);
+}
+
+// Register a class method. 'flags' is accepted but not forwarded to the
+// test handler.
+int cls_register_cxx_method(cls_handle_t hclass, const char *method,
+                            int flags,
+                            cls_method_cxx_call_t class_call,
+                            cls_method_handle_t *handle) {
+  librados::TestClassHandler *cls = librados_test_stub::get_class_handler();
+  return cls->create_method(hclass, method, class_call, handle);
+}
+
+// Register a PGLS filter factory; the returned filter handle is unused.
+int cls_register_cxx_filter(cls_handle_t hclass,
+                            const std::string &filter_name,
+                            cls_cxx_filter_factory_t fn,
+                            cls_filter_handle_t *)
+{
+  auto *handler = librados_test_stub::get_class_handler();
+  return handler->create_filter(hclass, filter_name, fn);
+}
+
+// Stub: always reports nautilus as the required OSD release.
+ceph_release_t cls_get_required_osd_release(cls_handle_t hclass) {
+  return ceph_release_t::nautilus;
+}
+
+// Stub: always reports nautilus as the minimum compatible client release.
+ceph_release_t cls_get_min_compatible_client(cls_handle_t hclass) {
+  return ceph_release_t::nautilus;
+}
+
+// stubs to silence TestClassHandler::open_class()
+// Empty destructor definition required to link TestClassHandler::open_class().
+PGLSFilter::~PGLSFilter()
+{}
+
+// Unsupported in the test stub.
+int cls_gen_rand_base64(char *, int) {
+  return -ENOTSUP;
+}
+
+// Unsupported in the test stub.
+int cls_cxx_chunk_write_and_set(cls_method_handle_t, int,
+                                int, bufferlist *,
+                                uint32_t, bufferlist *, int) {
+  return -ENOTSUP;
+}
+
+// Unsupported in the test stub.
+int cls_cxx_map_read_header(cls_method_handle_t, bufferlist *) {
+  return -ENOTSUP;
+}
+
+// Stub: reports a zero minimum allocation size.
+uint64_t cls_get_osd_min_alloc_size(cls_method_context_t hctx) {
+  return 0;
+}
+
+// Stub: reports a zero pool stripe width.
+uint64_t cls_get_pool_stripe_width(cls_method_context_t hctx) {
+  return 0;
+}
diff --git a/src/test/librados_test_stub/LibradosTestStub.h b/src/test/librados_test_stub/LibradosTestStub.h
new file mode 100644
index 000000000..5d335f488
--- /dev/null
+++ b/src/test/librados_test_stub/LibradosTestStub.h
@@ -0,0 +1,42 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef LIBRADOS_TEST_STUB_H
+#define LIBRADOS_TEST_STUB_H
+
+#include "include/rados/librados_fwd.hpp"
+#include <boost/shared_ptr.hpp>
+
+namespace neorados {
+struct IOContext;
+struct RADOS;
+} // namespace neorados
+
+namespace librados {
+
+class MockTestMemIoCtxImpl;
+class MockTestMemRadosClient;
+class TestCluster;
+class TestClassHandler;
+
+MockTestMemIoCtxImpl &get_mock_io_ctx(IoCtx &ioctx);
+MockTestMemIoCtxImpl &get_mock_io_ctx(neorados::RADOS& rados,
+ neorados::IOContext& io_context);
+
+MockTestMemRadosClient &get_mock_rados_client(neorados::RADOS& rados);
+
+} // namespace librados
+
+namespace librados_test_stub {
+
+typedef boost::shared_ptr<librados::TestCluster> TestClusterRef;
+
+void set_cluster(TestClusterRef cluster);
+TestClusterRef get_cluster();
+
+librados::TestClassHandler* get_class_handler();
+
+} // namespace librados_test_stub
+
+
+#endif // LIBRADOS_TEST_STUB_H
diff --git a/src/test/librados_test_stub/MockTestMemCluster.h b/src/test/librados_test_stub/MockTestMemCluster.h
new file mode 100644
index 000000000..4a6da0cc0
--- /dev/null
+++ b/src/test/librados_test_stub/MockTestMemCluster.h
@@ -0,0 +1,36 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
+#define LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
+
+#include "include/common_fwd.h"
+#include "test/librados_test_stub/TestMemCluster.h"
+#include "test/librados_test_stub/MockTestMemRadosClient.h"
+#include "gmock/gmock.h"
+
+
+namespace librados {
+
+class TestRadosClient;
+
+// gmock wrapper over TestMemCluster: create_rados_client() is mockable and
+// dispatches to the real in-memory implementation by default.
+class MockTestMemCluster : public TestMemCluster {
+public:
+  MockTestMemCluster() {
+    default_to_dispatch();
+  }
+
+  MOCK_METHOD1(create_rados_client, TestRadosClient*(CephContext*));
+  // Default action: hand out a NiceMock client bound to this cluster.
+  MockTestMemRadosClient* do_create_rados_client(CephContext *cct) {
+    return new ::testing::NiceMock<MockTestMemRadosClient>(cct, this);
+  }
+
+  void default_to_dispatch() {
+    using namespace ::testing;
+    ON_CALL(*this, create_rados_client(_)).WillByDefault(Invoke(this, &MockTestMemCluster::do_create_rados_client));
+  }
+};
+
+} // namespace librados
+
+#endif // LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
diff --git a/src/test/librados_test_stub/MockTestMemIoCtxImpl.h b/src/test/librados_test_stub/MockTestMemIoCtxImpl.h
new file mode 100644
index 000000000..aed431c11
--- /dev/null
+++ b/src/test/librados_test_stub/MockTestMemIoCtxImpl.h
@@ -0,0 +1,252 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef LIBRADOS_TEST_STUB_MOCK_TEST_MEM_IO_CTX_IMPL_H
+#define LIBRADOS_TEST_STUB_MOCK_TEST_MEM_IO_CTX_IMPL_H
+
+#include "test/librados_test_stub/TestMemIoCtxImpl.h"
+#include "test/librados_test_stub/TestMemCluster.h"
+#include "gmock/gmock.h"
+
+namespace librados {
+
+class MockTestMemRadosClient;
+
+// gmock wrapper over TestMemIoCtxImpl. Each MOCK_METHOD is paired with a
+// do_* forwarder to the real in-memory implementation, and
+// default_to_parent() wires every mock to its forwarder so un-stubbed calls
+// behave like the real io ctx.
+class MockTestMemIoCtxImpl : public TestMemIoCtxImpl {
+public:
+  MockTestMemIoCtxImpl(MockTestMemRadosClient *mock_client,
+                       TestMemRadosClient *client, int64_t pool_id,
+                       const std::string& pool_name,
+                       TestMemCluster::Pool *pool)
+    : TestMemIoCtxImpl(client, pool_id, pool_name, pool),
+      m_mock_client(mock_client), m_client(client) {
+    default_to_parent();
+  }
+
+  MockTestMemRadosClient *get_mock_rados_client() {
+    return m_mock_client;
+  }
+
+  MOCK_METHOD0(clone, TestIoCtxImpl*());
+  // Default clone: a fresh NiceMock sharing pool/client and copying the
+  // snap read/context state.
+  TestIoCtxImpl *do_clone() {
+    TestIoCtxImpl *io_ctx_impl = new ::testing::NiceMock<MockTestMemIoCtxImpl>(
+      m_mock_client, m_client, get_pool_id(), get_pool_name(), get_pool());
+    io_ctx_impl->set_snap_read(get_snap_read());
+    io_ctx_impl->set_snap_context(get_snap_context());
+    return io_ctx_impl;
+  }
+
+  MOCK_METHOD5(aio_notify, void(const std::string& o, AioCompletionImpl *c,
+                                bufferlist& bl, uint64_t timeout_ms,
+                                bufferlist *pbl));
+  void do_aio_notify(const std::string& o, AioCompletionImpl *c, bufferlist& bl,
+                     uint64_t timeout_ms, bufferlist *pbl) {
+    return TestMemIoCtxImpl::aio_notify(o, c, bl, timeout_ms, pbl);
+  }
+
+  MOCK_METHOD6(aio_operate, int(const std::string&, TestObjectOperationImpl&,
+                                AioCompletionImpl*, SnapContext*,
+                                const ceph::real_time*, int));
+  int do_aio_operate(const std::string& o, TestObjectOperationImpl& ops,
+                     AioCompletionImpl* c, SnapContext* snapc,
+                     const ceph::real_time* pmtime, int flags) {
+    return TestMemIoCtxImpl::aio_operate(o, ops, c, snapc, pmtime, flags);
+  }
+
+  MOCK_METHOD4(aio_watch, int(const std::string& o, AioCompletionImpl *c,
+                              uint64_t *handle, librados::WatchCtx2 *ctx));
+  int do_aio_watch(const std::string& o, AioCompletionImpl *c,
+                   uint64_t *handle, librados::WatchCtx2 *ctx) {
+    return TestMemIoCtxImpl::aio_watch(o, c, handle, ctx);
+  }
+
+  MOCK_METHOD2(aio_unwatch, int(uint64_t handle, AioCompletionImpl *c));
+  int do_aio_unwatch(uint64_t handle, AioCompletionImpl *c) {
+    return TestMemIoCtxImpl::aio_unwatch(handle, c);
+  }
+
+  MOCK_METHOD2(assert_exists, int(const std::string &, uint64_t));
+  int do_assert_exists(const std::string &oid, uint64_t snap_id) {
+    return TestMemIoCtxImpl::assert_exists(oid, snap_id);
+  }
+
+  MOCK_METHOD2(assert_version, int(const std::string &, uint64_t));
+  int do_assert_version(const std::string &oid, uint64_t ver) {
+    return TestMemIoCtxImpl::assert_version(oid, ver);
+  }
+
+  MOCK_METHOD3(create, int(const std::string&, bool, const SnapContext &));
+  int do_create(const std::string& oid, bool exclusive,
+                const SnapContext &snapc) {
+    return TestMemIoCtxImpl::create(oid, exclusive, snapc);
+  }
+
+  MOCK_METHOD4(cmpext, int(const std::string&, uint64_t, bufferlist&,
+                           uint64_t snap_id));
+  int do_cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl,
+                uint64_t snap_id) {
+    return TestMemIoCtxImpl::cmpext(oid, off, cmp_bl, snap_id);
+  }
+
+  MOCK_METHOD8(exec, int(const std::string& oid,
+                         TestClassHandler *handler,
+                         const char *cls,
+                         const char *method,
+                         bufferlist& inbl,
+                         bufferlist* outbl,
+                         uint64_t snap_id,
+                         const SnapContext &snapc));
+  int do_exec(const std::string& oid, TestClassHandler *handler,
+              const char *cls, const char *method, bufferlist& inbl,
+              bufferlist* outbl, uint64_t snap_id, const SnapContext &snapc) {
+    return TestMemIoCtxImpl::exec(oid, handler, cls, method, inbl, outbl,
+                                  snap_id, snapc);
+  }
+
+  MOCK_CONST_METHOD0(get_instance_id, uint64_t());
+  uint64_t do_get_instance_id() const {
+    return TestMemIoCtxImpl::get_instance_id();
+  }
+
+  MOCK_METHOD2(list_snaps, int(const std::string& o, snap_set_t *out_snaps));
+  int do_list_snaps(const std::string& o, snap_set_t *out_snaps) {
+    return TestMemIoCtxImpl::list_snaps(o, out_snaps);
+  }
+
+  MOCK_METHOD2(list_watchers, int(const std::string& o,
+                                  std::list<obj_watch_t> *out_watchers));
+  int do_list_watchers(const std::string& o,
+                       std::list<obj_watch_t> *out_watchers) {
+    return TestMemIoCtxImpl::list_watchers(o, out_watchers);
+  }
+
+  MOCK_METHOD4(notify, int(const std::string& o, bufferlist& bl,
+                           uint64_t timeout_ms, bufferlist *pbl));
+  int do_notify(const std::string& o, bufferlist& bl,
+                uint64_t timeout_ms, bufferlist *pbl) {
+    return TestMemIoCtxImpl::notify(o, bl, timeout_ms, pbl);
+  }
+
+  MOCK_METHOD1(set_snap_read, void(snap_t));
+  void do_set_snap_read(snap_t snap_id) {
+    return TestMemIoCtxImpl::set_snap_read(snap_id);
+  }
+  MOCK_METHOD6(sparse_read, int(const std::string& oid,
+                                uint64_t off,
+                                uint64_t len,
+                                std::map<uint64_t, uint64_t> *m,
+                                bufferlist *bl, uint64_t));
+  int do_sparse_read(const std::string& oid, uint64_t off, size_t len,
+                     std::map<uint64_t, uint64_t> *m, bufferlist *bl,
+                     uint64_t snap_id) {
+    return TestMemIoCtxImpl::sparse_read(oid, off, len, m, bl, snap_id);
+  }
+
+  MOCK_METHOD6(read, int(const std::string& oid,
+                         size_t len,
+                         uint64_t off,
+                         bufferlist *bl, uint64_t snap_id, uint64_t* objver));
+  int do_read(const std::string& oid, size_t len, uint64_t off,
+              bufferlist *bl, uint64_t snap_id, uint64_t* objver) {
+    return TestMemIoCtxImpl::read(oid, len, off, bl, snap_id, objver);
+  }
+
+  MOCK_METHOD2(remove, int(const std::string& oid, const SnapContext &snapc));
+  int do_remove(const std::string& oid, const SnapContext &snapc) {
+    return TestMemIoCtxImpl::remove(oid, snapc);
+  }
+
+  MOCK_METHOD1(selfmanaged_snap_create, int(uint64_t *snap_id));
+  int do_selfmanaged_snap_create(uint64_t *snap_id) {
+    return TestMemIoCtxImpl::selfmanaged_snap_create(snap_id);
+  }
+
+  MOCK_METHOD1(selfmanaged_snap_remove, int(uint64_t snap_id));
+  int do_selfmanaged_snap_remove(uint64_t snap_id) {
+    return TestMemIoCtxImpl::selfmanaged_snap_remove(snap_id);
+  }
+
+  MOCK_METHOD2(selfmanaged_snap_rollback, int(const std::string& oid,
+                                              uint64_t snap_id));
+  int do_selfmanaged_snap_rollback(const std::string& oid, uint64_t snap_id) {
+    return TestMemIoCtxImpl::selfmanaged_snap_rollback(oid, snap_id);
+  }
+
+  MOCK_METHOD3(truncate, int(const std::string& oid,
+                             uint64_t size,
+                             const SnapContext &snapc));
+  int do_truncate(const std::string& oid, uint64_t size,
+                  const SnapContext &snapc) {
+    return TestMemIoCtxImpl::truncate(oid, size, snapc);
+  }
+
+  MOCK_METHOD5(write, int(const std::string& oid, bufferlist& bl, size_t len,
+                          uint64_t off, const SnapContext &snapc));
+  int do_write(const std::string& oid, bufferlist& bl, size_t len, uint64_t off,
+               const SnapContext &snapc) {
+    return TestMemIoCtxImpl::write(oid, bl, len, off, snapc);
+  }
+
+  MOCK_METHOD3(write_full, int(const std::string& oid,
+                               bufferlist& bl,
+                               const SnapContext &snapc));
+  int do_write_full(const std::string& oid, bufferlist& bl,
+                    const SnapContext &snapc) {
+    return TestMemIoCtxImpl::write_full(oid, bl, snapc);
+  }
+
+
+  MOCK_METHOD5(writesame, int(const std::string& oid, bufferlist& bl,
+                              size_t len, uint64_t off,
+                              const SnapContext &snapc));
+  int do_writesame(const std::string& oid, bufferlist& bl, size_t len,
+                   uint64_t off, const SnapContext &snapc) {
+    return TestMemIoCtxImpl::writesame(oid, bl, len, off, snapc);
+  }
+
+  MOCK_METHOD4(zero, int(const std::string& oid, uint64_t offset,
+                         uint64_t length, const SnapContext &snapc));
+  int do_zero(const std::string& oid, uint64_t offset,
+              uint64_t length, const SnapContext &snapc) {
+    return TestMemIoCtxImpl::zero(oid, offset, length, snapc);
+  }
+
+  // Route every mocked method to its do_* forwarder by default.
+  void default_to_parent() {
+    using namespace ::testing;
+
+    ON_CALL(*this, clone()).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_clone));
+    ON_CALL(*this, aio_notify(_, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_notify));
+    ON_CALL(*this, aio_operate(_, _, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_operate));
+    ON_CALL(*this, aio_watch(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_watch));
+    ON_CALL(*this, aio_unwatch(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_unwatch));
+    ON_CALL(*this, assert_exists(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_assert_exists));
+    ON_CALL(*this, assert_version(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_assert_version));
+    ON_CALL(*this, create(_, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_create));
+    ON_CALL(*this, cmpext(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_cmpext));
+    ON_CALL(*this, exec(_, _, _, _, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_exec));
+    ON_CALL(*this, get_instance_id()).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_get_instance_id));
+    ON_CALL(*this, list_snaps(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_list_snaps));
+    ON_CALL(*this, list_watchers(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_list_watchers));
+    ON_CALL(*this, notify(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_notify));
+    ON_CALL(*this, read(_, _, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_read));
+    ON_CALL(*this, set_snap_read(_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_set_snap_read));
+    ON_CALL(*this, sparse_read(_, _, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_sparse_read));
+    ON_CALL(*this, remove(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_remove));
+    ON_CALL(*this, selfmanaged_snap_create(_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_selfmanaged_snap_create));
+    ON_CALL(*this, selfmanaged_snap_remove(_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_selfmanaged_snap_remove));
+    ON_CALL(*this, selfmanaged_snap_rollback(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_selfmanaged_snap_rollback));
+    ON_CALL(*this, truncate(_,_,_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_truncate));
+    ON_CALL(*this, write(_, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_write));
+    ON_CALL(*this, write_full(_, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_write_full));
+    ON_CALL(*this, writesame(_, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_writesame));
+    ON_CALL(*this, zero(_,_,_,_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_zero));
+  }
+
+private:
+  MockTestMemRadosClient *m_mock_client;  // gmock peer client
+  TestMemRadosClient *m_client;           // real in-memory client
+};
+
+} // namespace librados
+
+#endif // LIBRADOS_TEST_STUB_MOCK_TEST_MEM_IO_CTX_IMPL_H
diff --git a/src/test/librados_test_stub/MockTestMemRadosClient.h b/src/test/librados_test_stub/MockTestMemRadosClient.h
new file mode 100644
index 000000000..65a1ac82e
--- /dev/null
+++ b/src/test/librados_test_stub/MockTestMemRadosClient.h
@@ -0,0 +1,103 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef LIBRADOS_TEST_STUB_MOCK_TEST_MEM_RADOS_CLIENT_H
+#define LIBRADOS_TEST_STUB_MOCK_TEST_MEM_RADOS_CLIENT_H
+
+#include "test/librados_test_stub/TestMemRadosClient.h"
+#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
+#include "gmock/gmock.h"
+
+namespace librados {
+
+class TestMemCluster;
+
+// Mock wrapper around TestMemRadosClient: every overridden RADOS entry point
+// is a gmock method whose default action dispatches to the real in-memory
+// implementation, so tests can selectively override calls via EXPECT_CALL.
+class MockTestMemRadosClient : public TestMemRadosClient {
+public:
+  MockTestMemRadosClient(CephContext *cct, TestMemCluster *test_mem_cluster)
+    : TestMemRadosClient(cct, test_mem_cluster) {
+    default_to_dispatch();
+  }
+
+  MOCK_METHOD0(connect, int());
+  int do_connect() {
+    return TestMemRadosClient::connect();
+  }
+
+  MOCK_METHOD2(create_ioctx, TestIoCtxImpl *(int64_t pool_id,
+                                             const std::string &pool_name));
+  MockTestMemIoCtxImpl* do_create_ioctx(int64_t pool_id,
+                                        const std::string &pool_name) {
+    // NiceMock suppresses "uninteresting call" warnings for the new io ctx
+    return new ::testing::NiceMock<MockTestMemIoCtxImpl>(
+      this, this, pool_id, pool_name,
+      get_mem_cluster()->get_pool(pool_name));
+  }
+
+  MOCK_METHOD2(blocklist_add, int(const std::string& client_address,
+                                  uint32_t expire_seconds));
+  int do_blocklist_add(const std::string& client_address,
+                       uint32_t expire_seconds) {
+    return TestMemRadosClient::blocklist_add(client_address, expire_seconds);
+  }
+
+  MOCK_METHOD1(get_min_compatible_osd, int(int8_t*));
+  int do_get_min_compatible_osd(int8_t* require_osd_release) {
+    return TestMemRadosClient::get_min_compatible_osd(require_osd_release);
+  }
+
+  MOCK_METHOD2(get_min_compatible_client, int(int8_t*, int8_t*));
+  int do_get_min_compatible_client(int8_t* min_compat_client,
+                                   int8_t* require_min_compat_client) {
+    return TestMemRadosClient::get_min_compatible_client(
+      min_compat_client, require_min_compat_client);
+  }
+
+  MOCK_METHOD3(service_daemon_register,
+               int(const std::string&,
+                   const std::string&,
+                   const std::map<std::string,std::string>&));
+  int do_service_daemon_register(const std::string& service,
+                                 const std::string& name,
+                                 const std::map<std::string,std::string>& metadata) {
+    return TestMemRadosClient::service_daemon_register(service, name, metadata);
+  }
+
+  // workaround of https://github.com/google/googletest/issues/1155
+  MOCK_METHOD1(service_daemon_update_status_r,
+               int(const std::map<std::string,std::string>&));
+  int do_service_daemon_update_status_r(const std::map<std::string,std::string>& status) {
+    auto s = status;
+    return TestMemRadosClient::service_daemon_update_status(std::move(s));
+  }
+
+  MOCK_METHOD4(mon_command, int(const std::vector<std::string>&,
+                                const bufferlist&, bufferlist*, std::string*));
+  int do_mon_command(const std::vector<std::string>& cmd,
+                     const bufferlist &inbl, bufferlist *outbl,
+                     std::string *outs) {
+    // Must be qualified: calling the unqualified mocked method would
+    // re-enter the ON_CALL default action and recurse forever.
+    return TestMemRadosClient::mon_command(cmd, inbl, outbl, outs);
+  }
+
+  MOCK_METHOD0(wait_for_latest_osd_map, int());
+  int do_wait_for_latest_osd_map() {
+    // Qualified for the same reason as do_mon_command() above.
+    return TestMemRadosClient::wait_for_latest_osd_map();
+  }
+
+  // Install the do_* pass-through helpers as the default action for every
+  // mocked method.
+  void default_to_dispatch() {
+    using namespace ::testing;
+
+    ON_CALL(*this, connect()).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_connect));
+    ON_CALL(*this, create_ioctx(_, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_create_ioctx));
+    ON_CALL(*this, blocklist_add(_, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_blocklist_add));
+    ON_CALL(*this, get_min_compatible_osd(_)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_get_min_compatible_osd));
+    ON_CALL(*this, get_min_compatible_client(_, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_get_min_compatible_client));
+    ON_CALL(*this, service_daemon_register(_, _, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_service_daemon_register));
+    ON_CALL(*this, service_daemon_update_status_r(_)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_service_daemon_update_status_r));
+    ON_CALL(*this, mon_command(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_mon_command));
+    ON_CALL(*this, wait_for_latest_osd_map()).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_wait_for_latest_osd_map));
+  }
+};
+
+} // namespace librados
+
+#endif // LIBRADOS_TEST_STUB_MOCK_TEST_MEM_RADOS_CLIENT_H
diff --git a/src/test/librados_test_stub/NeoradosTestStub.cc b/src/test/librados_test_stub/NeoradosTestStub.cc
new file mode 100644
index 000000000..0de2cd902
--- /dev/null
+++ b/src/test/librados_test_stub/NeoradosTestStub.cc
@@ -0,0 +1,601 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "include/neorados/RADOS.hpp"
+#include "include/rados/librados.hpp"
+#include "common/ceph_mutex.h"
+#include "common/hobject.h"
+#include "librados/AioCompletionImpl.h"
+#include "mon/error_code.h"
+#include "osd/error_code.h"
+#include "osd/osd_types.h"
+#include "osdc/error_code.h"
+#include "test/librados_test_stub/LibradosTestStub.h"
+#include "test/librados_test_stub/TestClassHandler.h"
+#include "test/librados_test_stub/TestIoCtxImpl.h"
+#include "test/librados_test_stub/TestRadosClient.h"
+#include <map>
+#include <memory>
+#include <optional>
+#include <string>
+#include <functional>
+#include <boost/system/system_error.hpp>
+
+namespace bs = boost::system;
+using namespace std::literals;
+using namespace std::placeholders;
+
+namespace neorados {
+namespace detail {
+
+// Test-stub backing object for neorados::RADOS: lazily creates and caches one
+// librados::TestIoCtxImpl per (pool id, namespace) pair.
+class Client {
+public:
+  ceph::mutex mutex = ceph::make_mutex("NeoradosTestStub::Client");
+
+  librados::TestRadosClient* test_rados_client;
+  boost::asio::io_context& io_context;
+
+  // cache of io contexts, keyed by (pool id, namespace)
+  std::map<std::pair<int64_t, std::string>, librados::TestIoCtxImpl*> io_ctxs;
+
+  Client(librados::TestRadosClient* test_rados_client)
+    : test_rados_client(test_rados_client),
+      io_context(test_rados_client->get_io_context()) {
+  }
+
+  ~Client() {
+    // release every cached io context
+    for (auto& io_ctx : io_ctxs) {
+      io_ctx.second->put();
+    }
+  }
+
+  // Returns the cached io context matching 'ioc', creating it on first use.
+  // Returns nullptr when the pool cannot be found (or pool listing fails).
+  librados::TestIoCtxImpl* get_io_ctx(const IOContext& ioc) {
+    int64_t pool_id = ioc.pool();
+    std::string ns = std::string{ioc.ns()};
+
+    auto lock = std::scoped_lock{mutex};
+    auto key = make_pair(pool_id, ns);
+    auto it = io_ctxs.find(key);
+    if (it != io_ctxs.end()) {
+      return it->second;
+    }
+
+    // cache miss: resolve the pool name from the pool id via a full listing
+    std::list<std::pair<int64_t, std::string>> pools;
+    int r = test_rados_client->pool_list(pools);
+    if (r < 0) {
+      return nullptr;
+    }
+
+    for (auto& pool : pools) {
+      if (pool.first == pool_id) {
+        auto io_ctx = test_rados_client->create_ioctx(pool_id, pool.second);
+        io_ctx->set_namespace(ns);
+        io_ctxs[key] = io_ctx;
+        return io_ctx;
+      }
+    }
+    return nullptr;
+  }
+};
+
+} // namespace detail
+
+namespace {
+
+// Carries the neorados completion across the C-style librados callback.
+struct CompletionPayload {
+  std::unique_ptr<Op::Completion> c;
+};
+
+// Bridges a librados AioCompletion callback back into a neorados completion,
+// translating the negative-errno return value into a boost error_code.
+void completion_callback_adapter(rados_completion_t c, void *arg) {
+  auto impl = reinterpret_cast<librados::AioCompletionImpl *>(c);
+  auto r = impl->get_return_value();
+  impl->release();
+
+  auto payload = reinterpret_cast<CompletionPayload*>(arg);
+  payload->c->defer(std::move(payload->c),
+                    (r < 0) ? bs::error_code(-r, osd_category()) :
+                              bs::error_code());
+  delete payload;
+}
+
+// Wraps a neorados completion in a librados AioCompletionImpl; the payload
+// is freed by completion_callback_adapter() when the op completes.
+librados::AioCompletionImpl* create_aio_completion(
+    std::unique_ptr<Op::Completion>&& c) {
+  auto payload = new CompletionPayload{std::move(c)};
+
+  auto impl = new librados::AioCompletionImpl();
+  impl->set_complete_callback(payload, completion_callback_adapter);
+
+  return impl;
+}
+
+// Stores a (non-negative) op result into *pval; passes the result through.
+int save_operation_size(int result, size_t* pval) {
+  if (pval != NULL) {
+    *pval = result;
+  }
+  return result;
+}
+
+// Stores the op result as an error_code in *ec; passes the result through.
+int save_operation_ec(int result, boost::system::error_code* ec) {
+  if (ec != NULL) {
+    *ec = {std::abs(result), bs::system_category()};
+  }
+  return result;
+}
+
+} // anonymous namespace
+
+// neorados::Object stores an object_t inside its opaque 'impl' buffer via
+// placement new; the static_asserts guard against the buffer being too small.
+Object::Object() {
+  static_assert(impl_size >= sizeof(object_t));
+  new (&impl) object_t();
+}
+
+Object::Object(std::string&& s) {
+  static_assert(impl_size >= sizeof(object_t));
+  new (&impl) object_t(std::move(s));
+}
+
+Object::~Object() {
+  // explicit destructor call matches the placement new above
+  reinterpret_cast<object_t*>(&impl)->~object_t();
+}
+
+Object::operator std::string_view() const {
+  return std::string_view(reinterpret_cast<const object_t*>(&impl)->name);
+}
+
+// Backing state for neorados::IOContext, stored in its opaque 'impl' buffer.
+struct IOContextImpl {
+  object_locator_t oloc;
+  snapid_t snap_seq = CEPH_NOSNAP;  // read snapshot (CEPH_NOSNAP = head)
+  SnapContext snapc;                // write snapshot context
+};
+
+IOContext::IOContext() {
+  static_assert(impl_size >= sizeof(IOContextImpl));
+  new (&impl) IOContextImpl();
+}
+
+IOContext::IOContext(const IOContext& rhs) {
+  static_assert(impl_size >= sizeof(IOContextImpl));
+  new (&impl) IOContextImpl(*reinterpret_cast<const IOContextImpl*>(&rhs.impl));
+}
+
+IOContext::IOContext(int64_t _pool, std::string&& _ns)
+  : IOContext() {
+  pool(_pool);
+  ns(std::move(_ns));
+}
+
+IOContext::~IOContext() {
+  // explicit destructor call matches the placement new above
+  reinterpret_cast<IOContextImpl*>(&impl)->~IOContextImpl();
+}
+
+std::int64_t IOContext::pool() const {
+  return reinterpret_cast<const IOContextImpl*>(&impl)->oloc.pool;
+}
+
+void IOContext::pool(std::int64_t _pool) {
+  reinterpret_cast<IOContextImpl*>(&impl)->oloc.pool = _pool;
+}
+
+std::string_view IOContext::ns() const {
+  return reinterpret_cast<const IOContextImpl*>(&impl)->oloc.nspace;
+}
+
+void IOContext::ns(std::string&& _ns) {
+  reinterpret_cast<IOContextImpl*>(&impl)->oloc.nspace = std::move(_ns);
+}
+
+// Read snapshot: nullopt means "read from head" (CEPH_NOSNAP internally).
+std::optional<std::uint64_t> IOContext::read_snap() const {
+  auto& snap_seq = reinterpret_cast<const IOContextImpl*>(&impl)->snap_seq;
+  if (snap_seq == CEPH_NOSNAP)
+    return std::nullopt;
+  else
+    return snap_seq;
+}
+void IOContext::read_snap(std::optional<std::uint64_t> _snapid) {
+  auto& snap_seq = reinterpret_cast<IOContextImpl*>(&impl)->snap_seq;
+  snap_seq = _snapid.value_or(CEPH_NOSNAP);
+}
+
+// Write snap context as (seq, snaps); nullopt when no context is set.
+std::optional<
+  std::pair<std::uint64_t,
+	    std::vector<std::uint64_t>>> IOContext::write_snap_context() const {
+  auto& snapc = reinterpret_cast<const IOContextImpl*>(&impl)->snapc;
+  if (snapc.empty()) {
+    return std::nullopt;
+  } else {
+    std::vector<uint64_t> v(snapc.snaps.begin(), snapc.snaps.end());
+    return std::make_optional(std::make_pair(uint64_t(snapc.seq), v));
+  }
+}
+
+void IOContext::write_snap_context(
+  std::optional<std::pair<std::uint64_t, std::vector<std::uint64_t>>> _snapc) {
+  auto& snapc = reinterpret_cast<IOContextImpl*>(&impl)->snapc;
+  if (!_snapc) {
+    snapc.clear();
+  } else {
+    // validate before committing so an invalid context leaves state untouched
+    SnapContext n(_snapc->first, { _snapc->second.begin(), _snapc->second.end()});
+    if (!n.is_valid()) {
+      throw bs::system_error(EINVAL,
+                             bs::system_category(),
+                             "Invalid snap context.");
+    }
+
+    snapc = n;
+  }
+}
+
+void IOContext::full_try(bool _full_try) {
+  // no-op
+}
+
+// Equality compares locator, read snap, and the full write snap context.
+bool operator ==(const IOContext& lhs, const IOContext& rhs) {
+  auto l = reinterpret_cast<const IOContextImpl*>(&lhs.impl);
+  auto r = reinterpret_cast<const IOContextImpl*>(&rhs.impl);
+  return (l->oloc == r->oloc &&
+          l->snap_seq == r->snap_seq &&
+          l->snapc.seq == r->snapc.seq &&
+          l->snapc.snaps == r->snapc.snaps);
+}
+
+bool operator !=(const IOContext& lhs, const IOContext& rhs) {
+  return !(lhs == rhs);
+}
+
+// neorados::Op stores a ref-counted TestObjectOperationImpl pointer inside
+// its opaque 'impl' buffer; each mutator appends a bound sub-op to ops.
+// Bind placeholders: _1=io ctx, _2=oid, _3=out bl, _4=snap id, _5=snapc,
+// _6=objver (per librados::ObjectOperationTestImpl's signature).
+Op::Op() {
+  static_assert(Op::impl_size >= sizeof(librados::TestObjectOperationImpl*));
+  auto& o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  o = new librados::TestObjectOperationImpl();
+  o->get();
+}
+
+Op::~Op() {
+  auto& o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  if (o != nullptr) {
+    o->put();
+    o = nullptr;
+  }
+}
+
+void Op::assert_exists() {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  o->ops.push_back(std::bind(
+    &librados::TestIoCtxImpl::assert_exists, _1, _2, _4));
+}
+
+void Op::assert_version(uint64_t ver) {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  o->ops.push_back(std::bind(
+    &librados::TestIoCtxImpl::assert_version, _1, _2, ver));
+}
+
+void Op::cmpext(uint64_t off, ceph::buffer::list&& cmp_bl, std::size_t* s) {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  librados::ObjectOperationTestImpl op = std::bind(
+    &librados::TestIoCtxImpl::cmpext, _1, _2, off, cmp_bl, _4);
+  if (s != nullptr) {
+    // also capture the op's (size) result into *s
+    op = std::bind(
+      save_operation_size, std::bind(op, _1, _2, _3, _4, _5, _6), s);
+  }
+  o->ops.push_back(op);
+}
+
+// Number of queued sub-ops.
+std::size_t Op::size() const {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl* const *>(&impl);
+  return o->ops.size();
+}
+
+// fadvise/locality hints are irrelevant for the in-memory stub.
+void Op::set_fadvise_random() {
+  // no-op
+}
+
+void Op::set_fadvise_sequential() {
+  // no-op
+}
+
+void Op::set_fadvise_willneed() {
+  // no-op
+}
+
+void Op::set_fadvise_dontneed() {
+  // no-op
+}
+
+void Op::set_fadvise_nocache() {
+  // no-op
+}
+
+void Op::balance_reads() {
+  // no-op
+}
+
+void Op::localize_reads() {
+  // no-op
+}
+
+// Queue an object-class method call; output goes to *out when supplied,
+// otherwise to the op's own output bufferlist.
+void Op::exec(std::string_view cls, std::string_view method,
+              const ceph::buffer::list& inbl,
+              ceph::buffer::list* out,
+              boost::system::error_code* ec) {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+
+  auto cls_handler = librados_test_stub::get_class_handler();
+  librados::ObjectOperationTestImpl op =
+    [cls_handler, cls, method, inbl = const_cast<bufferlist&>(inbl), out]
+    (librados::TestIoCtxImpl* io_ctx, const std::string& oid, bufferlist* outbl,
+     uint64_t snap_id, const SnapContext& snapc, uint64_t*) mutable -> int {
+      return io_ctx->exec(
+        oid, cls_handler, std::string(cls).c_str(),
+        std::string(method).c_str(), inbl,
+        (out != nullptr ? out : outbl), snap_id, snapc);
+    };
+  if (ec != nullptr) {
+    op = std::bind(
+      save_operation_ec, std::bind(op, _1, _2, _3, _4, _5, _6), ec);
+  }
+  o->ops.push_back(op);
+}
+
+// Same as above without a caller-provided output bufferlist.
+void Op::exec(std::string_view cls, std::string_view method,
+              const ceph::buffer::list& inbl,
+              boost::system::error_code* ec) {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+
+  auto cls_handler = librados_test_stub::get_class_handler();
+  librados::ObjectOperationTestImpl op =
+    [cls_handler, cls, method, inbl = const_cast<bufferlist&>(inbl)]
+    (librados::TestIoCtxImpl* io_ctx, const std::string& oid, bufferlist* outbl,
+     uint64_t snap_id, const SnapContext& snapc, uint64_t*) mutable -> int {
+      return io_ctx->exec(
+        oid, cls_handler, std::string(cls).c_str(),
+        std::string(method).c_str(), inbl, outbl, snap_id, snapc);
+    };
+  if (ec != NULL) {
+    op = std::bind(
+      save_operation_ec, std::bind(op, _1, _2, _3, _4, _5, _6), ec);
+  }
+  o->ops.push_back(op);
+}
+}
+
+// Queue a read; result lands in *out when supplied, otherwise in the op's
+// own output bufferlist (_3).
+void ReadOp::read(size_t off, uint64_t len, ceph::buffer::list* out,
+                  boost::system::error_code* ec) {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  librados::ObjectOperationTestImpl op;
+  if (out != nullptr) {
+    op = std::bind(
+      &librados::TestIoCtxImpl::read, _1, _2, len, off, out, _4, _6);
+  } else {
+    op = std::bind(
+      &librados::TestIoCtxImpl::read, _1, _2, len, off, _3, _4, _6);
+  }
+
+  if (ec != NULL) {
+    op = std::bind(
+      save_operation_ec, std::bind(op, _1, _2, _3, _4, _5, _6), ec);
+  }
+  o->ops.push_back(op);
+}
+
+// Queue a sparse read; on success the extent map is copied into *extents
+// as (offset, length) pairs.
+void ReadOp::sparse_read(uint64_t off, uint64_t len,
+                         ceph::buffer::list* out,
+                         std::vector<std::pair<std::uint64_t,
+                                               std::uint64_t>>* extents,
+                         boost::system::error_code* ec) {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  librados::ObjectOperationTestImpl op =
+    [off, len, out, extents]
+    (librados::TestIoCtxImpl* io_ctx, const std::string& oid, bufferlist* outbl,
+     uint64_t snap_id, const SnapContext& snapc, uint64_t*) mutable -> int {
+      std::map<uint64_t,uint64_t> m;
+      int r = io_ctx->sparse_read(
+        oid, off, len, &m, (out != nullptr ? out : outbl), snap_id);
+      if (r >= 0 && extents != nullptr) {
+        extents->clear();
+        extents->insert(extents->end(), m.begin(), m.end());
+      }
+      return r;
+    };
+  if (ec != NULL) {
+    op = std::bind(save_operation_ec,
+                   std::bind(op, _1, _2, _3, _4, _5, _6), ec);
+  }
+  o->ops.push_back(op);
+}
+
+// Queue a snapshot listing; converts librados::snap_set_t into the
+// neorados SnapSet representation clone by clone.
+void ReadOp::list_snaps(SnapSet* snaps, bs::error_code* ec) {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  librados::ObjectOperationTestImpl op =
+    [snaps]
+    (librados::TestIoCtxImpl* io_ctx, const std::string& oid, bufferlist*,
+     uint64_t, const SnapContext&, uint64_t*) mutable -> int {
+      librados::snap_set_t snap_set;
+      int r = io_ctx->list_snaps(oid, &snap_set);
+      if (r >= 0 && snaps != nullptr) {
+        *snaps = {};
+        snaps->seq = snap_set.seq;
+        snaps->clones.reserve(snap_set.clones.size());
+        for (auto& clone : snap_set.clones) {
+          neorados::CloneInfo clone_info;
+          clone_info.cloneid = clone.cloneid;
+          clone_info.snaps = clone.snaps;
+          clone_info.overlap = clone.overlap;
+          clone_info.size = clone.size;
+          snaps->clones.push_back(clone_info);
+        }
+      }
+      return r;
+    };
+  if (ec != NULL) {
+    op = std::bind(save_operation_ec,
+                   std::bind(op, _1, _2, _3, _4, _5, _6), ec);
+  }
+  o->ops.push_back(op);
+}
+
+// Write-op mutators: each queues the matching TestIoCtxImpl call, with the
+// write snap context supplied at execution time via placeholder _5.
+void WriteOp::create(bool exclusive) {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  o->ops.push_back(std::bind(
+    &librados::TestIoCtxImpl::create, _1, _2, exclusive, _5));
+}
+
+void WriteOp::write(uint64_t off, ceph::buffer::list&& bl) {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  o->ops.push_back(std::bind(
+    &librados::TestIoCtxImpl::write, _1, _2, bl, bl.length(), off, _5));
+}
+
+void WriteOp::write_full(ceph::buffer::list&& bl) {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  o->ops.push_back(std::bind(
+    &librados::TestIoCtxImpl::write_full, _1, _2, bl, _5));
+}
+
+void WriteOp::remove() {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  o->ops.push_back(std::bind(
+    &librados::TestIoCtxImpl::remove, _1, _2, _5));
+}
+
+void WriteOp::truncate(uint64_t off) {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  o->ops.push_back(std::bind(
+    &librados::TestIoCtxImpl::truncate, _1, _2, off, _5));
+}
+
+void WriteOp::zero(uint64_t off, uint64_t len) {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  o->ops.push_back(std::bind(
+    &librados::TestIoCtxImpl::zero, _1, _2, off, len, _5));
+}
+
+void WriteOp::writesame(std::uint64_t off, std::uint64_t write_len,
+                        ceph::buffer::list&& bl) {
+  auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
+  o->ops.push_back(std::bind(
+    &librados::TestIoCtxImpl::writesame, _1, _2, bl, write_len, off, _5));
+}
+
+// Allocation hints have no effect on the in-memory stub.
+void WriteOp::set_alloc_hint(uint64_t expected_object_size,
+                             uint64_t expected_write_size,
+                             alloc_hint::alloc_hint_t flags) {
+  // no-op
+}
+
+RADOS::RADOS() = default;
+
+RADOS::RADOS(RADOS&&) = default;
+
+RADOS::RADOS(std::unique_ptr<detail::Client> impl)
+  : impl(std::move(impl)) {
+}
+
+RADOS::~RADOS() = default;
+
+// Builds a stub RADOS client on top of an existing librados test client;
+// the librados handle must outlive the returned object.
+RADOS RADOS::make_with_librados(librados::Rados& rados) {
+  auto test_rados_client = reinterpret_cast<librados::TestRadosClient*>(
+    rados.client);
+  return RADOS{std::make_unique<detail::Client>(test_rados_client)};
+}
+
+CephContext* neorados::RADOS::cct() {
+  return impl->test_rados_client->cct();
+}
+
+boost::asio::io_context& neorados::RADOS::get_io_context() {
+  return impl->io_context;
+}
+
+boost::asio::io_context::executor_type neorados::RADOS::get_executor() const {
+  return impl->io_context.get_executor();
+}
+
+// Execute a read op; fails immediately with pool_dne if the pool is unknown.
+void RADOS::execute(const Object& o, const IOContext& ioc, ReadOp&& op,
+                    ceph::buffer::list* bl, std::unique_ptr<Op::Completion> c,
+                    uint64_t* objver, const blkin_trace_info* trace_info) {
+  auto io_ctx = impl->get_io_ctx(ioc);
+  if (io_ctx == nullptr) {
+    c->dispatch(std::move(c), osdc_errc::pool_dne);
+    return;
+  }
+
+  auto ops = *reinterpret_cast<librados::TestObjectOperationImpl**>(&op.impl);
+
+  // honor the IOContext's read snapshot (default: head)
+  auto snap_id = CEPH_NOSNAP;
+  auto opt_snap_id = ioc.read_snap();
+  if (opt_snap_id) {
+    snap_id = *opt_snap_id;
+  }
+
+  auto completion = create_aio_completion(std::move(c));
+  auto r = io_ctx->aio_operate_read(std::string{o}, *ops, completion, 0U, bl,
+                                    snap_id, objver);
+  ceph_assert(r == 0);
+}
+
+// Execute a write op under the IOContext's write snap context (if any).
+void RADOS::execute(const Object& o, const IOContext& ioc, WriteOp&& op,
+                    std::unique_ptr<Op::Completion> c, uint64_t* objver,
+                    const blkin_trace_info* trace_info) {
+  auto io_ctx = impl->get_io_ctx(ioc);
+  if (io_ctx == nullptr) {
+    c->dispatch(std::move(c), osdc_errc::pool_dne);
+    return;
+  }
+
+  auto ops = *reinterpret_cast<librados::TestObjectOperationImpl**>(&op.impl);
+
+  SnapContext snapc;
+  auto opt_snapc = ioc.write_snap_context();
+  if (opt_snapc) {
+    snapc.seq = opt_snapc->first;
+    snapc.snaps.assign(opt_snapc->second.begin(), opt_snapc->second.end());
+  }
+
+  auto completion = create_aio_completion(std::move(c));
+  auto r = io_ctx->aio_operate(std::string{o}, *ops, completion, &snapc, nullptr, 0U);
+  ceph_assert(r == 0);
+}
+
+// Synchronously forwards a mon command, then completes asynchronously.
+// NOTE(review): errors are mapped through osd_category() while
+// blocklist_add() below uses mon_category() — confirm this is intentional.
+void RADOS::mon_command(std::vector<std::string> command,
+                        const bufferlist& bl,
+                        std::string* outs, bufferlist* outbl,
+                        std::unique_ptr<Op::Completion> c) {
+  auto r = impl->test_rados_client->mon_command(command, bl, outbl, outs);
+  c->post(std::move(c),
+          (r < 0 ? bs::error_code(-r, osd_category()) : bs::error_code()));
+}
+
+// Blocklist a client address; a missing expiration is passed through as 0.
+void RADOS::blocklist_add(std::string_view client_address,
+                          std::optional<std::chrono::seconds> expire,
+                          std::unique_ptr<SimpleOpComp> c) {
+  auto r = impl->test_rados_client->blocklist_add(
+    std::string(client_address), expire.value_or(0s).count());
+  c->post(std::move(c),
+          (r < 0 ? bs::error_code(-r, mon_category()) : bs::error_code()));
+}
+
+void RADOS::wait_for_latest_osd_map(std::unique_ptr<Op::Completion> c) {
+  auto r = impl->test_rados_client->wait_for_latest_osd_map();
+  c->dispatch(std::move(c),
+              (r < 0 ? bs::error_code(-r, osd_category()) :
+                       bs::error_code()));
+}
+
+} // namespace neorados
+
+namespace librados {
+
+// Test helpers: reach through the opaque neorados::RADOS handle to recover
+// the underlying mock objects so expectations can be set on them. Relies on
+// the RADOS object's first (only) member being the detail::Client pointer.
+MockTestMemIoCtxImpl& get_mock_io_ctx(neorados::RADOS& rados,
+                                      neorados::IOContext& io_context) {
+  auto& impl = *reinterpret_cast<std::unique_ptr<neorados::detail::Client>*>(
+    &rados);
+  auto io_ctx = impl->get_io_ctx(io_context);
+  ceph_assert(io_ctx != nullptr);
+  return *reinterpret_cast<MockTestMemIoCtxImpl*>(io_ctx);
+}
+
+MockTestMemRadosClient& get_mock_rados_client(neorados::RADOS& rados) {
+  auto& impl = *reinterpret_cast<std::unique_ptr<neorados::detail::Client>*>(
+    &rados);
+  return *reinterpret_cast<MockTestMemRadosClient*>(impl->test_rados_client);
+}
+
+} // namespace librados
diff --git a/src/test/librados_test_stub/TestClassHandler.cc b/src/test/librados_test_stub/TestClassHandler.cc
new file mode 100644
index 000000000..df830918f
--- /dev/null
+++ b/src/test/librados_test_stub/TestClassHandler.cc
@@ -0,0 +1,159 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestClassHandler.h"
+#include "test/librados_test_stub/TestIoCtxImpl.h"
+#include <boost/algorithm/string/predicate.hpp>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include "common/debug.h"
+#include "include/ceph_assert.h"
+#include "include/dlfcn_compat.h"
+
+#define dout_context g_ceph_context
+#define dout_subsys ceph_subsys_rados
+
+namespace librados {
+
+// The handler owns the dlopen() handles of every successfully loaded class.
+TestClassHandler::TestClassHandler() {
+}
+
+TestClassHandler::~TestClassHandler() {
+  // close each shared library opened by open_class()
+  for (auto handle : m_class_handles) {
+    dlclose(handle);
+  }
+}
+
+// Load one object-class shared library and run its __cls_init entry point.
+// On success the handle is retained (closed by the destructor); on any
+// failure a diagnostic is printed and the handle is closed.
+void TestClassHandler::open_class(const std::string& name,
+                                  const std::string& path) {
+  void *handle = dlopen(path.c_str(), RTLD_NOW);
+  if (handle == NULL) {
+    std::cerr << "Failed to load class: " << name << " (" << path << "): "
+              << dlerror() << std::endl;
+    return;
+  }
+
+  // locate and invoke the class initializer
+  void (*cls_init)() = reinterpret_cast<void (*)()>(
+    dlsym(handle, "__cls_init"));
+  if (cls_init) {
+    // ownership of 'handle' transfers to m_class_handles
+    m_class_handles.push_back(handle);
+    cls_init();
+    return;
+  }
+
+  std::cerr << "Error locating initializer: " << dlerror() << std::endl;
+  std::cerr << "Class: " << name << " (" << path << ") missing initializer"
+            << std::endl;
+  dlclose(handle);
+}
+
+// Scan $CEPH_LIB (default "lib") for libcls_* shared libraries and load
+// every object class found. Aborts if the directory cannot be opened.
+void TestClassHandler::open_all_classes() {
+  ceph_assert(m_class_handles.empty());
+
+  const char* env = getenv("CEPH_LIB");
+  std::string CEPH_LIB(env ? env : "lib");
+  DIR *dir = ::opendir(CEPH_LIB.c_str());
+  if (dir == NULL) {
+    ceph_abort();
+  }
+
+  // collect into a std::set so classes are loaded in deterministic order
+  std::set<std::string> names;
+  struct dirent *pde = nullptr;
+  while ((pde = ::readdir(dir))) {
+    std::string name(pde->d_name);
+    if (!boost::algorithm::starts_with(name, "libcls_") ||
+        !boost::algorithm::ends_with(name, SHARED_LIB_SUFFIX)) {
+      continue;
+    }
+    names.insert(name);
+  }
+
+  // recover the class name by stripping the "libcls_" prefix and the
+  // platform's shared-library suffix (don't assume it is 3 chars, i.e.
+  // ".so" -- it is ".dylib" on macOS)
+  const size_t prefix_len = strlen("libcls_");
+  const size_t suffix_len = strlen(SHARED_LIB_SUFFIX);
+  for (auto& name : names) {
+    std::string class_name = name.substr(
+      prefix_len, name.size() - prefix_len - suffix_len);
+    open_class(class_name, CEPH_LIB + "/" + name);
+  }
+  closedir(dir);
+}
+
+// Register a new object class under 'name'; *handle receives an opaque
+// pointer to the class record. Returns -EEXIST if already registered.
+int TestClassHandler::create(const std::string &name, cls_handle_t *handle) {
+  auto it = m_classes.find(name);
+  if (it != m_classes.end()) {
+    std::cerr << "Class " << name << " already exists" << std::endl;
+    return -EEXIST;
+  }
+
+  auto cls = SharedClass(new Class());
+  m_classes[name] = cls;
+  *handle = reinterpret_cast<cls_handle_t>(cls.get());
+  return 0;
+}
+
+// Register a method on a previously created class. Returns -EEXIST if the
+// method name is already taken.
+// NOTE(review): the *handle out-parameter is never populated here -- callers
+// apparently don't rely on it in the test stub; confirm before using it.
+int TestClassHandler::create_method(cls_handle_t hclass,
+                                    const char *name,
+                                    cls_method_cxx_call_t class_call,
+                                    cls_method_handle_t *handle) {
+  Class *cls = reinterpret_cast<Class*>(hclass);
+  if (cls->methods.find(name) != cls->methods.end()) {
+    std::cerr << "Class method " << hclass << ":" << name << " already exists"
+              << std::endl;
+    return -EEXIST;
+  }
+
+  SharedMethod method(new Method());
+  method->class_call = class_call;
+  cls->methods[name] = method;
+  return 0;
+}
+
+// Resolve a registered class method; returns NULL (with a diagnostic on
+// stderr) when either the class or the method is unknown.
+cls_method_cxx_call_t TestClassHandler::get_method(const std::string &cls,
+                                                   const std::string &method) {
+  Classes::iterator c_it = m_classes.find(cls);
+  if (c_it == m_classes.end()) {
+    std::cerr << "Failed to locate class " << cls << std::endl;
+    return NULL;
+  }
+
+  SharedClass scls = c_it->second;
+  Methods::iterator m_it = scls->methods.find(method);
+  if (m_it == scls->methods.end()) {
+    std::cerr << "Failed to locate class method " << cls << "." << method
+              << std::endl;
+    return NULL;
+  }
+  return m_it->second->class_call;
+}
+
+// Build the per-call context handed to an object-class method. The io ctx
+// is cloned (see comment below) and released by MethodContext's destructor.
+TestClassHandler::SharedMethodContext TestClassHandler::get_method_context(
+    TestIoCtxImpl *io_ctx_impl, const std::string &oid, uint64_t snap_id,
+    const SnapContext &snapc) {
+  SharedMethodContext ctx(new MethodContext());
+
+  // clone to ioctx to provide a firewall for gmock expectations
+  ctx->io_ctx_impl = io_ctx_impl->clone();
+  ctx->oid = oid;
+  ctx->snap_id = snap_id;
+  ctx->snapc = snapc;
+  return ctx;
+}
+
+// Register a named filter factory on a class; returns -EEXIST when a
+// filter with the same name is already registered (no overwrite).
+int TestClassHandler::create_filter(cls_handle_t hclass,
+                                    const std::string& name,
+                                    cls_cxx_filter_factory_t fn)
+{
+  Class *cls = reinterpret_cast<Class*>(hclass);
+  if (!cls->filters.emplace(name, fn).second) {
+    return -EEXIST;
+  }
+  return 0;
+}
+
+// Releases the io ctx clone acquired in get_method_context().
+TestClassHandler::MethodContext::~MethodContext() {
+  io_ctx_impl->put();
+}
+
+} // namespace librados
diff --git a/src/test/librados_test_stub/TestClassHandler.h b/src/test/librados_test_stub/TestClassHandler.h
new file mode 100644
index 000000000..09e009bf8
--- /dev/null
+++ b/src/test/librados_test_stub/TestClassHandler.h
@@ -0,0 +1,79 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_CLASS_HANDLER_H
+#define CEPH_TEST_CLASS_HANDLER_H
+
+#include "objclass/objclass.h"
+#include "common/snap_types.h"
+#include <boost/shared_ptr.hpp>
+#include <list>
+#include <map>
+#include <string>
+
+namespace librados
+{
+
+class TestIoCtxImpl;
+
+// Registry for object classes (cls_*) in the librados test stub: loads the
+// class shared libraries, records their methods/filters, and builds the
+// per-call MethodContext used when a class method executes.
+class TestClassHandler {
+public:
+
+  TestClassHandler();
+  ~TestClassHandler();
+
+  // State passed to an executing class method; owns a cloned io ctx
+  // reference which is released in the destructor.
+  struct MethodContext {
+    ~MethodContext();
+
+    TestIoCtxImpl *io_ctx_impl;
+    std::string oid;
+    uint64_t snap_id;
+    SnapContext snapc;
+  };
+  typedef boost::shared_ptr<MethodContext> SharedMethodContext;
+
+  struct Method {
+    cls_method_cxx_call_t class_call;
+  };
+  typedef boost::shared_ptr<Method> SharedMethod;
+  typedef std::map<std::string, SharedMethod> Methods;
+  typedef std::map<std::string, cls_cxx_filter_factory_t> Filters;
+
+  struct Class {
+    Methods methods;
+    Filters filters;
+  };
+  typedef boost::shared_ptr<Class> SharedClass;
+
+  // Scan $CEPH_LIB for libcls_* libraries and load them all.
+  void open_all_classes();
+
+  int create(const std::string &name, cls_handle_t *handle);
+  int create_method(cls_handle_t hclass, const char *method,
+                    cls_method_cxx_call_t class_call,
+                    cls_method_handle_t *handle);
+  cls_method_cxx_call_t get_method(const std::string &cls,
+                                   const std::string &method);
+  SharedMethodContext get_method_context(TestIoCtxImpl *io_ctx_impl,
+                                         const std::string &oid,
+                                         uint64_t snap_id,
+                                         const SnapContext &snapc);
+
+  int create_filter(cls_handle_t hclass, const std::string& filter_name,
+                    cls_cxx_filter_factory_t fn);
+
+private:
+
+  typedef std::map<std::string, SharedClass> Classes;
+  typedef std::list<void*> ClassHandles;
+
+  Classes m_classes;
+  ClassHandles m_class_handles;  // dlopen() handles, closed in the dtor
+  // NOTE(review): m_filters appears unused -- filters live per-Class above;
+  // confirm against the .cc before removing.
+  Filters m_filters;
+
+  void open_class(const std::string& name, const std::string& path);
+
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_CLASS_HANDLER_H
diff --git a/src/test/librados_test_stub/TestCluster.h b/src/test/librados_test_stub/TestCluster.h
new file mode 100644
index 000000000..9b7612d31
--- /dev/null
+++ b/src/test/librados_test_stub/TestCluster.h
@@ -0,0 +1,64 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_CLUSTER_H
+#define CEPH_TEST_CLUSTER_H
+
+#include "test/librados_test_stub/TestWatchNotify.h"
+#include "include/common_fwd.h"
+
+namespace librados {
+
+class TestRadosClient;
+class TestWatchNotify;
+
+// Abstract base for a simulated Ceph cluster shared by stub rados clients;
+// concrete subclasses supply client creation and object-handler tracking.
+class TestCluster {
+public:
+  // Identifies an object by (namespace, name), ordered for use as a map key.
+  struct ObjectLocator {
+    std::string nspace;
+    std::string name;
+
+    ObjectLocator(const std::string& nspace, const std::string& name)
+      : nspace(nspace), name(name) {
+    }
+
+    bool operator<(const ObjectLocator& rhs) const {
+      if (nspace != rhs.nspace) {
+        return nspace < rhs.nspace;
+      }
+      return name < rhs.name;
+    }
+  };
+
+  // Callback interface invoked when a watched object is removed.
+  struct ObjectHandler {
+    virtual ~ObjectHandler() {}
+
+    virtual void handle_removed(TestRadosClient* test_rados_client) = 0;
+  };
+
+  TestCluster() : m_watch_notify(this) {
+  }
+  virtual ~TestCluster() {
+  }
+
+  virtual TestRadosClient *create_rados_client(CephContext *cct) = 0;
+
+  virtual int register_object_handler(int64_t pool_id,
+                                      const ObjectLocator& locator,
+                                      ObjectHandler* object_handler) = 0;
+  virtual void unregister_object_handler(int64_t pool_id,
+                                         const ObjectLocator& locator,
+                                         ObjectHandler* object_handler) = 0;
+
+  TestWatchNotify *get_watch_notify() {
+    return &m_watch_notify;
+  }
+
+protected:
+  // shared watch/notify state for the whole simulated cluster
+  TestWatchNotify m_watch_notify;
+
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_CLUSTER_H
diff --git a/src/test/librados_test_stub/TestIoCtxImpl.cc b/src/test/librados_test_stub/TestIoCtxImpl.cc
new file mode 100644
index 000000000..8a953ff7c
--- /dev/null
+++ b/src/test/librados_test_stub/TestIoCtxImpl.cc
@@ -0,0 +1,394 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestIoCtxImpl.h"
+#include "test/librados_test_stub/TestClassHandler.h"
+#include "test/librados_test_stub/TestRadosClient.h"
+#include "test/librados_test_stub/TestWatchNotify.h"
+#include "librados/AioCompletionImpl.h"
+#include "include/ceph_assert.h"
+#include "common/Finisher.h"
+#include "common/valgrind.h"
+#include "objclass/objclass.h"
+#include <functional>
+#include <errno.h>
+
+using namespace std;
+
+namespace librados {
+
+// Default constructor used by mock/clone paths: no client is attached
+// (m_client == NULL), so most operations are unusable until wired up.
+TestIoCtxImpl::TestIoCtxImpl() : m_client(NULL) {
+  get();
+}
+
+TestIoCtxImpl::TestIoCtxImpl(TestRadosClient *client, int64_t pool_id,
+                             const std::string& pool_name)
+  : m_client(client), m_pool_id(pool_id), m_pool_name(pool_name),
+    m_snap_seq(CEPH_NOSNAP)
+{
+  // the io ctx keeps its owning client alive for its whole lifetime
+  m_client->get();
+  get();
+}
+
+TestIoCtxImpl::TestIoCtxImpl(const TestIoCtxImpl& rhs)
+  : m_client(rhs.m_client),
+    m_pool_id(rhs.m_pool_id),
+    m_pool_name(rhs.m_pool_name),
+    m_namespace_name(rhs.m_namespace_name),
+    m_snap_seq(rhs.m_snap_seq)
+{
+  // copies share the client; each copy takes its own client reference
+  m_client->get();
+  get();
+}
+
+TestIoCtxImpl::~TestIoCtxImpl() {
+  // every queued aio operation must have completed before destruction
+  ceph_assert(m_pending_ops == 0);
+}
+
+// Intrusive refcounting for TestObjectOperationImpl.  The valgrind
+// annotations describe the release/acquire ordering of the atomic decrement
+// to race detectors (helgrind/drd).
+void TestObjectOperationImpl::get() {
+  m_refcount++;
+}
+
+void TestObjectOperationImpl::put() {
+  if (--m_refcount == 0) {
+    ANNOTATE_HAPPENS_AFTER(&m_refcount);
+    ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&m_refcount);
+    delete this;
+  } else {
+    ANNOTATE_HAPPENS_BEFORE(&m_refcount);
+  }
+}
+
+// Intrusive refcounting for the io ctx itself.
+void TestIoCtxImpl::get() {
+  m_refcount++;
+}
+
+void TestIoCtxImpl::put() {
+  if (--m_refcount == 0) {
+    // drop the client reference taken in the constructor
+    m_client->put();
+    delete this;
+  }
+}
+
+uint64_t TestIoCtxImpl::get_instance_id() const {
+  return m_client->get_instance_id();
+}
+
+int64_t TestIoCtxImpl::get_id() {
+  return m_pool_id;
+}
+
+uint64_t TestIoCtxImpl::get_last_version() {
+  // per-ioctx object versions are not tracked by the stub
+  return 0;
+}
+
+std::string TestIoCtxImpl::get_pool_name() {
+  return m_pool_name;
+}
+
+// Block until all previously queued aio operations have been processed.
+int TestIoCtxImpl::aio_flush() {
+  m_client->flush_aio_operations();
+  return 0;
+}
+
+// Asynchronous flush: the completion fires once prior aio operations finish.
+void TestIoCtxImpl::aio_flush_async(AioCompletionImpl *c) {
+  m_client->flush_aio_operations(c);
+}
+
+// Asynchronously notify watchers of 'oid'.  NOTE(review): unlike aio_watch /
+// aio_unwatch this path does not short-circuit on is_blocklisted() --
+// presumably the watch/notify subsystem handles that case; confirm.
+void TestIoCtxImpl::aio_notify(const std::string& oid, AioCompletionImpl *c,
+                               bufferlist& bl, uint64_t timeout_ms,
+                               bufferlist *pbl) {
+  m_pending_ops++;
+  c->get();
+  C_AioNotify *ctx = new C_AioNotify(this, c);
+  m_client->get_watch_notify()->aio_notify(m_client, m_pool_id, get_namespace(),
+                                           oid, bl, timeout_ms, pbl, ctx);
+}
+
+// Queue a compound write operation for asynchronous execution.  A reference
+// on 'ops' is held until execute_aio_operations() runs and drops it.
+int TestIoCtxImpl::aio_operate(const std::string& oid, TestObjectOperationImpl &ops,
+                               AioCompletionImpl *c, SnapContext *snap_context,
+                               const ceph::real_time *pmtime, int flags) {
+  // TODO flags for now
+  ops.get();
+  m_pending_ops++;
+  m_client->add_aio_operation(oid, true, std::bind(
+    &TestIoCtxImpl::execute_aio_operations, this, oid, &ops,
+    reinterpret_cast<bufferlist*>(0), m_snap_seq,
+    snap_context != NULL ? *snap_context : m_snapc, nullptr), c);
+  return 0;
+}
+
+// Queue a compound read operation; results land in 'pbl' / 'objver'.
+int TestIoCtxImpl::aio_operate_read(const std::string& oid,
+                                    TestObjectOperationImpl &ops,
+                                    AioCompletionImpl *c, int flags,
+                                    bufferlist *pbl, uint64_t snap_id,
+                                    uint64_t* objver) {
+  // TODO ignoring flags for now
+  ops.get();
+  m_pending_ops++;
+  m_client->add_aio_operation(oid, true, std::bind(
+    &TestIoCtxImpl::execute_aio_operations, this, oid, &ops, pbl, snap_id,
+    m_snapc, objver), c);
+  return 0;
+}
+
+// Asynchronously register a watch on object 'o'.  If the client has been
+// blocklisted the completion is queued immediately with -EBLOCKLISTED.
+int TestIoCtxImpl::aio_watch(const std::string& o, AioCompletionImpl *c,
+                             uint64_t *handle, librados::WatchCtx2 *watch_ctx) {
+  m_pending_ops++;
+  c->get();
+  C_AioNotify *ctx = new C_AioNotify(this, c);
+  if (m_client->is_blocklisted()) {
+    m_client->get_aio_finisher()->queue(ctx, -EBLOCKLISTED);
+  } else {
+    m_client->get_watch_notify()->aio_watch(m_client, m_pool_id,
+                                            get_namespace(), o,
+                                            get_instance_id(), handle, nullptr,
+                                            watch_ctx, ctx);
+  }
+  return 0;
+}
+
+// Asynchronously tear down a watch previously created via (aio_)watch().
+int TestIoCtxImpl::aio_unwatch(uint64_t handle, AioCompletionImpl *c) {
+  m_pending_ops++;
+  c->get();
+  C_AioNotify *ctx = new C_AioNotify(this, c);
+  if (m_client->is_blocklisted()) {
+    m_client->get_aio_finisher()->queue(ctx, -EBLOCKLISTED);
+  } else {
+    m_client->get_watch_notify()->aio_unwatch(m_client, handle, ctx);
+  }
+  return 0;
+}
+
+// Synchronously invoke a registered object-class (cls) method.  Returns
+// -ENOSYS when the class/method pair was never registered with the handler.
+int TestIoCtxImpl::exec(const std::string& oid, TestClassHandler *handler,
+                        const char *cls, const char *method,
+                        bufferlist& inbl, bufferlist* outbl,
+                        uint64_t snap_id, const SnapContext &snapc) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  cls_method_cxx_call_t call = handler->get_method(cls, method);
+  if (call == NULL) {
+    return -ENOSYS;
+  }
+
+  return (*call)(reinterpret_cast<cls_method_context_t>(
+    handler->get_method_context(this, oid, snap_id, snapc).get()), &inbl,
+    outbl);
+}
+
+int TestIoCtxImpl::list_watchers(const std::string& o,
+                                 std::list<obj_watch_t> *out_watchers) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  return m_client->get_watch_notify()->list_watchers(m_pool_id, get_namespace(),
+                                                     o, out_watchers);
+}
+
+// Synchronous notify: blocks until watchers respond or the timeout expires.
+int TestIoCtxImpl::notify(const std::string& o, bufferlist& bl,
+                          uint64_t timeout_ms, bufferlist *pbl) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  return m_client->get_watch_notify()->notify(m_client, m_pool_id,
+                                              get_namespace(), o, bl,
+                                              timeout_ms, pbl);
+}
+
+// Acknowledge a received notification on behalf of this client's watch.
+void TestIoCtxImpl::notify_ack(const std::string& o, uint64_t notify_id,
+                               uint64_t handle, bufferlist& bl) {
+  m_client->get_watch_notify()->notify_ack(m_client, m_pool_id, get_namespace(),
+                                           o, notify_id, handle,
+                                           m_client->get_instance_id(), bl);
+}
+
+// Synchronous wrapper: queue the compound write op and wait for completion.
+int TestIoCtxImpl::operate(const std::string& oid,
+                           TestObjectOperationImpl &ops) {
+  AioCompletionImpl *comp = new AioCompletionImpl();
+
+  // reference dropped by execute_aio_operations()
+  ops.get();
+  m_pending_ops++;
+  m_client->add_aio_operation(oid, false, std::bind(
+    &TestIoCtxImpl::execute_aio_operations, this, oid, &ops,
+    reinterpret_cast<bufferlist*>(0), m_snap_seq, m_snapc, nullptr), comp);
+
+  comp->wait_for_complete();
+  int ret = comp->get_return_value();
+  comp->put();
+  return ret;
+}
+
+// Synchronous wrapper: queue the compound read op and wait for completion.
+int TestIoCtxImpl::operate_read(const std::string& oid,
+                                TestObjectOperationImpl &ops,
+                                bufferlist *pbl) {
+  AioCompletionImpl *comp = new AioCompletionImpl();
+
+  ops.get();
+  m_pending_ops++;
+  m_client->add_aio_operation(oid, false, std::bind(
+    &TestIoCtxImpl::execute_aio_operations, this, oid, &ops, pbl,
+    m_snap_seq, m_snapc, nullptr), comp);
+
+  comp->wait_for_complete();
+  int ret = comp->get_return_value();
+  comp->put();
+  return ret;
+}
+
+// Asynchronous self-managed snapshot creation; delegates to the subclass's
+// synchronous selfmanaged_snap_create() on the aio queue.
+void TestIoCtxImpl::aio_selfmanaged_snap_create(uint64_t *snapid,
+                                                AioCompletionImpl *c) {
+  m_client->add_aio_operation(
+    "", true,
+    std::bind(&TestIoCtxImpl::selfmanaged_snap_create, this, snapid), c);
+}
+
+void TestIoCtxImpl::aio_selfmanaged_snap_remove(uint64_t snapid,
+                                                AioCompletionImpl *c) {
+  m_client->add_aio_operation(
+    "", true,
+    std::bind(&TestIoCtxImpl::selfmanaged_snap_remove, this, snapid), c);
+}
+
+// Install the snapshot context used for subsequent writes.
+int TestIoCtxImpl::selfmanaged_snap_set_write_ctx(snap_t seq,
+                                                  std::vector<snap_t>& snaps) {
+  std::vector<snapid_t> snap_ids(snaps.begin(), snaps.end());
+  m_snapc = SnapContext(seq, snap_ids);
+  return 0;
+}
+
+// Allocation hints are accepted but ignored by the stub.
+int TestIoCtxImpl::set_alloc_hint(const std::string& oid,
+                                  uint64_t expected_object_size,
+                                  uint64_t expected_write_size,
+                                  uint32_t flags,
+                                  const SnapContext &snapc) {
+  return 0;
+}
+
+// Select the snapshot reads operate against; 0 maps to CEPH_NOSNAP (head).
+void TestIoCtxImpl::set_snap_read(snap_t seq) {
+  if (seq == 0) {
+    seq = CEPH_NOSNAP;
+  }
+  m_snap_seq = seq;
+}
+
+// Apply a single CEPH_OSD_TMAP_SET / CEPH_OSD_TMAP_RM command from 'cmdbl'
+// by reading the whole tmap object, mutating the decoded map, and rewriting
+// the full object.
+int TestIoCtxImpl::tmap_update(const std::string& oid, bufferlist& cmdbl) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  // TODO: protect against concurrent tmap updates
+  bufferlist tmap_header;
+  std::map<string,bufferlist> tmap;
+  uint64_t size = 0;
+  int r = stat(oid, &size, NULL);
+  if (r == -ENOENT) {
+    // the first update implicitly creates the object
+    r = create(oid, false, m_snapc);
+  }
+  if (r < 0) {
+    return r;
+  }
+
+  if (size > 0) {
+    // decode the existing header followed by the key/value map
+    bufferlist inbl;
+    r = read(oid, size, 0, &inbl, CEPH_NOSNAP, nullptr);
+    if (r < 0) {
+      return r;
+    }
+    auto iter = inbl.cbegin();
+    decode(tmap_header, iter);
+    decode(tmap, iter);
+  }
+
+  // command buffer layout: opcode byte, key, then (for SET) the value
+  __u8 c;
+  std::string key;
+  bufferlist value;
+  auto iter = cmdbl.cbegin();
+  decode(c, iter);
+  decode(key, iter);
+
+  switch (c) {
+    case CEPH_OSD_TMAP_SET:
+      decode(value, iter);
+      tmap[key] = value;
+      break;
+    case CEPH_OSD_TMAP_RM:
+      // map::erase returns the number of elements removed
+      r = tmap.erase(key);
+      if (r == 0) {
+        return -ENOENT;
+      }
+      break;
+    default:
+      return -EINVAL;
+  }
+
+  // re-encode and persist the mutated tmap
+  bufferlist out;
+  encode(tmap_header, out);
+  encode(tmap, out);
+  r = write_full(oid, out, m_snapc);
+  return r;
+}
+
+int TestIoCtxImpl::unwatch(uint64_t handle) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  return m_client->get_watch_notify()->unwatch(m_client, handle);
+}
+
+// Synchronously register a watch; exactly one of ctx/ctx2 is expected to be
+// used by the watch/notify layer.
+int TestIoCtxImpl::watch(const std::string& o, uint64_t *handle,
+                         librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  return m_client->get_watch_notify()->watch(m_client, m_pool_id,
+                                             get_namespace(), o,
+                                             get_instance_id(), handle, ctx,
+                                             ctx2);
+}
+
+// Run a single operation against 'oid' under the per-object transaction
+// guard, which serializes concurrent access to the same object.
+int TestIoCtxImpl::execute_operation(const std::string& oid,
+                                     const Operation &operation) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  TestRadosClient::Transaction transaction(m_client, get_namespace(), oid);
+  return operation(this, oid);
+}
+
+// Execute each queued sub-operation in order, stopping at the first error.
+// Drops the reference on 'ops' taken by the aio_*/operate* entry points and
+// decrements the pending-op counter checked by the destructor.
+int TestIoCtxImpl::execute_aio_operations(const std::string& oid,
+                                          TestObjectOperationImpl *ops,
+                                          bufferlist *pbl, uint64_t snap_id,
+                                          const SnapContext &snapc,
+                                          uint64_t* objver) {
+  int ret = 0;
+  if (m_client->is_blocklisted()) {
+    ret = -EBLOCKLISTED;
+  } else {
+    // serialize against other operations on the same object
+    TestRadosClient::Transaction transaction(m_client, get_namespace(), oid);
+    for (ObjectOperations::iterator it = ops->ops.begin();
+         it != ops->ops.end(); ++it) {
+      ret = (*it)(this, oid, pbl, snap_id, snapc, objver);
+      if (ret < 0) {
+        break;
+      }
+    }
+  }
+  m_pending_ops--;
+  ops->put();
+  return ret;
+}
+
+// Completion callback for aio_notify/aio_watch/aio_unwatch contexts.
+void TestIoCtxImpl::handle_aio_notify_complete(AioCompletionImpl *c, int r) {
+  m_pending_ops--;
+
+  m_client->finish_aio_completion(c, r);
+}
+
+} // namespace librados
diff --git a/src/test/librados_test_stub/TestIoCtxImpl.h b/src/test/librados_test_stub/TestIoCtxImpl.h
new file mode 100644
index 000000000..fdda17cfd
--- /dev/null
+++ b/src/test/librados_test_stub/TestIoCtxImpl.h
@@ -0,0 +1,221 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_IO_CTX_IMPL_H
+#define CEPH_TEST_IO_CTX_IMPL_H
+
+#include <list>
+#include <atomic>
+
+#include <boost/function.hpp>
+
+#include "include/rados/librados.hpp"
+#include "include/Context.h"
+#include "common/snap_types.h"
+
+namespace librados {
+
+class TestClassHandler;
+class TestIoCtxImpl;
+class TestRadosClient;
+
+typedef boost::function<int(TestIoCtxImpl*,
+ const std::string&,
+ bufferlist *,
+ uint64_t,
+ const SnapContext &,
+ uint64_t*)> ObjectOperationTestImpl;
+typedef std::list<ObjectOperationTestImpl> ObjectOperations;
+
+// Refcounted list of sub-operations composing a librados ObjectOperation;
+// executed in order by TestIoCtxImpl::execute_aio_operations().
+struct TestObjectOperationImpl {
+public:
+  void get();
+  void put();
+
+  ObjectOperations ops;
+private:
+  std::atomic<uint64_t> m_refcount = { 0 };
+};
+
+// Abstract per-pool I/O context for the librados test stub.  Implements the
+// refcounting, aio queueing and watch/notify plumbing shared by all
+// backends; the pure-virtual storage primitives are supplied by concrete
+// subclasses (e.g. TestMemIoCtxImpl).
+class TestIoCtxImpl {
+public:
+  typedef boost::function<int(TestIoCtxImpl *, const std::string &)> Operation;
+
+
+  TestIoCtxImpl();
+  explicit TestIoCtxImpl(TestRadosClient *client, int64_t m_pool_id,
+                         const std::string& pool_name);
+
+  TestRadosClient *get_rados_client() {
+    return m_client;
+  }
+
+  void get();
+  void put();
+
+  inline int64_t get_pool_id() const {
+    return m_pool_id;
+  }
+
+  virtual TestIoCtxImpl *clone() = 0;
+
+  virtual uint64_t get_instance_id() const;
+  virtual int64_t get_id();
+  virtual uint64_t get_last_version();
+  virtual std::string get_pool_name();
+
+  inline void set_namespace(const std::string& namespace_name) {
+    m_namespace_name = namespace_name;
+  }
+  inline std::string get_namespace() const {
+    return m_namespace_name;
+  }
+
+  // snapshot id used for reads (CEPH_NOSNAP reads the head object)
+  snap_t get_snap_read() const {
+    return m_snap_seq;
+  }
+
+  // snapshot context applied to writes
+  inline void set_snap_context(const SnapContext& snapc) {
+    m_snapc = snapc;
+  }
+  const SnapContext &get_snap_context() const {
+    return m_snapc;
+  }
+
+  virtual int aio_flush();
+  virtual void aio_flush_async(AioCompletionImpl *c);
+  virtual void aio_notify(const std::string& oid, AioCompletionImpl *c,
+                          bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl);
+  virtual int aio_operate(const std::string& oid, TestObjectOperationImpl &ops,
+                          AioCompletionImpl *c, SnapContext *snap_context,
+                          const ceph::real_time *pmtime, int flags);
+  virtual int aio_operate_read(const std::string& oid, TestObjectOperationImpl &ops,
+                               AioCompletionImpl *c, int flags,
+                               bufferlist *pbl, uint64_t snap_id,
+                               uint64_t* objver);
+  virtual int aio_remove(const std::string& oid, AioCompletionImpl *c,
+                         int flags = 0) = 0;
+  virtual int aio_watch(const std::string& o, AioCompletionImpl *c,
+                        uint64_t *handle, librados::WatchCtx2 *ctx);
+  virtual int aio_unwatch(uint64_t handle, AioCompletionImpl *c);
+  // storage primitives below are implemented by concrete backends
+  virtual int append(const std::string& oid, const bufferlist &bl,
+                     const SnapContext &snapc) = 0;
+  virtual int assert_exists(const std::string &oid, uint64_t snap_id) = 0;
+  virtual int assert_version(const std::string &oid, uint64_t ver) = 0;
+
+  virtual int create(const std::string& oid, bool exclusive,
+                     const SnapContext &snapc) = 0;
+  virtual int exec(const std::string& oid, TestClassHandler *handler,
+                   const char *cls, const char *method,
+                   bufferlist& inbl, bufferlist* outbl,
+                   uint64_t snap_id, const SnapContext &snapc);
+  virtual int list_snaps(const std::string& o, snap_set_t *out_snaps) = 0;
+  virtual int list_watchers(const std::string& o,
+                            std::list<obj_watch_t> *out_watchers);
+  virtual int notify(const std::string& o, bufferlist& bl,
+                     uint64_t timeout_ms, bufferlist *pbl);
+  virtual void notify_ack(const std::string& o, uint64_t notify_id,
+                          uint64_t handle, bufferlist& bl);
+  virtual int omap_get_vals(const std::string& oid,
+                            const std::string& start_after,
+                            const std::string &filter_prefix,
+                            uint64_t max_return,
+                            std::map<std::string, bufferlist> *out_vals) = 0;
+  virtual int omap_get_vals2(const std::string& oid,
+                             const std::string& start_after,
+                             const std::string &filter_prefix,
+                             uint64_t max_return,
+                             std::map<std::string, bufferlist> *out_vals,
+                             bool *pmore) = 0;
+  virtual int omap_rm_keys(const std::string& oid,
+                           const std::set<std::string>& keys) = 0;
+  virtual int omap_set(const std::string& oid,
+                       const std::map<std::string, bufferlist> &map) = 0;
+  virtual int operate(const std::string& oid, TestObjectOperationImpl &ops);
+  virtual int operate_read(const std::string& oid, TestObjectOperationImpl &ops,
+                           bufferlist *pbl);
+  virtual int read(const std::string& oid, size_t len, uint64_t off,
+                   bufferlist *bl, uint64_t snap_id, uint64_t* objver) = 0;
+  virtual int remove(const std::string& oid, const SnapContext &snapc) = 0;
+  virtual int selfmanaged_snap_create(uint64_t *snapid) = 0;
+  virtual void aio_selfmanaged_snap_create(uint64_t *snapid,
+                                           AioCompletionImpl *c);
+  virtual int selfmanaged_snap_remove(uint64_t snapid) = 0;
+  virtual void aio_selfmanaged_snap_remove(uint64_t snapid,
+                                           AioCompletionImpl *c);
+  virtual int selfmanaged_snap_rollback(const std::string& oid,
+                                        uint64_t snapid) = 0;
+  virtual int selfmanaged_snap_set_write_ctx(snap_t seq,
+                                             std::vector<snap_t>& snaps);
+  virtual int set_alloc_hint(const std::string& oid,
+                             uint64_t expected_object_size,
+                             uint64_t expected_write_size,
+                             uint32_t flags,
+                             const SnapContext &snapc);
+  virtual void set_snap_read(snap_t seq);
+  virtual int sparse_read(const std::string& oid, uint64_t off, uint64_t len,
+                          std::map<uint64_t,uint64_t> *m,
+                          bufferlist *data_bl, uint64_t snap_id) = 0;
+  virtual int stat(const std::string& oid, uint64_t *psize, time_t *pmtime) = 0;
+  virtual int truncate(const std::string& oid, uint64_t size,
+                       const SnapContext &snapc) = 0;
+  virtual int tmap_update(const std::string& oid, bufferlist& cmdbl);
+  virtual int unwatch(uint64_t handle);
+  virtual int watch(const std::string& o, uint64_t *handle,
+                    librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2);
+  virtual int write(const std::string& oid, bufferlist& bl, size_t len,
+                    uint64_t off, const SnapContext &snapc) = 0;
+  virtual int write_full(const std::string& oid, bufferlist& bl,
+                         const SnapContext &snapc) = 0;
+  virtual int writesame(const std::string& oid, bufferlist& bl, size_t len,
+                        uint64_t off, const SnapContext &snapc) = 0;
+  virtual int cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl,
+                     uint64_t snap_id) = 0;
+  virtual int xattr_get(const std::string& oid,
+                        std::map<std::string, bufferlist>* attrset) = 0;
+  virtual int xattr_set(const std::string& oid, const std::string &name,
+                        bufferlist& bl) = 0;
+  virtual int zero(const std::string& oid, uint64_t off, uint64_t len,
+                   const SnapContext &snapc) = 0;
+
+  // run a single operation under the per-object transaction guard
+  int execute_operation(const std::string& oid,
+                        const Operation &operation);
+
+protected:
+  TestIoCtxImpl(const TestIoCtxImpl& rhs);
+  virtual ~TestIoCtxImpl();
+
+  int execute_aio_operations(const std::string& oid,
+                             TestObjectOperationImpl *ops,
+                             bufferlist *pbl, uint64_t,
+                             const SnapContext &snapc,
+                             uint64_t* objver);
+
+private:
+  // adapts a Context callback to handle_aio_notify_complete()
+  struct C_AioNotify : public Context {
+    TestIoCtxImpl *io_ctx;
+    AioCompletionImpl *aio_comp;
+    C_AioNotify(TestIoCtxImpl *_io_ctx, AioCompletionImpl *_aio_comp)
+      : io_ctx(_io_ctx), aio_comp(_aio_comp) {
+    }
+    void finish(int r) override {
+      io_ctx->handle_aio_notify_complete(aio_comp, r);
+    }
+  };
+
+  TestRadosClient *m_client;
+  int64_t m_pool_id = 0;
+  std::string m_pool_name;
+  std::string m_namespace_name;
+
+  snap_t m_snap_seq = 0;
+  SnapContext m_snapc;
+  std::atomic<uint64_t> m_refcount = { 0 };
+  std::atomic<uint64_t> m_pending_ops = { 0 };
+
+  void handle_aio_notify_complete(AioCompletionImpl *aio_comp, int r);
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_IO_CTX_IMPL_H
diff --git a/src/test/librados_test_stub/TestMemCluster.cc b/src/test/librados_test_stub/TestMemCluster.cc
new file mode 100644
index 000000000..f0995788b
--- /dev/null
+++ b/src/test/librados_test_stub/TestMemCluster.cc
@@ -0,0 +1,203 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestMemCluster.h"
+#include "test/librados_test_stub/TestMemRadosClient.h"
+
+namespace librados {
+
+// New object state: exists at head with zeroed metadata.  'mtime' is now
+// zero-initialized; it was previously left indeterminate, so stat() on a
+// freshly created object returned an uninitialized time_t.
+TestMemCluster::File::File()
+  : mtime(0), objver(0), snap_id(), exists(true) {
+}
+
+// Copy used when cloning an object version.  NOTE(review): 'snaps' and
+// 'snap_overlap' are not copied -- presumably the clone starts with fresh
+// snapshot bookkeeping; confirm against the snapshot-handling callers.
+TestMemCluster::File::File(const File &rhs)
+  : data(rhs.data),
+    mtime(rhs.mtime),
+    objver(rhs.objver),
+    snap_id(rhs.snap_id),
+    exists(rhs.exists) {
+}
+
+TestMemCluster::Pool::Pool() = default;
+
+// Seed the nonce allocator from this object's address so multiple
+// in-process clusters hand out distinct client nonces.
+TestMemCluster::TestMemCluster()
+  : m_next_nonce(static_cast<uint32_t>(reinterpret_cast<uint64_t>(this))) {
+}
+
+TestMemCluster::~TestMemCluster() {
+  // pools are refcounted; drop the cluster's reference on each
+  for (auto pool_pair : m_pools) {
+    pool_pair.second->put();
+  }
+}
+
+TestRadosClient *TestMemCluster::create_rados_client(CephContext *cct) {
+  return new TestMemRadosClient(cct, this);
+}
+
+// Attach a removal handler to an existing object.  Returns -ENOENT when the
+// pool or the object does not exist; asserts the handler is not already
+// registered for that object.
+int TestMemCluster::register_object_handler(int64_t pool_id,
+                                            const ObjectLocator& locator,
+                                            ObjectHandler* object_handler) {
+  std::lock_guard locker{m_lock};
+  auto pool = get_pool(m_lock, pool_id);
+  if (pool == nullptr) {
+    return -ENOENT;
+  }
+
+  std::unique_lock pool_locker{pool->file_lock};
+  auto file_it = pool->files.find(locator);
+  if (file_it == pool->files.end()) {
+    return -ENOENT;
+  }
+
+  auto& object_handlers = pool->file_handlers[locator];
+  auto it = object_handlers.find(object_handler);
+  ceph_assert(it == object_handlers.end());
+
+  object_handlers.insert(object_handler);
+  return 0;
+}
+
+// Detach a previously registered handler; unknown pools, objects or
+// handlers are silently ignored.
+void TestMemCluster::unregister_object_handler(int64_t pool_id,
+                                               const ObjectLocator& locator,
+                                               ObjectHandler* object_handler) {
+  std::lock_guard locker{m_lock};
+  auto pool = get_pool(m_lock, pool_id);
+  if (pool == nullptr) {
+    return;
+  }
+
+  std::unique_lock pool_locker{pool->file_lock};
+  auto handlers_it = pool->file_handlers.find(locator);
+  if (handlers_it == pool->file_handlers.end()) {
+    return;
+  }
+
+  auto& object_handlers = handlers_it->second;
+  object_handlers.erase(object_handler);
+}
+
+// Pool management: pools are stored in a name-keyed map with monotonically
+// increasing ids; all accessors take m_lock.
+int TestMemCluster::pool_create(const std::string &pool_name) {
+  std::lock_guard locker{m_lock};
+  if (m_pools.find(pool_name) != m_pools.end()) {
+    return -EEXIST;
+  }
+  Pool *pool = new Pool();
+  pool->pool_id = ++m_pool_id;
+  m_pools[pool_name] = pool;
+  return 0;
+}
+
+int TestMemCluster::pool_delete(const std::string &pool_name) {
+  std::lock_guard locker{m_lock};
+  Pools::iterator iter = m_pools.find(pool_name);
+  if (iter == m_pools.end()) {
+    return -ENOENT;
+  }
+  // drop the cluster's reference; io ctxs may still hold their own
+  iter->second->put();
+  m_pools.erase(iter);
+  return 0;
+}
+
+int TestMemCluster::pool_get_base_tier(int64_t pool_id, int64_t* base_tier) {
+  // TODO
+  *base_tier = pool_id;
+  return 0;
+}
+
+int TestMemCluster::pool_list(std::list<std::pair<int64_t, std::string> >& v) {
+  std::lock_guard locker{m_lock};
+  v.clear();
+  for (Pools::iterator iter = m_pools.begin(); iter != m_pools.end(); ++iter) {
+    v.push_back(std::make_pair(iter->second->pool_id, iter->first));
+  }
+  return 0;
+}
+
+int64_t TestMemCluster::pool_lookup(const std::string &pool_name) {
+  std::lock_guard locker{m_lock};
+  Pools::iterator iter = m_pools.find(pool_name);
+  if (iter == m_pools.end()) {
+    return -ENOENT;
+  }
+  return iter->second->pool_id;
+}
+
+int TestMemCluster::pool_reverse_lookup(int64_t id, std::string *name) {
+  std::lock_guard locker{m_lock};
+  for (Pools::iterator iter = m_pools.begin(); iter != m_pools.end(); ++iter) {
+    if (iter->second->pool_id == id) {
+      *name = iter->first;
+      return 0;
+    }
+  }
+  return -ENOENT;
+}
+
+TestMemCluster::Pool *TestMemCluster::get_pool(int64_t pool_id) {
+  std::lock_guard locker{m_lock};
+  return get_pool(m_lock, pool_id);
+}
+
+// Variant for callers that already hold m_lock; the mutex reference only
+// documents the locking precondition.
+TestMemCluster::Pool *TestMemCluster::get_pool(const ceph::mutex& lock,
+                                               int64_t pool_id) {
+  for (auto &pool_pair : m_pools) {
+    if (pool_pair.second->pool_id == pool_id) {
+      return pool_pair.second;
+    }
+  }
+  return nullptr;
+}
+
+TestMemCluster::Pool *TestMemCluster::get_pool(const std::string &pool_name) {
+  std::lock_guard locker{m_lock};
+  Pools::iterator iter = m_pools.find(pool_name);
+  if (iter != m_pools.end()) {
+    return iter->second;
+  }
+  return nullptr;
+}
+
+// Hand out a unique (nonce, global_id) pair for a newly connected client.
+void TestMemCluster::allocate_client(uint32_t *nonce, uint64_t *global_id) {
+  std::lock_guard locker{m_lock};
+  *nonce = m_next_nonce++;
+  *global_id = m_next_global_id++;
+}
+
+// Forget a disconnected client; clears any blocklist entry for the nonce.
+void TestMemCluster::deallocate_client(uint32_t nonce) {
+  std::lock_guard locker{m_lock};
+  m_blocklist.erase(nonce);
+}
+
+bool TestMemCluster::is_blocklisted(uint32_t nonce) const {
+  std::lock_guard locker{m_lock};
+  return (m_blocklist.find(nonce) != m_blocklist.end());
+}
+
+void TestMemCluster::blocklist(uint32_t nonce) {
+  {
+    // m_lock is intentionally released before notifying the watch/notify
+    // subsystem below
+    std::lock_guard locker{m_lock};
+    m_blocklist.insert(nonce);
+  }
+
+  // after blocklisting the client, disconnect and drop its watches
+  m_watch_notify.blocklist(nonce);
+}
+
+// Begin an exclusive per-object transaction, blocking until no other
+// transaction is active on the same locator.
+void TestMemCluster::transaction_start(const ObjectLocator& locator) {
+  std::unique_lock locker{m_lock};
+  m_transaction_cond.wait(locker, [&locator, this] {
+    return m_transactions.count(locator) == 0;
+  });
+  auto result = m_transactions.insert(locator);
+  ceph_assert(result.second);
+}
+
+// End the transaction and wake any waiters blocked in transaction_start().
+void TestMemCluster::transaction_finish(const ObjectLocator& locator) {
+  std::lock_guard locker{m_lock};
+  size_t count = m_transactions.erase(locator);
+  ceph_assert(count == 1);
+  m_transaction_cond.notify_all();
+}
+
+} // namespace librados
+
diff --git a/src/test/librados_test_stub/TestMemCluster.h b/src/test/librados_test_stub/TestMemCluster.h
new file mode 100644
index 000000000..2e80bff17
--- /dev/null
+++ b/src/test/librados_test_stub/TestMemCluster.h
@@ -0,0 +1,124 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_MEM_CLUSTER_H
+#define CEPH_TEST_MEM_CLUSTER_H
+
+#include "test/librados_test_stub/TestCluster.h"
+#include "include/buffer.h"
+#include "include/interval_set.h"
+#include "include/int_types.h"
+#include "common/ceph_mutex.h"
+#include "common/RefCountedObj.h"
+#include <boost/shared_ptr.hpp>
+#include <list>
+#include <map>
+#include <set>
+#include <string>
+
+namespace librados {
+
+// In-memory TestCluster implementation: pools, object data, snapshots,
+// omaps/xattrs and client blocklisting all live in process memory.
+class TestMemCluster : public TestCluster {
+public:
+  typedef std::map<std::string, bufferlist> OMap;
+  typedef std::map<ObjectLocator, OMap> FileOMaps;
+  typedef std::map<ObjectLocator, bufferlist> FileTMaps;
+  typedef std::map<std::string, bufferlist> XAttrs;
+  typedef std::map<ObjectLocator, XAttrs> FileXAttrs;
+  typedef std::set<ObjectHandler*> ObjectHandlers;
+  typedef std::map<ObjectLocator, ObjectHandlers> FileHandlers;
+
+  // One version of an object's contents; a FileSnapshots list holds the
+  // object's history.
+  struct File {
+    File();
+    File(const File &rhs);
+
+    bufferlist data;
+    time_t mtime;
+    uint64_t objver;
+
+    uint64_t snap_id;
+    std::vector<uint64_t> snaps;
+    interval_set<uint64_t> snap_overlap;
+
+    bool exists;
+    ceph::shared_mutex lock =
+      ceph::make_shared_mutex("TestMemCluster::File::lock");
+  };
+  typedef boost::shared_ptr<File> SharedFile;
+
+  typedef std::list<SharedFile> FileSnapshots;
+  typedef std::map<ObjectLocator, FileSnapshots> Files;
+
+  typedef std::set<uint64_t> SnapSeqs;
+  // Refcounted pool: all per-object maps below are guarded by file_lock.
+  struct Pool : public RefCountedObject {
+    Pool();
+
+    int64_t pool_id = 0;
+
+    SnapSeqs snap_seqs;
+    uint64_t snap_id = 1;
+
+    ceph::shared_mutex file_lock =
+      ceph::make_shared_mutex("TestMemCluster::Pool::file_lock");
+    Files files;
+    FileOMaps file_omaps;
+    FileTMaps file_tmaps;
+    FileXAttrs file_xattrs;
+    FileHandlers file_handlers;
+  };
+
+  TestMemCluster();
+  ~TestMemCluster() override;
+
+  TestRadosClient *create_rados_client(CephContext *cct) override;
+
+  int register_object_handler(int64_t pool_id, const ObjectLocator& locator,
+                              ObjectHandler* object_handler) override;
+  void unregister_object_handler(int64_t pool_id, const ObjectLocator& locator,
+                                 ObjectHandler* object_handler) override;
+
+  int pool_create(const std::string &pool_name);
+  int pool_delete(const std::string &pool_name);
+  int pool_get_base_tier(int64_t pool_id, int64_t* base_tier);
+  int pool_list(std::list<std::pair<int64_t, std::string> >& v);
+  int64_t pool_lookup(const std::string &name);
+  int pool_reverse_lookup(int64_t id, std::string *name);
+
+  Pool *get_pool(int64_t pool_id);
+  Pool *get_pool(const std::string &pool_name);
+
+  void allocate_client(uint32_t *nonce, uint64_t *global_id);
+  void deallocate_client(uint32_t nonce);
+
+  bool is_blocklisted(uint32_t nonce) const;
+  void blocklist(uint32_t nonce);
+
+  // exclusive per-object transactions (see transaction_start/finish)
+  void transaction_start(const ObjectLocator& locator);
+  void transaction_finish(const ObjectLocator& locator);
+
+private:
+
+  typedef std::map<std::string, Pool*> Pools;
+  typedef std::set<uint32_t> Blocklist;
+
+  // guards pools, client ids, the blocklist and the transaction set
+  mutable ceph::mutex m_lock =
+    ceph::make_mutex("TestMemCluster::m_lock");
+
+  Pools m_pools;
+  int64_t m_pool_id = 0;
+
+  uint32_t m_next_nonce;
+  uint64_t m_next_global_id = 1234;
+
+  Blocklist m_blocklist;
+
+  ceph::condition_variable m_transaction_cond;
+  std::set<ObjectLocator> m_transactions;
+
+  // caller must hold m_lock (documented by the mutex parameter)
+  Pool *get_pool(const ceph::mutex& lock, int64_t pool_id);
+
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_MEM_CLUSTER_H
diff --git a/src/test/librados_test_stub/TestMemIoCtxImpl.cc b/src/test/librados_test_stub/TestMemIoCtxImpl.cc
new file mode 100644
index 000000000..77ea14366
--- /dev/null
+++ b/src/test/librados_test_stub/TestMemIoCtxImpl.cc
@@ -0,0 +1,924 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestMemIoCtxImpl.h"
+#include "test/librados_test_stub/TestMemRadosClient.h"
+#include "common/Clock.h"
+#include "include/err.h"
+#include <functional>
+#include <boost/algorithm/string/predicate.hpp>
+#include <errno.h>
+#include <include/compat.h>
+
+#define dout_subsys ceph_subsys_rados
+#undef dout_prefix
+#define dout_prefix *_dout << "TestMemIoCtxImpl: " << this << " " << __func__ \
+ << ": " << oid << " "
+
+// Flatten an interval set into a vector of (offset, length) pairs.
+static void to_vector(const interval_set<uint64_t> &set,
+                      std::vector<std::pair<uint64_t, uint64_t> > *vec) {
+  vec->clear();
+  for (auto it = set.begin(); it != set.end(); ++it) {
+    vec->push_back(*it);
+  }
+}
+
+// see PrimaryLogPG::finish_extent_cmp()
+// Compare byte-by-byte; bytes beyond the end of read_bl compare as zero.
+// On the first mismatch the offset is encoded into the error code.
+static int cmpext_compare(const bufferlist &bl, const bufferlist &read_bl) {
+  const uint64_t cmp_len = bl.length();
+  for (uint64_t offset = 0; offset < cmp_len; ++offset) {
+    const char actual = (offset < read_bl.length() ? read_bl[offset] : 0);
+    if (bl[offset] != actual) {
+      return -MAX_ERRNO - offset;
+    }
+  }
+  return 0;
+}
+
+namespace librados {
+
+// Default constructor: m_client/m_pool stay null (see header initializers).
+TestMemIoCtxImpl::TestMemIoCtxImpl() {
+}
+
+// Copy constructor: shares the underlying pool and takes a reference on it.
+TestMemIoCtxImpl::TestMemIoCtxImpl(const TestMemIoCtxImpl& rhs)
+  : TestIoCtxImpl(rhs), m_client(rhs.m_client), m_pool(rhs.m_pool) {
+  m_pool->get();  // released in the destructor
+}
+
+// Bind this io-ctx to a specific in-memory pool; takes a reference on the
+// pool that is released by the destructor.
+TestMemIoCtxImpl::TestMemIoCtxImpl(TestMemRadosClient *client, int64_t pool_id,
+                                   const std::string& pool_name,
+                                   TestMemCluster::Pool *pool)
+  : TestIoCtxImpl(client, pool_id, pool_name), m_client(client),
+    m_pool(pool) {
+  m_pool->get();
+}
+
+TestMemIoCtxImpl::~TestMemIoCtxImpl() {
+  // drop the pool reference taken in the constructor / copy constructor
+  m_pool->put();
+}
+
+// Create a new io-ctx sharing this context's pool (the private copy
+// constructor bumps the pool refcount).
+TestIoCtxImpl *TestMemIoCtxImpl::clone() {
+  return new TestMemIoCtxImpl(*this);
+}
+
+// Queue an asynchronous remove of 'oid'.  'flags' is accepted for interface
+// compatibility but ignored by the in-memory implementation.
+int TestMemIoCtxImpl::aio_remove(const std::string& oid, AioCompletionImpl *c, int flags) {
+  m_client->add_aio_operation(oid, true,
+                              std::bind(&TestMemIoCtxImpl::remove, this, oid,
+                                        get_snap_context()),
+                              c);
+  return 0;
+}
+
+// Append 'bl' to the end of the object, zero-extending as required.
+int TestMemIoCtxImpl::append(const std::string& oid, const bufferlist &bl,
+                             const SnapContext &snapc) {
+  // appends are writes: reject on snapshot reads and for blocklisted clients
+  if (get_snap_read() != CEPH_NOSNAP) {
+    return -EROFS;
+  }
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  auto cct = m_client->cct();
+  ldout(cct, 20) << "length=" << bl.length() << ", snapc=" << snapc << dendl;
+
+  TestMemCluster::SharedFile file;
+  {
+    // the pool lock is only needed while locating/creating the file
+    std::unique_lock pool_locker{m_pool->file_lock};
+    file = get_file(oid, true, CEPH_NOSNAP, snapc);
+  }
+
+  std::unique_lock file_locker{file->lock};
+  uint64_t offset = file->data.length();
+  ensure_minimum_length(offset + bl.length(), &file->data);
+  file->data.begin(offset).copy_in(bl.length(), bl);
+  return 0;
+}
+
+// Succeed iff the object is visible at the requested snapshot.
+int TestMemIoCtxImpl::assert_exists(const std::string &oid, uint64_t snap_id) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  std::shared_lock locker{m_pool->file_lock};
+  auto file = get_file(oid, false, snap_id, {});
+  return file ? 0 : -ENOENT;
+}
+
+// Succeed only when the caller-supplied object version matches exactly.
+int TestMemIoCtxImpl::assert_version(const std::string &oid, uint64_t ver) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  std::shared_lock locker{m_pool->file_lock};
+  auto file = get_file(oid, false, CEPH_NOSNAP, {});
+  if (!file || !file->exists) {
+    return -ENOENT;
+  }
+  if (ver == file->objver) {
+    return 0;
+  }
+  // distinct error codes for a stale vs. a future version expectation
+  return (ver < file->objver) ? -ERANGE : -EOVERFLOW;
+}
+
+// Create the object; with 'exclusive' semantics an existing object is an
+// error, otherwise creation is idempotent.
+int TestMemIoCtxImpl::create(const std::string& oid, bool exclusive,
+                             const SnapContext &snapc) {
+  if (get_snap_read() != CEPH_NOSNAP) {
+    return -EROFS;
+  }
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  auto cct = m_client->cct();
+  ldout(cct, 20) << "snapc=" << snapc << dendl;
+
+  std::unique_lock locker{m_pool->file_lock};
+  if (exclusive) {
+    auto existing = get_file(oid, false, CEPH_NOSNAP, {});
+    if (existing && existing->exists) {
+      return -EEXIST;
+    }
+  }
+
+  // write-mode lookup creates the head version when it does not exist
+  get_file(oid, true, CEPH_NOSNAP, snapc);
+  return 0;
+}
+
+// Build the clone list for an object: one entry per snapshot clone plus
+// (possibly) the SNAP_HEAD entry, along with the highest observed snap seq.
+// Fix: the debug-log clone separator was emitted *after* the second and
+// subsequent entries, producing output like "[{a}{b}, {c}, ]"; it is now
+// emitted between entries.
+int TestMemIoCtxImpl::list_snaps(const std::string& oid, snap_set_t *out_snaps) {
+  auto cct = m_client->cct();
+  ldout(cct, 20) << dendl;
+
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  out_snaps->seq = 0;
+  out_snaps->clones.clear();
+
+  std::shared_lock l{m_pool->file_lock};
+  TestMemCluster::Files::iterator it = m_pool->files.find(
+    {get_namespace(), oid});
+  if (it == m_pool->files.end()) {
+    return -ENOENT;
+  }
+
+  bool include_head = false;
+  TestMemCluster::FileSnapshots &file_snaps = it->second;
+  for (TestMemCluster::FileSnapshots::iterator s_it = file_snaps.begin();
+       s_it != file_snaps.end(); ++s_it) {
+    TestMemCluster::File &file = *s_it->get();
+
+    if (file_snaps.size() > 1) {
+      out_snaps->seq = file.snap_id;
+      TestMemCluster::FileSnapshots::iterator next_it(s_it);
+      ++next_it;
+      if (next_it == file_snaps.end()) {
+        // the newest version is the HEAD -- emitted separately below
+        include_head = true;
+        break;
+      }
+
+      ++out_snaps->seq;
+      if (!file.exists) {
+        // deleted version: no clone entry
+        continue;
+      }
+
+      // update the overlap with the next version's overlap metadata
+      TestMemCluster::File &next_file = *next_it->get();
+      interval_set<uint64_t> overlap;
+      if (next_file.exists) {
+        overlap = next_file.snap_overlap;
+      }
+
+      clone_info_t clone;
+      clone.cloneid = file.snap_id;
+      clone.snaps = file.snaps;
+      to_vector(overlap, &clone.overlap);
+      clone.size = file.data.length();
+      out_snaps->clones.push_back(clone);
+    }
+  }
+
+  if ((file_snaps.size() == 1 && file_snaps.back()->data.length() > 0) ||
+      include_head)
+  {
+    // Include the SNAP_HEAD
+    TestMemCluster::File &file = *file_snaps.back();
+    if (file.exists) {
+      std::shared_lock l2{file.lock};
+      if (out_snaps->seq == 0 && !include_head) {
+        out_snaps->seq = file.snap_id;
+      }
+      clone_info_t head_clone;
+      head_clone.cloneid = librados::SNAP_HEAD;
+      head_clone.size = file.data.length();
+      out_snaps->clones.push_back(head_clone);
+    }
+  }
+
+  ldout(cct, 20) << "seq=" << out_snaps->seq << ", "
+                 << "clones=[";
+  bool first_clone = true;
+  for (auto& clone : out_snaps->clones) {
+    // emit the separator between entries, not after them
+    if (!first_clone) {
+      *_dout << ", ";
+    } else {
+      first_clone = false;
+    }
+    *_dout << "{"
+           << "cloneid=" << clone.cloneid << ", "
+           << "snaps=" << clone.snaps << ", "
+           << "overlap=" << clone.overlap << ", "
+           << "size=" << clone.size << "}";
+  }
+  *_dout << "]" << dendl;
+  return 0;
+}
+
+// Return up to 'max_return' omap entries after 'start_after' whose keys
+// match 'filter_prefix'; *pmore (optional) reports whether entries remain.
+int TestMemIoCtxImpl::omap_get_vals2(const std::string& oid,
+                                     const std::string& start_after,
+                                     const std::string &filter_prefix,
+                                     uint64_t max_return,
+                                     std::map<std::string, bufferlist> *out_vals,
+                                     bool *pmore) {
+  if (out_vals == nullptr) {
+    return -EINVAL;
+  }
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  TestMemCluster::SharedFile file;
+  {
+    std::shared_lock pool_locker{m_pool->file_lock};
+    file = get_file(oid, false, CEPH_NOSNAP, {});
+    if (!file) {
+      return -ENOENT;
+    }
+  }
+
+  out_vals->clear();
+
+  std::shared_lock file_locker{file->lock};
+  auto o_it = m_pool->file_omaps.find({get_namespace(), oid});
+  if (o_it == m_pool->file_omaps.end()) {
+    // object exists but has no omap: empty, complete result
+    if (pmore) {
+      *pmore = false;
+    }
+    return 0;
+  }
+
+  TestMemCluster::OMap &omap = o_it->second;
+  auto it = start_after.empty() ? omap.begin()
+                                : omap.upper_bound(start_after);
+  for (; it != omap.end() && max_return > 0; ++it) {
+    if (filter_prefix.empty() ||
+        boost::algorithm::starts_with(it->first, filter_prefix)) {
+      (*out_vals)[it->first] = it->second;
+      --max_return;
+    }
+  }
+  if (pmore) {
+    *pmore = (it != omap.end());
+  }
+  return 0;
+}
+
+// Legacy variant of omap_get_vals2() without the 'pmore' out-parameter.
+int TestMemIoCtxImpl::omap_get_vals(const std::string& oid,
+                                    const std::string& start_after,
+                                    const std::string &filter_prefix,
+                                    uint64_t max_return,
+                                    std::map<std::string, bufferlist> *out_vals) {
+  return omap_get_vals2(oid, start_after, filter_prefix, max_return, out_vals, nullptr);
+}
+
+// Remove the given keys from the object's omap; missing keys are ignored.
+int TestMemIoCtxImpl::omap_rm_keys(const std::string& oid,
+                                   const std::set<std::string>& keys) {
+  if (get_snap_read() != CEPH_NOSNAP) {
+    return -EROFS;
+  }
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  TestMemCluster::SharedFile file;
+  {
+    std::unique_lock pool_locker{m_pool->file_lock};
+    file = get_file(oid, true, CEPH_NOSNAP, get_snap_context());
+    if (!file) {
+      return -ENOENT;
+    }
+  }
+
+  std::unique_lock file_locker{file->lock};
+  auto &omap = m_pool->file_omaps[{get_namespace(), oid}];
+  for (const auto &key : keys) {
+    omap.erase(key);
+  }
+  return 0;
+}
+
+// Insert/overwrite the given omap key/value pairs on the object.
+int TestMemIoCtxImpl::omap_set(const std::string& oid,
+                               const std::map<std::string, bufferlist> &map) {
+  if (get_snap_read() != CEPH_NOSNAP) {
+    return -EROFS;
+  }
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  TestMemCluster::SharedFile file;
+  {
+    std::unique_lock pool_locker{m_pool->file_lock};
+    file = get_file(oid, true, CEPH_NOSNAP, get_snap_context());
+    if (!file) {
+      return -ENOENT;
+    }
+  }
+
+  std::unique_lock file_locker{file->lock};
+  auto &omap = m_pool->file_omaps[{get_namespace(), oid}];
+  for (const auto &[key, value] : map) {
+    // copy the value into a fresh bufferlist before storing it
+    bufferlist bl;
+    bl.append(value);
+    omap[key] = bl;
+  }
+
+  return 0;
+}
+
+// Read 'len' bytes at 'off' (len == 0 means "whole object").  Returns the
+// number of bytes read or a negative error code; *objver (optional)
+// receives the object version.
+int TestMemIoCtxImpl::read(const std::string& oid, size_t len, uint64_t off,
+                           bufferlist *bl, uint64_t snap_id,
+                           uint64_t* objver) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  TestMemCluster::SharedFile file;
+  {
+    std::shared_lock pool_locker{m_pool->file_lock};
+    file = get_file(oid, false, snap_id, {});
+    if (!file) {
+      return -ENOENT;
+    }
+  }
+
+  std::shared_lock file_locker{file->lock};
+  size_t read_len = (len == 0 ? file->data.length() : len);
+  read_len = clip_io(off, read_len, file->data.length());
+  if (bl != nullptr && read_len > 0) {
+    // hand back a deep copy so callers cannot mutate the stored data
+    bufferlist chunk;
+    chunk.substr_of(file->data, off, read_len);
+    append_clone(chunk, bl);
+  }
+  if (objver != nullptr) {
+    *objver = file->objver;
+  }
+  return read_len;
+}
+
+// Remove the object's HEAD version.  Registered object handlers are notified
+// when the removed version was the most recent one, and the whole object
+// record is dropped when no snapshot history remains.
+int TestMemIoCtxImpl::remove(const std::string& oid, const SnapContext &snapc) {
+  if (get_snap_read() != CEPH_NOSNAP) {
+    return -EROFS;
+  } else if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  auto cct = m_client->cct();
+  ldout(cct, 20) << "snapc=" << snapc << dendl;
+
+  std::unique_lock l{m_pool->file_lock};
+  TestMemCluster::SharedFile file = get_file(oid, false, CEPH_NOSNAP, snapc);
+  if (file == NULL) {
+    return -ENOENT;
+  }
+  // re-fetch in write mode so a snapshot clone is taken (if required by the
+  // snap context) before the head is marked removed
+  file = get_file(oid, true, CEPH_NOSNAP, snapc);
+
+  {
+    std::unique_lock l2{file->lock};
+    file->exists = false;
+  }
+
+  TestCluster::ObjectLocator locator(get_namespace(), oid);
+  TestMemCluster::Files::iterator it = m_pool->files.find(locator);
+  ceph_assert(it != m_pool->files.end());
+
+  if (*it->second.rbegin() == file) {
+    // the removed version is the newest: detach all registered object
+    // handlers and notify them of the removal
+    TestMemCluster::ObjectHandlers object_handlers;
+    std::swap(object_handlers, m_pool->file_handlers[locator]);
+    m_pool->file_handlers.erase(locator);
+
+    for (auto object_handler : object_handlers) {
+      object_handler->handle_removed(m_client);
+    }
+  }
+
+  if (it->second.size() == 1) {
+    // no snapshot history remains: drop the object record and its omap
+    m_pool->files.erase(it);
+    m_pool->file_omaps.erase(locator);
+  }
+  return 0;
+}
+
+// Allocate the pool's next snapshot id and record it in the snap-seq set.
+int TestMemIoCtxImpl::selfmanaged_snap_create(uint64_t *snapid) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  std::unique_lock locker{m_pool->file_lock};
+  *snapid = ++m_pool->snap_id;
+  m_pool->snap_seqs.insert(*snapid);
+  return 0;
+}
+
+// Forget a previously created snapshot id.
+int TestMemIoCtxImpl::selfmanaged_snap_remove(uint64_t snapid) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  std::unique_lock locker{m_pool->file_lock};
+  auto it = m_pool->snap_seqs.find(snapid);
+  if (it == m_pool->snap_seqs.end()) {
+    return -ENOENT;
+  }
+
+  // TODO clean up all file snapshots
+  m_pool->snap_seqs.erase(it);
+  return 0;
+}
+
+// Roll the object's HEAD back to the newest version older than the current
+// snap-read context.
+// NOTE(review): the 'snapid' parameter is never referenced; the comparison
+// below uses get_snap_read() instead -- confirm this is intended.
+int TestMemIoCtxImpl::selfmanaged_snap_rollback(const std::string& oid,
+                                                uint64_t snapid) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  std::unique_lock l{m_pool->file_lock};
+
+  TestMemCluster::SharedFile file;
+  TestMemCluster::Files::iterator f_it = m_pool->files.find(
+    {get_namespace(), oid});
+  if (f_it == m_pool->files.end()) {
+    // object does not exist: nothing to roll back
+    return 0;
+  }
+
+  TestMemCluster::FileSnapshots &snaps = f_it->second;
+  file = snaps.back();
+
+  // walk from the newest version (HEAD) toward older snapshots
+  size_t versions = 0;
+  for (TestMemCluster::FileSnapshots::reverse_iterator it = snaps.rbegin();
+       it != snaps.rend(); ++it) {
+    // NOTE(review): this shadows the outer 'file' declared above
+    TestMemCluster::SharedFile file = *it;
+    if (file->snap_id < get_snap_read()) {
+      if (versions == 0) {
+        // already at the snapshot version
+        return 0;
+      } else if (file->snap_id == CEPH_NOSNAP) {
+        if (versions == 1) {
+          // delete it current HEAD, next one is correct version
+          // (it.base() points one past *it in forward order, i.e. the HEAD)
+          snaps.erase(it.base());
+        } else {
+          // overwrite contents of current HEAD
+          file = TestMemCluster::SharedFile (new TestMemCluster::File(**it));
+          file->snap_id = CEPH_NOSNAP;
+          *it = file;
+        }
+      } else {
+        // create new head version
+        file = TestMemCluster::SharedFile (new TestMemCluster::File(**it));
+        file->snap_id = m_pool->snap_id;
+        snaps.push_back(file);
+      }
+      return 0;
+    }
+    ++versions;
+  }
+  return 0;
+}
+
+// The allocation hint itself is discarded; the only observable effect is
+// that the target object is created if it does not yet exist.
+int TestMemIoCtxImpl::set_alloc_hint(const std::string& oid,
+                                     uint64_t expected_object_size,
+                                     uint64_t expected_write_size,
+                                     uint32_t flags,
+                                     const SnapContext &snapc) {
+  if (get_snap_read() != CEPH_NOSNAP) {
+    return -EROFS;
+  }
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  {
+    std::unique_lock locker{m_pool->file_lock};
+    get_file(oid, true, CEPH_NOSNAP, snapc);
+  }
+  return 0;
+}
+
+// Sparse read: reports at most a single extent covering the clipped range
+// and returns 1 when any data was produced, 0 otherwise.
+int TestMemIoCtxImpl::sparse_read(const std::string& oid, uint64_t off,
+                                  uint64_t len,
+                                  std::map<uint64_t,uint64_t> *m,
+                                  bufferlist *data_bl, uint64_t snap_id) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  // TODO verify correctness
+  TestMemCluster::SharedFile file;
+  {
+    std::shared_lock pool_locker{m_pool->file_lock};
+    file = get_file(oid, false, snap_id, {});
+    if (!file) {
+      return -ENOENT;
+    }
+  }
+
+  std::shared_lock file_locker{file->lock};
+  uint64_t clipped_len = clip_io(off, len, file->data.length());
+  // TODO support sparse read
+  if (m != nullptr) {
+    m->clear();
+    if (clipped_len > 0) {
+      (*m)[off] = clipped_len;
+    }
+  }
+  if (data_bl != nullptr && clipped_len > 0) {
+    bufferlist chunk;
+    chunk.substr_of(file->data, off, clipped_len);
+    append_clone(chunk, data_bl);
+  }
+  return clipped_len > 0 ? 1 : 0;
+}
+
+// Report the head object's size and mtime; both out-params are optional.
+int TestMemIoCtxImpl::stat(const std::string& oid, uint64_t *psize,
+                           time_t *pmtime) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  TestMemCluster::SharedFile file;
+  {
+    std::shared_lock pool_locker{m_pool->file_lock};
+    file = get_file(oid, false, CEPH_NOSNAP, {});
+    if (!file) {
+      return -ENOENT;
+    }
+  }
+
+  std::shared_lock file_locker{file->lock};
+  if (psize != nullptr) {
+    *psize = file->data.length();
+  }
+  if (pmtime != nullptr) {
+    *pmtime = file->mtime;
+  }
+  return 0;
+}
+
+// Resize the object to exactly 'size' bytes: shrink by discarding the tail,
+// or grow by zero-padding.  The modified range is subtracted from the
+// snapshot overlap so list_snaps() reports it as diverged.
+int TestMemIoCtxImpl::truncate(const std::string& oid, uint64_t size,
+                               const SnapContext &snapc) {
+  if (get_snap_read() != CEPH_NOSNAP) {
+    return -EROFS;
+  } else if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  auto cct = m_client->cct();
+  ldout(cct, 20) << "size=" << size << ", snapc=" << snapc << dendl;
+
+  TestMemCluster::SharedFile file;
+  {
+    std::unique_lock l{m_pool->file_lock};
+    file = get_file(oid, true, CEPH_NOSNAP, snapc);
+  }
+
+  std::unique_lock l{file->lock};
+  bufferlist bl(size);
+
+  // 'is' accumulates the byte range modified by this truncate
+  interval_set<uint64_t> is;
+  if (file->data.length() > size) {
+    // shrinking: keep only the leading 'size' bytes
+    is.insert(size, file->data.length() - size);
+
+    bl.substr_of(file->data, 0, size);
+    file->data.swap(bl);
+  } else if (file->data.length() != size) {
+    // growing: zero-fill up to 'size' bytes
+    if (size == 0) {
+      bl.clear();
+    } else {
+      is.insert(0, size);
+
+      bl.append_zero(size - file->data.length());
+      file->data.append(bl);
+    }
+  }
+  // the modified bytes no longer overlap the previous snapshot
+  is.intersection_of(file->snap_overlap);
+  file->snap_overlap.subtract(is);
+  return 0;
+}
+
+// Write 'len' bytes of 'bl' at offset 'off', zero-extending the object as
+// needed and shrinking the snapshot overlap by the written extent.
+int TestMemIoCtxImpl::write(const std::string& oid, bufferlist& bl, size_t len,
+                            uint64_t off, const SnapContext &snapc) {
+  if (get_snap_read() != CEPH_NOSNAP) {
+    return -EROFS;
+  }
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  auto cct = m_client->cct();
+  ldout(cct, 20) << "extent=" << off << "~" << len << ", snapc=" << snapc
+                 << dendl;
+
+  TestMemCluster::SharedFile file;
+  {
+    std::unique_lock pool_locker{m_pool->file_lock};
+    file = get_file(oid, true, CEPH_NOSNAP, snapc);
+  }
+
+  std::unique_lock file_locker{file->lock};
+  if (len > 0) {
+    // the written extent no longer overlaps the previous snapshot
+    interval_set<uint64_t> written;
+    written.insert(off, len);
+    written.intersection_of(file->snap_overlap);
+    file->snap_overlap.subtract(written);
+  }
+
+  ensure_minimum_length(off + len, &file->data);
+  file->data.begin(off).copy_in(len, bl);
+  return 0;
+}
+
+// Replace the object's entire contents with 'bl'.
+int TestMemIoCtxImpl::write_full(const std::string& oid, bufferlist& bl,
+                                 const SnapContext &snapc) {
+  if (get_snap_read() != CEPH_NOSNAP) {
+    return -EROFS;
+  }
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  auto cct = m_client->cct();
+  ldout(cct, 20) << "length=" << bl.length() << ", snapc=" << snapc << dendl;
+
+  TestMemCluster::SharedFile file;
+  {
+    std::unique_lock pool_locker{m_pool->file_lock};
+    file = get_file(oid, true, CEPH_NOSNAP, snapc);
+    if (!file) {
+      return -ENOENT;
+    }
+  }
+
+  std::unique_lock file_locker{file->lock};
+  if (bl.length() > 0) {
+    // the overwritten range no longer overlaps the previous snapshot
+    interval_set<uint64_t> written;
+    written.insert(0, bl.length());
+    written.intersection_of(file->snap_overlap);
+    file->snap_overlap.subtract(written);
+  }
+
+  file->data.clear();
+  ensure_minimum_length(bl.length(), &file->data);
+  file->data.begin().copy_in(bl.length(), bl);
+  return 0;
+}
+
+// Fill the extent [off, off + len) by repeating 'bl'; 'len' must be a
+// positive multiple of bl.length().
+// Fix: guard against an empty source buffer, which previously caused a
+// modulo-by-zero (undefined behavior) in the validation check and an
+// infinite loop in the copy loop.
+int TestMemIoCtxImpl::writesame(const std::string& oid, bufferlist& bl,
+                                size_t len, uint64_t off,
+                                const SnapContext &snapc) {
+  if (get_snap_read() != CEPH_NOSNAP) {
+    return -EROFS;
+  } else if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  if (len == 0 || bl.length() == 0 || (len % bl.length())) {
+    return -EINVAL;
+  }
+
+  TestMemCluster::SharedFile file;
+  {
+    std::unique_lock l{m_pool->file_lock};
+    file = get_file(oid, true, CEPH_NOSNAP, snapc);
+  }
+
+  std::unique_lock l{file->lock};
+  if (len > 0) {
+    // the written extent no longer overlaps the previous snapshot
+    interval_set<uint64_t> is;
+    is.insert(off, len);
+    is.intersection_of(file->snap_overlap);
+    file->snap_overlap.subtract(is);
+  }
+
+  ensure_minimum_length(off + len, &file->data);
+  while (len > 0) {
+    file->data.begin(off).copy_in(bl.length(), bl);
+    off += bl.length();
+    len -= bl.length();
+  }
+  return 0;
+}
+
+// Compare 'cmp_bl' against the object's contents at 'off'; bytes beyond the
+// end of the object (or a missing object) compare as zero.
+int TestMemIoCtxImpl::cmpext(const std::string& oid, uint64_t off,
+                             bufferlist& cmp_bl, uint64_t snap_id) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  bufferlist read_bl;
+
+  TestMemCluster::SharedFile file;
+  {
+    std::shared_lock pool_locker{m_pool->file_lock};
+    file = get_file(oid, false, snap_id, {});
+    if (!file) {
+      // non-existent object compares against an empty buffer
+      return cmpext_compare(cmp_bl, read_bl);
+    }
+  }
+
+  std::shared_lock file_locker{file->lock};
+  uint64_t read_len = clip_io(off, cmp_bl.length(), file->data.length());
+  read_bl.substr_of(file->data, off, read_len);
+  return cmpext_compare(cmp_bl, read_bl);
+}
+
+// Copy out the object's full xattr map; -ENODATA when it has none.
+int TestMemIoCtxImpl::xattr_get(const std::string& oid,
+                                std::map<std::string, bufferlist>* attrset) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  std::shared_lock locker{m_pool->file_lock};
+  auto it = m_pool->file_xattrs.find({get_namespace(), oid});
+  if (it == m_pool->file_xattrs.end()) {
+    return -ENODATA;
+  }
+  *attrset = it->second;
+  return 0;
+}
+
+// Create or overwrite a single xattr on the object.
+int TestMemIoCtxImpl::xattr_set(const std::string& oid, const std::string &name,
+                                bufferlist& bl) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  std::unique_lock locker{m_pool->file_lock};
+  auto &xattrs = m_pool->file_xattrs[{get_namespace(), oid}];
+  xattrs[name] = bl;
+  return 0;
+}
+
+// Zero the byte range [off, off + len).  When the range reaches or passes
+// the current end of the object this is redirected to truncate() (mirroring
+// the OSD's zero->truncate behavior); otherwise the range is overwritten
+// with zeroes via write().
+int TestMemIoCtxImpl::zero(const std::string& oid, uint64_t off, uint64_t len,
+                           const SnapContext &snapc) {
+  if (m_client->is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  auto cct = m_client->cct();
+  ldout(cct, 20) << "extent=" << off << "~" << len << ", snapc=" << snapc
+                 << dendl;
+
+  bool truncate_redirect = false;
+  TestMemCluster::SharedFile file;
+  {
+    std::unique_lock l{m_pool->file_lock};
+    file = get_file(oid, false, CEPH_NOSNAP, snapc);
+    if (!file) {
+      // zeroing a non-existent object is a no-op
+      return 0;
+    }
+    // re-fetch in write mode so a snapshot clone is taken if required
+    file = get_file(oid, true, CEPH_NOSNAP, snapc);
+
+    std::shared_lock l2{file->lock};
+    if (len > 0 && off + len >= file->data.length()) {
+      // Zero -> Truncate logic embedded in OSD
+      truncate_redirect = true;
+    }
+  }
+  if (truncate_redirect) {
+    return truncate(oid, off, snapc);
+  }
+
+  bufferlist bl;
+  bl.append_zero(len);
+  return write(oid, bl, len, off, snapc);
+}
+
+void TestMemIoCtxImpl::append_clone(bufferlist& src, bufferlist* dest) {
+  // deep-copy the src to ensure our memory-based mock RADOS data cannot
+  // be modified by callers
+  if (src.length() == 0) {
+    return;
+  }
+  bufferlist::iterator iter = src.begin();
+  buffer::ptr copied;
+  iter.copy_deep(src.length(), copied);
+  dest->append(copied);
+}
+
+// Clamp an [off, off+len) request against the object's current size,
+// returning the number of bytes actually addressable.
+size_t TestMemIoCtxImpl::clip_io(size_t off, size_t len, size_t bl_len) {
+  if (off >= bl_len) {
+    return 0;
+  }
+  return (off + len > bl_len) ? bl_len - off : len;
+}
+
+// Zero-pad the buffer out to at least 'len' bytes (no-op if already long
+// enough).
+void TestMemIoCtxImpl::ensure_minimum_length(size_t len, bufferlist *bl) {
+  if (len <= bl->length()) {
+    return;
+  }
+  bufferptr padding(buffer::create(len - bl->length()));
+  padding.zero();
+  bl->append(padding);
+}
+
+// Locate (and, for writes, create or snapshot-clone) the object version
+// appropriate for the given snapshot context / snap id.  Callers must hold
+// m_pool->file_lock: shared for reads, exclusive for writes.  Returns an
+// empty SharedFile when the object (or requested version) is not visible.
+TestMemCluster::SharedFile TestMemIoCtxImpl::get_file(
+    const std::string &oid, bool write, uint64_t snap_id,
+    const SnapContext &snapc) {
+  ceph_assert(ceph_mutex_is_locked(m_pool->file_lock) ||
+              ceph_mutex_is_wlocked(m_pool->file_lock));
+  ceph_assert(!write || ceph_mutex_is_wlocked(m_pool->file_lock));
+
+  TestMemCluster::SharedFile file;
+  TestMemCluster::Files::iterator it = m_pool->files.find(
+    {get_namespace(), oid});
+  if (it != m_pool->files.end()) {
+    // start from the most recent (HEAD) version
+    file = it->second.back();
+  } else if (!write) {
+    return TestMemCluster::SharedFile();
+  }
+
+  if (write) {
+    bool new_version = false;
+    if (!file || !file->exists) {
+      // object is new (or its HEAD was removed): create a fresh version
+      file = TestMemCluster::SharedFile(new TestMemCluster::File());
+      new_version = true;
+    } else {
+      if (!snapc.snaps.empty() && file->snap_id < snapc.seq) {
+        // a snapshot was taken since the last write: record which snap ids
+        // the existing version belongs to...
+        for (std::vector<snapid_t>::const_reverse_iterator seq_it =
+               snapc.snaps.rbegin();
+             seq_it != snapc.snaps.rend(); ++seq_it) {
+          if (*seq_it > file->snap_id && *seq_it <= snapc.seq) {
+            file->snaps.push_back(*seq_it);
+          }
+        }
+
+        // ...then deep-copy it into a new HEAD whose snap_overlap initially
+        // covers all previously existing data
+        bufferlist prev_data = file->data;
+        file = TestMemCluster::SharedFile(
+          new TestMemCluster::File(*file));
+        file->data.clear();
+        append_clone(prev_data, &file->data);
+        if (prev_data.length() > 0) {
+          file->snap_overlap.insert(0, prev_data.length());
+        }
+        new_version = true;
+      }
+    }
+
+    if (new_version) {
+      file->snap_id = snapc.seq;
+      file->mtime = ceph_clock_now().sec();
+      m_pool->files[{get_namespace(), oid}].push_back(file);
+    }
+
+    file->objver++;
+    return file;
+  }
+
+  if (snap_id == CEPH_NOSNAP) {
+    if (!file->exists) {
+      // removed HEAD with snapshot history still present
+      ceph_assert(it->second.size() > 1);
+      return TestMemCluster::SharedFile();
+    }
+    return file;
+  }
+
+  // snapshot read: find the newest version strictly older than snap_id
+  TestMemCluster::FileSnapshots &snaps = it->second;
+  for (TestMemCluster::FileSnapshots::reverse_iterator it = snaps.rbegin();
+       it != snaps.rend(); ++it) {
+    TestMemCluster::SharedFile file = *it;
+    if (file->snap_id < snap_id) {
+      if (!file->exists) {
+        return TestMemCluster::SharedFile();
+      }
+      return file;
+    }
+  }
+  return TestMemCluster::SharedFile();
+}
+
+} // namespace librados
diff --git a/src/test/librados_test_stub/TestMemIoCtxImpl.h b/src/test/librados_test_stub/TestMemIoCtxImpl.h
new file mode 100644
index 000000000..4706f46d2
--- /dev/null
+++ b/src/test/librados_test_stub/TestMemIoCtxImpl.h
@@ -0,0 +1,104 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_MEM_IO_CTX_IMPL_H
+#define CEPH_TEST_MEM_IO_CTX_IMPL_H
+
+#include "test/librados_test_stub/TestIoCtxImpl.h"
+#include "test/librados_test_stub/TestMemCluster.h"
+
+namespace librados {
+
+class TestMemRadosClient;
+
+// In-memory implementation of TestIoCtxImpl backed by a TestMemCluster pool;
+// each io-ctx holds a reference on its pool for its lifetime.
+class TestMemIoCtxImpl : public TestIoCtxImpl {
+public:
+  TestMemIoCtxImpl();
+  TestMemIoCtxImpl(TestMemRadosClient *client, int64_t m_pool_id,
+                   const std::string& pool_name,
+                   TestMemCluster::Pool *pool);
+  ~TestMemIoCtxImpl() override;
+
+  TestIoCtxImpl *clone() override;
+
+  // 'flags' is accepted for interface compatibility but ignored
+  int aio_remove(const std::string& oid, AioCompletionImpl *c, int flags = 0) override;
+
+  int append(const std::string& oid, const bufferlist &bl,
+             const SnapContext &snapc) override;
+
+  int assert_exists(const std::string &oid, uint64_t snap_id) override;
+  int assert_version(const std::string &oid, uint64_t ver) override;
+
+  int create(const std::string& oid, bool exclusive,
+             const SnapContext &snapc) override;
+  int list_snaps(const std::string& o, snap_set_t *out_snaps) override;
+  int omap_get_vals(const std::string& oid,
+                    const std::string& start_after,
+                    const std::string &filter_prefix,
+                    uint64_t max_return,
+                    std::map<std::string, bufferlist> *out_vals) override;
+  int omap_get_vals2(const std::string& oid,
+                     const std::string& start_after,
+                     const std::string &filter_prefix,
+                     uint64_t max_return,
+                     std::map<std::string, bufferlist> *out_vals,
+                     bool *pmore) override;
+  int omap_rm_keys(const std::string& oid,
+                   const std::set<std::string>& keys) override;
+  int omap_set(const std::string& oid, const std::map<std::string,
+               bufferlist> &map) override;
+  int read(const std::string& oid, size_t len, uint64_t off,
+           bufferlist *bl, uint64_t snap_id, uint64_t* objver) override;
+  int remove(const std::string& oid, const SnapContext &snapc) override;
+  int selfmanaged_snap_create(uint64_t *snapid) override;
+  int selfmanaged_snap_remove(uint64_t snapid) override;
+  int selfmanaged_snap_rollback(const std::string& oid,
+                                uint64_t snapid) override;
+  int set_alloc_hint(const std::string& oid, uint64_t expected_object_size,
+                     uint64_t expected_write_size, uint32_t flags,
+                     const SnapContext &snapc) override;
+  int sparse_read(const std::string& oid, uint64_t off, uint64_t len,
+                  std::map<uint64_t,uint64_t> *m, bufferlist *data_bl,
+                  uint64_t snap_id) override;
+  int stat(const std::string& oid, uint64_t *psize, time_t *pmtime) override;
+  int truncate(const std::string& oid, uint64_t size,
+               const SnapContext &snapc) override;
+  int write(const std::string& oid, bufferlist& bl, size_t len,
+            uint64_t off, const SnapContext &snapc) override;
+  int write_full(const std::string& oid, bufferlist& bl,
+                 const SnapContext &snapc) override;
+  int writesame(const std::string& oid, bufferlist& bl, size_t len,
+                uint64_t off, const SnapContext &snapc) override;
+  int cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl,
+             uint64_t snap_id) override;
+  int xattr_get(const std::string& oid,
+                std::map<std::string, bufferlist>* attrset) override;
+  int xattr_set(const std::string& oid, const std::string &name,
+                bufferlist& bl) override;
+  int zero(const std::string& oid, uint64_t off, uint64_t len,
+           const SnapContext &snapc) override;
+
+protected:
+  // non-owning accessor for derived classes
+  TestMemCluster::Pool *get_pool() {
+    return m_pool;
+  }
+
+private:
+  // private copy constructor: used only by clone()
+  TestMemIoCtxImpl(const TestMemIoCtxImpl&);
+
+  TestMemRadosClient *m_client = nullptr;
+  // refcounted pool reference: get() in the constructors, put() in the dtor
+  TestMemCluster::Pool *m_pool = nullptr;
+
+  // deep-copy src into dest so callers cannot mutate stored object data
+  void append_clone(bufferlist& src, bufferlist* dest);
+  // clamp an [off, off+len) request to the object's size
+  size_t clip_io(size_t off, size_t len, size_t bl_len);
+  // zero-extend bl to at least len bytes
+  void ensure_minimum_length(size_t len, bufferlist *bl);
+
+  // locate/create the object version for the given snapshot context;
+  // m_pool->file_lock must be held (exclusively when write == true)
+  TestMemCluster::SharedFile get_file(const std::string &oid, bool write,
+                                      uint64_t snap_id,
+                                      const SnapContext &snapc);
+
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_MEM_IO_CTX_IMPL_H
diff --git a/src/test/librados_test_stub/TestMemRadosClient.cc b/src/test/librados_test_stub/TestMemRadosClient.cc
new file mode 100644
index 000000000..37d45327c
--- /dev/null
+++ b/src/test/librados_test_stub/TestMemRadosClient.cc
@@ -0,0 +1,118 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestMemRadosClient.h"
+#include "test/librados_test_stub/TestMemCluster.h"
+#include "test/librados_test_stub/TestMemIoCtxImpl.h"
+#include <errno.h>
+#include <sstream>
+
+namespace librados {
+
+// Register this client with the shared in-memory cluster, obtaining a
+// unique nonce and global id.
+TestMemRadosClient::TestMemRadosClient(CephContext *cct,
+                                       TestMemCluster *test_mem_cluster)
+  : TestRadosClient(cct, test_mem_cluster->get_watch_notify()),
+    m_mem_cluster(test_mem_cluster) {
+  m_mem_cluster->allocate_client(&m_nonce, &m_global_id);
+}
+
+TestMemRadosClient::~TestMemRadosClient() {
+  // release the nonce allocated in the constructor
+  m_mem_cluster->deallocate_client(m_nonce);
+}
+
+// Create a new io-ctx bound to the named pool (caller owns the result).
+TestIoCtxImpl *TestMemRadosClient::create_ioctx(int64_t pool_id,
+                                                const std::string &pool_name) {
+  return new TestMemIoCtxImpl(this, pool_id, pool_name,
+                              m_mem_cluster->get_pool(pool_name));
+}
+
+// Enumerate the oids of all objects in the pool; the list is left empty
+// when the pool does not exist.
+void TestMemRadosClient::object_list(int64_t pool_id,
+    std::list<librados::TestRadosClient::Object> *list) {
+  list->clear();
+
+  auto pool = m_mem_cluster->get_pool(pool_id);
+  if (pool == nullptr) {
+    return;
+  }
+
+  std::shared_lock file_locker{pool->file_lock};
+  for (const auto &file_pair : pool->files) {
+    Object obj;
+    obj.oid = file_pair.first.name;
+    list->push_back(obj);
+  }
+}
+
+// Create a pool on the shared cluster; blocklisted clients may not mutate
+// cluster state.
+int TestMemRadosClient::pool_create(const std::string &pool_name) {
+  return is_blocklisted() ? -EBLOCKLISTED
+                          : m_mem_cluster->pool_create(pool_name);
+}
+
+// Delete a pool on the shared cluster; blocklisted clients may not mutate
+// cluster state.
+int TestMemRadosClient::pool_delete(const std::string &pool_name) {
+  return is_blocklisted() ? -EBLOCKLISTED
+                          : m_mem_cluster->pool_delete(pool_name);
+}
+
+// Tiering is not modelled: every pool is reported as its own base tier.
+int TestMemRadosClient::pool_get_base_tier(int64_t pool_id, int64_t* base_tier) {
+  // TODO
+  *base_tier = pool_id;
+  return 0;
+}
+
+// Delegates to the shared in-memory cluster.
+int TestMemRadosClient::pool_list(std::list<std::pair<int64_t, std::string> >& v) {
+  return m_mem_cluster->pool_list(v);
+}
+
+// Delegates to the shared in-memory cluster.
+int64_t TestMemRadosClient::pool_lookup(const std::string &pool_name) {
+  return m_mem_cluster->pool_lookup(pool_name);
+}
+
+// Delegates to the shared in-memory cluster.
+int TestMemRadosClient::pool_reverse_lookup(int64_t id, std::string *name) {
+  return m_mem_cluster->pool_reverse_lookup(id, name);
+}
+
+// Flush all pending watch/notify callbacks for this client.
+int TestMemRadosClient::watch_flush() {
+  get_watch_notify()->flush(this);
+  return 0;
+}
+
+// The cluster tracks blocklisting per client nonce.
+bool TestMemRadosClient::is_blocklisted() const {
+  return m_mem_cluster->is_blocklisted(m_nonce);
+}
+
+// Blocklist the client identified by 'client_address'.
+// NOTE(review): 'expire_seconds' is ignored -- blocklist entries never
+// expire in the in-memory cluster; confirm tests do not rely on expiry.
+int TestMemRadosClient::blocklist_add(const std::string& client_address,
+                                      uint32_t expire_seconds) {
+  if (is_blocklisted()) {
+    return -EBLOCKLISTED;
+  }
+
+  // extract the nonce to use as a unique key to the client
+  // (presumably addresses look like "<ip>:<port>/<nonce>" -- the portion
+  // after the '/' is parsed as the nonce)
+  auto idx = client_address.find("/");
+  if (idx == std::string::npos || idx + 1 >= client_address.size()) {
+    return -EINVAL;
+  }
+
+  std::stringstream nonce_ss(client_address.substr(idx + 1));
+  uint32_t nonce;
+  nonce_ss >> nonce;
+  if (!nonce_ss) {
+    // the suffix was not a parsable numeric nonce
+    return -EINVAL;
+  }
+
+  m_mem_cluster->blocklist(nonce);
+  return 0;
+}
+
+// Begin a cluster-wide per-object transaction (serializes racing ops).
+void TestMemRadosClient::transaction_start(const std::string& nspace,
+                                           const std::string &oid) {
+  m_mem_cluster->transaction_start({nspace, oid});
+}
+
+// End the cluster-wide per-object transaction started above.
+void TestMemRadosClient::transaction_finish(const std::string& nspace,
+                                            const std::string &oid) {
+  m_mem_cluster->transaction_finish({nspace, oid});
+}
+
+} // namespace librados
diff --git a/src/test/librados_test_stub/TestMemRadosClient.h b/src/test/librados_test_stub/TestMemRadosClient.h
new file mode 100644
index 000000000..ecd6fedb0
--- /dev/null
+++ b/src/test/librados_test_stub/TestMemRadosClient.h
@@ -0,0 +1,88 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_MEM_RADOS_CLIENT_H
+#define CEPH_TEST_MEM_RADOS_CLIENT_H
+
+#include "test/librados_test_stub/TestRadosClient.h"
+#include "include/ceph_assert.h"
+#include <list>
+#include <string>
+
+namespace librados {
+
+class AioCompletionImpl;
+class TestMemCluster;
+
+// In-memory librados client: pool, object, watch/notify and blocklist
+// state are all backed by the TestMemCluster supplied at construction.
+class TestMemRadosClient : public TestRadosClient {
+public:
+  TestMemRadosClient(CephContext *cct, TestMemCluster *test_mem_cluster);
+  ~TestMemRadosClient() override;
+
+  TestIoCtxImpl *create_ioctx(int64_t pool_id,
+                              const std::string &pool_name) override;
+
+  uint32_t get_nonce() override {
+    return m_nonce;
+  }
+  uint64_t get_instance_id() override {
+    return m_global_id;
+  }
+
+  // the simulated cluster always reports a fixed, modern feature set
+  int get_min_compatible_osd(int8_t* require_osd_release) override {
+    *require_osd_release = CEPH_RELEASE_OCTOPUS;
+    return 0;
+  }
+
+  int get_min_compatible_client(int8_t* min_compat_client,
+                                int8_t* require_min_compat_client) override {
+    *min_compat_client = CEPH_RELEASE_MIMIC;
+    *require_min_compat_client = CEPH_RELEASE_MIMIC;
+    return 0;
+  }
+
+  void object_list(int64_t pool_id,
+                   std::list<librados::TestRadosClient::Object> *list) override;
+
+  // service daemon registration is accepted but ignored by the stub
+  int service_daemon_register(const std::string& service,
+                              const std::string& name,
+                              const std::map<std::string,std::string>& metadata) override {
+    return 0;
+  }
+  int service_daemon_update_status(std::map<std::string,std::string>&& status) override {
+    return 0;
+  }
+
+  int pool_create(const std::string &pool_name) override;
+  int pool_delete(const std::string &pool_name) override;
+  int pool_get_base_tier(int64_t pool_id, int64_t* base_tier) override;
+  int pool_list(std::list<std::pair<int64_t, std::string> >& v) override;
+  int64_t pool_lookup(const std::string &name) override;
+  int pool_reverse_lookup(int64_t id, std::string *name) override;
+
+  int watch_flush() override;
+
+  bool is_blocklisted() const override;
+  int blocklist_add(const std::string& client_address,
+                    uint32_t expire_seconds) override;
+
+protected:
+  TestMemCluster *get_mem_cluster() {
+    return m_mem_cluster;
+  }
+
+  void transaction_start(const std::string& nspace,
+                         const std::string &oid) override;
+  void transaction_finish(const std::string& nspace,
+                          const std::string &oid) override;
+
+private:
+  TestMemCluster *m_mem_cluster;
+  uint32_t m_nonce;      // unique per-client id; key used for blocklisting
+  uint64_t m_global_id;  // simulated global instance id
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_MEM_RADOS_CLIENT_H
diff --git a/src/test/librados_test_stub/TestRadosClient.cc b/src/test/librados_test_stub/TestRadosClient.cc
new file mode 100644
index 000000000..1f49aba93
--- /dev/null
+++ b/src/test/librados_test_stub/TestRadosClient.cc
@@ -0,0 +1,311 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestRadosClient.h"
+#include "test/librados_test_stub/TestIoCtxImpl.h"
+#include "librados/AioCompletionImpl.h"
+#include "include/ceph_assert.h"
+#include "common/ceph_json.h"
+#include "common/Finisher.h"
+#include "common/async/context_pool.h"
+#include <boost/lexical_cast.hpp>
+#include <boost/thread.hpp>
+#include <errno.h>
+
+#include <atomic>
+#include <functional>
+#include <sstream>
+
+// Number of simulated OSD finisher threads: the LIBRADOS_CONCURRENCY
+// environment variable wins, then hardware concurrency, with a floor of
+// one thread so get_finisher()'s modulo never divides by zero.
+static int get_concurrency() {
+  int concurrency = 0;
+  char *env = getenv("LIBRADOS_CONCURRENCY");
+  if (env != NULL) {
+    concurrency = atoi(env);
+  }
+  if (concurrency <= 0) {
+    // unset, unparsable, or nonsensical (zero/negative) override
+    concurrency = boost::thread::thread::hardware_concurrency();
+  }
+  if (concurrency <= 0) {
+    concurrency = 1;
+  }
+  return concurrency;
+}
+
+using namespace std::placeholders;
+
+namespace librados {
+
+namespace {
+
+const char *config_keys[] = {
+ "librados_thread_count",
+ NULL
+};
+
+} // anonymous namespace
+
+// Complete an AIO operation: record the result, fire the user-registered
+// complete/safe callbacks (outside the lock), wake any waiters, and drop
+// the reference taken when the operation was queued.
+static void finish_aio_completion(AioCompletionImpl *c, int r) {
+  c->lock.lock();
+  c->complete = true;
+  c->rval = r;
+  c->lock.unlock();
+
+  // callbacks are invoked without the lock held so a callback may safely
+  // re-enter the completion
+  rados_callback_t cb_complete = c->callback_complete;
+  void *cb_complete_arg = c->callback_complete_arg;
+  if (cb_complete) {
+    cb_complete(c, cb_complete_arg);
+  }
+
+  rados_callback_t cb_safe = c->callback_safe;
+  void *cb_safe_arg = c->callback_safe_arg;
+  if (cb_safe) {
+    cb_safe(c, cb_safe_arg);
+  }
+
+  c->lock.lock();
+  c->callback_complete = NULL;
+  c->callback_safe = NULL;
+  c->cond.notify_all();
+  // releases the lock and drops a reference; 'c' may be destroyed here
+  c->put_unlock();
+}
+
+// Runs one queued AIO function on a simulated-OSD finisher thread.  On
+// completion the associated AioCompletionImpl (if any) is finished either
+// inline or, when a callback finisher was supplied, on that finisher's
+// thread to mimic librados' separate callback context.
+class AioFunctionContext : public Context {
+public:
+  AioFunctionContext(const TestRadosClient::AioFunction &callback,
+                     Finisher *finisher, AioCompletionImpl *c)
+    : m_callback(callback), m_finisher(finisher), m_comp(c)
+  {
+    if (m_comp != NULL) {
+      // hold a reference until finish_aio_completion releases it
+      m_comp->get();
+    }
+  }
+
+  void finish(int r) override {
+    int ret = m_callback();
+    if (m_comp != NULL) {
+      if (m_finisher != NULL) {
+        m_finisher->queue(new LambdaContext(std::bind(
+          &finish_aio_completion, m_comp, ret)));
+      } else {
+        finish_aio_completion(m_comp, ret);
+      }
+    }
+  }
+private:
+  TestRadosClient::AioFunction m_callback;
+  Finisher *m_finisher;
+  AioCompletionImpl *m_comp;
+};
+
+// Construct the client: spins up one finisher per simulated OSD, a
+// dedicated AIO-callback finisher, and a neorados io_context pool sized by
+// the "librados_thread_count" config option (observed for live changes).
+TestRadosClient::TestRadosClient(CephContext *cct,
+                                 TestWatchNotify *watch_notify)
+  : m_cct(cct->get()), m_watch_notify(watch_notify),
+    m_aio_finisher(new Finisher(m_cct)),
+    m_io_context_pool(std::make_unique<ceph::async::io_context_pool>())
+{
+  get();
+
+  // simulate multiple OSDs
+  int concurrency = get_concurrency();
+  for (int i = 0; i < concurrency; ++i) {
+    m_finishers.push_back(new Finisher(m_cct));
+    m_finishers.back()->start();
+  }
+
+  // replicate AIO callback processing
+  m_aio_finisher->start();
+
+  // replicate neorados callback processing
+  m_cct->_conf.add_observer(this);
+  m_io_context_pool->start(m_cct->_conf.get_val<uint64_t>(
+    "librados_thread_count"));
+}
+
+// Tear down in reverse order of construction, draining queued AIO work
+// first so no finisher context outlives its thread pool.
+TestRadosClient::~TestRadosClient() {
+  flush_aio_operations();
+
+  for (size_t i = 0; i < m_finishers.size(); ++i) {
+    m_finishers[i]->stop();
+    delete m_finishers[i];
+  }
+  m_aio_finisher->stop();
+  delete m_aio_finisher;
+
+  m_cct->_conf.remove_observer(this);
+  m_io_context_pool->stop();
+
+  m_cct->put();
+  m_cct = NULL;
+}
+
+// io_context used by the neorados-style async API.
+boost::asio::io_context& TestRadosClient::get_io_context() {
+  return m_io_context_pool->get_io_context();
+}
+
+// Config options this class observes (see config_keys above).
+const char** TestRadosClient::get_tracked_conf_keys() const {
+  return config_keys;
+}
+
+// Resize the io_context pool when "librados_thread_count" changes at
+// runtime by restarting it with the new thread count.
+void TestRadosClient::handle_conf_change(
+    const ConfigProxy& conf, const std::set<std::string> &changed) {
+  if (changed.count("librados_thread_count")) {
+    m_io_context_pool->stop();
+    m_io_context_pool->start(conf.get_val<std::uint64_t>(
+      "librados_thread_count"));
+  }
+}
+
+// Manual reference counting: the final put() deletes this object.
+void TestRadosClient::get() {
+  m_refcount++;
+}
+
+void TestRadosClient::put() {
+  if (--m_refcount == 0) {
+    shutdown();
+    delete this;
+  }
+}
+
+CephContext *TestRadosClient::cct() {
+  return m_cct;
+}
+
+// There is no real cluster to contact, so connect/shutdown/osdmap waits
+// are all trivial no-ops in the stub.
+int TestRadosClient::connect() {
+  return 0;
+}
+
+void TestRadosClient::shutdown() {
+}
+
+int TestRadosClient::wait_for_latest_osdmap() {
+  return 0;
+}
+
+// Minimal simulation of "ceph mon" commands used by tests.  Each entry in
+// 'cmd' is a JSON document whose "prefix" key selects the operation.  Most
+// recognized commands are accepted as no-ops; "df" fabricates pool stats
+// into *outbl and "osd blocklist ... add" updates the in-memory blocklist.
+// Returns -EINVAL for malformed input and -ENOSYS for unknown commands.
+int TestRadosClient::mon_command(const std::vector<std::string>& cmd,
+                                 const bufferlist &inbl,
+                                 bufferlist *outbl, std::string *outs) {
+  for (std::vector<std::string>::const_iterator it = cmd.begin();
+       it != cmd.end(); ++it) {
+    JSONParser parser;
+    if (!parser.parse(it->c_str(), it->length())) {
+      return -EINVAL;
+    }
+
+    JSONObjIter j_it = parser.find("prefix");
+    if (j_it.end()) {
+      return -EINVAL;
+    }
+
+    if ((*j_it)->get_data() == "osd tier add") {
+      return 0;
+    } else if ((*j_it)->get_data() == "osd tier cache-mode") {
+      return 0;
+    } else if ((*j_it)->get_data() == "osd tier set-overlay") {
+      return 0;
+    } else if ((*j_it)->get_data() == "osd tier remove-overlay") {
+      return 0;
+    } else if ((*j_it)->get_data() == "osd tier remove") {
+      return 0;
+    } else if ((*j_it)->get_data() == "config-key rm") {
+      return 0;
+    } else if ((*j_it)->get_data() == "config set") {
+      return 0;
+    } else if ((*j_it)->get_data() == "df") {
+      std::stringstream str;
+      str << R"({"pools": [)";
+
+      std::list<std::pair<int64_t, std::string>> pools;
+      pool_list(pools);
+      for (auto& pool : pools) {
+        // comma-separate every entry after the first
+        if (pools.begin()->first != pool.first) {
+          str << ",";
+        }
+        str << R"({"name": ")" << pool.second << R"(", "stats": )"
+            << R"({"percent_used": 1.0, "bytes_used": 0, "max_avail": 0}})";
+      }
+
+      str << "]}";
+      outbl->append(str.str());
+      return 0;
+    } else if ((*j_it)->get_data() == "osd blocklist") {
+      auto op_it = parser.find("blocklistop");
+      if (!op_it.end() && (*op_it)->get_data() == "add") {
+        uint32_t expire = 0;
+        auto expire_it = parser.find("expire");
+        if (!expire_it.end()) {
+          try {
+            expire = boost::lexical_cast<uint32_t>((*expire_it)->get_data());
+          } catch (const boost::bad_lexical_cast&) {
+            // non-numeric expiration
+            return -EINVAL;
+          }
+        }
+
+        auto addr_it = parser.find("addr");
+        if (addr_it.end()) {
+          // previously dereferenced an end iterator when "addr" was missing
+          return -EINVAL;
+        }
+        return blocklist_add((*addr_it)->get_data(), expire);
+      }
+    }
+  }
+  return -ENOSYS;
+}
+
+// Queue an AIO function on the per-object finisher (hashed by oid so all
+// operations for an object stay ordered).  When queue_callback is true the
+// completion fires on the dedicated AIO-callback finisher instead of
+// inline on the "OSD" thread.
+void TestRadosClient::add_aio_operation(const std::string& oid,
+                                        bool queue_callback,
+                                        const AioFunction &aio_function,
+                                        AioCompletionImpl *c) {
+  AioFunctionContext *ctx = new AioFunctionContext(
+    aio_function, queue_callback ? m_aio_finisher : NULL, c);
+  get_finisher(oid)->queue(ctx);
+}
+
+// Completes 'c' once every simulated-OSD finisher has drained; deletes
+// itself when the last finisher reports in.
+struct WaitForFlush {
+  int flushed() {
+    if (--count == 0) {
+      aio_finisher->queue(new LambdaContext(std::bind(
+        &finish_aio_completion, c, 0)));
+      delete this;
+    }
+    return 0;
+  }
+
+  std::atomic<int64_t> count = { 0 };
+  Finisher *aio_finisher;
+  AioCompletionImpl *c;
+};
+
+// Synchronous flush: blocks until all previously queued AIO has executed.
+void TestRadosClient::flush_aio_operations() {
+  AioCompletionImpl *comp = new AioCompletionImpl();
+  flush_aio_operations(comp);
+  comp->wait_for_complete();
+  comp->put();
+}
+
+// Asynchronous flush: completes 'c' after a sentinel context has passed
+// through every simulated-OSD finisher queue.
+void TestRadosClient::flush_aio_operations(AioCompletionImpl *c) {
+  c->get();
+
+  WaitForFlush *wait_for_flush = new WaitForFlush();
+  wait_for_flush->count = m_finishers.size();
+  wait_for_flush->aio_finisher = m_aio_finisher;
+  wait_for_flush->c = c;
+
+  for (size_t i = 0; i < m_finishers.size(); ++i) {
+    AioFunctionContext *ctx = new AioFunctionContext(
+      std::bind(&WaitForFlush::flushed, wait_for_flush),
+      nullptr, nullptr);
+    m_finishers[i]->queue(ctx);
+  }
+}
+
+// Asynchronously flush pending watch/notify callbacks, completing 'c'
+// when done.
+int TestRadosClient::aio_watch_flush(AioCompletionImpl *c) {
+  c->get();
+  Context *ctx = new LambdaContext(std::bind(
+    &TestRadosClient::finish_aio_completion, this, c, std::placeholders::_1));
+  get_watch_notify()->aio_flush(this, ctx);
+  return 0;
+}
+
+// Member-function shim over the file-local helper of the same name.
+void TestRadosClient::finish_aio_completion(AioCompletionImpl *c, int r) {
+  librados::finish_aio_completion(c, r);
+}
+
+// Map an object name onto one of the simulated-OSD finishers; the hash
+// keeps all operations for the same oid on the same (ordered) queue.
+Finisher *TestRadosClient::get_finisher(const std::string &oid) {
+  std::size_t h = m_hash(oid);
+  return m_finishers[h % m_finishers.size()];
+}
+
+} // namespace librados
diff --git a/src/test/librados_test_stub/TestRadosClient.h b/src/test/librados_test_stub/TestRadosClient.h
new file mode 100644
index 000000000..e7f8d0751
--- /dev/null
+++ b/src/test/librados_test_stub/TestRadosClient.h
@@ -0,0 +1,162 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_RADOS_CLIENT_H
+#define CEPH_TEST_RADOS_CLIENT_H
+
+#include <map>
+#include <memory>
+#include <list>
+#include <string>
+#include <vector>
+#include <atomic>
+
+#include <boost/function.hpp>
+#include <boost/functional/hash.hpp>
+
+#include "include/rados/librados.hpp"
+#include "common/config.h"
+#include "common/config_obs.h"
+#include "include/buffer_fwd.h"
+#include "test/librados_test_stub/TestWatchNotify.h"
+
+class Finisher;
+
+namespace boost { namespace asio { struct io_context; }}
+namespace ceph { namespace async { struct io_context_pool; }}
+
+namespace librados {
+
+class TestIoCtxImpl;
+
+// Abstract base for librados test-stub clients.  Owns the finisher threads
+// that simulate OSD parallelism and AIO callback dispatch; concrete
+// subclasses (e.g. TestMemRadosClient) supply pool/object storage.
+// Reference-counted via get()/put(); observes "librados_thread_count".
+class TestRadosClient : public md_config_obs_t {
+public:
+
+  static void Deallocate(librados::TestRadosClient* client)
+  {
+    client->put();
+  }
+
+  typedef boost::function<int()> AioFunction;
+
+  struct Object {
+    std::string oid;
+    std::string locator;
+    std::string nspace;
+  };
+
+  // RAII guard around a subclass's per-object transaction_start/finish.
+  class Transaction {
+  public:
+    Transaction(TestRadosClient *rados_client, const std::string& nspace,
+                const std::string &oid)
+      : rados_client(rados_client), nspace(nspace), oid(oid) {
+      rados_client->transaction_start(nspace, oid);
+    }
+    ~Transaction() {
+      rados_client->transaction_finish(nspace, oid);
+    }
+  private:
+    TestRadosClient *rados_client;
+    std::string nspace;
+    std::string oid;
+  };
+
+  TestRadosClient(CephContext *cct, TestWatchNotify *watch_notify);
+
+  // manual reference counting; the final put() deletes the client
+  void get();
+  void put();
+
+  virtual CephContext *cct();
+
+  virtual uint32_t get_nonce() = 0;
+  virtual uint64_t get_instance_id() = 0;
+
+  virtual int get_min_compatible_osd(int8_t* require_osd_release) = 0;
+  virtual int get_min_compatible_client(int8_t* min_compat_client,
+                                        int8_t* require_min_compat_client) = 0;
+
+  virtual int connect();
+  virtual void shutdown();
+  virtual int wait_for_latest_osdmap();
+
+  virtual TestIoCtxImpl *create_ioctx(int64_t pool_id,
+                                      const std::string &pool_name) = 0;
+
+  virtual int mon_command(const std::vector<std::string>& cmd,
+                          const bufferlist &inbl,
+                          bufferlist *outbl, std::string *outs);
+
+  virtual void object_list(int64_t pool_id,
+                           std::list<librados::TestRadosClient::Object> *list) = 0;
+
+  virtual int service_daemon_register(const std::string& service,
+                                      const std::string& name,
+                                      const std::map<std::string,std::string>& metadata) = 0;
+  virtual int service_daemon_update_status(std::map<std::string,std::string>&& status) = 0;
+
+  virtual int pool_create(const std::string &pool_name) = 0;
+  virtual int pool_delete(const std::string &pool_name) = 0;
+  virtual int pool_get_base_tier(int64_t pool_id, int64_t* base_tier) = 0;
+  virtual int pool_list(std::list<std::pair<int64_t, std::string> >& v) = 0;
+  virtual int64_t pool_lookup(const std::string &name) = 0;
+  virtual int pool_reverse_lookup(int64_t id, std::string *name) = 0;
+
+  virtual int aio_watch_flush(AioCompletionImpl *c);
+  virtual int watch_flush() = 0;
+
+  virtual bool is_blocklisted() const = 0;
+  virtual int blocklist_add(const std::string& client_address,
+                            uint32_t expire_seconds) = 0;
+
+  virtual int wait_for_latest_osd_map() {
+    return 0;
+  }
+
+  Finisher *get_aio_finisher() {
+    return m_aio_finisher;
+  }
+  TestWatchNotify *get_watch_notify() {
+    return m_watch_notify;
+  }
+
+  void add_aio_operation(const std::string& oid, bool queue_callback,
+                         const AioFunction &aio_function, AioCompletionImpl *c);
+  void flush_aio_operations();
+  void flush_aio_operations(AioCompletionImpl *c);
+
+  void finish_aio_completion(AioCompletionImpl *c, int r);
+
+  boost::asio::io_context& get_io_context();
+
+protected:
+  virtual ~TestRadosClient();
+
+  virtual void transaction_start(const std::string& nspace,
+                                 const std::string &oid) = 0;
+  virtual void transaction_finish(const std::string& nspace,
+                                  const std::string &oid) = 0;
+
+  const char** get_tracked_conf_keys() const override;
+  void handle_conf_change(const ConfigProxy& conf,
+                          const std::set<std::string> &changed) override;
+
+private:
+  // NOTE(review): forward declaration appears unused in this file — verify
+  // against the rest of the tree before removing
+  struct IOContextPool;
+
+  CephContext *m_cct;
+  std::atomic<uint64_t> m_refcount = { 0 };
+
+  TestWatchNotify *m_watch_notify;
+
+  Finisher *get_finisher(const std::string& oid);
+
+  Finisher *m_aio_finisher;                // dispatches AIO user callbacks
+  std::vector<Finisher *> m_finishers;     // one per simulated OSD
+  boost::hash<std::string> m_hash;         // oid -> finisher selection
+
+  std::unique_ptr<ceph::async::io_context_pool> m_io_context_pool;
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_RADOS_CLIENT_H
diff --git a/src/test/librados_test_stub/TestWatchNotify.cc b/src/test/librados_test_stub/TestWatchNotify.cc
new file mode 100644
index 000000000..93875182c
--- /dev/null
+++ b/src/test/librados_test_stub/TestWatchNotify.cc
@@ -0,0 +1,459 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestWatchNotify.h"
+#include "include/Context.h"
+#include "common/Cond.h"
+#include "include/stringify.h"
+#include "common/Finisher.h"
+#include "test/librados_test_stub/TestCluster.h"
+#include "test/librados_test_stub/TestRadosClient.h"
+#include <boost/bind/bind.hpp>
+#include <boost/function.hpp>
+#include "include/ceph_assert.h"
+
+#define dout_subsys ceph_subsys_rados
+#undef dout_prefix
+#define dout_prefix *_dout << "TestWatchNotify::" << __func__ << ": "
+
+namespace librados {
+
+// Render a (gid, handle) watcher id as "(gid,handle)" for log output.
+std::ostream& operator<<(std::ostream& out,
+                         const TestWatchNotify::WatcherID &watcher_id) {
+  return out << "(" << watcher_id.first << "," << watcher_id.second << ")";
+}
+
+// Registered with the cluster per watched object; notifies TestWatchNotify
+// asynchronously when that object is removed so watchers can be errored out.
+struct TestWatchNotify::ObjectHandler : public TestCluster::ObjectHandler {
+  TestWatchNotify* test_watch_notify;
+  int64_t pool_id;
+  std::string nspace;
+  std::string oid;
+
+  ObjectHandler(TestWatchNotify* test_watch_notify, int64_t pool_id,
+                const std::string& nspace, const std::string& oid)
+    : test_watch_notify(test_watch_notify), pool_id(pool_id),
+      nspace(nspace), oid(oid) {
+  }
+
+  void handle_removed(TestRadosClient* test_rados_client) override {
+    // copy member variables since this object might be deleted
+    auto _test_watch_notify = test_watch_notify;
+    auto _pool_id = pool_id;
+    auto _nspace = nspace;
+    auto _oid = oid;
+    auto ctx = new LambdaContext([_test_watch_notify, _pool_id, _nspace, _oid](int r) {
+      _test_watch_notify->handle_object_removed(_pool_id, _nspace, _oid);
+    });
+    test_rados_client->get_aio_finisher()->queue(ctx);
+  }
+};
+
+TestWatchNotify::TestWatchNotify(TestCluster* test_cluster)
+  : m_test_cluster(test_cluster) {
+}
+
+// Block the caller until every in-flight async notify callback has run.
+void TestWatchNotify::flush(TestRadosClient *rados_client) {
+  CephContext *cct = rados_client->cct();
+
+  ldout(cct, 20) << "enter" << dendl;
+  // block until we know no additional async notify callbacks will occur
+  C_SaferCond ctx;
+  m_async_op_tracker.wait_for_ops(&ctx);
+  ctx.wait();
+}
+
+// Fill *out_watchers with an obj_watch_t entry for every active watch on
+// the object; returns -ENOENT if the object does not exist.
+int TestWatchNotify::list_watchers(int64_t pool_id, const std::string& nspace,
+                                   const std::string& o,
+                                   std::list<obj_watch_t> *out_watchers) {
+  std::lock_guard lock{m_lock};
+  SharedWatcher watcher = get_watcher(pool_id, nspace, o);
+  if (!watcher) {
+    return -ENOENT;
+  }
+
+  out_watchers->clear();
+  for (TestWatchNotify::WatchHandles::iterator it =
+         watcher->watch_handles.begin();
+       it != watcher->watch_handles.end(); ++it) {
+    obj_watch_t obj;
+    // addr is a fixed-size char array; truncate and NUL-terminate
+    strncpy(obj.addr, it->second.addr.c_str(), sizeof(obj.addr) - 1);
+    obj.addr[sizeof(obj.addr) - 1] = '\0';
+    obj.watcher_id = static_cast<int64_t>(it->second.gid);
+    obj.cookie = it->second.handle;
+    obj.timeout_seconds = 30;
+    out_watchers->push_back(obj);
+  }
+  return 0;
+}
+
+// "Flush" by queueing on_finish behind any pending AIO callbacks.
+void TestWatchNotify::aio_flush(TestRadosClient *rados_client,
+                                Context *on_finish) {
+  rados_client->get_aio_finisher()->queue(on_finish);
+}
+
+// Synchronous watch: wraps aio_watch() and waits for its completion.
+int TestWatchNotify::watch(TestRadosClient *rados_client, int64_t pool_id,
+                           const std::string& nspace, const std::string& o,
+                           uint64_t gid, uint64_t *handle,
+                           librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2) {
+  C_SaferCond cond;
+  aio_watch(rados_client, pool_id, nspace, o, gid, handle, ctx, ctx2, &cond);
+  return cond.wait();
+}
+
+// Asynchronous watch: executes on the client's AIO finisher thread to
+// mirror librados' async dispatch.
+void TestWatchNotify::aio_watch(TestRadosClient *rados_client, int64_t pool_id,
+                                const std::string& nspace, const std::string& o,
+                                uint64_t gid, uint64_t *handle,
+                                librados::WatchCtx *watch_ctx,
+                                librados::WatchCtx2 *watch_ctx2,
+                                Context *on_finish) {
+  auto ctx = new LambdaContext([=, this](int) {
+    execute_watch(rados_client, pool_id, nspace, o, gid, handle, watch_ctx,
+                  watch_ctx2, on_finish);
+  });
+  rados_client->get_aio_finisher()->queue(ctx);
+}
+
+// Synchronous unwatch: wraps aio_unwatch() and waits for completion.
+int TestWatchNotify::unwatch(TestRadosClient *rados_client,
+                             uint64_t handle) {
+  C_SaferCond ctx;
+  aio_unwatch(rados_client, handle, &ctx);
+  return ctx.wait();
+}
+
+void TestWatchNotify::aio_unwatch(TestRadosClient *rados_client,
+                                  uint64_t handle, Context *on_finish) {
+  auto ctx = new LambdaContext([this, rados_client, handle, on_finish](int) {
+    execute_unwatch(rados_client, handle, on_finish);
+  });
+  rados_client->get_aio_finisher()->queue(ctx);
+}
+
+// Asynchronous notify; note timeout_ms is accepted but not simulated here.
+void TestWatchNotify::aio_notify(TestRadosClient *rados_client, int64_t pool_id,
+                                 const std::string& nspace,
+                                 const std::string& oid, const bufferlist& bl,
+                                 uint64_t timeout_ms, bufferlist *pbl,
+                                 Context *on_notify) {
+  auto ctx = new LambdaContext([=, this](int) {
+    execute_notify(rados_client, pool_id, nspace, oid, bl, pbl, on_notify);
+  });
+  rados_client->get_aio_finisher()->queue(ctx);
+}
+
+// Synchronous notify: wraps aio_notify() and waits for completion.
+int TestWatchNotify::notify(TestRadosClient *rados_client, int64_t pool_id,
+                            const std::string& nspace, const std::string& oid,
+                            bufferlist& bl, uint64_t timeout_ms,
+                            bufferlist *pbl) {
+  C_SaferCond cond;
+  aio_notify(rados_client, pool_id, nspace, oid, bl, timeout_ms, pbl, &cond);
+  return cond.wait();
+}
+
+// Record a watcher's acknowledgment of a notification and complete the
+// notify if this was the last outstanding ack.
+void TestWatchNotify::notify_ack(TestRadosClient *rados_client, int64_t pool_id,
+                                 const std::string& nspace,
+                                 const std::string& o, uint64_t notify_id,
+                                 uint64_t handle, uint64_t gid,
+                                 bufferlist& bl) {
+  CephContext *cct = rados_client->cct();
+  ldout(cct, 20) << "notify_id=" << notify_id << ", handle=" << handle
+                 << ", gid=" << gid << dendl;
+  std::lock_guard lock{m_lock};
+  WatcherID watcher_id = std::make_pair(gid, handle);
+  ack_notify(rados_client, pool_id, nspace, o, notify_id, watcher_id, bl);
+  finish_notify(rados_client, pool_id, nspace, o, notify_id);
+}
+
+// Register a new watch on an object (runs on the AIO finisher thread).
+// Allocates a fresh handle from m_handle and records the client's address
+// and nonce so blocklisting can later target its watches.  Completes
+// on_finish with -ENOENT if the object does not exist.
+void TestWatchNotify::execute_watch(TestRadosClient *rados_client,
+                                    int64_t pool_id, const std::string& nspace,
+                                    const std::string& o, uint64_t gid,
+                                    uint64_t *handle, librados::WatchCtx *ctx,
+                                    librados::WatchCtx2 *ctx2,
+                                    Context* on_finish) {
+  CephContext *cct = rados_client->cct();
+
+  m_lock.lock();
+  SharedWatcher watcher = get_watcher(pool_id, nspace, o);
+  if (!watcher) {
+    m_lock.unlock();
+    on_finish->complete(-ENOENT);
+    return;
+  }
+
+  WatchHandle watch_handle;
+  watch_handle.rados_client = rados_client;
+  watch_handle.addr = "127.0.0.1:0/" + stringify(rados_client->get_nonce());
+  watch_handle.nonce = rados_client->get_nonce();
+  watch_handle.gid = gid;
+  watch_handle.handle = ++m_handle;
+  watch_handle.watch_ctx = ctx;
+  watch_handle.watch_ctx2 = ctx2;
+  watcher->watch_handles[watch_handle.handle] = watch_handle;
+
+  *handle = watch_handle.handle;
+
+  ldout(cct, 20) << "oid=" << o << ", gid=" << gid << ": handle=" << *handle
+                 << dendl;
+  m_lock.unlock();
+
+  // completed outside the lock
+  on_finish->complete(0);
+}
+
+// Remove the watch identified by 'handle', searching all watched objects
+// since handles are globally unique; unknown handles complete successfully.
+void TestWatchNotify::execute_unwatch(TestRadosClient *rados_client,
+                                      uint64_t handle, Context* on_finish) {
+  CephContext *cct = rados_client->cct();
+
+  ldout(cct, 20) << "handle=" << handle << dendl;
+  {
+    std::lock_guard locker{m_lock};
+    for (FileWatchers::iterator it = m_file_watchers.begin();
+         it != m_file_watchers.end(); ++it) {
+      SharedWatcher watcher = it->second;
+
+      WatchHandles::iterator w_it = watcher->watch_handles.find(handle);
+      if (w_it != watcher->watch_handles.end()) {
+        watcher->watch_handles.erase(w_it);
+        maybe_remove_watcher(watcher);
+        break;
+      }
+    }
+  }
+  on_finish->complete(0);
+}
+
+// Find (or lazily create) the Watcher record for an object.  Creation also
+// registers an ObjectHandler with the cluster so object removal is
+// observed; returns an empty pointer if the object does not exist.
+// Caller must hold m_lock.
+TestWatchNotify::SharedWatcher TestWatchNotify::get_watcher(
+    int64_t pool_id, const std::string& nspace, const std::string& oid) {
+  ceph_assert(ceph_mutex_is_locked(m_lock));
+
+  auto it = m_file_watchers.find({pool_id, nspace, oid});
+  if (it == m_file_watchers.end()) {
+    SharedWatcher watcher(new Watcher(pool_id, nspace, oid));
+    watcher->object_handler.reset(new ObjectHandler(
+      this, pool_id, nspace, oid));
+    int r = m_test_cluster->register_object_handler(
+      pool_id, {nspace, oid}, watcher->object_handler.get());
+    if (r < 0) {
+      // object doesn't exist
+      return SharedWatcher();
+    }
+    m_file_watchers[{pool_id, nspace, oid}] = watcher;
+    return watcher;
+  }
+
+  return it->second;
+}
+
+// Drop a Watcher record (and its cluster object handler) once it has no
+// remaining watch or notify handles.  Caller must hold m_lock.
+void TestWatchNotify::maybe_remove_watcher(SharedWatcher watcher) {
+  ceph_assert(ceph_mutex_is_locked(m_lock));
+
+  // TODO
+  if (watcher->watch_handles.empty() && watcher->notify_handles.empty()) {
+    auto pool_id = watcher->pool_id;
+    auto& nspace = watcher->nspace;
+    auto& oid = watcher->oid;
+    if (watcher->object_handler) {
+      m_test_cluster->unregister_object_handler(pool_id, {nspace, oid},
+                                                watcher->object_handler.get());
+      watcher->object_handler.reset();
+    }
+
+    m_file_watchers.erase({pool_id, nspace, oid});
+  }
+}
+
+// Deliver a notification to every watcher of an object (runs on the AIO
+// finisher thread).  Each watcher's callback is queued on that watcher's
+// own client finisher and tracked via m_async_op_tracker; finish_notify()
+// is attempted immediately to handle the zero-watcher case.
+void TestWatchNotify::execute_notify(TestRadosClient *rados_client,
+                                     int64_t pool_id, const std::string& nspace,
+                                     const std::string &oid,
+                                     const bufferlist &bl, bufferlist *pbl,
+                                     Context *on_notify) {
+  CephContext *cct = rados_client->cct();
+
+  m_lock.lock();
+  uint64_t notify_id = ++m_notify_id;
+
+  SharedWatcher watcher = get_watcher(pool_id, nspace, oid);
+  if (!watcher) {
+    ldout(cct, 1) << "oid=" << oid << ": not found" << dendl;
+    m_lock.unlock();
+    on_notify->complete(-ENOENT);
+    return;
+  }
+
+  ldout(cct, 20) << "oid=" << oid << ": notify_id=" << notify_id << dendl;
+
+  SharedNotifyHandle notify_handle(new NotifyHandle());
+  notify_handle->rados_client = rados_client;
+  notify_handle->pbl = pbl;
+  notify_handle->on_notify = on_notify;
+
+  WatchHandles &watch_handles = watcher->watch_handles;
+  for (auto &watch_handle_pair : watch_handles) {
+    WatchHandle &watch_handle = watch_handle_pair.second;
+    notify_handle->pending_watcher_ids.insert(std::make_pair(
+      watch_handle.gid, watch_handle.handle));
+
+    m_async_op_tracker.start_op();
+    uint64_t notifier_id = rados_client->get_instance_id();
+    // deliver on the watcher's own client finisher; watch_handle is copied
+    // into the closure since the map entry may be erased meanwhile
+    watch_handle.rados_client->get_aio_finisher()->queue(new LambdaContext(
+      [this, pool_id, nspace, oid, bl, notify_id, watch_handle, notifier_id](int r) {
+        bufferlist notify_bl;
+        notify_bl.append(bl);
+
+        if (watch_handle.watch_ctx2 != NULL) {
+          watch_handle.watch_ctx2->handle_notify(notify_id,
+                                                 watch_handle.handle,
+                                                 notifier_id, notify_bl);
+        } else if (watch_handle.watch_ctx != NULL) {
+          watch_handle.watch_ctx->notify(0, 0, notify_bl);
+
+          // auto ack old-style watch/notify clients
+          ack_notify(watch_handle.rados_client, pool_id, nspace, oid, notify_id,
+                     {watch_handle.gid, watch_handle.handle}, bufferlist());
+        }
+
+        m_async_op_tracker.finish_op();
+      }));
+  }
+  watcher->notify_handles[notify_id] = notify_handle;
+
+  finish_notify(rados_client, pool_id, nspace, oid, notify_id);
+  m_lock.unlock();
+}
+
+// Record a single watcher's response to a notification and remove it from
+// the pending set.  Caller must hold m_lock.
+void TestWatchNotify::ack_notify(TestRadosClient *rados_client, int64_t pool_id,
+                                 const std::string& nspace,
+                                 const std::string &oid, uint64_t notify_id,
+                                 const WatcherID &watcher_id,
+                                 const bufferlist &bl) {
+  CephContext *cct = rados_client->cct();
+
+  ceph_assert(ceph_mutex_is_locked(m_lock));
+  SharedWatcher watcher = get_watcher(pool_id, nspace, oid);
+  if (!watcher) {
+    ldout(cct, 1) << "oid=" << oid << ": not found" << dendl;
+    return;
+  }
+
+  NotifyHandles::iterator it = watcher->notify_handles.find(notify_id);
+  if (it == watcher->notify_handles.end()) {
+    ldout(cct, 1) << "oid=" << oid << ", notify_id=" << notify_id
+                  << ", WatcherID=" << watcher_id << ": not found" << dendl;
+    return;
+  }
+
+  ldout(cct, 20) << "oid=" << oid << ", notify_id=" << notify_id
+                 << ", WatcherID=" << watcher_id << dendl;
+
+  bufferlist response;
+  response.append(bl);
+
+  SharedNotifyHandle notify_handle = it->second;
+  notify_handle->notify_responses[watcher_id] = response;
+  notify_handle->pending_watcher_ids.erase(watcher_id);
+}
+
+// Complete a notification once no watcher acks remain outstanding: encode
+// the collected responses into *pbl (when requested) and queue the
+// caller's completion.  No-op while acks are still pending.
+// Caller must hold m_lock.
+void TestWatchNotify::finish_notify(TestRadosClient *rados_client,
+                                    int64_t pool_id, const std::string& nspace,
+                                    const std::string &oid,
+                                    uint64_t notify_id) {
+  CephContext *cct = rados_client->cct();
+
+  ldout(cct, 20) << "oid=" << oid << ", notify_id=" << notify_id << dendl;
+
+  ceph_assert(ceph_mutex_is_locked(m_lock));
+  SharedWatcher watcher = get_watcher(pool_id, nspace, oid);
+  if (!watcher) {
+    ldout(cct, 1) << "oid=" << oid << ": not found" << dendl;
+    return;
+  }
+
+  NotifyHandles::iterator it = watcher->notify_handles.find(notify_id);
+  if (it == watcher->notify_handles.end()) {
+    ldout(cct, 1) << "oid=" << oid << ", notify_id=" << notify_id
+                  << ": not found" << dendl;
+    return;
+  }
+
+  SharedNotifyHandle notify_handle = it->second;
+  if (!notify_handle->pending_watcher_ids.empty()) {
+    ldout(cct, 10) << "oid=" << oid << ", notify_id=" << notify_id
+                   << ": pending watchers, returning" << dendl;
+    return;
+  }
+
+  ldout(cct, 20) << "oid=" << oid << ", notify_id=" << notify_id
+                 << ": completing" << dendl;
+
+  if (notify_handle->pbl != NULL) {
+    encode(notify_handle->notify_responses, *notify_handle->pbl);
+    encode(notify_handle->pending_watcher_ids, *notify_handle->pbl);
+  }
+
+  notify_handle->rados_client->get_aio_finisher()->queue(
+    notify_handle->on_notify, 0);
+  watcher->notify_handles.erase(notify_id);
+  maybe_remove_watcher(watcher);
+}
+
+// Tear down every watch belonging to the client with the given nonce,
+// delivering -ENOTCONN errors to WatchCtx2-style watchers.
+void TestWatchNotify::blocklist(uint32_t nonce) {
+  std::lock_guard locker{m_lock};
+
+  for (auto file_it = m_file_watchers.begin();
+       file_it != m_file_watchers.end(); ) {
+    auto &watcher = file_it->second;
+    for (auto w_it = watcher->watch_handles.begin();
+         w_it != watcher->watch_handles.end();) {
+      auto& watch_handle = w_it->second;
+      if (watch_handle.nonce == nonce) {
+        auto handle = watch_handle.handle;
+        auto watch_ctx2 = watch_handle.watch_ctx2;
+        if (watch_ctx2 != nullptr) {
+          // error delivered asynchronously on the watcher's own finisher
+          auto ctx = new LambdaContext([handle, watch_ctx2](int) {
+            watch_ctx2->handle_error(handle, -ENOTCONN);
+          });
+          watch_handle.rados_client->get_aio_finisher()->queue(ctx);
+        }
+        w_it = watcher->watch_handles.erase(w_it);
+      } else {
+        ++w_it;
+      }
+    }
+
+    // advance before maybe_remove_watcher() so a possible erase of the
+    // current map entry cannot invalidate the loop iterator
+    ++file_it;
+    maybe_remove_watcher(watcher);
+  }
+}
+
+// Invoked (via ObjectHandler) after an object is deleted: fail all pending
+// notifications with -ENOENT, error out all watchers with -ENOTCONN, and
+// drop the watcher record.
+void TestWatchNotify::handle_object_removed(int64_t pool_id,
+                                            const std::string& nspace,
+                                            const std::string& oid) {
+  std::lock_guard locker{m_lock};
+  auto it = m_file_watchers.find({pool_id, nspace, oid});
+  if (it == m_file_watchers.end()) {
+    return;
+  }
+
+  auto watcher = it->second;
+
+  // cancel all in-flight notifications
+  for (auto& notify_handle_pair : watcher->notify_handles) {
+    auto notify_handle = notify_handle_pair.second;
+    notify_handle->rados_client->get_aio_finisher()->queue(
+      notify_handle->on_notify, -ENOENT);
+  }
+
+  // alert all watchers of the loss of connection
+  for (auto& watch_handle_pair : watcher->watch_handles) {
+    auto& watch_handle = watch_handle_pair.second;
+    auto handle = watch_handle.handle;
+    auto watch_ctx2 = watch_handle.watch_ctx2;
+    if (watch_ctx2 != nullptr) {
+      auto ctx = new LambdaContext([handle, watch_ctx2](int) {
+        watch_ctx2->handle_error(handle, -ENOTCONN);
+      });
+      watch_handle.rados_client->get_aio_finisher()->queue(ctx);
+    }
+  }
+  m_file_watchers.erase(it);
+}
+
+} // namespace librados
diff --git a/src/test/librados_test_stub/TestWatchNotify.h b/src/test/librados_test_stub/TestWatchNotify.h
new file mode 100644
index 000000000..bb973ea6b
--- /dev/null
+++ b/src/test/librados_test_stub/TestWatchNotify.h
@@ -0,0 +1,148 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_WATCH_NOTIFY_H
+#define CEPH_TEST_WATCH_NOTIFY_H
+
+#include "include/rados/librados.hpp"
+#include "common/AsyncOpTracker.h"
+#include "common/ceph_mutex.h"
+#include <boost/noncopyable.hpp>
+#include <boost/shared_ptr.hpp>
+#include <list>
+#include <map>
+
+class Finisher;
+
+namespace librados {
+
+class TestCluster;
+class TestRadosClient;
+
+// In-memory stand-in for the RADOS watch/notify subsystem: tracks watch
+// registrations and in-flight notifications per (pool id, namespace, oid)
+// and delivers all callbacks through each client's AIO finisher so they
+// never run under m_lock.
+class TestWatchNotify : boost::noncopyable {
+public:
+  // identifies a single watch; presumably (gid, handle) -- confirm against
+  // the notify/ack implementation
+  typedef std::pair<uint64_t, uint64_t> WatcherID;
+  typedef std::set<WatcherID> WatcherIDs;
+  // notify response payloads keyed per responding watcher
+  typedef std::map<std::pair<uint64_t, uint64_t>, bufferlist> NotifyResponses;
+
+  // state for one in-flight notify operation
+  struct NotifyHandle {
+    TestRadosClient *rados_client = nullptr;
+    WatcherIDs pending_watcher_ids;   // watchers that have not yet responded
+    NotifyResponses notify_responses; // payloads collected so far
+    bufferlist *pbl = nullptr;        // optional out-param for the responses
+    Context *on_notify = nullptr;     // fired when the notify completes
+  };
+  typedef boost::shared_ptr<NotifyHandle> SharedNotifyHandle;
+  // keyed by notify id
+  typedef std::map<uint64_t, SharedNotifyHandle> NotifyHandles;
+
+  // state for one registered watch on an object
+  struct WatchHandle {
+    TestRadosClient *rados_client = nullptr;
+    std::string addr;
+    uint32_t nonce;                   // client nonce; matched by blocklist()
+    uint64_t gid;
+    uint64_t handle;
+    librados::WatchCtx* watch_ctx;    // legacy (v1) callback interface
+    librados::WatchCtx2* watch_ctx2;  // v2 interface; supports handle_error()
+  };
+
+  // keyed by watch handle
+  typedef std::map<uint64_t, WatchHandle> WatchHandles;
+
+  struct ObjectHandler;
+  typedef boost::shared_ptr<ObjectHandler> SharedObjectHandler;
+
+  // aggregate watch/notify state for a single object
+  struct Watcher {
+    Watcher(int64_t pool_id, const std::string& nspace, const std::string& oid)
+      : pool_id(pool_id), nspace(nspace), oid(oid) {
+    }
+
+    int64_t pool_id;
+    std::string nspace;
+    std::string oid;
+
+    SharedObjectHandler object_handler;
+    WatchHandles watch_handles;       // active watches on this object
+    NotifyHandles notify_handles;     // in-flight notifies on this object
+  };
+  typedef boost::shared_ptr<Watcher> SharedWatcher;
+
+  // NOTE(review): single-arg ctor is not explicit -- allows implicit
+  // conversion from TestCluster*; confirm callers rely on this before
+  // tightening
+  TestWatchNotify(TestCluster* test_cluster);
+
+  // mirrors librados' list_watchers: reports current watches on an object
+  int list_watchers(int64_t pool_id, const std::string& nspace,
+                    const std::string& o, std::list<obj_watch_t> *out_watchers);
+
+  // asynchronous variants; completions are queued on the client's finisher
+  void aio_flush(TestRadosClient *rados_client, Context *on_finish);
+  void aio_watch(TestRadosClient *rados_client, int64_t pool_id,
+                 const std::string& nspace, const std::string& o, uint64_t gid,
+                 uint64_t *handle, librados::WatchCtx *watch_ctx,
+                 librados::WatchCtx2 *watch_ctx2, Context *on_finish);
+  void aio_unwatch(TestRadosClient *rados_client, uint64_t handle,
+                   Context *on_finish);
+  void aio_notify(TestRadosClient *rados_client, int64_t pool_id,
+                  const std::string& nspace, const std::string& oid,
+                  const bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl,
+                  Context *on_notify);
+
+  // synchronous variants (block until the operation completes)
+  void flush(TestRadosClient *rados_client);
+  int notify(TestRadosClient *rados_client, int64_t pool_id,
+             const std::string& nspace, const std::string& o, bufferlist& bl,
+             uint64_t timeout_ms, bufferlist *pbl);
+  void notify_ack(TestRadosClient *rados_client, int64_t pool_id,
+                  const std::string& nspace, const std::string& o,
+                  uint64_t notify_id, uint64_t handle, uint64_t gid,
+                  bufferlist& bl);
+
+  int watch(TestRadosClient *rados_client, int64_t pool_id,
+            const std::string& nspace, const std::string& o, uint64_t gid,
+            uint64_t *handle, librados::WatchCtx *ctx,
+            librados::WatchCtx2 *ctx2);
+  int unwatch(TestRadosClient *rados_client, uint64_t handle);
+
+  // drops every watch registered with the given client nonce and fires
+  // handle_error(-ENOTCONN) on its WatchCtx2 callbacks
+  void blocklist(uint32_t nonce);
+
+private:
+  // (pool id, namespace, oid) key for the per-object watcher map
+  typedef std::tuple<int64_t, std::string, std::string> PoolFile;
+  typedef std::map<PoolFile, SharedWatcher> FileWatchers;
+
+  TestCluster *m_test_cluster;
+
+  // id generators -- presumably monotonically incremented under m_lock;
+  // allocation sites are in the .cc
+  uint64_t m_handle = 0;
+  uint64_t m_notify_id = 0;
+
+  // protects m_file_watchers and all Watcher state
+  ceph::mutex m_lock =
+    ceph::make_mutex("librados::TestWatchNotify::m_lock");
+  AsyncOpTracker m_async_op_tracker;
+
+  FileWatchers m_file_watchers;
+
+  SharedWatcher get_watcher(int64_t pool_id, const std::string& nspace,
+                            const std::string& oid);
+  // erases the per-object entry once it has no remaining watch/notify state
+  void maybe_remove_watcher(SharedWatcher shared_watcher);
+
+  // lock-held implementations behind the public async entry points
+  void execute_watch(TestRadosClient *rados_client, int64_t pool_id,
+                     const std::string& nspace, const std::string& o,
+                     uint64_t gid, uint64_t *handle,
+                     librados::WatchCtx *watch_ctx,
+                     librados::WatchCtx2 *watch_ctx2,
+                     Context *on_finish);
+  void execute_unwatch(TestRadosClient *rados_client, uint64_t handle,
+                       Context *on_finish);
+
+  void execute_notify(TestRadosClient *rados_client, int64_t pool_id,
+                      const std::string& nspace, const std::string &oid,
+                      const bufferlist &bl, bufferlist *pbl,
+                      Context *on_notify);
+  void ack_notify(TestRadosClient *rados_client, int64_t pool_id,
+                  const std::string& nspace, const std::string &oid,
+                  uint64_t notify_id, const WatcherID &watcher_id,
+                  const bufferlist &bl);
+  void finish_notify(TestRadosClient *rados_client, int64_t pool_id,
+                     const std::string& nspace, const std::string &oid,
+                     uint64_t notify_id);
+
+  // fails in-flight notifies (-ENOENT) and errors watchers (-ENOTCONN)
+  // when the watched object is deleted
+  void handle_object_removed(int64_t pool_id, const std::string& nspace,
+                             const std::string& oid);
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_WATCH_NOTIFY_H