author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/test/librados_test_stub
parent     Initial commit. (diff)
Adding upstream version 14.2.21.  (upstream/14.2.21, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/test/librados_test_stub')
-rw-r--r--  src/test/librados_test_stub/CMakeLists.txt            |   11
-rw-r--r--  src/test/librados_test_stub/LibradosTestStub.cc       | 1498
-rw-r--r--  src/test/librados_test_stub/LibradosTestStub.h        |   29
-rw-r--r--  src/test/librados_test_stub/MockTestMemCluster.h      |   36
-rw-r--r--  src/test/librados_test_stub/MockTestMemIoCtxImpl.h    |  231
-rw-r--r--  src/test/librados_test_stub/MockTestMemRadosClient.h  |   88
-rw-r--r--  src/test/librados_test_stub/TestClassHandler.cc       |  162
-rw-r--r--  src/test/librados_test_stub/TestClassHandler.h        |   77
-rw-r--r--  src/test/librados_test_stub/TestCluster.h             |   65
-rw-r--r--  src/test/librados_test_stub/TestIoCtxImpl.cc          |  386
-rw-r--r--  src/test/librados_test_stub/TestIoCtxImpl.h           |  213
-rw-r--r--  src/test/librados_test_stub/TestMemCluster.cc         |  203
-rw-r--r--  src/test/librados_test_stub/TestMemCluster.h          |  122
-rw-r--r--  src/test/librados_test_stub/TestMemIoCtxImpl.cc       |  840
-rw-r--r--  src/test/librados_test_stub/TestMemIoCtxImpl.h        |   99
-rw-r--r--  src/test/librados_test_stub/TestMemRadosClient.cc     |  118
-rw-r--r--  src/test/librados_test_stub/TestMemRadosClient.h      |   88
-rw-r--r--  src/test/librados_test_stub/TestRadosClient.cc        |  260
-rw-r--r--  src/test/librados_test_stub/TestRadosClient.h         |  145
-rw-r--r--  src/test/librados_test_stub/TestWatchNotify.cc        |  449
-rw-r--r--  src/test/librados_test_stub/TestWatchNotify.h         |  148
21 files changed, 5268 insertions, 0 deletions
diff --git a/src/test/librados_test_stub/CMakeLists.txt b/src/test/librados_test_stub/CMakeLists.txt
new file mode 100644
index 00000000..ba30a435
--- /dev/null
+++ b/src/test/librados_test_stub/CMakeLists.txt
@@ -0,0 +1,11 @@
+set(librados_test_stub_srcs
+ LibradosTestStub.cc
+ TestClassHandler.cc
+ TestIoCtxImpl.cc
+ TestMemCluster.cc
+ TestMemIoCtxImpl.cc
+ TestMemRadosClient.cc
+ TestRadosClient.cc
+ TestWatchNotify.cc)
+add_library(rados_test_stub STATIC ${librados_test_stub_srcs})
+
diff --git a/src/test/librados_test_stub/LibradosTestStub.cc b/src/test/librados_test_stub/LibradosTestStub.cc
new file mode 100644
index 00000000..f8464250
--- /dev/null
+++ b/src/test/librados_test_stub/LibradosTestStub.cc
@@ -0,0 +1,1498 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/LibradosTestStub.h"
+#include "include/rados/librados.hpp"
+#include "include/stringify.h"
+#include "common/ceph_argparse.h"
+#include "common/ceph_context.h"
+#include "common/common_init.h"
+#include "common/config.h"
+#include "common/debug.h"
+#include "common/snap_types.h"
+#include "librados/AioCompletionImpl.h"
+#include "log/Log.h"
+#include "test/librados_test_stub/TestClassHandler.h"
+#include "test/librados_test_stub/TestIoCtxImpl.h"
+#include "test/librados_test_stub/TestRadosClient.h"
+#include "test/librados_test_stub/TestMemCluster.h"
+#include "test/librados_test_stub/TestMemRadosClient.h"
+#include "objclass/objclass.h"
+#include "osd/osd_types.h"
+#include <arpa/inet.h>
+#include <boost/bind.hpp>
+#include <boost/shared_ptr.hpp>
+#include <deque>
+#include <list>
+#include <vector>
+#include "include/ceph_assert.h"
+#include "include/compat.h"
+
+#define dout_context g_ceph_context
+#define dout_subsys ceph_subsys_rados
+
+namespace librados {
+
+MockTestMemIoCtxImpl &get_mock_io_ctx(IoCtx &ioctx) {
+ MockTestMemIoCtxImpl **mock =
+ reinterpret_cast<MockTestMemIoCtxImpl **>(&ioctx);
+ return **mock;
+}
+
+} // namespace librados
+
+namespace librados_test_stub {
+
+TestClusterRef &cluster() {
+ static TestClusterRef s_cluster;
+ return s_cluster;
+}
+
+void set_cluster(TestClusterRef cluster_ref) {
+ cluster() = cluster_ref;
+}
+
+TestClusterRef get_cluster() {
+ auto &cluster_ref = cluster();
+ if (cluster_ref.get() == nullptr) {
+ cluster_ref.reset(new librados::TestMemCluster());
+ }
+ return cluster_ref;
+}
+
+} // namespace librados_test_stub
+
+namespace {
+
+librados::TestClassHandler *get_class_handler() {
+ static boost::shared_ptr<librados::TestClassHandler> s_class_handler;
+ if (!s_class_handler) {
+ s_class_handler.reset(new librados::TestClassHandler());
+ s_class_handler->open_all_classes();
+ }
+ return s_class_handler.get();
+}
+
+void do_out_buffer(bufferlist& outbl, char **outbuf, size_t *outbuflen) {
+ if (outbuf) {
+ if (outbl.length() > 0) {
+ *outbuf = (char *)malloc(outbl.length());
+ memcpy(*outbuf, outbl.c_str(), outbl.length());
+ } else {
+ *outbuf = NULL;
+ }
+ }
+ if (outbuflen) {
+ *outbuflen = outbl.length();
+ }
+}
+
+void do_out_buffer(string& outbl, char **outbuf, size_t *outbuflen) {
+ if (outbuf) {
+ if (outbl.length() > 0) {
+ *outbuf = (char *)malloc(outbl.length());
+ memcpy(*outbuf, outbl.c_str(), outbl.length());
+ } else {
+ *outbuf = NULL;
+ }
+ }
+ if (outbuflen) {
+ *outbuflen = outbl.length();
+ }
+}
+
+librados::TestRadosClient *create_rados_client() {
+ CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
+ CephContext *cct = common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0);
+ cct->_conf.parse_env(cct->get_module_type());
+ cct->_conf.apply_changes(nullptr);
+ cct->_log->start();
+
+ auto rados_client =
+ librados_test_stub::get_cluster()->create_rados_client(cct);
+ cct->put();
+ return rados_client;
+}
+
+} // anonymous namespace
+
+extern "C" int rados_aio_create_completion(void *cb_arg,
+ rados_callback_t cb_complete,
+ rados_callback_t cb_safe,
+ rados_completion_t *pc)
+{
+ librados::AioCompletionImpl *c = new librados::AioCompletionImpl;
+ if (cb_complete) {
+ c->set_complete_callback(cb_arg, cb_complete);
+ }
+ if (cb_safe) {
+ c->set_safe_callback(cb_arg, cb_safe);
+ }
+ *pc = c;
+ return 0;
+}
+
+extern "C" int rados_aio_get_return_value(rados_completion_t c) {
+ return reinterpret_cast<librados::AioCompletionImpl*>(c)->get_return_value();
+}
+
+extern "C" rados_config_t rados_cct(rados_t cluster)
+{
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ return reinterpret_cast<rados_config_t>(client->cct());
+}
+
+extern "C" int rados_conf_set(rados_t cluster, const char *option,
+ const char *value) {
+ librados::TestRadosClient *impl =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ CephContext *cct = impl->cct();
+ return cct->_conf.set_val(option, value);
+}
+
+extern "C" int rados_conf_parse_env(rados_t cluster, const char *var) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ auto& conf = client->cct()->_conf;
+ conf.parse_env(client->cct()->get_module_type(), var);
+ conf.apply_changes(NULL);
+ return 0;
+}
+
+extern "C" int rados_conf_read_file(rados_t cluster, const char *path) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ auto& conf = client->cct()->_conf;
+ int ret = conf.parse_config_files(path, NULL, 0);
+ if (ret == 0) {
+ conf.parse_env(client->cct()->get_module_type());
+ conf.apply_changes(NULL);
+ conf.complain_about_parse_errors(client->cct());
+ } else if (ret == -ENOENT) {
+ // ignore missing client config
+ return 0;
+ }
+ return ret;
+}
+
+extern "C" int rados_connect(rados_t cluster) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ return client->connect();
+}
+
+extern "C" int rados_create(rados_t *cluster, const char * const id) {
+ *cluster = create_rados_client();
+ return 0;
+}
+
+extern "C" int rados_create_with_context(rados_t *cluster,
+ rados_config_t cct_) {
+ auto cct = reinterpret_cast<CephContext*>(cct_);
+ *cluster = librados_test_stub::get_cluster()->create_rados_client(cct);
+ return 0;
+}
+
+extern "C" rados_config_t rados_ioctx_cct(rados_ioctx_t ioctx)
+{
+ librados::TestIoCtxImpl *ctx =
+ reinterpret_cast<librados::TestIoCtxImpl*>(ioctx);
+ return reinterpret_cast<rados_config_t>(ctx->get_rados_client()->cct());
+}
+
+extern "C" int rados_ioctx_create(rados_t cluster, const char *pool_name,
+ rados_ioctx_t *ioctx) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+
+ int64_t pool_id = client->pool_lookup(pool_name);
+ if (pool_id < 0) {
+ return static_cast<int>(pool_id);
+ }
+
+ *ioctx = reinterpret_cast<rados_ioctx_t>(
+ client->create_ioctx(pool_id, pool_name));
+ return 0;
+}
+
+extern "C" int rados_ioctx_create2(rados_t cluster, int64_t pool_id,
+ rados_ioctx_t *ioctx)
+{
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+
+ std::list<std::pair<int64_t, std::string> > pools;
+ int r = client->pool_list(pools);
+ if (r < 0) {
+ return r;
+ }
+
+ for (std::list<std::pair<int64_t, std::string> >::iterator it =
+ pools.begin(); it != pools.end(); ++it) {
+ if (it->first == pool_id) {
+ *ioctx = reinterpret_cast<rados_ioctx_t>(
+ client->create_ioctx(pool_id, it->second));
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+extern "C" void rados_ioctx_destroy(rados_ioctx_t io) {
+ librados::TestIoCtxImpl *ctx =
+ reinterpret_cast<librados::TestIoCtxImpl*>(io);
+ ctx->put();
+}
+
+extern "C" rados_t rados_ioctx_get_cluster(rados_ioctx_t io) {
+ librados::TestIoCtxImpl *ctx =
+ reinterpret_cast<librados::TestIoCtxImpl*>(io);
+ return reinterpret_cast<rados_t>(ctx->get_rados_client());
+}
+
+extern "C" int rados_mon_command(rados_t cluster, const char **cmd,
+ size_t cmdlen, const char *inbuf,
+ size_t inbuflen, char **outbuf,
+ size_t *outbuflen, char **outs,
+ size_t *outslen) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+
+ vector<string> cmdvec;
+ for (size_t i = 0; i < cmdlen; i++) {
+ cmdvec.push_back(cmd[i]);
+ }
+
+ bufferlist inbl;
+ inbl.append(inbuf, inbuflen);
+
+ bufferlist outbl;
+ string outstring;
+ int ret = client->mon_command(cmdvec, inbl, &outbl, &outstring);
+
+ do_out_buffer(outbl, outbuf, outbuflen);
+ do_out_buffer(outstring, outs, outslen);
+ return ret;
+}
+
+extern "C" int rados_nobjects_list_open(rados_ioctx_t io,
+ rados_list_ctx_t *ctx) {
+ librados::TestIoCtxImpl *io_ctx =
+ reinterpret_cast<librados::TestIoCtxImpl*>(io);
+ librados::TestRadosClient *client = io_ctx->get_rados_client();
+
+ std::list<librados::TestRadosClient::Object> *list =
+ new std::list<librados::TestRadosClient::Object>();
+
+ client->object_list(io_ctx->get_id(), list);
+ list->push_front(librados::TestRadosClient::Object());
+ *ctx = reinterpret_cast<rados_list_ctx_t>(list);
+ return 0;
+}
+
+extern "C" int rados_nobjects_list_next(rados_list_ctx_t ctx,
+ const char **entry,
+ const char **key,
+ const char **nspace) {
+ std::list<librados::TestRadosClient::Object> *list =
+ reinterpret_cast<std::list<librados::TestRadosClient::Object> *>(ctx);
+ if (!list->empty()) {
+ list->pop_front();
+ }
+ if (list->empty()) {
+ return -ENOENT;
+ }
+
+ librados::TestRadosClient::Object &obj = list->front();
+ if (entry != NULL) {
+ *entry = obj.oid.c_str();
+ }
+ if (key != NULL) {
+ *key = obj.locator.c_str();
+ }
+ if (nspace != NULL) {
+ *nspace = obj.nspace.c_str();
+ }
+ return 0;
+}
+
+extern "C" void rados_nobjects_list_close(rados_list_ctx_t ctx) {
+ std::list<librados::TestRadosClient::Object> *list =
+ reinterpret_cast<std::list<librados::TestRadosClient::Object> *>(ctx);
+ delete list;
+}
+
+extern "C" int rados_pool_create(rados_t cluster, const char *pool_name) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ return client->pool_create(pool_name);
+}
+
+extern "C" int rados_pool_delete(rados_t cluster, const char *pool_name) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ return client->pool_delete(pool_name);
+}
+
+extern "C" void rados_shutdown(rados_t cluster) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ client->put();
+}
+
+extern "C" int rados_wait_for_latest_osdmap(rados_t cluster) {
+ librados::TestRadosClient *client =
+ reinterpret_cast<librados::TestRadosClient*>(cluster);
+ return client->wait_for_latest_osdmap();
+}
+
+namespace librados {
+
+void AioCompletion::release() {
+ AioCompletionImpl *c = reinterpret_cast<AioCompletionImpl *>(pc);
+ c->release();
+ delete this;
+}
+
+IoCtx::IoCtx() : io_ctx_impl(NULL) {
+}
+
+IoCtx::~IoCtx() {
+ close();
+}
+
+IoCtx::IoCtx(const IoCtx& rhs) {
+ io_ctx_impl = rhs.io_ctx_impl;
+ if (io_ctx_impl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->get();
+ }
+}
+
+IoCtx::IoCtx(IoCtx&& rhs) noexcept : io_ctx_impl(std::exchange(rhs.io_ctx_impl, nullptr))
+{
+}
+
+IoCtx& IoCtx::operator=(const IoCtx& rhs) {
+ if (io_ctx_impl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->put();
+ }
+
+ io_ctx_impl = rhs.io_ctx_impl;
+ if (io_ctx_impl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->get();
+ }
+ return *this;
+}
+
+librados::IoCtx& librados::IoCtx::operator=(IoCtx&& rhs) noexcept
+{
+ if (io_ctx_impl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->put();
+ }
+
+ io_ctx_impl = std::exchange(rhs.io_ctx_impl, nullptr);
+ return *this;
+}
+
+int IoCtx::aio_flush() {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->aio_flush();
+ return 0;
+}
+
+int IoCtx::aio_flush_async(AioCompletion *c) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->aio_flush_async(c->pc);
+ return 0;
+}
+
+int IoCtx::aio_notify(const std::string& oid, AioCompletion *c, bufferlist& bl,
+ uint64_t timeout_ms, bufferlist *pbl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->aio_notify(oid, c->pc, bl, timeout_ms, pbl);
+ return 0;
+}
+
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectReadOperation *op, bufferlist *pbl) {
+ return aio_operate(oid, c, op, 0, pbl);
+}
+
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectReadOperation *op, int flags,
+ bufferlist *pbl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
+ return ctx->aio_operate_read(oid, *ops, c->pc, flags, pbl);
+}
+
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectReadOperation *op, int flags,
+ bufferlist *pbl, const blkin_trace_info *trace_info) {
+ return aio_operate(oid, c, op, flags, pbl);
+}
+
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectWriteOperation *op) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
+ return ctx->aio_operate(oid, *ops, c->pc, NULL, 0);
+}
+
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectWriteOperation *op, snap_t seq,
+ std::vector<snap_t>& snaps, int flags,
+ const blkin_trace_info *trace_info) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
+
+ std::vector<snapid_t> snv;
+ snv.resize(snaps.size());
+ for (size_t i = 0; i < snaps.size(); ++i)
+ snv[i] = snaps[i];
+ SnapContext snapc(seq, snv);
+
+ return ctx->aio_operate(oid, *ops, c->pc, &snapc, flags);
+}
+
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectWriteOperation *op, snap_t seq,
+ std::vector<snap_t>& snaps) {
+ return aio_operate(oid, c, op, seq, snaps, 0, nullptr);
+}
+
+int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
+ ObjectWriteOperation *op, snap_t seq,
+ std::vector<snap_t>& snaps,
+ const blkin_trace_info *trace_info) {
+ return aio_operate(oid, c, op, seq, snaps, 0, trace_info);
+}
+
+int IoCtx::aio_remove(const std::string& oid, AioCompletion *c) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->aio_remove(oid, c->pc);
+}
+
+int IoCtx::aio_remove(const std::string& oid, AioCompletion *c, int flags) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->aio_remove(oid, c->pc, flags);
+}
+
+int IoCtx::aio_watch(const std::string& o, AioCompletion *c, uint64_t *handle,
+ librados::WatchCtx2 *watch_ctx) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->aio_watch(o, c->pc, handle, watch_ctx);
+}
+
+int IoCtx::aio_unwatch(uint64_t handle, AioCompletion *c) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->aio_unwatch(handle, c->pc);
+}
+
+config_t IoCtx::cct() {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return reinterpret_cast<config_t>(ctx->get_rados_client()->cct());
+}
+
+void IoCtx::close() {
+ if (io_ctx_impl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->put();
+ }
+ io_ctx_impl = NULL;
+}
+
+int IoCtx::create(const std::string& oid, bool exclusive) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, boost::bind(&TestIoCtxImpl::create, _1, _2, exclusive));
+}
+
+void IoCtx::dup(const IoCtx& rhs) {
+ close();
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(rhs.io_ctx_impl);
+ io_ctx_impl = reinterpret_cast<IoCtxImpl*>(ctx->clone());
+}
+
+int IoCtx::exec(const std::string& oid, const char *cls, const char *method,
+ bufferlist& inbl, bufferlist& outbl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, boost::bind(&TestIoCtxImpl::exec, _1, _2, get_class_handler(), cls,
+ method, inbl, &outbl, ctx->get_snap_context()));
+}
+
+void IoCtx::from_rados_ioctx_t(rados_ioctx_t p, IoCtx &io) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(p);
+ ctx->get();
+
+ io.close();
+ io.io_ctx_impl = reinterpret_cast<IoCtxImpl*>(ctx);
+}
+
+uint64_t IoCtx::get_instance_id() const {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->get_instance_id();
+}
+
+int64_t IoCtx::get_id() {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->get_id();
+}
+
+uint64_t IoCtx::get_last_version() {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->get_last_version();
+}
+
+std::string IoCtx::get_pool_name() {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->get_pool_name();
+}
+
+int IoCtx::list_snaps(const std::string& o, snap_set_t *out_snaps) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ o, boost::bind(&TestIoCtxImpl::list_snaps, _1, _2, out_snaps));
+}
+
+int IoCtx::list_watchers(const std::string& o,
+ std::list<obj_watch_t> *out_watchers) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ o, boost::bind(&TestIoCtxImpl::list_watchers, _1, _2, out_watchers));
+}
+
+int IoCtx::notify(const std::string& o, uint64_t ver, bufferlist& bl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->notify(o, bl, 0, NULL);
+}
+
+int IoCtx::notify2(const std::string& o, bufferlist& bl,
+ uint64_t timeout_ms, bufferlist *pbl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->notify(o, bl, timeout_ms, pbl);
+}
+
+void IoCtx::notify_ack(const std::string& o, uint64_t notify_id,
+ uint64_t handle, bufferlist& bl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->notify_ack(o, notify_id, handle, bl);
+}
+
+int IoCtx::omap_get_vals(const std::string& oid,
+ const std::string& start_after,
+ uint64_t max_return,
+ std::map<std::string, bufferlist> *out_vals) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, boost::bind(&TestIoCtxImpl::omap_get_vals, _1, _2, start_after, "",
+ max_return, out_vals));
+}
+
+int IoCtx::operate(const std::string& oid, ObjectWriteOperation *op) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
+ return ctx->operate(oid, *ops);
+}
+
+int IoCtx::operate(const std::string& oid, ObjectReadOperation *op,
+ bufferlist *pbl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
+ return ctx->operate_read(oid, *ops, pbl);
+}
+
+int IoCtx::read(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, boost::bind(&TestIoCtxImpl::read, _1, _2, len, off, &bl));
+}
+
+int IoCtx::remove(const std::string& oid) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, boost::bind(&TestIoCtxImpl::remove, _1, _2, ctx->get_snap_context()));
+}
+
+int IoCtx::selfmanaged_snap_create(uint64_t *snapid) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->selfmanaged_snap_create(snapid);
+}
+
+void IoCtx::aio_selfmanaged_snap_create(uint64_t *snapid, AioCompletion* c) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->aio_selfmanaged_snap_create(snapid, c->pc);
+}
+
+int IoCtx::selfmanaged_snap_remove(uint64_t snapid) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->selfmanaged_snap_remove(snapid);
+}
+
+void IoCtx::aio_selfmanaged_snap_remove(uint64_t snapid, AioCompletion* c) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->aio_selfmanaged_snap_remove(snapid, c->pc);
+}
+
+int IoCtx::selfmanaged_snap_rollback(const std::string& oid,
+ uint64_t snapid) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->selfmanaged_snap_rollback(oid, snapid);
+}
+
+int IoCtx::selfmanaged_snap_set_write_ctx(snap_t seq,
+ std::vector<snap_t>& snaps) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->selfmanaged_snap_set_write_ctx(seq, snaps);
+}
+
+void IoCtx::snap_set_read(snap_t seq) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->set_snap_read(seq);
+}
+
+int IoCtx::sparse_read(const std::string& oid, std::map<uint64_t,uint64_t>& m,
+ bufferlist& bl, size_t len, uint64_t off) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, boost::bind(&TestIoCtxImpl::sparse_read, _1, _2, off, len, &m, &bl));
+}
+
+int IoCtx::stat(const std::string& oid, uint64_t *psize, time_t *pmtime) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, boost::bind(&TestIoCtxImpl::stat, _1, _2, psize, pmtime));
+}
+
+int IoCtx::tmap_update(const std::string& oid, bufferlist& cmdbl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, boost::bind(&TestIoCtxImpl::tmap_update, _1, _2, cmdbl));
+}
+
+int IoCtx::trunc(const std::string& oid, uint64_t off) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, boost::bind(&TestIoCtxImpl::truncate, _1, _2, off,
+ ctx->get_snap_context()));
+}
+
+int IoCtx::unwatch2(uint64_t handle) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->unwatch(handle);
+}
+
+int IoCtx::unwatch(const std::string& o, uint64_t handle) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->unwatch(handle);
+}
+
+int IoCtx::watch(const std::string& o, uint64_t ver, uint64_t *handle,
+ librados::WatchCtx *wctx) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->watch(o, handle, wctx, NULL);
+}
+
+int IoCtx::watch2(const std::string& o, uint64_t *handle,
+ librados::WatchCtx2 *wctx) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->watch(o, handle, NULL, wctx);
+}
+
+int IoCtx::write(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, boost::bind(&TestIoCtxImpl::write, _1, _2, bl, len, off,
+ ctx->get_snap_context()));
+}
+
+int IoCtx::write_full(const std::string& oid, bufferlist& bl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, boost::bind(&TestIoCtxImpl::write_full, _1, _2, bl,
+ ctx->get_snap_context()));
+}
+
+int IoCtx::writesame(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, boost::bind(&TestIoCtxImpl::writesame, _1, _2, bl, len, off,
+ ctx->get_snap_context()));
+}
+
+int IoCtx::cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->execute_operation(
+ oid, boost::bind(&TestIoCtxImpl::cmpext, _1, _2, off, cmp_bl));
+}
+
+int IoCtx::application_enable(const std::string& app_name, bool force) {
+ return 0;
+}
+
+int IoCtx::application_enable_async(const std::string& app_name,
+ bool force, PoolAsyncCompletion *c) {
+ return -EOPNOTSUPP;
+}
+
+int IoCtx::application_list(std::set<std::string> *app_names) {
+ return -EOPNOTSUPP;
+}
+
+int IoCtx::application_metadata_get(const std::string& app_name,
+ const std::string &key,
+ std::string *value) {
+ return -EOPNOTSUPP;
+}
+
+int IoCtx::application_metadata_set(const std::string& app_name,
+ const std::string &key,
+ const std::string& value) {
+ return -EOPNOTSUPP;
+}
+
+int IoCtx::application_metadata_remove(const std::string& app_name,
+ const std::string &key) {
+ return -EOPNOTSUPP;
+}
+
+int IoCtx::application_metadata_list(const std::string& app_name,
+ std::map<std::string, std::string> *values) {
+ return -EOPNOTSUPP;
+}
+
+void IoCtx::set_namespace(const std::string& nspace) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ ctx->set_namespace(nspace);
+}
+
+std::string IoCtx::get_namespace() const {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
+ return ctx->get_namespace();
+}
+
+static int save_operation_result(int result, int *pval) {
+ if (pval != NULL) {
+ *pval = result;
+ }
+ return result;
+}
+
+ObjectOperation::ObjectOperation() {
+ TestObjectOperationImpl *o = new TestObjectOperationImpl();
+ o->get();
+ impl = reinterpret_cast<ObjectOperationImpl*>(o);
+}
+
+ObjectOperation::~ObjectOperation() {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ if (o) {
+ o->put();
+ o = NULL;
+ }
+}
+
+void ObjectOperation::assert_exists() {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::assert_exists, _1, _2));
+}
+
+void ObjectOperation::exec(const char *cls, const char *method,
+ bufferlist& inbl) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::exec, _1, _2,
+ get_class_handler(), cls, method, inbl, _3, _4));
+}
+
+void ObjectOperation::set_op_flags2(int flags) {
+}
+
+size_t ObjectOperation::size() {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ return o->ops.size();
+}
+
+void ObjectOperation::cmpext(uint64_t off, const bufferlist& cmp_bl,
+ int *prval) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ ObjectOperationTestImpl op = boost::bind(&TestIoCtxImpl::cmpext, _1, _2, off, cmp_bl);
+ if (prval != NULL) {
+ op = boost::bind(save_operation_result,
+ boost::bind(op, _1, _2, _3, _4), prval);
+ }
+ o->ops.push_back(op);
+}
+
+void ObjectReadOperation::list_snaps(snap_set_t *out_snaps, int *prval) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+
+ ObjectOperationTestImpl op = boost::bind(&TestIoCtxImpl::list_snaps, _1, _2,
+ out_snaps);
+ if (prval != NULL) {
+ op = boost::bind(save_operation_result,
+ boost::bind(op, _1, _2, _3, _4), prval);
+ }
+ o->ops.push_back(op);
+}
+
+void ObjectReadOperation::list_watchers(std::list<obj_watch_t> *out_watchers,
+ int *prval) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+
+ ObjectOperationTestImpl op = boost::bind(&TestIoCtxImpl::list_watchers, _1,
+ _2, out_watchers);
+ if (prval != NULL) {
+ op = boost::bind(save_operation_result,
+ boost::bind(op, _1, _2, _3, _4), prval);
+ }
+ o->ops.push_back(op);
+}
+
+void ObjectReadOperation::read(size_t off, uint64_t len, bufferlist *pbl,
+ int *prval) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+
+ ObjectOperationTestImpl op;
+ if (pbl != NULL) {
+ op = boost::bind(&TestIoCtxImpl::read, _1, _2, len, off, pbl);
+ } else {
+ op = boost::bind(&TestIoCtxImpl::read, _1, _2, len, off, _3);
+ }
+
+ if (prval != NULL) {
+ op = boost::bind(save_operation_result,
+ boost::bind(op, _1, _2, _3, _4), prval);
+ }
+ o->ops.push_back(op);
+}
+
+void ObjectReadOperation::sparse_read(uint64_t off, uint64_t len,
+ std::map<uint64_t,uint64_t> *m,
+ bufferlist *pbl, int *prval) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+
+ ObjectOperationTestImpl op;
+ if (pbl != NULL) {
+ op = boost::bind(&TestIoCtxImpl::sparse_read, _1, _2, off, len, m, pbl);
+ } else {
+ op = boost::bind(&TestIoCtxImpl::sparse_read, _1, _2, off, len, m, _3);
+ }
+
+ if (prval != NULL) {
+ op = boost::bind(save_operation_result,
+ boost::bind(op, _1, _2, _3, _4), prval);
+ }
+ o->ops.push_back(op);
+}
+
+void ObjectReadOperation::stat(uint64_t *psize, time_t *pmtime, int *prval) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+
+ ObjectOperationTestImpl op = boost::bind(&TestIoCtxImpl::stat, _1, _2,
+ psize, pmtime);
+
+ if (prval != NULL) {
+ op = boost::bind(save_operation_result,
+ boost::bind(op, _1, _2, _3, _4), prval);
+ }
+ o->ops.push_back(op);
+}
+
+void ObjectWriteOperation::append(const bufferlist &bl) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::append, _1, _2, bl, _4));
+}
+
+void ObjectWriteOperation::create(bool exclusive) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::create, _1, _2, exclusive));
+}
+
+void ObjectWriteOperation::omap_set(const std::map<std::string, bufferlist> &map) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::omap_set, _1, _2, boost::ref(map)));
+}
+
+void ObjectWriteOperation::remove() {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::remove, _1, _2, _4));
+}
+
+void ObjectWriteOperation::selfmanaged_snap_rollback(uint64_t snapid) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::selfmanaged_snap_rollback,
+ _1, _2, snapid));
+}
+
+void ObjectWriteOperation::set_alloc_hint(uint64_t expected_object_size,
+ uint64_t expected_write_size) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::set_alloc_hint, _1, _2,
+ expected_object_size, expected_write_size, 0,
+ _4));
+}
+
+void ObjectWriteOperation::set_alloc_hint2(uint64_t expected_object_size,
+ uint64_t expected_write_size,
+ uint32_t flags) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::set_alloc_hint, _1, _2,
+ expected_object_size, expected_write_size, flags,
+ _4));
+}
+
+void ObjectWriteOperation::tmap_update(const bufferlist& cmdbl) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::tmap_update, _1, _2,
+ cmdbl));
+}
+
+void ObjectWriteOperation::truncate(uint64_t off) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::truncate, _1, _2, off, _4));
+}
+
+void ObjectWriteOperation::write(uint64_t off, const bufferlist& bl) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::write, _1, _2, bl, bl.length(),
+ off, _4));
+}
+
+void ObjectWriteOperation::write_full(const bufferlist& bl) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::write_full, _1, _2, bl, _4));
+}
+
+void ObjectWriteOperation::writesame(uint64_t off, uint64_t len, const bufferlist& bl) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::writesame, _1, _2, bl, len,
+ off, _4));
+}
+
+void ObjectWriteOperation::zero(uint64_t off, uint64_t len) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::zero, _1, _2, off, len, _4));
+}
+
+Rados::Rados() : client(NULL) {
+}
+
+Rados::Rados(IoCtx& ioctx) {
+ TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(ioctx.io_ctx_impl);
+ TestRadosClient *impl = ctx->get_rados_client();
+ impl->get();
+
+ client = reinterpret_cast<RadosClient*>(impl);
+ ceph_assert(client != NULL);
+}
+
+Rados::~Rados() {
+ shutdown();
+}
+
+void Rados::from_rados_t(rados_t p, Rados &rados) {
+ if (rados.client != nullptr) {
+ reinterpret_cast<TestRadosClient*>(rados.client)->put();
+ rados.client = nullptr;
+ }
+
+ auto impl = reinterpret_cast<TestRadosClient*>(p);
+ if (impl) {
+ impl->get();
+ rados.client = reinterpret_cast<RadosClient*>(impl);
+ }
+}
+
+AioCompletion *Rados::aio_create_completion(void *cb_arg,
+ callback_t cb_complete,
+ callback_t cb_safe) {
+ AioCompletionImpl *c;
+ int r = rados_aio_create_completion(cb_arg, cb_complete, cb_safe,
+ reinterpret_cast<void**>(&c));
+ ceph_assert(r == 0);
+ return new AioCompletion(c);
+}
+
+int Rados::aio_watch_flush(AioCompletion* c) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->aio_watch_flush(c->pc);
+}
+
+int Rados::blacklist_add(const std::string& client_address,
+ uint32_t expire_seconds) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->blacklist_add(client_address, expire_seconds);
+}
+
+config_t Rados::cct() {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return reinterpret_cast<config_t>(impl->cct());
+}
+
+int Rados::cluster_fsid(std::string* fsid) {
+ *fsid = "00000000-1111-2222-3333-444444444444";
+ return 0;
+}
+
+int Rados::conf_set(const char *option, const char *value) {
+ return rados_conf_set(reinterpret_cast<rados_t>(client), option, value);
+}
+
+int Rados::conf_get(const char *option, std::string &val) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ CephContext *cct = impl->cct();
+
+ char *str = NULL;
+ int ret = cct->_conf.get_val(option, &str, -1);
+ if (ret != 0) {
+ free(str);
+ return ret;
+ }
+
+ val = str;
+ free(str);
+ return 0;
+}
+
+int Rados::conf_parse_env(const char *env) const {
+ return rados_conf_parse_env(reinterpret_cast<rados_t>(client), env);
+}
+
+int Rados::conf_read_file(const char * const path) const {
+ return rados_conf_read_file(reinterpret_cast<rados_t>(client), path);
+}
+
+int Rados::connect() {
+ return rados_connect(reinterpret_cast<rados_t>(client));
+}
+
+uint64_t Rados::get_instance_id() {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->get_instance_id();
+}
+
+int Rados::get_min_compatible_osd(int8_t* require_osd_release) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->get_min_compatible_osd(require_osd_release);
+}
+
+int Rados::get_min_compatible_client(int8_t* min_compat_client,
+ int8_t* require_min_compat_client) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->get_min_compatible_client(min_compat_client,
+ require_min_compat_client);
+}
+
+int Rados::init(const char * const id) {
+ return rados_create(reinterpret_cast<rados_t *>(&client), id);
+}
+
+int Rados::init_with_context(config_t cct_) {
+ return rados_create_with_context(reinterpret_cast<rados_t *>(&client), cct_);
+}
+
+int Rados::ioctx_create(const char *name, IoCtx &io) {
+ rados_ioctx_t p;
+ int ret = rados_ioctx_create(reinterpret_cast<rados_t>(client), name, &p);
+ if (ret) {
+ return ret;
+ }
+
+ io.close();
+ io.io_ctx_impl = reinterpret_cast<IoCtxImpl*>(p);
+ return 0;
+}
+
+int Rados::ioctx_create2(int64_t pool_id, IoCtx &io)
+{
+ rados_ioctx_t p;
+ int ret = rados_ioctx_create2(reinterpret_cast<rados_t>(client), pool_id, &p);
+ if (ret) {
+ return ret;
+ }
+
+ io.close();
+ io.io_ctx_impl = reinterpret_cast<IoCtxImpl*>(p);
+ return 0;
+}
+
+int Rados::mon_command(std::string cmd, const bufferlist& inbl,
+ bufferlist *outbl, std::string *outs) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+
+ std::vector<std::string> cmds;
+ cmds.push_back(cmd);
+ return impl->mon_command(cmds, inbl, outbl, outs);
+}
+
+int Rados::service_daemon_register(const std::string& service,
+ const std::string& name,
+ const std::map<std::string,std::string>& metadata) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->service_daemon_register(service, name, metadata);
+}
+
+int Rados::service_daemon_update_status(std::map<std::string,std::string>&& status) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->service_daemon_update_status(std::move(status));
+}
+
+int Rados::pool_create(const char *name) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->pool_create(name);
+}
+
+int Rados::pool_delete(const char *name) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->pool_delete(name);
+}
+
+int Rados::pool_get_base_tier(int64_t pool, int64_t* base_tier) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->pool_get_base_tier(pool, base_tier);
+}
+
+int Rados::pool_list(std::list<std::string>& v) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ std::list<std::pair<int64_t, std::string> > pools;
+ int r = impl->pool_list(pools);
+ if (r < 0) {
+ return r;
+ }
+
+ v.clear();
+ for (std::list<std::pair<int64_t, std::string> >::iterator it = pools.begin();
+ it != pools.end(); ++it) {
+ v.push_back(it->second);
+ }
+ return 0;
+}
+
+int Rados::pool_list2(std::list<std::pair<int64_t, std::string> >& v)
+{
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->pool_list(v);
+}
+
+int64_t Rados::pool_lookup(const char *name) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->pool_lookup(name);
+}
+
+int Rados::pool_reverse_lookup(int64_t id, std::string *name) {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->pool_reverse_lookup(id, name);
+}
+
+void Rados::shutdown() {
+ if (client == NULL) {
+ return;
+ }
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ impl->put();
+ client = NULL;
+}
+
+void Rados::test_blacklist_self(bool set) {
+}
+
+int Rados::wait_for_latest_osdmap() {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->wait_for_latest_osdmap();
+}
+
+int Rados::watch_flush() {
+ TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
+ return impl->watch_flush();
+}
+
+WatchCtx::~WatchCtx() {
+}
+
+WatchCtx2::~WatchCtx2() {
+}
+
+} // namespace librados
+
+int cls_cxx_create(cls_method_context_t hctx, bool exclusive) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+ return ctx->io_ctx_impl->create(ctx->oid, exclusive);
+}
+
+int cls_cxx_remove(cls_method_context_t hctx) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+ return ctx->io_ctx_impl->remove(ctx->oid, ctx->io_ctx_impl->get_snap_context());
+}
+
+int cls_get_request_origin(cls_method_context_t hctx, entity_inst_t *origin) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+
+ librados::TestRadosClient *rados_client =
+ ctx->io_ctx_impl->get_rados_client();
+
+ struct sockaddr_in sin;
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = 0;
+ inet_pton(AF_INET, "127.0.0.1", &sin.sin_addr);
+
+ entity_addr_t entity_addr(entity_addr_t::TYPE_DEFAULT,
+ rados_client->get_nonce());
+ entity_addr.in4_addr() = sin;
+
+ *origin = entity_inst_t(
+ entity_name_t::CLIENT(rados_client->get_instance_id()),
+ entity_addr);
+ return 0;
+}
+
+int cls_cxx_getxattr(cls_method_context_t hctx, const char *name,
+ bufferlist *outbl) {
+ std::map<string, bufferlist> attrs;
+ int r = cls_cxx_getxattrs(hctx, &attrs);
+ if (r < 0) {
+ return r;
+ }
+
+ std::map<string, bufferlist>::iterator it = attrs.find(name);
+ if (it == attrs.end()) {
+ return -ENODATA;
+ }
+ *outbl = it->second;
+ return 0;
+}
+
+int cls_cxx_getxattrs(cls_method_context_t hctx, std::map<string, bufferlist> *attrset) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+ return ctx->io_ctx_impl->xattr_get(ctx->oid, attrset);
+}
+
+int cls_cxx_map_get_keys(cls_method_context_t hctx, const string &start_obj,
+ uint64_t max_to_get, std::set<string> *keys, bool *more) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+
+ keys->clear();
+ std::map<string, bufferlist> vals;
+ int r = ctx->io_ctx_impl->omap_get_vals2(ctx->oid, start_obj, "", max_to_get,
+ &vals, more);
+ if (r < 0) {
+ return r;
+ }
+
+ for (std::map<string, bufferlist>::iterator it = vals.begin();
+ it != vals.end(); ++it) {
+ keys->insert(it->first);
+ }
+ return keys->size();
+}
+
+int cls_cxx_map_get_val(cls_method_context_t hctx, const string &key,
+ bufferlist *outbl) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+
+ std::map<string, bufferlist> vals;
+ int r = ctx->io_ctx_impl->omap_get_vals(ctx->oid, "", key, 1024, &vals);
+ if (r < 0) {
+ return r;
+ }
+
+ std::map<string, bufferlist>::iterator it = vals.find(key);
+ if (it == vals.end()) {
+ return -ENOENT;
+ }
+
+ *outbl = it->second;
+ return 0;
+}
+
+int cls_cxx_map_get_vals(cls_method_context_t hctx, const string &start_obj,
+ const string &filter_prefix, uint64_t max_to_get,
+ std::map<string, bufferlist> *vals, bool *more) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+ int r = ctx->io_ctx_impl->omap_get_vals2(ctx->oid, start_obj, filter_prefix,
+ max_to_get, vals, more);
+ if (r < 0) {
+ return r;
+ }
+ return vals->size();
+}
+
+int cls_cxx_map_remove_key(cls_method_context_t hctx, const string &key) {
+ std::set<std::string> keys;
+ keys.insert(key);
+
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+ return ctx->io_ctx_impl->omap_rm_keys(ctx->oid, keys);
+}
+
+int cls_cxx_map_set_val(cls_method_context_t hctx, const string &key,
+ bufferlist *inbl) {
+ std::map<std::string, bufferlist> m;
+ m[key] = *inbl;
+ return cls_cxx_map_set_vals(hctx, &m);
+}
+
+int cls_cxx_map_set_vals(cls_method_context_t hctx,
+ const std::map<string, bufferlist> *map) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+ return ctx->io_ctx_impl->omap_set(ctx->oid, *map);
+}
+
+int cls_cxx_read(cls_method_context_t hctx, int ofs, int len,
+ bufferlist *outbl) {
+ return cls_cxx_read2(hctx, ofs, len, outbl, 0);
+}
+
+int cls_cxx_read2(cls_method_context_t hctx, int ofs, int len,
+ bufferlist *outbl, uint32_t op_flags) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+ return ctx->io_ctx_impl->read(ctx->oid, len, ofs, outbl);
+}
+
+int cls_cxx_setxattr(cls_method_context_t hctx, const char *name,
+ bufferlist *inbl) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+ return ctx->io_ctx_impl->xattr_set(ctx->oid, name, *inbl);
+}
+
+int cls_cxx_stat(cls_method_context_t hctx, uint64_t *size, time_t *mtime) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+ return ctx->io_ctx_impl->stat(ctx->oid, size, mtime);
+}
+
+int cls_cxx_write(cls_method_context_t hctx, int ofs, int len,
+ bufferlist *inbl) {
+ return cls_cxx_write2(hctx, ofs, len, inbl, 0);
+}
+
+int cls_cxx_write2(cls_method_context_t hctx, int ofs, int len,
+ bufferlist *inbl, uint32_t op_flags) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+ return ctx->io_ctx_impl->write(ctx->oid, *inbl, len, ofs, ctx->snapc);
+}
+
+int cls_cxx_write_full(cls_method_context_t hctx, bufferlist *inbl) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+ return ctx->io_ctx_impl->write_full(ctx->oid, *inbl, ctx->snapc);
+}
+
+int cls_cxx_replace(cls_method_context_t hctx, int ofs, int len,
+ bufferlist *inbl) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+ int r = ctx->io_ctx_impl->truncate(ctx->oid, 0, ctx->snapc);
+ if (r < 0) {
+ return r;
+ }
+ return ctx->io_ctx_impl->write(ctx->oid, *inbl, len, ofs, ctx->snapc);
+}
+
+int cls_cxx_list_watchers(cls_method_context_t hctx,
+ obj_list_watch_response_t *watchers) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+
+ std::list<obj_watch_t> obj_watchers;
+ int r = ctx->io_ctx_impl->list_watchers(ctx->oid, &obj_watchers);
+ if (r < 0) {
+ return r;
+ }
+
+ for (auto &w : obj_watchers) {
+ watch_item_t watcher;
+ watcher.name = entity_name_t::CLIENT(w.watcher_id);
+ watcher.cookie = w.cookie;
+ watcher.timeout_seconds = w.timeout_seconds;
+ watcher.addr.parse(w.addr, 0);
+ watchers->entries.push_back(watcher);
+ }
+
+ return 0;
+}
+
+uint64_t cls_get_features(cls_method_context_t hctx) {
+ return CEPH_FEATURES_SUPPORTED_DEFAULT;
+}
+
+uint64_t cls_get_client_features(cls_method_context_t hctx) {
+ return CEPH_FEATURES_SUPPORTED_DEFAULT;
+}
+
+int cls_get_snapset_seq(cls_method_context_t hctx, uint64_t *snap_seq) {
+ librados::TestClassHandler::MethodContext *ctx =
+ reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
+ librados::snap_set_t snapset;
+ int r = ctx->io_ctx_impl->list_snaps(ctx->oid, &snapset);
+ if (r < 0) {
+ return r;
+ }
+
+ *snap_seq = snapset.seq;
+ return 0;
+}
+
+int cls_log(int level, const char *format, ...) {
+ int size = 256;
+ va_list ap;
+ while (1) {
+ char buf[size];
+ va_start(ap, format);
+ int n = vsnprintf(buf, size, format, ap);
+ va_end(ap);
+ if ((n > -1 && n < size) || size > 8196) {
+ dout(ceph::dout::need_dynamic(level)) << buf << dendl;
+ return n;
+ }
+ size *= 2;
+ }
+ return 0;
+}
+
+int cls_register(const char *name, cls_handle_t *handle) {
+ librados::TestClassHandler *cls = get_class_handler();
+ return cls->create(name, handle);
+}
+
+int cls_register_cxx_method(cls_handle_t hclass, const char *method,
+ int flags,
+ cls_method_cxx_call_t class_call,
+ cls_method_handle_t *handle) {
+ librados::TestClassHandler *cls = get_class_handler();
+ return cls->create_method(hclass, method, class_call, handle);
+}
+
+int cls_register_cxx_filter(cls_handle_t hclass,
+ const std::string &filter_name,
+ cls_cxx_filter_factory_t fn,
+ cls_filter_handle_t *)
+{
+ librados::TestClassHandler *cls = get_class_handler();
+ return cls->create_filter(hclass, filter_name, fn);
+}
+
+int8_t cls_get_required_osd_release(cls_handle_t hclass) {
+ return CEPH_FEATURE_SERVER_NAUTILUS;
+}
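
The file above reimplements the public librados C and C++ entry points on top of the in-memory test cluster. As a quick orientation only (not part of the commit), a unit test built against this stub can issue the same librados calls it would issue against a real cluster; the pool and object names below are illustrative:

  #include "include/rados/librados.hpp"

  // Round-trips one object through the stub; every call resolves to the
  // Test*Impl classes added by this commit instead of a live cluster.
  int stub_roundtrip() {
    librados::Rados rados;
    int r = rados.init(nullptr);                  // rados_create() -> create_rados_client()
    if (r < 0) return r;
    r = rados.connect();
    if (r < 0) return r;
    r = rados.pool_create("stub-pool");           // hypothetical pool name
    if (r < 0) return r;

    librados::IoCtx ioctx;
    r = rados.ioctx_create("stub-pool", ioctx);
    if (r < 0) return r;

    librados::bufferlist in, out;
    in.append("payload");
    r = ioctx.write_full("obj0", in);             // TestIoCtxImpl::write_full
    if (r < 0) return r;
    r = ioctx.read("obj0", out, in.length(), 0);  // TestIoCtxImpl::read
    rados.shutdown();                             // releases the TestRadosClient
    return r < 0 ? r : 0;
  }
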
diff --git a/src/test/librados_test_stub/LibradosTestStub.h b/src/test/librados_test_stub/LibradosTestStub.h
new file mode 100644
index 00000000..8e6c4569
--- /dev/null
+++ b/src/test/librados_test_stub/LibradosTestStub.h
@@ -0,0 +1,29 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef LIBRADOS_TEST_STUB_H
+#define LIBRADOS_TEST_STUB_H
+
+#include "include/rados/librados_fwd.hpp"
+#include <boost/shared_ptr.hpp>
+
+namespace librados {
+
+class MockTestMemIoCtxImpl;
+class TestCluster;
+
+MockTestMemIoCtxImpl &get_mock_io_ctx(IoCtx &ioctx);
+
+} // namespace librados
+
+namespace librados_test_stub {
+
+typedef boost::shared_ptr<librados::TestCluster> TestClusterRef;
+
+void set_cluster(TestClusterRef cluster);
+TestClusterRef get_cluster();
+
+} // namespace librados_test_stub
+
+
+#endif // LIBRADOS_TEST_STUB_H
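
LibradosTestStub.h keeps the cluster registry deliberately small: set_cluster() installs a TestCluster and get_cluster() lazily falls back to a fresh in-memory one. A test fixture that wants an isolated cluster per run could therefore do something like the following sketch (the helper name is illustrative):

  #include "test/librados_test_stub/LibradosTestStub.h"
  #include "test/librados_test_stub/TestMemCluster.h"

  // Replace the process-wide cluster before any Rados handle is created, so
  // create_rados_client() is served by this instance rather than the lazy default.
  void install_fresh_cluster() {
    librados_test_stub::TestClusterRef cluster(new librados::TestMemCluster());
    librados_test_stub::set_cluster(cluster);
  }
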
diff --git a/src/test/librados_test_stub/MockTestMemCluster.h b/src/test/librados_test_stub/MockTestMemCluster.h
new file mode 100644
index 00000000..685621a8
--- /dev/null
+++ b/src/test/librados_test_stub/MockTestMemCluster.h
@@ -0,0 +1,36 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
+#define LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
+
+#include "test/librados_test_stub/TestMemCluster.h"
+#include "test/librados_test_stub/MockTestMemRadosClient.h"
+#include "gmock/gmock.h"
+
+struct CephContext;
+
+namespace librados {
+
+class TestRadosClient;
+
+class MockTestMemCluster : public TestMemCluster {
+public:
+ MockTestMemCluster() {
+ default_to_dispatch();
+ }
+
+ MOCK_METHOD1(create_rados_client, TestRadosClient*(CephContext*));
+ MockTestMemRadosClient* do_create_rados_client(CephContext *cct) {
+ return new ::testing::NiceMock<MockTestMemRadosClient>(cct, this);
+ }
+
+ void default_to_dispatch() {
+ using namespace ::testing;
+ ON_CALL(*this, create_rados_client(_)).WillByDefault(Invoke(this, &MockTestMemCluster::do_create_rados_client));
+ }
+};
+
+} // namespace librados
+
+#endif // LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
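
Because the constructor calls default_to_dispatch(), every create_rados_client() call on MockTestMemCluster falls through to do_create_rados_client() unless a test overrides it, so the mock can be registered through set_cluster() and otherwise behave like the plain in-memory cluster. A possible wiring (test-only sketch, helper name illustrative):

  #include "test/librados_test_stub/LibradosTestStub.h"
  #include "test/librados_test_stub/MockTestMemCluster.h"

  // Install a gmock-backed cluster and hand the pointer back so individual
  // tests can add EXPECT_CALL()s on create_rados_client() later.
  boost::shared_ptr<librados::MockTestMemCluster> install_mock_cluster() {
    boost::shared_ptr<librados::MockTestMemCluster> cluster(
        new librados::MockTestMemCluster());
    librados_test_stub::set_cluster(cluster);  // implicit conversion to TestClusterRef
    return cluster;
  }
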
diff --git a/src/test/librados_test_stub/MockTestMemIoCtxImpl.h b/src/test/librados_test_stub/MockTestMemIoCtxImpl.h
new file mode 100644
index 00000000..0ef52d38
--- /dev/null
+++ b/src/test/librados_test_stub/MockTestMemIoCtxImpl.h
@@ -0,0 +1,231 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef LIBRADOS_TEST_STUB_MOCK_TEST_MEM_IO_CTX_IMPL_H
+#define LIBRADOS_TEST_STUB_MOCK_TEST_MEM_IO_CTX_IMPL_H
+
+#include "test/librados_test_stub/TestMemIoCtxImpl.h"
+#include "test/librados_test_stub/TestMemCluster.h"
+#include "gmock/gmock.h"
+
+namespace librados {
+
+class MockTestMemRadosClient;
+
+class MockTestMemIoCtxImpl : public TestMemIoCtxImpl {
+public:
+ MockTestMemIoCtxImpl(MockTestMemRadosClient *mock_client,
+ TestMemRadosClient *client, int64_t pool_id,
+ const std::string& pool_name,
+ TestMemCluster::Pool *pool)
+ : TestMemIoCtxImpl(client, pool_id, pool_name, pool),
+ m_mock_client(mock_client), m_client(client) {
+ default_to_parent();
+ }
+
+ MockTestMemRadosClient *get_mock_rados_client() {
+ return m_mock_client;
+ }
+
+ MOCK_METHOD0(clone, TestIoCtxImpl*());
+ TestIoCtxImpl *do_clone() {
+ TestIoCtxImpl *io_ctx_impl = new ::testing::NiceMock<MockTestMemIoCtxImpl>(
+ m_mock_client, m_client, get_pool_id(), get_pool_name(), get_pool());
+ io_ctx_impl->set_snap_read(get_snap_read());
+ io_ctx_impl->set_snap_context(get_snap_context());
+ return io_ctx_impl;
+ }
+
+ MOCK_METHOD5(aio_notify, void(const std::string& o, AioCompletionImpl *c,
+ bufferlist& bl, uint64_t timeout_ms,
+ bufferlist *pbl));
+ void do_aio_notify(const std::string& o, AioCompletionImpl *c, bufferlist& bl,
+ uint64_t timeout_ms, bufferlist *pbl) {
+ return TestMemIoCtxImpl::aio_notify(o, c, bl, timeout_ms, pbl);
+ }
+
+ MOCK_METHOD4(aio_watch, int(const std::string& o, AioCompletionImpl *c,
+ uint64_t *handle, librados::WatchCtx2 *ctx));
+ int do_aio_watch(const std::string& o, AioCompletionImpl *c,
+ uint64_t *handle, librados::WatchCtx2 *ctx) {
+ return TestMemIoCtxImpl::aio_watch(o, c, handle, ctx);
+ }
+
+ MOCK_METHOD2(aio_unwatch, int(uint64_t handle, AioCompletionImpl *c));
+ int do_aio_unwatch(uint64_t handle, AioCompletionImpl *c) {
+ return TestMemIoCtxImpl::aio_unwatch(handle, c);
+ }
+
+ MOCK_METHOD1(assert_exists, int(const std::string &));
+ int do_assert_exists(const std::string &oid) {
+ return TestMemIoCtxImpl::assert_exists(oid);
+ }
+
+ MOCK_METHOD2(create, int(const std::string&, bool));
+ int do_create(const std::string& oid, bool exclusive) {
+ return TestMemIoCtxImpl::create(oid, exclusive);
+ }
+
+ MOCK_METHOD3(cmpext, int(const std::string&, uint64_t, bufferlist&));
+ int do_cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl) {
+ return TestMemIoCtxImpl::cmpext(oid, off, cmp_bl);
+ }
+
+ MOCK_METHOD7(exec, int(const std::string& oid,
+ TestClassHandler *handler,
+ const char *cls,
+ const char *method,
+ bufferlist& inbl,
+ bufferlist* outbl,
+ const SnapContext &snapc));
+ int do_exec(const std::string& oid, TestClassHandler *handler,
+ const char *cls, const char *method, bufferlist& inbl,
+ bufferlist* outbl, const SnapContext &snapc) {
+ return TestMemIoCtxImpl::exec(oid, handler, cls, method, inbl, outbl,
+ snapc);
+ }
+
+ MOCK_CONST_METHOD0(get_instance_id, uint64_t());
+ uint64_t do_get_instance_id() const {
+ return TestMemIoCtxImpl::get_instance_id();
+ }
+
+ MOCK_METHOD2(list_snaps, int(const std::string& o, snap_set_t *out_snaps));
+ int do_list_snaps(const std::string& o, snap_set_t *out_snaps) {
+ return TestMemIoCtxImpl::list_snaps(o, out_snaps);
+ }
+
+ MOCK_METHOD2(list_watchers, int(const std::string& o,
+ std::list<obj_watch_t> *out_watchers));
+ int do_list_watchers(const std::string& o,
+ std::list<obj_watch_t> *out_watchers) {
+ return TestMemIoCtxImpl::list_watchers(o, out_watchers);
+ }
+
+ MOCK_METHOD4(notify, int(const std::string& o, bufferlist& bl,
+ uint64_t timeout_ms, bufferlist *pbl));
+ int do_notify(const std::string& o, bufferlist& bl,
+ uint64_t timeout_ms, bufferlist *pbl) {
+ return TestMemIoCtxImpl::notify(o, bl, timeout_ms, pbl);
+ }
+
+ MOCK_METHOD1(set_snap_read, void(snap_t));
+ void do_set_snap_read(snap_t snap_id) {
+ return TestMemIoCtxImpl::set_snap_read(snap_id);
+ }
+ MOCK_METHOD5(sparse_read, int(const std::string& oid,
+ uint64_t off,
+ uint64_t len,
+ std::map<uint64_t, uint64_t> *m,
+ bufferlist *bl));
+ int do_sparse_read(const std::string& oid, uint64_t off, size_t len,
+ std::map<uint64_t, uint64_t> *m, bufferlist *bl){
+ return TestMemIoCtxImpl::sparse_read(oid, off, len, m, bl);
+ }
+
+ MOCK_METHOD4(read, int(const std::string& oid,
+ size_t len,
+ uint64_t off,
+ bufferlist *bl));
+ int do_read(const std::string& oid, size_t len, uint64_t off,
+ bufferlist *bl) {
+ return TestMemIoCtxImpl::read(oid, len, off, bl);
+ }
+
+ MOCK_METHOD2(remove, int(const std::string& oid, const SnapContext &snapc));
+ int do_remove(const std::string& oid, const SnapContext &snapc) {
+ return TestMemIoCtxImpl::remove(oid, snapc);
+ }
+
+ MOCK_METHOD1(selfmanaged_snap_create, int(uint64_t *snap_id));
+ int do_selfmanaged_snap_create(uint64_t *snap_id) {
+ return TestMemIoCtxImpl::selfmanaged_snap_create(snap_id);
+ }
+
+ MOCK_METHOD1(selfmanaged_snap_remove, int(uint64_t snap_id));
+ int do_selfmanaged_snap_remove(uint64_t snap_id) {
+ return TestMemIoCtxImpl::selfmanaged_snap_remove(snap_id);
+ }
+
+ MOCK_METHOD2(selfmanaged_snap_rollback, int(const std::string& oid,
+ uint64_t snap_id));
+ int do_selfmanaged_snap_rollback(const std::string& oid, uint64_t snap_id) {
+ return TestMemIoCtxImpl::selfmanaged_snap_rollback(oid, snap_id);
+ }
+
+ MOCK_METHOD3(truncate, int(const std::string& oid,
+ uint64_t size,
+ const SnapContext &snapc));
+ int do_truncate(const std::string& oid, uint64_t size,
+ const SnapContext &snapc) {
+ return TestMemIoCtxImpl::truncate(oid, size, snapc);
+ }
+
+ MOCK_METHOD5(write, int(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off, const SnapContext &snapc));
+ int do_write(const std::string& oid, bufferlist& bl, size_t len, uint64_t off,
+ const SnapContext &snapc) {
+ return TestMemIoCtxImpl::write(oid, bl, len, off, snapc);
+ }
+
+ MOCK_METHOD3(write_full, int(const std::string& oid,
+ bufferlist& bl,
+ const SnapContext &snapc));
+ int do_write_full(const std::string& oid, bufferlist& bl,
+ const SnapContext &snapc) {
+ return TestMemIoCtxImpl::write_full(oid, bl, snapc);
+ }
+
+ MOCK_METHOD5(writesame, int(const std::string& oid, bufferlist& bl,
+ size_t len, uint64_t off,
+ const SnapContext &snapc));
+ int do_writesame(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off, const SnapContext &snapc) {
+ return TestMemIoCtxImpl::writesame(oid, bl, len, off, snapc);
+ }
+
+ MOCK_METHOD4(zero, int(const std::string& oid, uint64_t offset,
+ uint64_t length, const SnapContext &snapc));
+ int do_zero(const std::string& oid, uint64_t offset,
+ uint64_t length, const SnapContext &snapc) {
+ return TestMemIoCtxImpl::zero(oid, offset, length, snapc);
+ }
+
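+ // By default, every mocked method delegates to the in-memory
+ // TestMemIoCtxImpl implementation; individual tests can still override
+ // behaviour with ON_CALL/EXPECT_CALL.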
+ void default_to_parent() {
+ using namespace ::testing;
+
+ ON_CALL(*this, clone()).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_clone));
+ ON_CALL(*this, aio_notify(_, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_notify));
+ ON_CALL(*this, aio_watch(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_watch));
+ ON_CALL(*this, aio_unwatch(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_unwatch));
+ ON_CALL(*this, assert_exists(_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_assert_exists));
+ ON_CALL(*this, create(_,_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_create));
+ ON_CALL(*this, cmpext(_,_,_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_cmpext));
+ ON_CALL(*this, exec(_, _, _, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_exec));
+ ON_CALL(*this, get_instance_id()).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_get_instance_id));
+ ON_CALL(*this, list_snaps(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_list_snaps));
+ ON_CALL(*this, list_watchers(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_list_watchers));
+ ON_CALL(*this, notify(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_notify));
+ ON_CALL(*this, read(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_read));
+ ON_CALL(*this, set_snap_read(_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_set_snap_read));
+ ON_CALL(*this, sparse_read(_, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_sparse_read));
+ ON_CALL(*this, remove(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_remove));
+ ON_CALL(*this, selfmanaged_snap_create(_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_selfmanaged_snap_create));
+ ON_CALL(*this, selfmanaged_snap_remove(_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_selfmanaged_snap_remove));
+ ON_CALL(*this, selfmanaged_snap_rollback(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_selfmanaged_snap_rollback));
+ ON_CALL(*this, truncate(_,_,_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_truncate));
+ ON_CALL(*this, write(_, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_write));
+ ON_CALL(*this, write_full(_, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_write_full));
+ ON_CALL(*this, writesame(_, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_writesame));
+ ON_CALL(*this, zero(_,_,_,_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_zero));
+ }
+
+private:
+ MockTestMemRadosClient *m_mock_client;
+ TestMemRadosClient *m_client;
+};
+
+} // namespace librados
+
+#endif // LIBRADOS_TEST_STUB_MOCK_TEST_MEM_IO_CTX_IMPL_H
diff --git a/src/test/librados_test_stub/MockTestMemRadosClient.h b/src/test/librados_test_stub/MockTestMemRadosClient.h
new file mode 100644
index 00000000..63050cb1
--- /dev/null
+++ b/src/test/librados_test_stub/MockTestMemRadosClient.h
@@ -0,0 +1,88 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef LIBRADOS_TEST_STUB_MOCK_TEST_MEM_RADOS_CLIENT_H
+#define LIBRADOS_TEST_STUB_MOCK_TEST_MEM_RADOS_CLIENT_H
+
+#include "test/librados_test_stub/TestMemRadosClient.h"
+#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
+#include "gmock/gmock.h"
+
+namespace librados {
+
+class TestMemCluster;
+
+class MockTestMemRadosClient : public TestMemRadosClient {
+public:
+ MockTestMemRadosClient(CephContext *cct, TestMemCluster *test_mem_cluster)
+ : TestMemRadosClient(cct, test_mem_cluster) {
+ default_to_dispatch();
+ }
+
+ MOCK_METHOD0(connect, int());
+ int do_connect() {
+ return TestMemRadosClient::connect();
+ }
+
+ MOCK_METHOD2(create_ioctx, TestIoCtxImpl *(int64_t pool_id,
+ const std::string &pool_name));
+ MockTestMemIoCtxImpl* do_create_ioctx(int64_t pool_id,
+ const std::string &pool_name) {
+ return new ::testing::NiceMock<MockTestMemIoCtxImpl>(
+ this, this, pool_id, pool_name,
+ get_mem_cluster()->get_pool(pool_name));
+ }
+
+ MOCK_METHOD2(blacklist_add, int(const std::string& client_address,
+ uint32_t expire_seconds));
+ int do_blacklist_add(const std::string& client_address,
+ uint32_t expire_seconds) {
+ return TestMemRadosClient::blacklist_add(client_address, expire_seconds);
+ }
+
+ MOCK_METHOD1(get_min_compatible_osd, int(int8_t*));
+ int do_get_min_compatible_osd(int8_t* require_osd_release) {
+ return TestMemRadosClient::get_min_compatible_osd(require_osd_release);
+ }
+
+ MOCK_METHOD2(get_min_compatible_client, int(int8_t*, int8_t*));
+ int do_get_min_compatible_client(int8_t* min_compat_client,
+ int8_t* require_min_compat_client) {
+ return TestMemRadosClient::get_min_compatible_client(
+ min_compat_client, require_min_compat_client);
+ }
+
+ MOCK_METHOD3(service_daemon_register,
+ int(const std::string&,
+ const std::string&,
+ const std::map<std::string,std::string>&));
+ int do_service_daemon_register(const std::string& service,
+ const std::string& name,
+ const std::map<std::string,std::string>& metadata) {
+ return TestMemRadosClient::service_daemon_register(service, name, metadata);
+ }
+
+ // workaround for https://github.com/google/googletest/issues/1155
+ MOCK_METHOD1(service_daemon_update_status_r,
+ int(const std::map<std::string,std::string>&));
+ int do_service_daemon_update_status_r(const std::map<std::string,std::string>& status) {
+ auto s = status;
+ return TestMemRadosClient::service_daemon_update_status(std::move(s));
+ }
+
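+ // By default, dispatch every mocked call to the real TestMemRadosClient
+ // implementation so the mock behaves like the in-memory cluster client.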
+ void default_to_dispatch() {
+ using namespace ::testing;
+
+ ON_CALL(*this, connect()).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_connect));
+ ON_CALL(*this, create_ioctx(_, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_create_ioctx));
+ ON_CALL(*this, blacklist_add(_, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_blacklist_add));
+ ON_CALL(*this, get_min_compatible_osd(_)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_get_min_compatible_osd));
+ ON_CALL(*this, get_min_compatible_client(_, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_get_min_compatible_client));
+ ON_CALL(*this, service_daemon_register(_, _, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_service_daemon_register));
+ ON_CALL(*this, service_daemon_update_status_r(_)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_service_daemon_update_status_r));
+ }
+};
+
+} // namespace librados
+
+#endif // LIBRADOS_TEST_STUB_MOCK_TEST_MEM_RADOS_CLIENT_H
diff --git a/src/test/librados_test_stub/TestClassHandler.cc b/src/test/librados_test_stub/TestClassHandler.cc
new file mode 100644
index 00000000..ef96f619
--- /dev/null
+++ b/src/test/librados_test_stub/TestClassHandler.cc
@@ -0,0 +1,162 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestClassHandler.h"
+#include "test/librados_test_stub/TestIoCtxImpl.h"
+#include <boost/algorithm/string/predicate.hpp>
+#include <dlfcn.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include "common/debug.h"
+#include "include/ceph_assert.h"
+
+#define dout_context g_ceph_context
+#define dout_subsys ceph_subsys_rados
+
+namespace librados {
+
+TestClassHandler::TestClassHandler() {
+}
+
+TestClassHandler::~TestClassHandler() {
+ for (ClassHandles::iterator it = m_class_handles.begin();
+ it != m_class_handles.end(); ++it) {
+ dlclose(*it);
+ }
+}
+
+void TestClassHandler::open_class(const std::string& name,
+ const std::string& path) {
+ void *handle = dlopen(path.c_str(), RTLD_NOW);
+ if (handle == NULL) {
+ std::cerr << "Failed to load class: " << name << " (" << path << "): "
+ << dlerror() << std::endl;
+ return;
+ }
+
+ // clear any existing error
+ dlerror();
+
+ // initialize
+ void (*cls_init)() = reinterpret_cast<void (*)()>(
+ dlsym(handle, "__cls_init"));
+
+ char* error = nullptr;
+ if ((error = dlerror()) != nullptr) {
+ std::cerr << "Error locating initializer: " << error << std::endl;
+ } else if (cls_init) {
+ m_class_handles.push_back(handle);
+ cls_init();
+ return;
+ }
+
+ std::cerr << "Class: " << name << " (" << path << ") missing initializer"
+ << std::endl;
+ dlclose(handle);
+}
+
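+// Scan the directory named by CEPH_LIB (defaulting to "lib") for
+// libcls_*.so objects and load each one via open_class().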
+void TestClassHandler::open_all_classes() {
+ ceph_assert(m_class_handles.empty());
+
+ const char* env = getenv("CEPH_LIB");
+ std::string CEPH_LIB(env ? env : "lib");
+ DIR *dir = ::opendir(CEPH_LIB.c_str());
+ if (dir == NULL) {
+ ceph_abort();
+ }
+
+ std::set<std::string> names;
+ struct dirent *pde = nullptr;
+ while ((pde = ::readdir(dir))) {
+ std::string name(pde->d_name);
+ if (!boost::algorithm::starts_with(name, "libcls_") ||
+ !boost::algorithm::ends_with(name, ".so")) {
+ continue;
+ }
+ names.insert(name);
+ }
+
+ for (auto& name : names) {
+ std::string class_name = name.substr(7, name.size() - 10);
+ open_class(class_name, CEPH_LIB + "/" + name);
+ }
+ closedir(dir);
+}
+
+int TestClassHandler::create(const std::string &name, cls_handle_t *handle) {
+ if (m_classes.find(name) != m_classes.end()) {
+ std::cerr << "Class " << name << " already exists" << std::endl;
+ return -EEXIST;
+ }
+
+ SharedClass cls(new Class());
+ m_classes[name] = cls;
+ *handle = reinterpret_cast<cls_handle_t>(cls.get());
+ return 0;
+}
+
+int TestClassHandler::create_method(cls_handle_t hclass,
+ const char *name,
+ cls_method_cxx_call_t class_call,
+ cls_method_handle_t *handle) {
+ Class *cls = reinterpret_cast<Class*>(hclass);
+ if (cls->methods.find(name) != cls->methods.end()) {
+ std::cerr << "Class method " << hclass << ":" << name << " already exists"
+ << std::endl;
+ return -EEXIST;
+ }
+
+ SharedMethod method(new Method());
+ method->class_call = class_call;
+ cls->methods[name] = method;
+ return 0;
+}
+
+cls_method_cxx_call_t TestClassHandler::get_method(const std::string &cls,
+ const std::string &method) {
+ Classes::iterator c_it = m_classes.find(cls);
+ if (c_it == m_classes.end()) {
+ std::cerr << "Failed to located class " << cls << std::endl;
+ return NULL;
+ }
+
+ SharedClass scls = c_it->second;
+ Methods::iterator m_it = scls->methods.find(method);
+ if (m_it == scls->methods.end()) {
+ std::cerr << "Failed to located class method" << cls << "." << method
+ << std::endl;
+ return NULL;
+ }
+ return m_it->second->class_call;
+}
+
+TestClassHandler::SharedMethodContext TestClassHandler::get_method_context(
+ TestIoCtxImpl *io_ctx_impl, const std::string &oid,
+ const SnapContext &snapc) {
+ SharedMethodContext ctx(new MethodContext());
+
+ // clone the ioctx to provide a firewall for gmock expectations
+ ctx->io_ctx_impl = io_ctx_impl->clone();
+ ctx->oid = oid;
+ ctx->snapc = snapc;
+ return ctx;
+}
+
+int TestClassHandler::create_filter(cls_handle_t hclass,
+ const std::string& name,
+ cls_cxx_filter_factory_t fn)
+{
+ Class *cls = reinterpret_cast<Class*>(hclass);
+ if (cls->filters.find(name) != cls->filters.end()) {
+ return -EEXIST;
+ }
+ cls->filters[name] = fn;
+ return 0;
+}
+
+TestClassHandler::MethodContext::~MethodContext() {
+ io_ctx_impl->put();
+}
+
+} // namespace librados
diff --git a/src/test/librados_test_stub/TestClassHandler.h b/src/test/librados_test_stub/TestClassHandler.h
new file mode 100644
index 00000000..df273364
--- /dev/null
+++ b/src/test/librados_test_stub/TestClassHandler.h
@@ -0,0 +1,77 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_CLASS_HANDLER_H
+#define CEPH_TEST_CLASS_HANDLER_H
+
+#include "objclass/objclass.h"
+#include "common/snap_types.h"
+#include <boost/shared_ptr.hpp>
+#include <list>
+#include <map>
+#include <string>
+
+namespace librados
+{
+
+class TestIoCtxImpl;
+
+class TestClassHandler {
+public:
+
+ TestClassHandler();
+ ~TestClassHandler();
+
+ struct MethodContext {
+ ~MethodContext();
+
+ TestIoCtxImpl *io_ctx_impl;
+ std::string oid;
+ SnapContext snapc;
+ };
+ typedef boost::shared_ptr<MethodContext> SharedMethodContext;
+
+ struct Method {
+ cls_method_cxx_call_t class_call;
+ };
+ typedef boost::shared_ptr<Method> SharedMethod;
+ typedef std::map<std::string, SharedMethod> Methods;
+ typedef std::map<std::string, cls_cxx_filter_factory_t> Filters;
+
+ struct Class {
+ Methods methods;
+ Filters filters;
+ };
+ typedef boost::shared_ptr<Class> SharedClass;
+
+ void open_all_classes();
+
+ int create(const std::string &name, cls_handle_t *handle);
+ int create_method(cls_handle_t hclass, const char *method,
+ cls_method_cxx_call_t class_call,
+ cls_method_handle_t *handle);
+ cls_method_cxx_call_t get_method(const std::string &cls,
+ const std::string &method);
+ SharedMethodContext get_method_context(TestIoCtxImpl *io_ctx_impl,
+ const std::string &oid,
+ const SnapContext &snapc);
+
+ int create_filter(cls_handle_t hclass, const std::string& filter_name,
+ cls_cxx_filter_factory_t fn);
+
+private:
+
+ typedef std::map<std::string, SharedClass> Classes;
+ typedef std::list<void*> ClassHandles;
+
+ Classes m_classes;
+ ClassHandles m_class_handles;
+ Filters m_filters;
+
+ void open_class(const std::string& name, const std::string& path);
+
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_CLASS_HANDLER_H
diff --git a/src/test/librados_test_stub/TestCluster.h b/src/test/librados_test_stub/TestCluster.h
new file mode 100644
index 00000000..d7a31b6c
--- /dev/null
+++ b/src/test/librados_test_stub/TestCluster.h
@@ -0,0 +1,65 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_CLUSTER_H
+#define CEPH_TEST_CLUSTER_H
+
+#include "test/librados_test_stub/TestWatchNotify.h"
+
+class CephContext;
+
+namespace librados {
+
+class TestRadosClient;
+class TestWatchNotify;
+
+class TestCluster {
+public:
+ struct ObjectLocator {
+ std::string nspace;
+ std::string name;
+
+ ObjectLocator(const std::string& nspace, const std::string& name)
+ : nspace(nspace), name(name) {
+ }
+
+ bool operator<(const ObjectLocator& rhs) const {
+ if (nspace != rhs.nspace) {
+ return nspace < rhs.nspace;
+ }
+ return name < rhs.name;
+ }
+ };
+
+ struct ObjectHandler {
+ virtual ~ObjectHandler() {}
+
+ virtual void handle_removed(TestRadosClient* test_rados_client) = 0;
+ };
+
+ TestCluster() : m_watch_notify(this) {
+ }
+ virtual ~TestCluster() {
+ }
+
+ virtual TestRadosClient *create_rados_client(CephContext *cct) = 0;
+
+ virtual int register_object_handler(int64_t pool_id,
+ const ObjectLocator& locator,
+ ObjectHandler* object_handler) = 0;
+ virtual void unregister_object_handler(int64_t pool_id,
+ const ObjectLocator& locator,
+ ObjectHandler* object_handler) = 0;
+
+ TestWatchNotify *get_watch_notify() {
+ return &m_watch_notify;
+ }
+
+protected:
+ TestWatchNotify m_watch_notify;
+
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_CLUSTER_H
diff --git a/src/test/librados_test_stub/TestIoCtxImpl.cc b/src/test/librados_test_stub/TestIoCtxImpl.cc
new file mode 100644
index 00000000..7e6fa4ee
--- /dev/null
+++ b/src/test/librados_test_stub/TestIoCtxImpl.cc
@@ -0,0 +1,386 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestIoCtxImpl.h"
+#include "test/librados_test_stub/TestClassHandler.h"
+#include "test/librados_test_stub/TestRadosClient.h"
+#include "test/librados_test_stub/TestWatchNotify.h"
+#include "librados/AioCompletionImpl.h"
+#include "include/ceph_assert.h"
+#include "common/Finisher.h"
+#include "common/valgrind.h"
+#include "objclass/objclass.h"
+#include <boost/bind.hpp>
+#include <errno.h>
+
+namespace librados {
+
+TestIoCtxImpl::TestIoCtxImpl() : m_client(NULL) {
+ get();
+}
+
+TestIoCtxImpl::TestIoCtxImpl(TestRadosClient *client, int64_t pool_id,
+ const std::string& pool_name)
+ : m_client(client), m_pool_id(pool_id), m_pool_name(pool_name),
+ m_snap_seq(CEPH_NOSNAP)
+{
+ m_client->get();
+ get();
+}
+
+TestIoCtxImpl::TestIoCtxImpl(const TestIoCtxImpl& rhs)
+ : m_client(rhs.m_client),
+ m_pool_id(rhs.m_pool_id),
+ m_pool_name(rhs.m_pool_name),
+ m_namespace_name(rhs.m_namespace_name),
+ m_snap_seq(rhs.m_snap_seq)
+{
+ m_client->get();
+ get();
+}
+
+TestIoCtxImpl::~TestIoCtxImpl() {
+ ceph_assert(m_pending_ops == 0);
+}
+
+void TestObjectOperationImpl::get() {
+ m_refcount++;
+}
+
+void TestObjectOperationImpl::put() {
+ if (--m_refcount == 0) {
+ ANNOTATE_HAPPENS_AFTER(&m_refcount);
+ ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&m_refcount);
+ delete this;
+ } else {
+ ANNOTATE_HAPPENS_BEFORE(&m_refcount);
+ }
+}
+
+void TestIoCtxImpl::get() {
+ m_refcount++;
+}
+
+void TestIoCtxImpl::put() {
+ if (--m_refcount == 0) {
+ m_client->put();
+ delete this;
+ }
+}
+
+uint64_t TestIoCtxImpl::get_instance_id() const {
+ return m_client->get_instance_id();
+}
+
+int64_t TestIoCtxImpl::get_id() {
+ return m_pool_id;
+}
+
+uint64_t TestIoCtxImpl::get_last_version() {
+ return 0;
+}
+
+std::string TestIoCtxImpl::get_pool_name() {
+ return m_pool_name;
+}
+
+int TestIoCtxImpl::aio_flush() {
+ m_client->flush_aio_operations();
+ return 0;
+}
+
+void TestIoCtxImpl::aio_flush_async(AioCompletionImpl *c) {
+ m_client->flush_aio_operations(c);
+}
+
+void TestIoCtxImpl::aio_notify(const std::string& oid, AioCompletionImpl *c,
+ bufferlist& bl, uint64_t timeout_ms,
+ bufferlist *pbl) {
+ m_pending_ops++;
+ c->get();
+ C_AioNotify *ctx = new C_AioNotify(this, c);
+ m_client->get_watch_notify()->aio_notify(m_client, m_pool_id, get_namespace(),
+ oid, bl, timeout_ms, pbl, ctx);
+}
+
+int TestIoCtxImpl::aio_operate(const std::string& oid, TestObjectOperationImpl &ops,
+ AioCompletionImpl *c, SnapContext *snap_context,
+ int flags) {
+ // TODO ignoring flags for now
+ ops.get();
+ m_pending_ops++;
+ m_client->add_aio_operation(oid, true, boost::bind(
+ &TestIoCtxImpl::execute_aio_operations, this, oid, &ops,
+ reinterpret_cast<bufferlist*>(0),
+ snap_context != NULL ? *snap_context : m_snapc), c);
+ return 0;
+}
+
+int TestIoCtxImpl::aio_operate_read(const std::string& oid,
+ TestObjectOperationImpl &ops,
+ AioCompletionImpl *c, int flags,
+ bufferlist *pbl) {
+ // TODO ignoring flags for now
+ ops.get();
+ m_pending_ops++;
+ m_client->add_aio_operation(oid, true, boost::bind(
+ &TestIoCtxImpl::execute_aio_operations, this, oid, &ops, pbl, m_snapc), c);
+ return 0;
+}
+
+int TestIoCtxImpl::aio_watch(const std::string& o, AioCompletionImpl *c,
+ uint64_t *handle, librados::WatchCtx2 *watch_ctx) {
+ m_pending_ops++;
+ c->get();
+ C_AioNotify *ctx = new C_AioNotify(this, c);
+ if (m_client->is_blacklisted()) {
+ m_client->get_aio_finisher()->queue(ctx, -EBLACKLISTED);
+ } else {
+ m_client->get_watch_notify()->aio_watch(m_client, m_pool_id,
+ get_namespace(), o,
+ get_instance_id(), handle, nullptr,
+ watch_ctx, ctx);
+ }
+ return 0;
+}
+
+int TestIoCtxImpl::aio_unwatch(uint64_t handle, AioCompletionImpl *c) {
+ m_pending_ops++;
+ c->get();
+ C_AioNotify *ctx = new C_AioNotify(this, c);
+ if (m_client->is_blacklisted()) {
+ m_client->get_aio_finisher()->queue(ctx, -EBLACKLISTED);
+ } else {
+ m_client->get_watch_notify()->aio_unwatch(m_client, handle, ctx);
+ }
+ return 0;
+}
+
+int TestIoCtxImpl::exec(const std::string& oid, TestClassHandler *handler,
+ const char *cls, const char *method,
+ bufferlist& inbl, bufferlist* outbl,
+ const SnapContext &snapc) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ cls_method_cxx_call_t call = handler->get_method(cls, method);
+ if (call == NULL) {
+ return -ENOSYS;
+ }
+
+ return (*call)(reinterpret_cast<cls_method_context_t>(
+ handler->get_method_context(this, oid, snapc).get()), &inbl, outbl);
+}
+
+int TestIoCtxImpl::list_watchers(const std::string& o,
+ std::list<obj_watch_t> *out_watchers) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ return m_client->get_watch_notify()->list_watchers(m_pool_id, get_namespace(),
+ o, out_watchers);
+}
+
+int TestIoCtxImpl::notify(const std::string& o, bufferlist& bl,
+ uint64_t timeout_ms, bufferlist *pbl) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ return m_client->get_watch_notify()->notify(m_client, m_pool_id,
+ get_namespace(), o, bl,
+ timeout_ms, pbl);
+}
+
+void TestIoCtxImpl::notify_ack(const std::string& o, uint64_t notify_id,
+ uint64_t handle, bufferlist& bl) {
+ m_client->get_watch_notify()->notify_ack(m_client, m_pool_id, get_namespace(),
+ o, notify_id, handle,
+ m_client->get_instance_id(), bl);
+}
+
+int TestIoCtxImpl::operate(const std::string& oid, TestObjectOperationImpl &ops) {
+ AioCompletionImpl *comp = new AioCompletionImpl();
+
+ ops.get();
+ m_pending_ops++;
+ m_client->add_aio_operation(oid, false, boost::bind(
+ &TestIoCtxImpl::execute_aio_operations, this, oid, &ops,
+ reinterpret_cast<bufferlist*>(0), m_snapc), comp);
+
+ comp->wait_for_safe();
+ int ret = comp->get_return_value();
+ comp->put();
+ return ret;
+}
+
+int TestIoCtxImpl::operate_read(const std::string& oid, TestObjectOperationImpl &ops,
+ bufferlist *pbl) {
+ AioCompletionImpl *comp = new AioCompletionImpl();
+
+ ops.get();
+ m_pending_ops++;
+ m_client->add_aio_operation(oid, false, boost::bind(
+ &TestIoCtxImpl::execute_aio_operations, this, oid, &ops, pbl,
+ m_snapc), comp);
+
+ comp->wait_for_complete();
+ int ret = comp->get_return_value();
+ comp->put();
+ return ret;
+}
+
+void TestIoCtxImpl::aio_selfmanaged_snap_create(uint64_t *snapid,
+ AioCompletionImpl *c) {
+ m_client->add_aio_operation(
+ "", true,
+ boost::bind(&TestIoCtxImpl::selfmanaged_snap_create, this, snapid), c);
+}
+
+void TestIoCtxImpl::aio_selfmanaged_snap_remove(uint64_t snapid,
+ AioCompletionImpl *c) {
+ m_client->add_aio_operation(
+ "", true,
+ boost::bind(&TestIoCtxImpl::selfmanaged_snap_remove, this, snapid), c);
+}
+
+int TestIoCtxImpl::selfmanaged_snap_set_write_ctx(snap_t seq,
+ std::vector<snap_t>& snaps) {
+ std::vector<snapid_t> snap_ids(snaps.begin(), snaps.end());
+ m_snapc = SnapContext(seq, snap_ids);
+ return 0;
+}
+
+int TestIoCtxImpl::set_alloc_hint(const std::string& oid,
+ uint64_t expected_object_size,
+ uint64_t expected_write_size,
+ uint32_t flags,
+ const SnapContext &snapc) {
+ return 0;
+}
+
+void TestIoCtxImpl::set_snap_read(snap_t seq) {
+ if (seq == 0) {
+ seq = CEPH_NOSNAP;
+ }
+ m_snap_seq = seq;
+}
+
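+// Decodes a single CEPH_OSD_TMAP_SET/CEPH_OSD_TMAP_RM command from cmdbl,
+// applies it to the tmap stored in the object and rewrites the object via
+// write_full().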
+int TestIoCtxImpl::tmap_update(const std::string& oid, bufferlist& cmdbl) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ // TODO: protect against concurrent tmap updates
+ bufferlist tmap_header;
+ std::map<string,bufferlist> tmap;
+ uint64_t size = 0;
+ int r = stat(oid, &size, NULL);
+ if (r == -ENOENT) {
+ r = create(oid, false);
+ }
+ if (r < 0) {
+ return r;
+ }
+
+ if (size > 0) {
+ bufferlist inbl;
+ r = read(oid, size, 0, &inbl);
+ if (r < 0) {
+ return r;
+ }
+ auto iter = inbl.cbegin();
+ decode(tmap_header, iter);
+ decode(tmap, iter);
+ }
+
+ __u8 c;
+ std::string key;
+ bufferlist value;
+ auto iter = cmdbl.cbegin();
+ decode(c, iter);
+ decode(key, iter);
+
+ switch (c) {
+ case CEPH_OSD_TMAP_SET:
+ decode(value, iter);
+ tmap[key] = value;
+ break;
+ case CEPH_OSD_TMAP_RM:
+ r = tmap.erase(key);
+ if (r == 0) {
+ return -ENOENT;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ bufferlist out;
+ encode(tmap_header, out);
+ encode(tmap, out);
+ r = write_full(oid, out, m_snapc);
+ return r;
+}
+
+int TestIoCtxImpl::unwatch(uint64_t handle) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ return m_client->get_watch_notify()->unwatch(m_client, handle);
+}
+
+int TestIoCtxImpl::watch(const std::string& o, uint64_t *handle,
+ librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ return m_client->get_watch_notify()->watch(m_client, m_pool_id,
+ get_namespace(), o,
+ get_instance_id(), handle, ctx,
+ ctx2);
+}
+
+int TestIoCtxImpl::execute_operation(const std::string& oid,
+ const Operation &operation) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ TestRadosClient::Transaction transaction(m_client, get_namespace(), oid);
+ return operation(this, oid);
+}
+
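+// Applies the queued sub-operations in order under a per-object transaction
+// guard, stopping at the first error.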
+int TestIoCtxImpl::execute_aio_operations(const std::string& oid,
+ TestObjectOperationImpl *ops,
+ bufferlist *pbl,
+ const SnapContext &snapc) {
+ int ret = 0;
+ if (m_client->is_blacklisted()) {
+ ret = -EBLACKLISTED;
+ } else {
+ TestRadosClient::Transaction transaction(m_client, get_namespace(), oid);
+ for (ObjectOperations::iterator it = ops->ops.begin();
+ it != ops->ops.end(); ++it) {
+ ret = (*it)(this, oid, pbl, snapc);
+ if (ret < 0) {
+ break;
+ }
+ }
+ }
+ m_pending_ops--;
+ ops->put();
+ return ret;
+}
+
+void TestIoCtxImpl::handle_aio_notify_complete(AioCompletionImpl *c, int r) {
+ m_pending_ops--;
+
+ m_client->finish_aio_completion(c, r);
+}
+
+} // namespace librados
diff --git a/src/test/librados_test_stub/TestIoCtxImpl.h b/src/test/librados_test_stub/TestIoCtxImpl.h
new file mode 100644
index 00000000..972dd44d
--- /dev/null
+++ b/src/test/librados_test_stub/TestIoCtxImpl.h
@@ -0,0 +1,213 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_IO_CTX_IMPL_H
+#define CEPH_TEST_IO_CTX_IMPL_H
+
+#include <list>
+#include <atomic>
+
+#include <boost/function.hpp>
+
+#include "include/rados/librados.hpp"
+#include "include/Context.h"
+#include "common/snap_types.h"
+
+namespace librados {
+
+class TestClassHandler;
+class TestIoCtxImpl;
+class TestRadosClient;
+
+typedef boost::function<int(TestIoCtxImpl*,
+ const std::string&,
+ bufferlist *,
+ const SnapContext &)> ObjectOperationTestImpl;
+typedef std::list<ObjectOperationTestImpl> ObjectOperations;
+
+struct TestObjectOperationImpl {
+public:
+ void get();
+ void put();
+
+ ObjectOperations ops;
+private:
+ std::atomic<uint64_t> m_refcount = { 0 };
+};
+
+class TestIoCtxImpl {
+public:
+ typedef boost::function<int(TestIoCtxImpl *, const std::string &)> Operation;
+
+
+ TestIoCtxImpl();
+ explicit TestIoCtxImpl(TestRadosClient *client, int64_t m_pool_id,
+ const std::string& pool_name);
+
+ TestRadosClient *get_rados_client() {
+ return m_client;
+ }
+
+ void get();
+ void put();
+
+ inline int64_t get_pool_id() const {
+ return m_pool_id;
+ }
+
+ virtual TestIoCtxImpl *clone() = 0;
+
+ virtual uint64_t get_instance_id() const;
+ virtual int64_t get_id();
+ virtual uint64_t get_last_version();
+ virtual std::string get_pool_name();
+
+ inline void set_namespace(const std::string& namespace_name) {
+ m_namespace_name = namespace_name;
+ }
+ inline std::string get_namespace() const {
+ return m_namespace_name;
+ }
+
+ snap_t get_snap_read() const {
+ return m_snap_seq;
+ }
+
+ inline void set_snap_context(const SnapContext& snapc) {
+ m_snapc = snapc;
+ }
+ const SnapContext &get_snap_context() const {
+ return m_snapc;
+ }
+
+ virtual int aio_flush();
+ virtual void aio_flush_async(AioCompletionImpl *c);
+ virtual void aio_notify(const std::string& oid, AioCompletionImpl *c,
+ bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl);
+ virtual int aio_operate(const std::string& oid, TestObjectOperationImpl &ops,
+ AioCompletionImpl *c, SnapContext *snap_context,
+ int flags);
+ virtual int aio_operate_read(const std::string& oid, TestObjectOperationImpl &ops,
+ AioCompletionImpl *c, int flags,
+ bufferlist *pbl);
+ virtual int aio_remove(const std::string& oid, AioCompletionImpl *c,
+ int flags = 0) = 0;
+ virtual int aio_watch(const std::string& o, AioCompletionImpl *c,
+ uint64_t *handle, librados::WatchCtx2 *ctx);
+ virtual int aio_unwatch(uint64_t handle, AioCompletionImpl *c);
+ virtual int append(const std::string& oid, const bufferlist &bl,
+ const SnapContext &snapc) = 0;
+ virtual int assert_exists(const std::string &oid) = 0;
+
+ virtual int create(const std::string& oid, bool exclusive) = 0;
+ virtual int exec(const std::string& oid, TestClassHandler *handler,
+ const char *cls, const char *method,
+ bufferlist& inbl, bufferlist* outbl,
+ const SnapContext &snapc);
+ virtual int list_snaps(const std::string& o, snap_set_t *out_snaps) = 0;
+ virtual int list_watchers(const std::string& o,
+ std::list<obj_watch_t> *out_watchers);
+ virtual int notify(const std::string& o, bufferlist& bl,
+ uint64_t timeout_ms, bufferlist *pbl);
+ virtual void notify_ack(const std::string& o, uint64_t notify_id,
+ uint64_t handle, bufferlist& bl);
+ virtual int omap_get_vals(const std::string& oid,
+ const std::string& start_after,
+ const std::string &filter_prefix,
+ uint64_t max_return,
+ std::map<std::string, bufferlist> *out_vals) = 0;
+ virtual int omap_get_vals2(const std::string& oid,
+ const std::string& start_after,
+ const std::string &filter_prefix,
+ uint64_t max_return,
+ std::map<std::string, bufferlist> *out_vals,
+ bool *pmore) = 0;
+ virtual int omap_rm_keys(const std::string& oid,
+ const std::set<std::string>& keys) = 0;
+ virtual int omap_set(const std::string& oid,
+ const std::map<std::string, bufferlist> &map) = 0;
+ virtual int operate(const std::string& oid, TestObjectOperationImpl &ops);
+ virtual int operate_read(const std::string& oid, TestObjectOperationImpl &ops,
+ bufferlist *pbl);
+ virtual int read(const std::string& oid, size_t len, uint64_t off,
+ bufferlist *bl) = 0;
+ virtual int remove(const std::string& oid, const SnapContext &snapc) = 0;
+ virtual int selfmanaged_snap_create(uint64_t *snapid) = 0;
+ virtual void aio_selfmanaged_snap_create(uint64_t *snapid,
+ AioCompletionImpl *c);
+ virtual int selfmanaged_snap_remove(uint64_t snapid) = 0;
+ virtual void aio_selfmanaged_snap_remove(uint64_t snapid,
+ AioCompletionImpl *c);
+ virtual int selfmanaged_snap_rollback(const std::string& oid,
+ uint64_t snapid) = 0;
+ virtual int selfmanaged_snap_set_write_ctx(snap_t seq,
+ std::vector<snap_t>& snaps);
+ virtual int set_alloc_hint(const std::string& oid,
+ uint64_t expected_object_size,
+ uint64_t expected_write_size,
+ uint32_t flags,
+ const SnapContext &snapc);
+ virtual void set_snap_read(snap_t seq);
+ virtual int sparse_read(const std::string& oid, uint64_t off, uint64_t len,
+ std::map<uint64_t,uint64_t> *m,
+ bufferlist *data_bl) = 0;
+ virtual int stat(const std::string& oid, uint64_t *psize, time_t *pmtime) = 0;
+ virtual int truncate(const std::string& oid, uint64_t size,
+ const SnapContext &snapc) = 0;
+ virtual int tmap_update(const std::string& oid, bufferlist& cmdbl);
+ virtual int unwatch(uint64_t handle);
+ virtual int watch(const std::string& o, uint64_t *handle,
+ librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2);
+ virtual int write(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off, const SnapContext &snapc) = 0;
+ virtual int write_full(const std::string& oid, bufferlist& bl,
+ const SnapContext &snapc) = 0;
+ virtual int writesame(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off, const SnapContext &snapc) = 0;
+ virtual int cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl) = 0;
+ virtual int xattr_get(const std::string& oid,
+ std::map<std::string, bufferlist>* attrset) = 0;
+ virtual int xattr_set(const std::string& oid, const std::string &name,
+ bufferlist& bl) = 0;
+ virtual int zero(const std::string& oid, uint64_t off, uint64_t len,
+ const SnapContext &snapc) = 0;
+
+ int execute_operation(const std::string& oid,
+ const Operation &operation);
+
+protected:
+ TestIoCtxImpl(const TestIoCtxImpl& rhs);
+ virtual ~TestIoCtxImpl();
+
+ int execute_aio_operations(const std::string& oid,
+ TestObjectOperationImpl *ops,
+ bufferlist *pbl, const SnapContext &snapc);
+
+private:
+ struct C_AioNotify : public Context {
+ TestIoCtxImpl *io_ctx;
+ AioCompletionImpl *aio_comp;
+ C_AioNotify(TestIoCtxImpl *_io_ctx, AioCompletionImpl *_aio_comp)
+ : io_ctx(_io_ctx), aio_comp(_aio_comp) {
+ }
+ void finish(int r) override {
+ io_ctx->handle_aio_notify_complete(aio_comp, r);
+ }
+ };
+
+ TestRadosClient *m_client;
+ int64_t m_pool_id = 0;
+ std::string m_pool_name;
+ std::string m_namespace_name;
+
+ snap_t m_snap_seq = 0;
+ SnapContext m_snapc;
+ std::atomic<uint64_t> m_refcount = { 0 };
+ std::atomic<uint64_t> m_pending_ops = { 0 };
+
+ void handle_aio_notify_complete(AioCompletionImpl *aio_comp, int r);
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_IO_CTX_IMPL_H
diff --git a/src/test/librados_test_stub/TestMemCluster.cc b/src/test/librados_test_stub/TestMemCluster.cc
new file mode 100644
index 00000000..3779be75
--- /dev/null
+++ b/src/test/librados_test_stub/TestMemCluster.cc
@@ -0,0 +1,203 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestMemCluster.h"
+#include "test/librados_test_stub/TestMemRadosClient.h"
+
+namespace librados {
+
+TestMemCluster::File::File()
+ : snap_id(), exists(true), lock("TestMemCluster::File::lock") {
+}
+
+TestMemCluster::File::File(const File &rhs)
+ : data(rhs.data),
+ mtime(rhs.mtime),
+ snap_id(rhs.snap_id),
+ exists(rhs.exists),
+ lock("TestMemCluster::File::lock") {
+}
+
+TestMemCluster::Pool::Pool()
+ : file_lock("TestMemCluster::Pool::file_lock") {
+}
+
+TestMemCluster::TestMemCluster()
+ : m_lock("TestMemCluster::m_lock"),
+ m_next_nonce(static_cast<uint32_t>(reinterpret_cast<uint64_t>(this))) {
+}
+
+TestMemCluster::~TestMemCluster() {
+ for (auto pool_pair : m_pools) {
+ pool_pair.second->put();
+ }
+}
+
+TestRadosClient *TestMemCluster::create_rados_client(CephContext *cct) {
+ return new TestMemRadosClient(cct, this);
+}
+
+int TestMemCluster::register_object_handler(int64_t pool_id,
+ const ObjectLocator& locator,
+ ObjectHandler* object_handler) {
+ Mutex::Locker locker(m_lock);
+ auto pool = get_pool(m_lock, pool_id);
+ if (pool == nullptr) {
+ return -ENOENT;
+ }
+
+ RWLock::WLocker pool_locker(pool->file_lock);
+ auto file_it = pool->files.find(locator);
+ if (file_it == pool->files.end()) {
+ return -ENOENT;
+ }
+
+ auto& object_handlers = pool->file_handlers[locator];
+ auto it = object_handlers.find(object_handler);
+ ceph_assert(it == object_handlers.end());
+
+ object_handlers.insert(object_handler);
+ return 0;
+}
+
+void TestMemCluster::unregister_object_handler(int64_t pool_id,
+ const ObjectLocator& locator,
+ ObjectHandler* object_handler) {
+ Mutex::Locker locker(m_lock);
+ auto pool = get_pool(m_lock, pool_id);
+ if (pool == nullptr) {
+ return;
+ }
+
+ RWLock::WLocker pool_locker(pool->file_lock);
+ auto handlers_it = pool->file_handlers.find(locator);
+ if (handlers_it == pool->file_handlers.end()) {
+ return;
+ }
+
+ auto& object_handlers = handlers_it->second;
+ object_handlers.erase(object_handler);
+}
+
+int TestMemCluster::pool_create(const std::string &pool_name) {
+ Mutex::Locker locker(m_lock);
+ if (m_pools.find(pool_name) != m_pools.end()) {
+ return -EEXIST;
+ }
+ Pool *pool = new Pool();
+ pool->pool_id = ++m_pool_id;
+ m_pools[pool_name] = pool;
+ return 0;
+}
+
+int TestMemCluster::pool_delete(const std::string &pool_name) {
+ Mutex::Locker locker(m_lock);
+ Pools::iterator iter = m_pools.find(pool_name);
+ if (iter == m_pools.end()) {
+ return -ENOENT;
+ }
+ iter->second->put();
+ m_pools.erase(iter);
+ return 0;
+}
+
+int TestMemCluster::pool_get_base_tier(int64_t pool_id, int64_t* base_tier) {
+ // TODO
+ *base_tier = pool_id;
+ return 0;
+}
+
+int TestMemCluster::pool_list(std::list<std::pair<int64_t, std::string> >& v) {
+ Mutex::Locker locker(m_lock);
+ v.clear();
+ for (Pools::iterator iter = m_pools.begin(); iter != m_pools.end(); ++iter) {
+ v.push_back(std::make_pair(iter->second->pool_id, iter->first));
+ }
+ return 0;
+}
+
+int64_t TestMemCluster::pool_lookup(const std::string &pool_name) {
+ Mutex::Locker locker(m_lock);
+ Pools::iterator iter = m_pools.find(pool_name);
+ if (iter == m_pools.end()) {
+ return -ENOENT;
+ }
+ return iter->second->pool_id;
+}
+
+int TestMemCluster::pool_reverse_lookup(int64_t id, std::string *name) {
+ Mutex::Locker locker(m_lock);
+ for (Pools::iterator iter = m_pools.begin(); iter != m_pools.end(); ++iter) {
+ if (iter->second->pool_id == id) {
+ *name = iter->first;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+TestMemCluster::Pool *TestMemCluster::get_pool(int64_t pool_id) {
+ Mutex::Locker locker(m_lock);
+ return get_pool(m_lock, pool_id);
+}
+
+TestMemCluster::Pool *TestMemCluster::get_pool(const Mutex& lock,
+ int64_t pool_id) {
+ for (auto &pool_pair : m_pools) {
+ if (pool_pair.second->pool_id == pool_id) {
+ return pool_pair.second;
+ }
+ }
+ return nullptr;
+}
+
+TestMemCluster::Pool *TestMemCluster::get_pool(const std::string &pool_name) {
+ Mutex::Locker locker(m_lock);
+ Pools::iterator iter = m_pools.find(pool_name);
+ if (iter != m_pools.end()) {
+ return iter->second;
+ }
+ return nullptr;
+}
+
+void TestMemCluster::allocate_client(uint32_t *nonce, uint64_t *global_id) {
+ Mutex::Locker locker(m_lock);
+ *nonce = m_next_nonce++;
+ *global_id = m_next_global_id++;
+}
+
+void TestMemCluster::deallocate_client(uint32_t nonce) {
+ Mutex::Locker locker(m_lock);
+ m_blacklist.erase(nonce);
+}
+
+bool TestMemCluster::is_blacklisted(uint32_t nonce) const {
+ Mutex::Locker locker(m_lock);
+ return (m_blacklist.find(nonce) != m_blacklist.end());
+}
+
+void TestMemCluster::blacklist(uint32_t nonce) {
+ m_watch_notify.blacklist(nonce);
+
+ Mutex::Locker locker(m_lock);
+ m_blacklist.insert(nonce);
+}
+
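+// Serializes access per object: blocks until no other transaction is in
+// flight for the same locator.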
+void TestMemCluster::transaction_start(const ObjectLocator& locator) {
+ Mutex::Locker locker(m_lock);
+ while (m_transactions.count(locator)) {
+ m_transaction_cond.Wait(m_lock);
+ }
+ auto result = m_transactions.insert(locator);
+ ceph_assert(result.second);
+}
+
+void TestMemCluster::transaction_finish(const ObjectLocator& locator) {
+ Mutex::Locker locker(m_lock);
+ size_t count = m_transactions.erase(locator);
+ ceph_assert(count == 1);
+ m_transaction_cond.Signal();
+}
+
+} // namespace librados
+
diff --git a/src/test/librados_test_stub/TestMemCluster.h b/src/test/librados_test_stub/TestMemCluster.h
new file mode 100644
index 00000000..af5c1af4
--- /dev/null
+++ b/src/test/librados_test_stub/TestMemCluster.h
@@ -0,0 +1,122 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_MEM_CLUSTER_H
+#define CEPH_TEST_MEM_CLUSTER_H
+
+#include "test/librados_test_stub/TestCluster.h"
+#include "include/buffer.h"
+#include "include/interval_set.h"
+#include "include/int_types.h"
+#include "common/Cond.h"
+#include "common/Mutex.h"
+#include "common/RefCountedObj.h"
+#include "common/RWLock.h"
+#include <boost/shared_ptr.hpp>
+#include <list>
+#include <map>
+#include <set>
+#include <string>
+
+namespace librados {
+
+class TestMemCluster : public TestCluster {
+public:
+ typedef std::map<std::string, bufferlist> OMap;
+ typedef std::map<ObjectLocator, OMap> FileOMaps;
+ typedef std::map<ObjectLocator, bufferlist> FileTMaps;
+ typedef std::map<std::string, bufferlist> XAttrs;
+ typedef std::map<ObjectLocator, XAttrs> FileXAttrs;
+ typedef std::set<ObjectHandler*> ObjectHandlers;
+ typedef std::map<ObjectLocator, ObjectHandlers> FileHandlers;
+
+ struct File {
+ File();
+ File(const File &rhs);
+
+ bufferlist data;
+ time_t mtime;
+
+ uint64_t snap_id;
+ std::vector<uint64_t> snaps;
+ interval_set<uint64_t> snap_overlap;
+
+ bool exists;
+ RWLock lock;
+ };
+ typedef boost::shared_ptr<File> SharedFile;
+
+ typedef std::list<SharedFile> FileSnapshots;
+ typedef std::map<ObjectLocator, FileSnapshots> Files;
+
+ typedef std::set<uint64_t> SnapSeqs;
+ struct Pool : public RefCountedObject {
+ Pool();
+
+ int64_t pool_id = 0;
+
+ SnapSeqs snap_seqs;
+ uint64_t snap_id = 1;
+
+ RWLock file_lock;
+ Files files;
+ FileOMaps file_omaps;
+ FileTMaps file_tmaps;
+ FileXAttrs file_xattrs;
+ FileHandlers file_handlers;
+ };
+
+ TestMemCluster();
+ ~TestMemCluster() override;
+
+ TestRadosClient *create_rados_client(CephContext *cct) override;
+
+ int register_object_handler(int64_t pool_id, const ObjectLocator& locator,
+ ObjectHandler* object_handler) override;
+ void unregister_object_handler(int64_t pool_id, const ObjectLocator& locator,
+ ObjectHandler* object_handler) override;
+
+ int pool_create(const std::string &pool_name);
+ int pool_delete(const std::string &pool_name);
+ int pool_get_base_tier(int64_t pool_id, int64_t* base_tier);
+ int pool_list(std::list<std::pair<int64_t, std::string> >& v);
+ int64_t pool_lookup(const std::string &name);
+ int pool_reverse_lookup(int64_t id, std::string *name);
+
+ Pool *get_pool(int64_t pool_id);
+ Pool *get_pool(const std::string &pool_name);
+
+ void allocate_client(uint32_t *nonce, uint64_t *global_id);
+ void deallocate_client(uint32_t nonce);
+
+ bool is_blacklisted(uint32_t nonce) const;
+ void blacklist(uint32_t nonce);
+
+ void transaction_start(const ObjectLocator& locator);
+ void transaction_finish(const ObjectLocator& locator);
+
+private:
+
+ typedef std::map<std::string, Pool*> Pools;
+ typedef std::set<uint32_t> Blacklist;
+
+ mutable Mutex m_lock;
+
+ Pools m_pools;
+ int64_t m_pool_id = 0;
+
+ uint32_t m_next_nonce;
+ uint64_t m_next_global_id = 1234;
+
+ Blacklist m_blacklist;
+
+ Cond m_transaction_cond;
+ std::set<ObjectLocator> m_transactions;
+
+ Pool *get_pool(const Mutex& lock, int64_t pool_id);
+
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_MEM_CLUSTER_H
diff --git a/src/test/librados_test_stub/TestMemIoCtxImpl.cc b/src/test/librados_test_stub/TestMemIoCtxImpl.cc
new file mode 100644
index 00000000..ac73e950
--- /dev/null
+++ b/src/test/librados_test_stub/TestMemIoCtxImpl.cc
@@ -0,0 +1,840 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestMemIoCtxImpl.h"
+#include "test/librados_test_stub/TestMemRadosClient.h"
+#include "common/Clock.h"
+#include "common/RWLock.h"
+#include "include/err.h"
+#include <boost/algorithm/string/predicate.hpp>
+#include <boost/bind.hpp>
+#include <errno.h>
+#include <include/compat.h>
+
+static void to_vector(const interval_set<uint64_t> &set,
+ std::vector<std::pair<uint64_t, uint64_t> > *vec) {
+ vec->clear();
+ for (interval_set<uint64_t>::const_iterator it = set.begin();
+ it != set.end(); ++it) {
+ vec->push_back(*it);
+ }
+}
+
+// see PrimaryLogPG::finish_extent_cmp()
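+// Returns 0 when read_bl matches bl byte-for-byte (missing bytes compare as
+// zero), otherwise -MAX_ERRNO minus the offset of the first mismatch.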
+static int cmpext_compare(const bufferlist &bl, const bufferlist &read_bl) {
+ for (uint64_t idx = 0; idx < bl.length(); ++idx) {
+ char read_byte = (idx < read_bl.length() ? read_bl[idx] : 0);
+ if (bl[idx] != read_byte) {
+ return -MAX_ERRNO - idx;
+ }
+ }
+ return 0;
+}
+
+namespace librados {
+
+TestMemIoCtxImpl::TestMemIoCtxImpl() {
+}
+
+TestMemIoCtxImpl::TestMemIoCtxImpl(const TestMemIoCtxImpl& rhs)
+ : TestIoCtxImpl(rhs), m_client(rhs.m_client), m_pool(rhs.m_pool) {
+ m_pool->get();
+}
+
+TestMemIoCtxImpl::TestMemIoCtxImpl(TestMemRadosClient *client, int64_t pool_id,
+ const std::string& pool_name,
+ TestMemCluster::Pool *pool)
+ : TestIoCtxImpl(client, pool_id, pool_name), m_client(client),
+ m_pool(pool) {
+ m_pool->get();
+}
+
+TestMemIoCtxImpl::~TestMemIoCtxImpl() {
+ m_pool->put();
+}
+
+TestIoCtxImpl *TestMemIoCtxImpl::clone() {
+ return new TestMemIoCtxImpl(*this);
+}
+
+int TestMemIoCtxImpl::aio_remove(const std::string& oid, AioCompletionImpl *c, int flags) {
+ m_client->add_aio_operation(oid, true,
+ boost::bind(&TestMemIoCtxImpl::remove, this, oid,
+ get_snap_context()),
+ c);
+ return 0;
+}
+
+int TestMemIoCtxImpl::append(const std::string& oid, const bufferlist &bl,
+ const SnapContext &snapc) {
+ if (get_snap_read() != CEPH_NOSNAP) {
+ return -EROFS;
+ } else if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ TestMemCluster::SharedFile file;
+ {
+ RWLock::WLocker l(m_pool->file_lock);
+ file = get_file(oid, true, snapc);
+ }
+
+ RWLock::WLocker l(file->lock);
+ auto off = file->data.length();
+ ensure_minimum_length(off + bl.length(), &file->data);
+ file->data.copy_in(off, bl.length(), bl);
+ return 0;
+}
+
+int TestMemIoCtxImpl::assert_exists(const std::string &oid) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ RWLock::RLocker l(m_pool->file_lock);
+ TestMemCluster::SharedFile file = get_file(oid, false, get_snap_context());
+ if (file == NULL) {
+ return -ENOENT;
+ }
+ return 0;
+}
+
+int TestMemIoCtxImpl::create(const std::string& oid, bool exclusive) {
+ if (get_snap_read() != CEPH_NOSNAP) {
+ return -EROFS;
+ } else if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ RWLock::WLocker l(m_pool->file_lock);
+ get_file(oid, true, get_snap_context());
+ return 0;
+}
+
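+// Builds the snap set for the object: one clone entry per intermediate
+// snapshot (overlap taken from the next newer version) plus a SNAP_HEAD
+// entry when the head object exists.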
+int TestMemIoCtxImpl::list_snaps(const std::string& oid, snap_set_t *out_snaps) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ out_snaps->seq = 0;
+ out_snaps->clones.clear();
+
+ RWLock::RLocker l(m_pool->file_lock);
+ TestMemCluster::Files::iterator it = m_pool->files.find(
+ {get_namespace(), oid});
+ if (it == m_pool->files.end()) {
+ return -ENOENT;
+ }
+
+ bool include_head = false;
+ TestMemCluster::FileSnapshots &file_snaps = it->second;
+ for (TestMemCluster::FileSnapshots::iterator s_it = file_snaps.begin();
+ s_it != file_snaps.end(); ++s_it) {
+ TestMemCluster::File &file = *s_it->get();
+
+ if (file_snaps.size() > 1) {
+ out_snaps->seq = file.snap_id;
+ TestMemCluster::FileSnapshots::iterator next_it(s_it);
+ ++next_it;
+ if (next_it == file_snaps.end()) {
+ include_head = true;
+ break;
+ }
+
+ ++out_snaps->seq;
+ if (!file.exists) {
+ continue;
+ }
+
+ // update the overlap with the next version's overlap metadata
+ TestMemCluster::File &next_file = *next_it->get();
+ interval_set<uint64_t> overlap;
+ if (next_file.exists) {
+ overlap = next_file.snap_overlap;
+ }
+
+ clone_info_t clone;
+ clone.cloneid = file.snap_id;
+ clone.snaps = file.snaps;
+ to_vector(overlap, &clone.overlap);
+ clone.size = file.data.length();
+ out_snaps->clones.push_back(clone);
+ }
+ }
+
+ if ((file_snaps.size() == 1 && file_snaps.back()->data.length() > 0) ||
+ include_head)
+ {
+ // Include the SNAP_HEAD
+ TestMemCluster::File &file = *file_snaps.back();
+ if (file.exists) {
+ RWLock::RLocker l2(file.lock);
+ if (out_snaps->seq == 0 && !include_head) {
+ out_snaps->seq = file.snap_id;
+ }
+ clone_info_t head_clone;
+ head_clone.cloneid = librados::SNAP_HEAD;
+ head_clone.size = file.data.length();
+ out_snaps->clones.push_back(head_clone);
+ }
+ }
+ return 0;
+
+}
+
+int TestMemIoCtxImpl::omap_get_vals2(const std::string& oid,
+ const std::string& start_after,
+ const std::string &filter_prefix,
+ uint64_t max_return,
+ std::map<std::string, bufferlist> *out_vals,
+ bool *pmore) {
+ if (out_vals == NULL) {
+ return -EINVAL;
+ } else if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ TestMemCluster::SharedFile file;
+ {
+ RWLock::RLocker l(m_pool->file_lock);
+ file = get_file(oid, false, get_snap_context());
+ if (file == NULL) {
+ return -ENOENT;
+ }
+ }
+
+ out_vals->clear();
+
+ RWLock::RLocker l(file->lock);
+ TestMemCluster::FileOMaps::iterator o_it = m_pool->file_omaps.find(
+ {get_namespace(), oid});
+ if (o_it == m_pool->file_omaps.end()) {
+ if (pmore) {
+ *pmore = false;
+ }
+ return 0;
+ }
+
+ TestMemCluster::OMap &omap = o_it->second;
+ TestMemCluster::OMap::iterator it = omap.begin();
+ if (!start_after.empty()) {
+ it = omap.upper_bound(start_after);
+ }
+
+ while (it != omap.end() && max_return > 0) {
+ if (filter_prefix.empty() ||
+ boost::algorithm::starts_with(it->first, filter_prefix)) {
+ (*out_vals)[it->first] = it->second;
+ --max_return;
+ }
+ ++it;
+ }
+ if (pmore) {
+ *pmore = (it != omap.end());
+ }
+ return 0;
+}
+
+int TestMemIoCtxImpl::omap_get_vals(const std::string& oid,
+ const std::string& start_after,
+ const std::string &filter_prefix,
+ uint64_t max_return,
+ std::map<std::string, bufferlist> *out_vals) {
+ return omap_get_vals2(oid, start_after, filter_prefix, max_return, out_vals, nullptr);
+}
+
+int TestMemIoCtxImpl::omap_rm_keys(const std::string& oid,
+ const std::set<std::string>& keys) {
+ if (get_snap_read() != CEPH_NOSNAP) {
+ return -EROFS;
+ } else if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ TestMemCluster::SharedFile file;
+ {
+ RWLock::WLocker l(m_pool->file_lock);
+ file = get_file(oid, true, get_snap_context());
+ if (file == NULL) {
+ return -ENOENT;
+ }
+ }
+
+ RWLock::WLocker l(file->lock);
+ for (std::set<std::string>::iterator it = keys.begin();
+ it != keys.end(); ++it) {
+ m_pool->file_omaps[{get_namespace(), oid}].erase(*it);
+ }
+ return 0;
+}
+
+int TestMemIoCtxImpl::omap_set(const std::string& oid,
+ const std::map<std::string, bufferlist> &map) {
+ if (get_snap_read() != CEPH_NOSNAP) {
+ return -EROFS;
+ } else if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ TestMemCluster::SharedFile file;
+ {
+ RWLock::WLocker l(m_pool->file_lock);
+ file = get_file(oid, true, get_snap_context());
+ if (file == NULL) {
+ return -ENOENT;
+ }
+ }
+
+ RWLock::WLocker l(file->lock);
+ for (std::map<std::string, bufferlist>::const_iterator it = map.begin();
+ it != map.end(); ++it) {
+ bufferlist bl;
+ bl.append(it->second);
+ m_pool->file_omaps[{get_namespace(), oid}][it->first] = bl;
+ }
+
+ return 0;
+}
+
+int TestMemIoCtxImpl::read(const std::string& oid, size_t len, uint64_t off,
+ bufferlist *bl) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ TestMemCluster::SharedFile file;
+ {
+ RWLock::RLocker l(m_pool->file_lock);
+ file = get_file(oid, false, get_snap_context());
+ if (file == NULL) {
+ return -ENOENT;
+ }
+ }
+
+ RWLock::RLocker l(file->lock);
+ if (len == 0) {
+ len = file->data.length();
+ }
+ len = clip_io(off, len, file->data.length());
+ if (bl != NULL && len > 0) {
+ bufferlist bit;
+ bit.substr_of(file->data, off, len);
+ append_clone(bit, bl);
+ }
+ return len;
+}
+
+int TestMemIoCtxImpl::remove(const std::string& oid, const SnapContext &snapc) {
+ if (get_snap_read() != CEPH_NOSNAP) {
+ return -EROFS;
+ } else if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ RWLock::WLocker l(m_pool->file_lock);
+ TestMemCluster::SharedFile file = get_file(oid, false, snapc);
+ if (file == NULL) {
+ return -ENOENT;
+ }
+ file = get_file(oid, true, snapc);
+
+ {
+ RWLock::WLocker l2(file->lock);
+ file->exists = false;
+ }
+
+ TestCluster::ObjectLocator locator(get_namespace(), oid);
+ TestMemCluster::Files::iterator it = m_pool->files.find(locator);
+ ceph_assert(it != m_pool->files.end());
+
+ if (*it->second.rbegin() == file) {
+ TestMemCluster::ObjectHandlers object_handlers;
+ std::swap(object_handlers, m_pool->file_handlers[locator]);
+ m_pool->file_handlers.erase(locator);
+
+ for (auto object_handler : object_handlers) {
+ object_handler->handle_removed(m_client);
+ }
+ }
+
+ if (it->second.size() == 1) {
+ m_pool->files.erase(it);
+ m_pool->file_omaps.erase(locator);
+ }
+ return 0;
+}
+
+int TestMemIoCtxImpl::selfmanaged_snap_create(uint64_t *snapid) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ RWLock::WLocker l(m_pool->file_lock);
+ *snapid = ++m_pool->snap_id;
+ m_pool->snap_seqs.insert(*snapid);
+ return 0;
+}
+
+int TestMemIoCtxImpl::selfmanaged_snap_remove(uint64_t snapid) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ RWLock::WLocker l(m_pool->file_lock);
+ TestMemCluster::SnapSeqs::iterator it =
+ m_pool->snap_seqs.find(snapid);
+ if (it == m_pool->snap_seqs.end()) {
+ return -ENOENT;
+ }
+
+ // TODO clean up all file snapshots
+ m_pool->snap_seqs.erase(it);
+ return 0;
+}
+
+int TestMemIoCtxImpl::selfmanaged_snap_rollback(const std::string& oid,
+ uint64_t snapid) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ RWLock::WLocker l(m_pool->file_lock);
+
+ TestMemCluster::SharedFile file;
+ TestMemCluster::Files::iterator f_it = m_pool->files.find(
+ {get_namespace(), oid});
+ if (f_it == m_pool->files.end()) {
+ return 0;
+ }
+
+ TestMemCluster::FileSnapshots &snaps = f_it->second;
+ file = snaps.back();
+
+ size_t versions = 0;
+ for (TestMemCluster::FileSnapshots::reverse_iterator it = snaps.rbegin();
+ it != snaps.rend(); ++it) {
+ TestMemCluster::SharedFile file = *it;
+ if (file->snap_id < get_snap_read()) {
+ if (versions == 0) {
+ // already at the snapshot version
+ return 0;
+ } else if (file->snap_id == CEPH_NOSNAP) {
+ if (versions == 1) {
+ // delete the current HEAD; the next one is the correct version
+ snaps.erase(it.base());
+ } else {
+ // overwrite contents of current HEAD
+ file = TestMemCluster::SharedFile (new TestMemCluster::File(**it));
+ file->snap_id = CEPH_NOSNAP;
+ *it = file;
+ }
+ } else {
+ // create new head version
+ file = TestMemCluster::SharedFile (new TestMemCluster::File(**it));
+ file->snap_id = m_pool->snap_id;
+ snaps.push_back(file);
+ }
+ return 0;
+ }
+ ++versions;
+ }
+ return 0;
+}
+
+int TestMemIoCtxImpl::set_alloc_hint(const std::string& oid,
+ uint64_t expected_object_size,
+ uint64_t expected_write_size,
+ uint32_t flags,
+ const SnapContext &snapc) {
+ if (get_snap_read() != CEPH_NOSNAP) {
+ return -EROFS;
+ } else if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ {
+ RWLock::WLocker l(m_pool->file_lock);
+ get_file(oid, true, snapc);
+ }
+
+ return 0;
+}
+
+int TestMemIoCtxImpl::sparse_read(const std::string& oid, uint64_t off,
+ uint64_t len,
+ std::map<uint64_t,uint64_t> *m,
+ bufferlist *data_bl) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ // TODO verify correctness
+ TestMemCluster::SharedFile file;
+ {
+ RWLock::RLocker l(m_pool->file_lock);
+ file = get_file(oid, false, get_snap_context());
+ if (file == NULL) {
+ return -ENOENT;
+ }
+ }
+
+ RWLock::RLocker l(file->lock);
+ len = clip_io(off, len, file->data.length());
+ // TODO support sparse read
+ if (m != NULL) {
+ m->clear();
+ if (len > 0) {
+ (*m)[off] = len;
+ }
+ }
+ if (data_bl != NULL && len > 0) {
+ bufferlist bit;
+ bit.substr_of(file->data, off, len);
+ append_clone(bit, data_bl);
+ }
+ return len > 0 ? 1 : 0;
+}
+
+int TestMemIoCtxImpl::stat(const std::string& oid, uint64_t *psize,
+ time_t *pmtime) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ TestMemCluster::SharedFile file;
+ {
+ RWLock::RLocker l(m_pool->file_lock);
+ file = get_file(oid, false, get_snap_context());
+ if (file == NULL) {
+ return -ENOENT;
+ }
+ }
+
+ RWLock::RLocker l(file->lock);
+ if (psize != NULL) {
+ *psize = file->data.length();
+ }
+ if (pmtime != NULL) {
+ *pmtime = file->mtime;
+ }
+ return 0;
+}
+
+int TestMemIoCtxImpl::truncate(const std::string& oid, uint64_t size,
+ const SnapContext &snapc) {
+ if (get_snap_read() != CEPH_NOSNAP) {
+ return -EROFS;
+ } else if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ TestMemCluster::SharedFile file;
+ {
+ RWLock::WLocker l(m_pool->file_lock);
+ file = get_file(oid, true, snapc);
+ }
+
+ RWLock::WLocker l(file->lock);
+ bufferlist bl(size);
+
+ interval_set<uint64_t> is;
+ if (file->data.length() > size) {
+ is.insert(size, file->data.length() - size);
+
+ bl.substr_of(file->data, 0, size);
+ file->data.swap(bl);
+ } else if (file->data.length() != size) {
+ if (size == 0) {
+ bl.clear();
+ } else {
+ is.insert(0, size);
+
+ bl.append_zero(size - file->data.length());
+ file->data.append(bl);
+ }
+ }
+ is.intersection_of(file->snap_overlap);
+ file->snap_overlap.subtract(is);
+ return 0;
+}
+
+int TestMemIoCtxImpl::write(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off, const SnapContext &snapc) {
+ if (get_snap_read() != CEPH_NOSNAP) {
+ return -EROFS;
+ } else if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ TestMemCluster::SharedFile file;
+ {
+ RWLock::WLocker l(m_pool->file_lock);
+ file = get_file(oid, true, snapc);
+ }
+
+ RWLock::WLocker l(file->lock);
+ if (len > 0) {
+ interval_set<uint64_t> is;
+ is.insert(off, len);
+ is.intersection_of(file->snap_overlap);
+ file->snap_overlap.subtract(is);
+ }
+
+ ensure_minimum_length(off + len, &file->data);
+ file->data.copy_in(off, len, bl);
+ return 0;
+}
+
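+// Replaces the entire object contents with `bl`, updating the
+// snapshot-overlap tracking for the overwritten extents.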
+int TestMemIoCtxImpl::write_full(const std::string& oid, bufferlist& bl,
+ const SnapContext &snapc) {
+ if (get_snap_read() != CEPH_NOSNAP) {
+ return -EROFS;
+ } else if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ TestMemCluster::SharedFile file;
+ {
+ RWLock::WLocker l(m_pool->file_lock);
+ file = get_file(oid, true, snapc);
+ if (file == NULL) {
+ return -ENOENT;
+ }
+ }
+
+ RWLock::WLocker l(file->lock);
+ if (bl.length() > 0) {
+ interval_set<uint64_t> is;
+ is.insert(0, bl.length());
+ is.intersection_of(file->snap_overlap);
+ file->snap_overlap.subtract(is);
+ }
+
+ file->data.clear();
+ ensure_minimum_length(bl.length(), &file->data);
+ file->data.copy_in(0, bl.length(), bl);
+ return 0;
+}
+
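+// Fills [off, off + len) by repeating `bl`; `len` must be a non-zero
+// multiple of bl.length(), otherwise -EINVAL is returned.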
+int TestMemIoCtxImpl::writesame(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off, const SnapContext &snapc) {
+ if (get_snap_read() != CEPH_NOSNAP) {
+ return -EROFS;
+ } else if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ if (len == 0 || (len % bl.length())) {
+ return -EINVAL;
+ }
+
+ TestMemCluster::SharedFile file;
+ {
+ RWLock::WLocker l(m_pool->file_lock);
+ file = get_file(oid, true, snapc);
+ }
+
+ RWLock::WLocker l(file->lock);
+ if (len > 0) {
+ interval_set<uint64_t> is;
+ is.insert(off, len);
+ is.intersection_of(file->snap_overlap);
+ file->snap_overlap.subtract(is);
+ }
+
+ ensure_minimum_length(off + len, &file->data);
+ while (len > 0) {
+ file->data.copy_in(off, bl.length(), bl);
+ off += bl.length();
+ len -= bl.length();
+ }
+ return 0;
+}
+
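+// Compares `cmp_bl` against the object data starting at `off`. A missing
+// object is treated as zero-length data; the comparison result itself is
+// produced by the shared cmpext_compare() helper.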
+int TestMemIoCtxImpl::cmpext(const std::string& oid, uint64_t off,
+ bufferlist& cmp_bl) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ bufferlist read_bl;
+ uint64_t len = cmp_bl.length();
+
+ TestMemCluster::SharedFile file;
+ {
+ RWLock::RLocker l(m_pool->file_lock);
+ file = get_file(oid, false, get_snap_context());
+ if (file == NULL) {
+ return cmpext_compare(cmp_bl, read_bl);
+ }
+ }
+
+ RWLock::RLocker l(file->lock);
+ if (off >= file->data.length()) {
+ len = 0;
+ } else if (off + len > file->data.length()) {
+ len = file->data.length() - off;
+ }
+ read_bl.substr_of(file->data, off, len);
+ return cmpext_compare(cmp_bl, read_bl);
+}
+
+int TestMemIoCtxImpl::xattr_get(const std::string& oid,
+ std::map<std::string, bufferlist>* attrset) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ TestMemCluster::SharedFile file;
+ RWLock::RLocker l(m_pool->file_lock);
+ TestMemCluster::FileXAttrs::iterator it = m_pool->file_xattrs.find(
+ {get_namespace(), oid});
+ if (it == m_pool->file_xattrs.end()) {
+ return -ENODATA;
+ }
+ *attrset = it->second;
+ return 0;
+}
+
+int TestMemIoCtxImpl::xattr_set(const std::string& oid, const std::string &name,
+ bufferlist& bl) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ RWLock::WLocker l(m_pool->file_lock);
+ m_pool->file_xattrs[{get_namespace(), oid}][name] = bl;
+ return 0;
+}
+
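+// Zeroes [off, off + len). If the zeroed range reaches the current end of
+// the object the request is redirected to truncate(), mirroring the OSD's
+// zero-to-truncate behaviour; otherwise a buffer of zeroes is written in
+// place.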
+int TestMemIoCtxImpl::zero(const std::string& oid, uint64_t off, uint64_t len,
+ const SnapContext &snapc) {
+ if (m_client->is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ bool truncate_redirect = false;
+ TestMemCluster::SharedFile file;
+ {
+ RWLock::WLocker l(m_pool->file_lock);
+ file = get_file(oid, false, snapc);
+ if (!file) {
+ return 0;
+ }
+ file = get_file(oid, true, snapc);
+
+ RWLock::RLocker l2(file->lock);
+ if (len > 0 && off + len >= file->data.length()) {
+ // the OSD converts a zero that reaches the end of the object into a truncate
+ truncate_redirect = true;
+ }
+ }
+ if (truncate_redirect) {
+ return truncate(oid, off, snapc);
+ }
+
+ bufferlist bl;
+ bl.append_zero(len);
+ return write(oid, bl, len, off, snapc);
+}
+
+void TestMemIoCtxImpl::append_clone(bufferlist& src, bufferlist* dest) {
+ // deep-copy the src to ensure our memory-based mock RADOS data cannot
+ // be modified by callers
+ if (src.length() > 0) {
+ bufferlist::iterator iter = src.begin();
+ buffer::ptr ptr;
+ iter.copy_deep(src.length(), ptr);
+ dest->append(ptr);
+ }
+}
+
+size_t TestMemIoCtxImpl::clip_io(size_t off, size_t len, size_t bl_len) {
+ if (off >= bl_len) {
+ len = 0;
+ } else if (off + len > bl_len) {
+ len = bl_len - off;
+ }
+ return len;
+}
+
+void TestMemIoCtxImpl::ensure_minimum_length(size_t len, bufferlist *bl) {
+ if (len > bl->length()) {
+ bufferptr ptr(buffer::create(len - bl->length()));
+ ptr.zero();
+ bl->append(ptr);
+ }
+}
+
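+// Central object lookup. For reads it returns the clone visible at the
+// current snap-read context (or an empty pointer if the object does not
+// exist there). For writes it returns the HEAD version, first cloning the
+// previous version whenever the snap context shows that new snapshots
+// were taken since the last write.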
+TestMemCluster::SharedFile TestMemIoCtxImpl::get_file(
+ const std::string &oid, bool write, const SnapContext &snapc) {
+ ceph_assert(m_pool->file_lock.is_locked() || m_pool->file_lock.is_wlocked());
+ ceph_assert(!write || m_pool->file_lock.is_wlocked());
+
+ TestMemCluster::SharedFile file;
+ TestMemCluster::Files::iterator it = m_pool->files.find(
+ {get_namespace(), oid});
+ if (it != m_pool->files.end()) {
+ file = it->second.back();
+ } else if (!write) {
+ return TestMemCluster::SharedFile();
+ }
+
+ if (write) {
+ bool new_version = false;
+ if (!file || !file->exists) {
+ file = TestMemCluster::SharedFile(new TestMemCluster::File());
+ new_version = true;
+ } else {
+ if (!snapc.snaps.empty() && file->snap_id < snapc.seq) {
+ for (std::vector<snapid_t>::const_reverse_iterator seq_it =
+ snapc.snaps.rbegin();
+ seq_it != snapc.snaps.rend(); ++seq_it) {
+ if (*seq_it > file->snap_id && *seq_it <= snapc.seq) {
+ file->snaps.push_back(*seq_it);
+ }
+ }
+
+ bufferlist prev_data = file->data;
+ file = TestMemCluster::SharedFile(
+ new TestMemCluster::File(*file));
+ file->data.clear();
+ append_clone(prev_data, &file->data);
+ if (prev_data.length() > 0) {
+ file->snap_overlap.insert(0, prev_data.length());
+ }
+ new_version = true;
+ }
+ }
+
+ if (new_version) {
+ file->snap_id = snapc.seq;
+ file->mtime = ceph_clock_now().sec();
+ m_pool->files[{get_namespace(), oid}].push_back(file);
+ }
+ return file;
+ }
+
+ if (get_snap_read() == CEPH_NOSNAP) {
+ if (!file->exists) {
+ ceph_assert(it->second.size() > 1);
+ return TestMemCluster::SharedFile();
+ }
+ return file;
+ }
+
+ TestMemCluster::FileSnapshots &snaps = it->second;
+ for (TestMemCluster::FileSnapshots::reverse_iterator it = snaps.rbegin();
+ it != snaps.rend(); ++it) {
+ TestMemCluster::SharedFile file = *it;
+ if (file->snap_id < get_snap_read()) {
+ if (!file->exists) {
+ return TestMemCluster::SharedFile();
+ }
+ return file;
+ }
+ }
+ return TestMemCluster::SharedFile();
+}
+
+} // namespace librados
diff --git a/src/test/librados_test_stub/TestMemIoCtxImpl.h b/src/test/librados_test_stub/TestMemIoCtxImpl.h
new file mode 100644
index 00000000..ebe3a46e
--- /dev/null
+++ b/src/test/librados_test_stub/TestMemIoCtxImpl.h
@@ -0,0 +1,99 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_MEM_IO_CTX_IMPL_H
+#define CEPH_TEST_MEM_IO_CTX_IMPL_H
+
+#include "test/librados_test_stub/TestIoCtxImpl.h"
+#include "test/librados_test_stub/TestMemCluster.h"
+
+namespace librados {
+
+class TestMemRadosClient;
+
+class TestMemIoCtxImpl : public TestIoCtxImpl {
+public:
+ TestMemIoCtxImpl();
+ TestMemIoCtxImpl(TestMemRadosClient *client, int64_t m_pool_id,
+ const std::string& pool_name,
+ TestMemCluster::Pool *pool);
+ ~TestMemIoCtxImpl() override;
+
+ TestIoCtxImpl *clone() override;
+
+ int aio_remove(const std::string& oid, AioCompletionImpl *c, int flags = 0) override;
+
+ int append(const std::string& oid, const bufferlist &bl,
+ const SnapContext &snapc) override;
+
+ int assert_exists(const std::string &oid) override;
+
+ int create(const std::string& oid, bool exclusive) override;
+ int list_snaps(const std::string& o, snap_set_t *out_snaps) override;
+ int omap_get_vals(const std::string& oid,
+ const std::string& start_after,
+ const std::string &filter_prefix,
+ uint64_t max_return,
+ std::map<std::string, bufferlist> *out_vals) override;
+ int omap_get_vals2(const std::string& oid,
+ const std::string& start_after,
+ const std::string &filter_prefix,
+ uint64_t max_return,
+ std::map<std::string, bufferlist> *out_vals,
+ bool *pmore) override;
+ int omap_rm_keys(const std::string& oid,
+ const std::set<std::string>& keys) override;
+ int omap_set(const std::string& oid, const std::map<std::string,
+ bufferlist> &map) override;
+ int read(const std::string& oid, size_t len, uint64_t off,
+ bufferlist *bl) override;
+ int remove(const std::string& oid, const SnapContext &snapc) override;
+ int selfmanaged_snap_create(uint64_t *snapid) override;
+ int selfmanaged_snap_remove(uint64_t snapid) override;
+ int selfmanaged_snap_rollback(const std::string& oid,
+ uint64_t snapid) override;
+ int set_alloc_hint(const std::string& oid, uint64_t expected_object_size,
+ uint64_t expected_write_size, uint32_t flags,
+ const SnapContext &snapc) override;
+ int sparse_read(const std::string& oid, uint64_t off, uint64_t len,
+ std::map<uint64_t,uint64_t> *m, bufferlist *data_bl) override;
+ int stat(const std::string& oid, uint64_t *psize, time_t *pmtime) override;
+ int truncate(const std::string& oid, uint64_t size,
+ const SnapContext &snapc) override;
+ int write(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off, const SnapContext &snapc) override;
+ int write_full(const std::string& oid, bufferlist& bl,
+ const SnapContext &snapc) override;
+ int writesame(const std::string& oid, bufferlist& bl, size_t len,
+ uint64_t off, const SnapContext &snapc) override;
+ int cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl) override;
+ int xattr_get(const std::string& oid,
+ std::map<std::string, bufferlist>* attrset) override;
+ int xattr_set(const std::string& oid, const std::string &name,
+ bufferlist& bl) override;
+ int zero(const std::string& oid, uint64_t off, uint64_t len,
+ const SnapContext &snapc) override;
+
+protected:
+ TestMemCluster::Pool *get_pool() {
+ return m_pool;
+ }
+
+private:
+ TestMemIoCtxImpl(const TestMemIoCtxImpl&);
+
+ TestMemRadosClient *m_client = nullptr;
+ TestMemCluster::Pool *m_pool = nullptr;
+
+ void append_clone(bufferlist& src, bufferlist* dest);
+ size_t clip_io(size_t off, size_t len, size_t bl_len);
+ void ensure_minimum_length(size_t len, bufferlist *bl);
+
+ TestMemCluster::SharedFile get_file(const std::string &oid, bool write,
+ const SnapContext &snapc);
+
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_MEM_IO_CTX_IMPL_H
diff --git a/src/test/librados_test_stub/TestMemRadosClient.cc b/src/test/librados_test_stub/TestMemRadosClient.cc
new file mode 100644
index 00000000..93807acf
--- /dev/null
+++ b/src/test/librados_test_stub/TestMemRadosClient.cc
@@ -0,0 +1,118 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestMemRadosClient.h"
+#include "test/librados_test_stub/TestMemCluster.h"
+#include "test/librados_test_stub/TestMemIoCtxImpl.h"
+#include <errno.h>
+#include <sstream>
+
+namespace librados {
+
+TestMemRadosClient::TestMemRadosClient(CephContext *cct,
+ TestMemCluster *test_mem_cluster)
+ : TestRadosClient(cct, test_mem_cluster->get_watch_notify()),
+ m_mem_cluster(test_mem_cluster) {
+ m_mem_cluster->allocate_client(&m_nonce, &m_global_id);
+}
+
+TestMemRadosClient::~TestMemRadosClient() {
+ m_mem_cluster->deallocate_client(m_nonce);
+}
+
+TestIoCtxImpl *TestMemRadosClient::create_ioctx(int64_t pool_id,
+ const std::string &pool_name) {
+ return new TestMemIoCtxImpl(this, pool_id, pool_name,
+ m_mem_cluster->get_pool(pool_name));
+}
+
+void TestMemRadosClient::object_list(int64_t pool_id,
+ std::list<librados::TestRadosClient::Object> *list) {
+ list->clear();
+
+ auto pool = m_mem_cluster->get_pool(pool_id);
+ if (pool != nullptr) {
+ RWLock::RLocker file_locker(pool->file_lock);
+ for (auto &file_pair : pool->files) {
+ Object obj;
+ obj.oid = file_pair.first.name;
+ list->push_back(obj);
+ }
+ }
+}
+
+int TestMemRadosClient::pool_create(const std::string &pool_name) {
+ if (is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+ return m_mem_cluster->pool_create(pool_name);
+}
+
+int TestMemRadosClient::pool_delete(const std::string &pool_name) {
+ if (is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+ return m_mem_cluster->pool_delete(pool_name);
+}
+
+int TestMemRadosClient::pool_get_base_tier(int64_t pool_id, int64_t* base_tier) {
+ // TODO
+ *base_tier = pool_id;
+ return 0;
+}
+
+int TestMemRadosClient::pool_list(std::list<std::pair<int64_t, std::string> >& v) {
+ return m_mem_cluster->pool_list(v);
+}
+
+int64_t TestMemRadosClient::pool_lookup(const std::string &pool_name) {
+ return m_mem_cluster->pool_lookup(pool_name);
+}
+
+int TestMemRadosClient::pool_reverse_lookup(int64_t id, std::string *name) {
+ return m_mem_cluster->pool_reverse_lookup(id, name);
+}
+
+int TestMemRadosClient::watch_flush() {
+ get_watch_notify()->flush(this);
+ return 0;
+}
+
+bool TestMemRadosClient::is_blacklisted() const {
+ return m_mem_cluster->is_blacklisted(m_nonce);
+}
+
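+// The memory cluster identifies clients by nonce rather than by full
+// address, so the nonce is parsed out of the "addr/nonce" form of
+// `client_address`; the expiration time is ignored.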
+int TestMemRadosClient::blacklist_add(const std::string& client_address,
+ uint32_t expire_seconds) {
+ if (is_blacklisted()) {
+ return -EBLACKLISTED;
+ }
+
+ // extract the nonce to use as a unique key for the client
+ auto idx = client_address.find("/");
+ if (idx == std::string::npos || idx + 1 >= client_address.size()) {
+ return -EINVAL;
+ }
+
+ std::stringstream nonce_ss(client_address.substr(idx + 1));
+ uint32_t nonce;
+ nonce_ss >> nonce;
+ if (!nonce_ss) {
+ return -EINVAL;
+ }
+
+ m_mem_cluster->blacklist(nonce);
+ return 0;
+}
+
+void TestMemRadosClient::transaction_start(const std::string& nspace,
+ const std::string &oid) {
+ m_mem_cluster->transaction_start({nspace, oid});
+}
+
+void TestMemRadosClient::transaction_finish(const std::string& nspace,
+ const std::string &oid) {
+ m_mem_cluster->transaction_finish({nspace, oid});
+}
+
+} // namespace librados
diff --git a/src/test/librados_test_stub/TestMemRadosClient.h b/src/test/librados_test_stub/TestMemRadosClient.h
new file mode 100644
index 00000000..06f29620
--- /dev/null
+++ b/src/test/librados_test_stub/TestMemRadosClient.h
@@ -0,0 +1,88 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_MEM_RADOS_CLIENT_H
+#define CEPH_TEST_MEM_RADOS_CLIENT_H
+
+#include "test/librados_test_stub/TestRadosClient.h"
+#include "include/ceph_assert.h"
+#include <list>
+#include <string>
+
+namespace librados {
+
+class AioCompletionImpl;
+class TestMemCluster;
+
+class TestMemRadosClient : public TestRadosClient {
+public:
+ TestMemRadosClient(CephContext *cct, TestMemCluster *test_mem_cluster);
+ ~TestMemRadosClient() override;
+
+ TestIoCtxImpl *create_ioctx(int64_t pool_id,
+ const std::string &pool_name) override;
+
+ uint32_t get_nonce() override {
+ return m_nonce;
+ }
+ uint64_t get_instance_id() override {
+ return m_global_id;
+ }
+
+ int get_min_compatible_osd(int8_t* require_osd_release) override {
+ *require_osd_release = CEPH_RELEASE_NAUTILUS;
+ return 0;
+ }
+
+ int get_min_compatible_client(int8_t* min_compat_client,
+ int8_t* require_min_compat_client) override {
+ *min_compat_client = CEPH_RELEASE_MIMIC;
+ *require_min_compat_client = CEPH_RELEASE_MIMIC;
+ return 0;
+ }
+
+ void object_list(int64_t pool_id,
+ std::list<librados::TestRadosClient::Object> *list) override;
+
+ int service_daemon_register(const std::string& service,
+ const std::string& name,
+ const std::map<std::string,std::string>& metadata) override {
+ return 0;
+ }
+ int service_daemon_update_status(std::map<std::string,std::string>&& status) override {
+ return 0;
+ }
+
+ int pool_create(const std::string &pool_name) override;
+ int pool_delete(const std::string &pool_name) override;
+ int pool_get_base_tier(int64_t pool_id, int64_t* base_tier) override;
+ int pool_list(std::list<std::pair<int64_t, std::string> >& v) override;
+ int64_t pool_lookup(const std::string &name) override;
+ int pool_reverse_lookup(int64_t id, std::string *name) override;
+
+ int watch_flush() override;
+
+ bool is_blacklisted() const override;
+ int blacklist_add(const std::string& client_address,
+ uint32_t expire_seconds) override;
+protected:
+ TestMemCluster *get_mem_cluster() {
+ return m_mem_cluster;
+ }
+
+ void transaction_start(const std::string& nspace,
+ const std::string &oid) override;
+ void transaction_finish(const std::string& nspace,
+ const std::string &oid) override;
+
+private:
+ TestMemCluster *m_mem_cluster;
+ uint32_t m_nonce;
+ uint64_t m_global_id;
+
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_MEM_RADOS_CLIENT_H
diff --git a/src/test/librados_test_stub/TestRadosClient.cc b/src/test/librados_test_stub/TestRadosClient.cc
new file mode 100644
index 00000000..d8a4248e
--- /dev/null
+++ b/src/test/librados_test_stub/TestRadosClient.cc
@@ -0,0 +1,260 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestRadosClient.h"
+#include "test/librados_test_stub/TestIoCtxImpl.h"
+#include "librados/AioCompletionImpl.h"
+#include "include/ceph_assert.h"
+#include "common/ceph_json.h"
+#include "common/Finisher.h"
+#include <boost/bind.hpp>
+#include <boost/thread.hpp>
+#include <errno.h>
+
+#include <atomic>
+#include <sstream>
+
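+// Number of simulated OSD work queues: taken from the LIBRADOS_CONCURRENCY
+// environment variable when set, otherwise the hardware concurrency, with
+// a minimum of one.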
+static int get_concurrency() {
+ int concurrency = 0;
+ char *env = getenv("LIBRADOS_CONCURRENCY");
+ if (env != NULL) {
+ concurrency = atoi(env);
+ }
+ if (concurrency == 0) {
+ concurrency = boost::thread::thread::hardware_concurrency();
+ }
+ if (concurrency == 0) {
+ concurrency = 1;
+ }
+ return concurrency;
+}
+
+namespace librados {
+
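+// Marks the completion as complete, fires the complete/safe callbacks
+// outside the completion lock, then signals any waiters and releases the
+// reference via put_unlock().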
+static void finish_aio_completion(AioCompletionImpl *c, int r) {
+ c->lock.Lock();
+ c->complete = true;
+ c->rval = r;
+ c->lock.Unlock();
+
+ rados_callback_t cb_complete = c->callback_complete;
+ void *cb_complete_arg = c->callback_complete_arg;
+ if (cb_complete) {
+ cb_complete(c, cb_complete_arg);
+ }
+
+ rados_callback_t cb_safe = c->callback_safe;
+ void *cb_safe_arg = c->callback_safe_arg;
+ if (cb_safe) {
+ cb_safe(c, cb_safe_arg);
+ }
+
+ c->lock.Lock();
+ c->callback_complete = NULL;
+ c->callback_safe = NULL;
+ c->cond.Signal();
+ c->put_unlock();
+}
+
+class AioFunctionContext : public Context {
+public:
+ AioFunctionContext(const TestRadosClient::AioFunction &callback,
+ Finisher *finisher, AioCompletionImpl *c)
+ : m_callback(callback), m_finisher(finisher), m_comp(c)
+ {
+ if (m_comp != NULL) {
+ m_comp->get();
+ }
+ }
+
+ void finish(int r) override {
+ int ret = m_callback();
+ if (m_comp != NULL) {
+ if (m_finisher != NULL) {
+ m_finisher->queue(new FunctionContext(boost::bind(
+ &finish_aio_completion, m_comp, ret)));
+ } else {
+ finish_aio_completion(m_comp, ret);
+ }
+ }
+ }
+private:
+ TestRadosClient::AioFunction m_callback;
+ Finisher *m_finisher;
+ AioCompletionImpl *m_comp;
+};
+
+TestRadosClient::TestRadosClient(CephContext *cct,
+ TestWatchNotify *watch_notify)
+ : m_cct(cct->get()), m_watch_notify(watch_notify),
+ m_aio_finisher(new Finisher(m_cct))
+{
+ get();
+
+ // simulate multiple OSDs
+ int concurrency = get_concurrency();
+ for (int i = 0; i < concurrency; ++i) {
+ m_finishers.push_back(new Finisher(m_cct));
+ m_finishers.back()->start();
+ }
+
+ // separate finisher to mimic librados' AIO callback processing thread
+ m_aio_finisher->start();
+}
+
+TestRadosClient::~TestRadosClient() {
+ flush_aio_operations();
+
+ for (size_t i = 0; i < m_finishers.size(); ++i) {
+ m_finishers[i]->stop();
+ delete m_finishers[i];
+ }
+ m_aio_finisher->stop();
+ delete m_aio_finisher;
+
+ m_cct->put();
+ m_cct = NULL;
+}
+
+void TestRadosClient::get() {
+ m_refcount++;
+}
+
+void TestRadosClient::put() {
+ if (--m_refcount == 0) {
+ shutdown();
+ delete this;
+ }
+}
+
+CephContext *TestRadosClient::cct() {
+ return m_cct;
+}
+
+int TestRadosClient::connect() {
+ return 0;
+}
+
+void TestRadosClient::shutdown() {
+}
+
+int TestRadosClient::wait_for_latest_osdmap() {
+ return 0;
+}
+
+int TestRadosClient::mon_command(const std::vector<std::string>& cmd,
+ const bufferlist &inbl,
+ bufferlist *outbl, std::string *outs) {
+ for (std::vector<std::string>::const_iterator it = cmd.begin();
+ it != cmd.end(); ++it) {
+ JSONParser parser;
+ if (!parser.parse(it->c_str(), it->length())) {
+ return -EINVAL;
+ }
+
+ JSONObjIter j_it = parser.find("prefix");
+ if (j_it.end()) {
+ return -EINVAL;
+ }
+
+ if ((*j_it)->get_data() == "osd tier add") {
+ return 0;
+ } else if ((*j_it)->get_data() == "osd tier cache-mode") {
+ return 0;
+ } else if ((*j_it)->get_data() == "osd tier set-overlay") {
+ return 0;
+ } else if ((*j_it)->get_data() == "osd tier remove-overlay") {
+ return 0;
+ } else if ((*j_it)->get_data() == "osd tier remove") {
+ return 0;
+ } else if ((*j_it)->get_data() == "config-key rm") {
+ return 0;
+ } else if ((*j_it)->get_data() == "config set") {
+ return 0;
+ } else if ((*j_it)->get_data() == "df") {
+ std::stringstream str;
+ str << R"({"pools": [)";
+
+ std::list<std::pair<int64_t, std::string>> pools;
+ pool_list(pools);
+ for (auto& pool : pools) {
+ if (pools.begin()->first != pool.first) {
+ str << ",";
+ }
+ str << R"({"name": ")" << pool.second << R"(", "stats": )"
+ << R"({"percent_used": 1.0, "bytes_used": 0, "max_avail": 0}})";
+ }
+
+ str << "]}";
+ outbl->append(str.str());
+ return 0;
+ }
+ }
+ return -ENOSYS;
+}
+
+void TestRadosClient::add_aio_operation(const std::string& oid,
+ bool queue_callback,
+ const AioFunction &aio_function,
+ AioCompletionImpl *c) {
+ AioFunctionContext *ctx = new AioFunctionContext(
+ aio_function, queue_callback ? m_aio_finisher : NULL, c);
+ get_finisher(oid)->queue(ctx);
+}
+
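+// Helper for flush_aio_operations(): one flushed() call is expected from
+// each simulated OSD finisher, and once the count reaches zero the
+// supplied completion is finished on the AIO callback finisher.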
+struct WaitForFlush {
+ int flushed() {
+ if (--count == 0) {
+ aio_finisher->queue(new FunctionContext(boost::bind(
+ &finish_aio_completion, c, 0)));
+ delete this;
+ }
+ return 0;
+ }
+
+ std::atomic<int64_t> count = { 0 };
+ Finisher *aio_finisher;
+ AioCompletionImpl *c;
+};
+
+void TestRadosClient::flush_aio_operations() {
+ AioCompletionImpl *comp = new AioCompletionImpl();
+ flush_aio_operations(comp);
+ comp->wait_for_safe();
+ comp->put();
+}
+
+void TestRadosClient::flush_aio_operations(AioCompletionImpl *c) {
+ c->get();
+
+ WaitForFlush *wait_for_flush = new WaitForFlush();
+ wait_for_flush->count = m_finishers.size();
+ wait_for_flush->aio_finisher = m_aio_finisher;
+ wait_for_flush->c = c;
+
+ for (size_t i = 0; i < m_finishers.size(); ++i) {
+ AioFunctionContext *ctx = new AioFunctionContext(
+ boost::bind(&WaitForFlush::flushed, wait_for_flush),
+ nullptr, nullptr);
+ m_finishers[i]->queue(ctx);
+ }
+}
+
+int TestRadosClient::aio_watch_flush(AioCompletionImpl *c) {
+ c->get();
+ Context *ctx = new FunctionContext(boost::bind(
+ &TestRadosClient::finish_aio_completion, this, c, _1));
+ get_watch_notify()->aio_flush(this, ctx);
+ return 0;
+}
+
+void TestRadosClient::finish_aio_completion(AioCompletionImpl *c, int r) {
+ librados::finish_aio_completion(c, r);
+}
+
+Finisher *TestRadosClient::get_finisher(const std::string &oid) {
+ std::size_t h = m_hash(oid);
+ return m_finishers[h % m_finishers.size()];
+}
+
+} // namespace librados
diff --git a/src/test/librados_test_stub/TestRadosClient.h b/src/test/librados_test_stub/TestRadosClient.h
new file mode 100644
index 00000000..993382f7
--- /dev/null
+++ b/src/test/librados_test_stub/TestRadosClient.h
@@ -0,0 +1,145 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_RADOS_CLIENT_H
+#define CEPH_TEST_RADOS_CLIENT_H
+
+#include <map>
+#include <list>
+#include <string>
+#include <vector>
+#include <atomic>
+
+#include <boost/function.hpp>
+#include <boost/functional/hash.hpp>
+
+#include "include/rados/librados.hpp"
+#include "common/config.h"
+#include "include/buffer_fwd.h"
+#include "test/librados_test_stub/TestWatchNotify.h"
+
+class Finisher;
+
+namespace librados {
+
+class TestIoCtxImpl;
+
+class TestRadosClient {
+public:
+
+ static void Deallocate(librados::TestRadosClient* client)
+ {
+ client->put();
+ }
+
+ typedef boost::function<int()> AioFunction;
+
+ struct Object {
+ std::string oid;
+ std::string locator;
+ std::string nspace;
+ };
+
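+  // RAII helper that brackets access to a single object with
+  // transaction_start() / transaction_finish(), e.g.:
+  //   {
+  //     TestRadosClient::Transaction guard(rados_client, nspace, oid);
+  //     // ... perform the object operation ...
+  //   } // transaction_finish() runs here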
+ class Transaction {
+ public:
+ Transaction(TestRadosClient *rados_client, const std::string& nspace,
+ const std::string &oid)
+ : rados_client(rados_client), nspace(nspace), oid(oid) {
+ rados_client->transaction_start(nspace, oid);
+ }
+ ~Transaction() {
+ rados_client->transaction_finish(nspace, oid);
+ }
+ private:
+ TestRadosClient *rados_client;
+ std::string nspace;
+ std::string oid;
+ };
+
+ TestRadosClient(CephContext *cct, TestWatchNotify *watch_notify);
+
+ void get();
+ void put();
+
+ virtual CephContext *cct();
+
+ virtual uint32_t get_nonce() = 0;
+ virtual uint64_t get_instance_id() = 0;
+
+ virtual int get_min_compatible_osd(int8_t* require_osd_release) = 0;
+ virtual int get_min_compatible_client(int8_t* min_compat_client,
+ int8_t* require_min_compat_client) = 0;
+
+ virtual int connect();
+ virtual void shutdown();
+ virtual int wait_for_latest_osdmap();
+
+ virtual TestIoCtxImpl *create_ioctx(int64_t pool_id,
+ const std::string &pool_name) = 0;
+
+ virtual int mon_command(const std::vector<std::string>& cmd,
+ const bufferlist &inbl,
+ bufferlist *outbl, std::string *outs);
+
+ virtual void object_list(int64_t pool_id,
+ std::list<librados::TestRadosClient::Object> *list) = 0;
+
+ virtual int service_daemon_register(const std::string& service,
+ const std::string& name,
+ const std::map<std::string,std::string>& metadata) = 0;
+ virtual int service_daemon_update_status(std::map<std::string,std::string>&& status) = 0;
+
+ virtual int pool_create(const std::string &pool_name) = 0;
+ virtual int pool_delete(const std::string &pool_name) = 0;
+ virtual int pool_get_base_tier(int64_t pool_id, int64_t* base_tier) = 0;
+ virtual int pool_list(std::list<std::pair<int64_t, std::string> >& v) = 0;
+ virtual int64_t pool_lookup(const std::string &name) = 0;
+ virtual int pool_reverse_lookup(int64_t id, std::string *name) = 0;
+
+ virtual int aio_watch_flush(AioCompletionImpl *c);
+ virtual int watch_flush() = 0;
+
+ virtual bool is_blacklisted() const = 0;
+ virtual int blacklist_add(const std::string& client_address,
+ uint32_t expire_seconds) = 0;
+
+ Finisher *get_aio_finisher() {
+ return m_aio_finisher;
+ }
+ TestWatchNotify *get_watch_notify() {
+ return m_watch_notify;
+ }
+
+ void add_aio_operation(const std::string& oid, bool queue_callback,
+ const AioFunction &aio_function, AioCompletionImpl *c);
+ void flush_aio_operations();
+ void flush_aio_operations(AioCompletionImpl *c);
+
+ void finish_aio_completion(AioCompletionImpl *c, int r);
+
+protected:
+ virtual ~TestRadosClient();
+
+ virtual void transaction_start(const std::string& nspace,
+ const std::string &oid) = 0;
+ virtual void transaction_finish(const std::string& nspace,
+ const std::string &oid) = 0;
+
+private:
+
+ CephContext *m_cct;
+ std::atomic<uint64_t> m_refcount = { 0 };
+
+ TestWatchNotify *m_watch_notify;
+
+ Finisher *get_finisher(const std::string& oid);
+
+ Finisher *m_aio_finisher;
+ std::vector<Finisher *> m_finishers;
+ boost::hash<std::string> m_hash;
+
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_RADOS_CLIENT_H
diff --git a/src/test/librados_test_stub/TestWatchNotify.cc b/src/test/librados_test_stub/TestWatchNotify.cc
new file mode 100644
index 00000000..0068d075
--- /dev/null
+++ b/src/test/librados_test_stub/TestWatchNotify.cc
@@ -0,0 +1,449 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestWatchNotify.h"
+#include "include/Context.h"
+#include "common/Cond.h"
+#include "include/stringify.h"
+#include "common/Finisher.h"
+#include "test/librados_test_stub/TestCluster.h"
+#include "test/librados_test_stub/TestRadosClient.h"
+#include <boost/bind.hpp>
+#include <boost/function.hpp>
+#include "include/ceph_assert.h"
+
+#define dout_subsys ceph_subsys_rados
+#undef dout_prefix
+#define dout_prefix *_dout << "TestWatchNotify::" << __func__ << ": "
+
+namespace librados {
+
+std::ostream& operator<<(std::ostream& out,
+ const TestWatchNotify::WatcherID &watcher_id) {
+ out << "(" << watcher_id.first << "," << watcher_id.second << ")";
+ return out;
+}
+
+struct TestWatchNotify::ObjectHandler : public TestCluster::ObjectHandler {
+ TestWatchNotify* test_watch_notify;
+ int64_t pool_id;
+ std::string nspace;
+ std::string oid;
+
+ ObjectHandler(TestWatchNotify* test_watch_notify, int64_t pool_id,
+ const std::string& nspace, const std::string& oid)
+ : test_watch_notify(test_watch_notify), pool_id(pool_id),
+ nspace(nspace), oid(oid) {
+ }
+
+ void handle_removed(TestRadosClient* test_rados_client) override {
+ // copy member variables since this object might be deleted
+ auto _test_watch_notify = test_watch_notify;
+ auto _pool_id = pool_id;
+ auto _nspace = nspace;
+ auto _oid = oid;
+ auto ctx = new FunctionContext([_test_watch_notify, _pool_id, _nspace, _oid](int r) {
+ _test_watch_notify->handle_object_removed(_pool_id, _nspace, _oid);
+ });
+ test_rados_client->get_aio_finisher()->queue(ctx);
+ }
+};
+
+TestWatchNotify::TestWatchNotify(TestCluster* test_cluster)
+ : m_test_cluster(test_cluster), m_lock("librados::TestWatchNotify::m_lock") {
+}
+
+void TestWatchNotify::flush(TestRadosClient *rados_client) {
+ CephContext *cct = rados_client->cct();
+
+ ldout(cct, 20) << "enter" << dendl;
+ // block until we know no additional async notify callbacks will occur
+ C_SaferCond ctx;
+ m_async_op_tracker.wait_for_ops(&ctx);
+ ctx.wait();
+}
+
+int TestWatchNotify::list_watchers(int64_t pool_id, const std::string& nspace,
+ const std::string& o,
+ std::list<obj_watch_t> *out_watchers) {
+ Mutex::Locker lock(m_lock);
+ SharedWatcher watcher = get_watcher(pool_id, nspace, o);
+ if (!watcher) {
+ return -ENOENT;
+ }
+
+ out_watchers->clear();
+ for (TestWatchNotify::WatchHandles::iterator it =
+ watcher->watch_handles.begin();
+ it != watcher->watch_handles.end(); ++it) {
+ obj_watch_t obj;
+ strcpy(obj.addr, it->second.addr.c_str());
+ obj.watcher_id = static_cast<int64_t>(it->second.gid);
+ obj.cookie = it->second.handle;
+ obj.timeout_seconds = 30;
+ out_watchers->push_back(obj);
+ }
+ return 0;
+}
+
+void TestWatchNotify::aio_flush(TestRadosClient *rados_client,
+ Context *on_finish) {
+ rados_client->get_aio_finisher()->queue(on_finish);
+}
+
+int TestWatchNotify::watch(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace, const std::string& o,
+ uint64_t gid, uint64_t *handle,
+ librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2) {
+ C_SaferCond cond;
+ aio_watch(rados_client, pool_id, nspace, o, gid, handle, ctx, ctx2, &cond);
+ return cond.wait();
+}
+
+void TestWatchNotify::aio_watch(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace, const std::string& o,
+ uint64_t gid, uint64_t *handle,
+ librados::WatchCtx *watch_ctx,
+ librados::WatchCtx2 *watch_ctx2,
+ Context *on_finish) {
+ auto ctx = new FunctionContext([=](int) {
+ execute_watch(rados_client, pool_id, nspace, o, gid, handle, watch_ctx,
+ watch_ctx2, on_finish);
+ });
+ rados_client->get_aio_finisher()->queue(ctx);
+}
+
+int TestWatchNotify::unwatch(TestRadosClient *rados_client,
+ uint64_t handle) {
+ C_SaferCond ctx;
+ aio_unwatch(rados_client, handle, &ctx);
+ return ctx.wait();
+}
+
+void TestWatchNotify::aio_unwatch(TestRadosClient *rados_client,
+ uint64_t handle, Context *on_finish) {
+ auto ctx = new FunctionContext([this, rados_client, handle, on_finish](int) {
+ execute_unwatch(rados_client, handle, on_finish);
+ });
+ rados_client->get_aio_finisher()->queue(ctx);
+}
+
+void TestWatchNotify::aio_notify(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace,
+ const std::string& oid, const bufferlist& bl,
+ uint64_t timeout_ms, bufferlist *pbl,
+ Context *on_notify) {
+ auto ctx = new FunctionContext([=](int) {
+ execute_notify(rados_client, pool_id, nspace, oid, bl, pbl, on_notify);
+ });
+ rados_client->get_aio_finisher()->queue(ctx);
+}
+
+int TestWatchNotify::notify(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace, const std::string& oid,
+ bufferlist& bl, uint64_t timeout_ms,
+ bufferlist *pbl) {
+ C_SaferCond cond;
+ aio_notify(rados_client, pool_id, nspace, oid, bl, timeout_ms, pbl, &cond);
+ return cond.wait();
+}
+
+void TestWatchNotify::notify_ack(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace,
+ const std::string& o, uint64_t notify_id,
+ uint64_t handle, uint64_t gid,
+ bufferlist& bl) {
+ CephContext *cct = rados_client->cct();
+ ldout(cct, 20) << "notify_id=" << notify_id << ", handle=" << handle
+ << ", gid=" << gid << dendl;
+ Mutex::Locker lock(m_lock);
+ WatcherID watcher_id = std::make_pair(gid, handle);
+ ack_notify(rados_client, pool_id, nspace, o, notify_id, watcher_id, bl);
+ finish_notify(rados_client, pool_id, nspace, o, notify_id);
+}
+
+void TestWatchNotify::execute_watch(TestRadosClient *rados_client,
+ int64_t pool_id, const std::string& nspace,
+ const std::string& o, uint64_t gid,
+ uint64_t *handle, librados::WatchCtx *ctx,
+ librados::WatchCtx2 *ctx2,
+ Context* on_finish) {
+ CephContext *cct = rados_client->cct();
+
+ m_lock.Lock();
+ SharedWatcher watcher = get_watcher(pool_id, nspace, o);
+ if (!watcher) {
+ m_lock.Unlock();
+ on_finish->complete(-ENOENT);
+ return;
+ }
+
+ WatchHandle watch_handle;
+ watch_handle.rados_client = rados_client;
+ watch_handle.addr = "127.0.0.1:0/" + stringify(rados_client->get_nonce());
+ watch_handle.nonce = rados_client->get_nonce();
+ watch_handle.gid = gid;
+ watch_handle.handle = ++m_handle;
+ watch_handle.watch_ctx = ctx;
+ watch_handle.watch_ctx2 = ctx2;
+ watcher->watch_handles[watch_handle.handle] = watch_handle;
+
+ *handle = watch_handle.handle;
+
+ ldout(cct, 20) << "oid=" << o << ", gid=" << gid << ": handle=" << *handle
+ << dendl;
+ m_lock.Unlock();
+
+ on_finish->complete(0);
+}
+
+void TestWatchNotify::execute_unwatch(TestRadosClient *rados_client,
+ uint64_t handle, Context* on_finish) {
+ CephContext *cct = rados_client->cct();
+
+ ldout(cct, 20) << "handle=" << handle << dendl;
+ {
+ Mutex::Locker locker(m_lock);
+ for (FileWatchers::iterator it = m_file_watchers.begin();
+ it != m_file_watchers.end(); ++it) {
+ SharedWatcher watcher = it->second;
+
+ WatchHandles::iterator w_it = watcher->watch_handles.find(handle);
+ if (w_it != watcher->watch_handles.end()) {
+ watcher->watch_handles.erase(w_it);
+ maybe_remove_watcher(watcher);
+ break;
+ }
+ }
+ }
+ on_finish->complete(0);
+}
+
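+// Returns the Watcher record for an object, lazily creating one and
+// registering an ObjectHandler with the cluster so the record is torn
+// down if the object is removed. Returns an empty pointer when the object
+// does not exist.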
+TestWatchNotify::SharedWatcher TestWatchNotify::get_watcher(
+ int64_t pool_id, const std::string& nspace, const std::string& oid) {
+ ceph_assert(m_lock.is_locked());
+
+ auto it = m_file_watchers.find({pool_id, nspace, oid});
+ if (it == m_file_watchers.end()) {
+ SharedWatcher watcher(new Watcher(pool_id, nspace, oid));
+ watcher->object_handler.reset(new ObjectHandler(
+ this, pool_id, nspace, oid));
+ int r = m_test_cluster->register_object_handler(
+ pool_id, {nspace, oid}, watcher->object_handler.get());
+ if (r < 0) {
+ // object doesn't exist
+ return SharedWatcher();
+ }
+ m_file_watchers[{pool_id, nspace, oid}] = watcher;
+ return watcher;
+ }
+
+ return it->second;
+}
+
+void TestWatchNotify::maybe_remove_watcher(SharedWatcher watcher) {
+ ceph_assert(m_lock.is_locked());
+
+ // TODO
+ if (watcher->watch_handles.empty() && watcher->notify_handles.empty()) {
+ auto pool_id = watcher->pool_id;
+ auto& nspace = watcher->nspace;
+ auto& oid = watcher->oid;
+ if (watcher->object_handler) {
+ m_test_cluster->unregister_object_handler(pool_id, {nspace, oid},
+ watcher->object_handler.get());
+ watcher->object_handler.reset();
+ }
+
+ m_file_watchers.erase({pool_id, nspace, oid});
+ }
+}
+
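+// Fans a notification out to every registered watch handle: each
+// watcher's callback is queued on its own client's AIO finisher,
+// old-style WatchCtx watchers are acknowledged automatically, and
+// finish_notify() is invoked immediately in case there are no watchers to
+// wait for.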
+void TestWatchNotify::execute_notify(TestRadosClient *rados_client,
+ int64_t pool_id, const std::string& nspace,
+ const std::string &oid,
+ const bufferlist &bl, bufferlist *pbl,
+ Context *on_notify) {
+ CephContext *cct = rados_client->cct();
+
+ m_lock.Lock();
+ uint64_t notify_id = ++m_notify_id;
+
+ SharedWatcher watcher = get_watcher(pool_id, nspace, oid);
+ if (!watcher) {
+ ldout(cct, 1) << "oid=" << oid << ": not found" << dendl;
+ m_lock.Unlock();
+ on_notify->complete(-ENOENT);
+ return;
+ }
+
+ ldout(cct, 20) << "oid=" << oid << ": notify_id=" << notify_id << dendl;
+
+ SharedNotifyHandle notify_handle(new NotifyHandle());
+ notify_handle->rados_client = rados_client;
+ notify_handle->pbl = pbl;
+ notify_handle->on_notify = on_notify;
+
+ WatchHandles &watch_handles = watcher->watch_handles;
+ for (auto &watch_handle_pair : watch_handles) {
+ WatchHandle &watch_handle = watch_handle_pair.second;
+ notify_handle->pending_watcher_ids.insert(std::make_pair(
+ watch_handle.gid, watch_handle.handle));
+
+ m_async_op_tracker.start_op();
+ uint64_t notifier_id = rados_client->get_instance_id();
+ watch_handle.rados_client->get_aio_finisher()->queue(new FunctionContext(
+ [this, pool_id, nspace, oid, bl, notify_id, watch_handle, notifier_id](int r) {
+ bufferlist notify_bl;
+ notify_bl.append(bl);
+
+ if (watch_handle.watch_ctx2 != NULL) {
+ watch_handle.watch_ctx2->handle_notify(notify_id,
+ watch_handle.handle,
+ notifier_id, notify_bl);
+ } else if (watch_handle.watch_ctx != NULL) {
+ watch_handle.watch_ctx->notify(0, 0, notify_bl);
+
+ // auto ack old-style watch/notify clients
+ ack_notify(watch_handle.rados_client, pool_id, nspace, oid, notify_id,
+ {watch_handle.gid, watch_handle.handle}, bufferlist());
+ }
+
+ m_async_op_tracker.finish_op();
+ }));
+ }
+ watcher->notify_handles[notify_id] = notify_handle;
+
+ finish_notify(rados_client, pool_id, nspace, oid, notify_id);
+ m_lock.Unlock();
+}
+
+void TestWatchNotify::ack_notify(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace,
+ const std::string &oid, uint64_t notify_id,
+ const WatcherID &watcher_id,
+ const bufferlist &bl) {
+ CephContext *cct = rados_client->cct();
+
+ ceph_assert(m_lock.is_locked());
+ SharedWatcher watcher = get_watcher(pool_id, nspace, oid);
+ if (!watcher) {
+ ldout(cct, 1) << "oid=" << oid << ": not found" << dendl;
+ return;
+ }
+
+ NotifyHandles::iterator it = watcher->notify_handles.find(notify_id);
+ if (it == watcher->notify_handles.end()) {
+ ldout(cct, 1) << "oid=" << oid << ", notify_id=" << notify_id
+ << ", WatcherID=" << watcher_id << ": not found" << dendl;
+ return;
+ }
+
+ ldout(cct, 20) << "oid=" << oid << ", notify_id=" << notify_id
+ << ", WatcherID=" << watcher_id << dendl;
+
+ bufferlist response;
+ response.append(bl);
+
+ SharedNotifyHandle notify_handle = it->second;
+ notify_handle->notify_responses[watcher_id] = response;
+ notify_handle->pending_watcher_ids.erase(watcher_id);
+}
+
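+// Completes a notification once no watcher acks are outstanding: the
+// collected responses and the (by now empty) set of pending watcher IDs
+// are encoded into the caller's bufferlist and the notify context is
+// queued on the caller's AIO finisher.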
+void TestWatchNotify::finish_notify(TestRadosClient *rados_client,
+ int64_t pool_id, const std::string& nspace,
+ const std::string &oid,
+ uint64_t notify_id) {
+ CephContext *cct = rados_client->cct();
+
+ ldout(cct, 20) << "oid=" << oid << ", notify_id=" << notify_id << dendl;
+
+ ceph_assert(m_lock.is_locked());
+ SharedWatcher watcher = get_watcher(pool_id, nspace, oid);
+ if (!watcher) {
+ ldout(cct, 1) << "oid=" << oid << ": not found" << dendl;
+ return;
+ }
+
+ NotifyHandles::iterator it = watcher->notify_handles.find(notify_id);
+ if (it == watcher->notify_handles.end()) {
+ ldout(cct, 1) << "oid=" << oid << ", notify_id=" << notify_id
+ << ": not found" << dendl;
+ return;
+ }
+
+ SharedNotifyHandle notify_handle = it->second;
+ if (!notify_handle->pending_watcher_ids.empty()) {
+ ldout(cct, 10) << "oid=" << oid << ", notify_id=" << notify_id
+ << ": pending watchers, returning" << dendl;
+ return;
+ }
+
+ ldout(cct, 20) << "oid=" << oid << ", notify_id=" << notify_id
+ << ": completing" << dendl;
+
+ if (notify_handle->pbl != NULL) {
+ encode(notify_handle->notify_responses, *notify_handle->pbl);
+ encode(notify_handle->pending_watcher_ids, *notify_handle->pbl);
+ }
+
+ notify_handle->rados_client->get_aio_finisher()->queue(
+ notify_handle->on_notify, 0);
+ watcher->notify_handles.erase(notify_id);
+ maybe_remove_watcher(watcher);
+}
+
+void TestWatchNotify::blacklist(uint32_t nonce) {
+ Mutex::Locker locker(m_lock);
+
+ for (auto file_it = m_file_watchers.begin();
+ file_it != m_file_watchers.end(); ) {
+ auto &watcher = file_it->second;
+ for (auto w_it = watcher->watch_handles.begin();
+ w_it != watcher->watch_handles.end();) {
+ if (w_it->second.nonce == nonce) {
+ w_it = watcher->watch_handles.erase(w_it);
+ } else {
+ ++w_it;
+ }
+ }
+
+ ++file_it;
+ maybe_remove_watcher(watcher);
+ }
+}
+
+void TestWatchNotify::handle_object_removed(int64_t pool_id,
+ const std::string& nspace,
+ const std::string& oid) {
+ Mutex::Locker locker(m_lock);
+ auto it = m_file_watchers.find({pool_id, nspace, oid});
+ if (it == m_file_watchers.end()) {
+ return;
+ }
+
+ auto watcher = it->second;
+
+ // cancel all in-flight notifications
+ for (auto& notify_handle_pair : watcher->notify_handles) {
+ auto notify_handle = notify_handle_pair.second;
+ notify_handle->rados_client->get_aio_finisher()->queue(
+ notify_handle->on_notify, -ENOENT);
+ }
+
+ // alert all watchers of the loss of connection
+ for (auto& watch_handle_pair : watcher->watch_handles) {
+ auto& watch_handle = watch_handle_pair.second;
+ auto handle = watch_handle.handle;
+ auto watch_ctx2 = watch_handle.watch_ctx2;
+ if (watch_ctx2 != nullptr) {
+ auto ctx = new FunctionContext([handle, watch_ctx2](int) {
+ watch_ctx2->handle_error(handle, -ENOTCONN);
+ });
+ watch_handle.rados_client->get_aio_finisher()->queue(ctx);
+ }
+ }
+ m_file_watchers.erase(it);
+}
+
+} // namespace librados
diff --git a/src/test/librados_test_stub/TestWatchNotify.h b/src/test/librados_test_stub/TestWatchNotify.h
new file mode 100644
index 00000000..4d381373
--- /dev/null
+++ b/src/test/librados_test_stub/TestWatchNotify.h
@@ -0,0 +1,148 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_WATCH_NOTIFY_H
+#define CEPH_TEST_WATCH_NOTIFY_H
+
+#include "include/rados/librados.hpp"
+#include "common/AsyncOpTracker.h"
+#include "common/Mutex.h"
+#include <boost/noncopyable.hpp>
+#include <boost/shared_ptr.hpp>
+#include <list>
+#include <map>
+
+class Cond;
+class Finisher;
+
+namespace librados {
+
+class TestCluster;
+class TestRadosClient;
+
+class TestWatchNotify : boost::noncopyable {
+public:
+ typedef std::pair<uint64_t, uint64_t> WatcherID;
+ typedef std::set<WatcherID> WatcherIDs;
+ typedef std::map<std::pair<uint64_t, uint64_t>, bufferlist> NotifyResponses;
+
+ struct NotifyHandle {
+ TestRadosClient *rados_client = nullptr;
+ WatcherIDs pending_watcher_ids;
+ NotifyResponses notify_responses;
+ bufferlist *pbl = nullptr;
+ Context *on_notify = nullptr;
+ };
+ typedef boost::shared_ptr<NotifyHandle> SharedNotifyHandle;
+ typedef std::map<uint64_t, SharedNotifyHandle> NotifyHandles;
+
+ struct WatchHandle {
+ TestRadosClient *rados_client = nullptr;
+ std::string addr;
+ uint32_t nonce;
+ uint64_t gid;
+ uint64_t handle;
+ librados::WatchCtx* watch_ctx;
+ librados::WatchCtx2* watch_ctx2;
+ };
+
+ typedef std::map<uint64_t, WatchHandle> WatchHandles;
+
+ struct ObjectHandler;
+ typedef boost::shared_ptr<ObjectHandler> SharedObjectHandler;
+
+ struct Watcher {
+ Watcher(int64_t pool_id, const std::string& nspace, const std::string& oid)
+ : pool_id(pool_id), nspace(nspace), oid(oid) {
+ }
+
+ int64_t pool_id;
+ std::string nspace;
+ std::string oid;
+
+ SharedObjectHandler object_handler;
+ WatchHandles watch_handles;
+ NotifyHandles notify_handles;
+ };
+ typedef boost::shared_ptr<Watcher> SharedWatcher;
+
+ TestWatchNotify(TestCluster* test_cluster);
+
+ int list_watchers(int64_t pool_id, const std::string& nspace,
+ const std::string& o, std::list<obj_watch_t> *out_watchers);
+
+ void aio_flush(TestRadosClient *rados_client, Context *on_finish);
+ void aio_watch(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace, const std::string& o, uint64_t gid,
+ uint64_t *handle, librados::WatchCtx *watch_ctx,
+ librados::WatchCtx2 *watch_ctx2, Context *on_finish);
+ void aio_unwatch(TestRadosClient *rados_client, uint64_t handle,
+ Context *on_finish);
+ void aio_notify(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace, const std::string& oid,
+ const bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl,
+ Context *on_notify);
+
+ void flush(TestRadosClient *rados_client);
+ int notify(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace, const std::string& o, bufferlist& bl,
+ uint64_t timeout_ms, bufferlist *pbl);
+ void notify_ack(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace, const std::string& o,
+ uint64_t notify_id, uint64_t handle, uint64_t gid,
+ bufferlist& bl);
+
+ int watch(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace, const std::string& o, uint64_t gid,
+ uint64_t *handle, librados::WatchCtx *ctx,
+ librados::WatchCtx2 *ctx2);
+ int unwatch(TestRadosClient *rados_client, uint64_t handle);
+
+ void blacklist(uint32_t nonce);
+
+private:
+ typedef std::tuple<int64_t, std::string, std::string> PoolFile;
+ typedef std::map<PoolFile, SharedWatcher> FileWatchers;
+
+ TestCluster *m_test_cluster;
+
+ uint64_t m_handle = 0;
+ uint64_t m_notify_id = 0;
+
+ Mutex m_lock;
+ AsyncOpTracker m_async_op_tracker;
+
+ FileWatchers m_file_watchers;
+
+ SharedWatcher get_watcher(int64_t pool_id, const std::string& nspace,
+ const std::string& oid);
+ void maybe_remove_watcher(SharedWatcher shared_watcher);
+
+ void execute_watch(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace, const std::string& o,
+ uint64_t gid, uint64_t *handle,
+ librados::WatchCtx *watch_ctx,
+ librados::WatchCtx2 *watch_ctx2,
+ Context *on_finish);
+ void execute_unwatch(TestRadosClient *rados_client, uint64_t handle,
+ Context *on_finish);
+
+ void execute_notify(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace, const std::string &oid,
+ const bufferlist &bl, bufferlist *pbl,
+ Context *on_notify);
+ void ack_notify(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace, const std::string &oid,
+ uint64_t notify_id, const WatcherID &watcher_id,
+ const bufferlist &bl);
+ void finish_notify(TestRadosClient *rados_client, int64_t pool_id,
+ const std::string& nspace, const std::string &oid,
+ uint64_t notify_id);
+
+ void handle_object_removed(int64_t pool_id, const std::string& nspace,
+ const std::string& oid);
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_WATCH_NOTIFY_H