path: root/src/cls
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/cls
parent     Initial commit.
Adding upstream version 14.2.21. (upstream/14.2.21, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/cls')
-rw-r--r--  src/cls/CMakeLists.txt | 264
-rw-r--r--  src/cls/cas/cls_cas.cc | 265
-rw-r--r--  src/cls/cas/cls_cas_client.cc | 62
-rw-r--r--  src/cls/cas/cls_cas_client.h | 13
-rw-r--r--  src/cls/cas/cls_cas_ops.h | 142
-rw-r--r--  src/cls/cephfs/cls_cephfs.cc | 210
-rw-r--r--  src/cls/cephfs/cls_cephfs.h | 147
-rw-r--r--  src/cls/cephfs/cls_cephfs_client.cc | 177
-rw-r--r--  src/cls/cephfs/cls_cephfs_client.h | 33
-rw-r--r--  src/cls/hello/cls_hello.cc | 344
-rw-r--r--  src/cls/journal/cls_journal.cc | 1230
-rw-r--r--  src/cls/journal/cls_journal_client.cc | 498
-rw-r--r--  src/cls/journal/cls_journal_client.h | 107
-rw-r--r--  src/cls/journal/cls_journal_types.cc | 196
-rw-r--r--  src/cls/journal/cls_journal_types.h | 154
-rw-r--r--  src/cls/lock/cls_lock.cc | 647
-rw-r--r--  src/cls/lock/cls_lock_client.cc | 284
-rw-r--r--  src/cls/lock/cls_lock_client.h | 141
-rw-r--r--  src/cls/lock/cls_lock_ops.cc | 210
-rw-r--r--  src/cls/lock/cls_lock_ops.h | 245
-rw-r--r--  src/cls/lock/cls_lock_types.cc | 98
-rw-r--r--  src/cls/lock/cls_lock_types.h | 172
-rw-r--r--  src/cls/log/cls_log.cc | 317
-rw-r--r--  src/cls/log/cls_log_client.cc | 154
-rw-r--r--  src/cls/log/cls_log_client.h | 31
-rw-r--r--  src/cls/log/cls_log_ops.h | 156
-rw-r--r--  src/cls/log/cls_log_types.h | 65
-rw-r--r--  src/cls/lua/cls_lua.cc | 1052
-rw-r--r--  src/cls/lua/cls_lua.h | 14
-rw-r--r--  src/cls/lua/cls_lua_client.cc | 34
-rw-r--r--  src/cls/lua/cls_lua_client.h | 13
-rw-r--r--  src/cls/lua/cls_lua_ops.h | 31
-rw-r--r--  src/cls/lua/lua_bufferlist.cc | 180
-rw-r--r--  src/cls/numops/cls_numops.cc | 161
-rw-r--r--  src/cls/numops/cls_numops_client.cc | 79
-rw-r--r--  src/cls/numops/cls_numops_client.h | 50
-rw-r--r--  src/cls/otp/cls_otp.cc | 571
-rw-r--r--  src/cls/otp/cls_otp_client.cc | 190
-rw-r--r--  src/cls/otp/cls_otp_client.h | 56
-rw-r--r--  src/cls/otp/cls_otp_ops.h | 166
-rw-r--r--  src/cls/otp/cls_otp_types.cc | 67
-rw-r--r--  src/cls/otp/cls_otp_types.h | 132
-rw-r--r--  src/cls/rbd/cls_rbd.cc | 7964
-rw-r--r--  src/cls/rbd/cls_rbd.h | 243
-rw-r--r--  src/cls/rbd/cls_rbd_client.cc | 2807
-rw-r--r--  src/cls/rbd/cls_rbd_client.h | 610
-rw-r--r--  src/cls/rbd/cls_rbd_types.cc | 909
-rw-r--r--  src/cls/rbd/cls_rbd_types.h | 791
-rw-r--r--  src/cls/refcount/cls_refcount.cc | 216
-rw-r--r--  src/cls/refcount/cls_refcount_client.cc | 61
-rw-r--r--  src/cls/refcount/cls_refcount_client.h | 34
-rw-r--r--  src/cls/refcount/cls_refcount_ops.cc | 104
-rw-r--r--  src/cls/refcount/cls_refcount_ops.h | 154
-rw-r--r--  src/cls/rgw/cls_rgw.cc | 4031
-rw-r--r--  src/cls/rgw/cls_rgw_client.cc | 1018
-rw-r--r--  src/cls/rgw/cls_rgw_client.h | 593
-rw-r--r--  src/cls/rgw/cls_rgw_const.h | 76
-rw-r--r--  src/cls/rgw/cls_rgw_ops.cc | 548
-rw-r--r--  src/cls/rgw/cls_rgw_ops.h | 1429
-rw-r--r--  src/cls/rgw/cls_rgw_types.cc | 700
-rw-r--r--  src/cls/rgw/cls_rgw_types.h | 1183
-rw-r--r--  src/cls/sdk/cls_sdk.cc | 131
-rw-r--r--  src/cls/timeindex/cls_timeindex.cc | 261
-rw-r--r--  src/cls/timeindex/cls_timeindex_client.cc | 120
-rw-r--r--  src/cls/timeindex/cls_timeindex_client.h | 93
-rw-r--r--  src/cls/timeindex/cls_timeindex_ops.h | 115
-rw-r--r--  src/cls/timeindex/cls_timeindex_types.h | 43
-rw-r--r--  src/cls/user/cls_user.cc | 455
-rw-r--r--  src/cls/user/cls_user_client.cc | 158
-rw-r--r--  src/cls/user/cls_user_client.h | 36
-rw-r--r--  src/cls/user/cls_user_ops.cc | 114
-rw-r--r--  src/cls/user/cls_user_ops.h | 204
-rw-r--r--  src/cls/user/cls_user_types.cc | 104
-rw-r--r--  src/cls/user/cls_user_types.h | 226
-rw-r--r--  src/cls/version/cls_version.cc | 234
-rw-r--r--  src/cls/version/cls_version_client.cc | 101
-rw-r--r--  src/cls/version/cls_version_client.h | 25
-rw-r--r--  src/cls/version/cls_version_ops.h | 92
-rw-r--r--  src/cls/version/cls_version_types.cc | 18
-rw-r--r--  src/cls/version/cls_version_types.h | 89
80 files changed, 35248 insertions, 0 deletions
diff --git a/src/cls/CMakeLists.txt b/src/cls/CMakeLists.txt
new file mode 100644
index 00000000..d62273d4
--- /dev/null
+++ b/src/cls/CMakeLists.txt
@@ -0,0 +1,264 @@
+## Rados object classes
+
+set(cls_dir ${CMAKE_INSTALL_LIBDIR}/rados-classes)
+
+# cls_sdk
+add_library(cls_sdk SHARED sdk/cls_sdk.cc)
+set_target_properties(cls_sdk PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+install(TARGETS cls_sdk DESTINATION ${cls_dir})
+
+# cls_hello
+set(cls_hello_srcs hello/cls_hello.cc)
+add_library(cls_hello SHARED ${cls_hello_srcs})
+set_target_properties(cls_hello PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+install(TARGETS cls_hello DESTINATION ${cls_dir})
+
+# cls_numops
+set(cls_numops_srcs numops/cls_numops.cc)
+add_library(cls_numops SHARED ${cls_numops_srcs})
+set_target_properties(cls_numops PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+install(TARGETS cls_numops DESTINATION ${cls_dir})
+
+set(cls_numops_client_srcs numops/cls_numops_client.cc)
+add_library(cls_numops_client STATIC ${cls_numops_client_srcs})
+
+
+# cls_rbd
+if (WITH_RBD)
+ set(cls_rbd_srcs rbd/cls_rbd.cc rbd/cls_rbd_types.cc)
+ add_library(cls_rbd SHARED ${cls_rbd_srcs})
+ set_target_properties(cls_rbd PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+ install(TARGETS cls_rbd DESTINATION ${cls_dir})
+
+ set(cls_rbd_client_srcs rbd/cls_rbd_client.cc rbd/cls_rbd_types.cc)
+ add_library(cls_rbd_client STATIC ${cls_rbd_client_srcs})
+ target_link_libraries(cls_rbd_client cls_lock_client)
+
+endif (WITH_RBD)
+
+# cls_lock
+set(cls_lock_srcs lock/cls_lock.cc)
+add_library(cls_lock SHARED ${cls_lock_srcs})
+set_target_properties(cls_lock PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+install(TARGETS cls_lock DESTINATION ${cls_dir})
+
+set(cls_lock_client_srcs
+ lock/cls_lock_client.cc
+ lock/cls_lock_types.cc
+ lock/cls_lock_ops.cc)
+add_library(cls_lock_client STATIC ${cls_lock_client_srcs})
+
+
+# cls_otp
+if (WITH_RADOSGW)
+ set(cls_otp_srcs otp/cls_otp.cc)
+ add_library(cls_otp SHARED ${cls_otp_srcs})
+ target_link_libraries(cls_otp OATH::OATH)
+ set_target_properties(cls_otp PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+ install(TARGETS cls_otp DESTINATION ${cls_dir})
+
+ set(cls_otp_client_srcs
+ otp/cls_otp_client.cc
+ otp/cls_otp_types.cc
+ )
+ add_library(cls_otp_client STATIC ${cls_otp_client_srcs})
+endif (WITH_RADOSGW)
+
+# cls_refcount
+set(cls_refcount_srcs
+ refcount/cls_refcount.cc
+ refcount/cls_refcount_ops.cc
+ ${CMAKE_SOURCE_DIR}/src/common/ceph_json.cc)
+add_library(cls_refcount SHARED ${cls_refcount_srcs})
+target_link_libraries(cls_refcount json_spirit)
+set_target_properties(cls_refcount PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+install(TARGETS cls_refcount DESTINATION ${cls_dir})
+
+set(cls_refcount_client_srcs
+ refcount/cls_refcount_client.cc
+ refcount/cls_refcount_ops.cc)
+add_library(cls_refcount_client STATIC ${cls_refcount_client_srcs})
+
+
+# cls_version
+set(cls_version_srcs version/cls_version.cc)
+add_library(cls_version SHARED ${cls_version_srcs})
+set_target_properties(cls_version PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+install(TARGETS cls_version DESTINATION ${cls_dir})
+
+set(cls_version_client_srcs
+ version/cls_version_client.cc
+ version/cls_version_types.cc)
+add_library(cls_version_client STATIC ${cls_version_client_srcs})
+
+
+# cls_log
+set(cls_log_srcs log/cls_log.cc)
+add_library(cls_log SHARED ${cls_log_srcs})
+set_target_properties(cls_log PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+install(TARGETS cls_log DESTINATION ${cls_dir})
+
+set(cls_log_client_srcs log/cls_log_client.cc)
+add_library(cls_log_client STATIC ${cls_log_client_srcs})
+
+
+# cls_timeindex
+set(cls_timeindex_srcs timeindex/cls_timeindex.cc)
+add_library(cls_timeindex SHARED ${cls_timeindex_srcs})
+set_target_properties(cls_timeindex PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+install(TARGETS cls_timeindex DESTINATION ${cls_dir})
+
+set(cls_timeindex_client_srcs timeindex/cls_timeindex_client.cc)
+add_library(cls_timeindex_client STATIC ${cls_timeindex_client_srcs})
+
+
+# cls_user
+set(cls_user_srcs user/cls_user.cc)
+add_library(cls_user SHARED ${cls_user_srcs})
+set_target_properties(cls_user PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+install(TARGETS cls_user DESTINATION ${cls_dir})
+
+set(cls_user_client_srcs
+ user/cls_user_client.cc
+ user/cls_user_types.cc
+ user/cls_user_ops.cc)
+add_library(cls_user_client STATIC ${cls_user_client_srcs})
+
+
+# cls_journal
+set(cls_journal_srcs
+ journal/cls_journal.cc
+ journal/cls_journal_types.cc)
+add_library(cls_journal SHARED ${cls_journal_srcs})
+set_target_properties(cls_journal PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+install(TARGETS cls_journal DESTINATION ${cls_dir})
+
+set(cls_journal_client_srcs
+ journal/cls_journal_client.cc
+ journal/cls_journal_types.cc)
+add_library(cls_journal_client STATIC ${cls_journal_client_srcs})
+
+
+# cls_rgw
+if (WITH_RADOSGW)
+ set(cls_rgw_srcs
+ rgw/cls_rgw.cc
+ rgw/cls_rgw_ops.cc
+ rgw/cls_rgw_types.cc
+ ${CMAKE_SOURCE_DIR}/src/common/ceph_json.cc)
+ add_library(cls_rgw SHARED ${cls_rgw_srcs})
+ target_link_libraries(cls_rgw json_spirit)
+ set_target_properties(cls_rgw PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+ install(TARGETS cls_rgw DESTINATION ${cls_dir})
+
+ set(cls_rgw_client_srcs
+ rgw/cls_rgw_client.cc
+ rgw/cls_rgw_types.cc
+ rgw/cls_rgw_ops.cc)
+ add_library(cls_rgw_client STATIC ${cls_rgw_client_srcs})
+
+endif (WITH_RADOSGW)
+
+# cls_cephfs
+if (WITH_CEPHFS)
+ set(cls_cephfs_srcs
+ cephfs/cls_cephfs.cc)
+ add_library(cls_cephfs SHARED ${cls_cephfs_srcs})
+ set_target_properties(cls_cephfs PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+ install(TARGETS cls_cephfs DESTINATION ${cls_dir})
+
+ set(cls_cephfs_client_srcs
+ cephfs/cls_cephfs_client.cc)
+ add_library(cls_cephfs_client STATIC ${cls_cephfs_client_srcs})
+
+endif (WITH_CEPHFS)
+
+# cls_lua
+set(cls_lua_srcs
+ lua/cls_lua.cc
+ lua/lua_bufferlist.cc)
+add_library(cls_lua SHARED ${cls_lua_srcs})
+set_target_properties(cls_lua PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+install(TARGETS cls_lua DESTINATION ${cls_dir})
+target_link_libraries(cls_lua
+ liblua
+ json_spirit)
+
+set(cls_lua_client_srcs
+ lua/cls_lua_client.cc)
+add_library(cls_lua_client STATIC ${cls_lua_client_srcs})
+
+# cls_cas
+set(cls_cas_srcs
+ cas/cls_cas.cc)
+add_library(cls_cas SHARED ${cls_cas_srcs})
+set_target_properties(cls_cas PROPERTIES
+ VERSION "1.0.0"
+ SOVERSION "1"
+ INSTALL_RPATH ""
+ CXX_VISIBILITY_PRESET hidden)
+install(TARGETS cls_cas DESTINATION ${cls_dir})
+
+set(cls_cas_client_srcs
+ cas/cls_cas_client.cc)
+add_library(cls_cas_client STATIC ${cls_cas_client_srcs})
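
Every class above follows the same split: the SHARED library is installed into ${CMAKE_INSTALL_LIBDIR}/rados-classes and loaded by ceph-osd on demand, while the STATIC *_client library is linked into request-side code and only builds the payloads sent to the class. For reference, a minimal caller-side sketch (the object name "demo_obj" and the call_hello() wrapper are illustrative, not part of this changeset) invoking the cls_hello plugin built above through the generic librados exec interface:

    #include "include/rados/librados.hpp"

    // Illustrative sketch: call the "say_hello" method registered by cls_hello.
    // The request is routed to the OSD hosting "demo_obj"; the plugin is loaded
    // there from the rados-classes directory and the method runs server-side.
    int call_hello(librados::IoCtx& io_ctx)
    {
      librados::bufferlist in, out;   // method input and reply buffers
      in.append("rados");
      int r = io_ctx.exec("demo_obj", "hello", "say_hello", in, out);
      if (r < 0)
        return r;                     // e.g. -ENOENT if demo_obj does not exist
      // out now contains "Hello, rados!"
      return 0;
    }
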
diff --git a/src/cls/cas/cls_cas.cc b/src/cls/cas/cls_cas.cc
new file mode 100644
index 00000000..c6a7b9b5
--- /dev/null
+++ b/src/cls/cas/cls_cas.cc
@@ -0,0 +1,265 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <errno.h>
+
+#include "objclass/objclass.h"
+#include "cls_cas_ops.h"
+
+#include "include/compat.h"
+#include "osd/osd_types.h"
+
+CLS_VER(1,0)
+CLS_NAME(cas)
+
+struct chunk_obj_refcount;
+
+static int chunk_read_refcount(cls_method_context_t hctx, chunk_obj_refcount *objr)
+{
+ bufferlist bl;
+ objr->refs.clear();
+ int ret = cls_cxx_getxattr(hctx, CHUNK_REFCOUNT_ATTR, &bl);
+ if (ret == -ENODATA) {
+ return 0;
+ }
+ if (ret < 0)
+ return ret;
+
+ try {
+ auto iter = bl.cbegin();
+ decode(*objr, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: chunk_read_refcount(): failed to decode refcount entry\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int chunk_set_refcount(cls_method_context_t hctx, const struct chunk_obj_refcount& objr)
+{
+ bufferlist bl;
+
+ encode(objr, bl);
+
+ int ret = cls_cxx_setxattr(hctx, CHUNK_REFCOUNT_ATTR, &bl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cls_rc_chunk_refcount_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_chunk_refcount_get_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rc_refcount_get(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ chunk_obj_refcount objr;
+ int ret = chunk_read_refcount(hctx, &objr);
+ if (ret < 0)
+ return ret;
+
+ CLS_LOG(10, "cls_rc_chunk_refcount_get() oid=%s\n", op.source.oid.name.c_str());
+
+ objr.refs.insert(op.source);
+
+ ret = chunk_set_refcount(hctx, objr);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cls_rc_chunk_refcount_put(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_chunk_refcount_put_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rc_chunk_refcount_put(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ chunk_obj_refcount objr;
+ int ret = chunk_read_refcount(hctx, &objr);
+ if (ret < 0)
+ return ret;
+
+ if (objr.refs.empty()) {// shouldn't happen!
+ CLS_LOG(0, "ERROR: cls_rc_chunk_refcount_put() was called without any references!\n");
+ return -EINVAL;
+ }
+
+ CLS_LOG(10, "cls_rc_chunk_refcount_put() oid=%s\n", op.source.oid.name.c_str());
+
+ bool found = false;
+ for (auto &p : objr.refs) {
+ if (p == op.source) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ return 0;
+ }
+
+ auto p = objr.refs.find(op.source);
+ objr.refs.erase(p);
+
+ if (objr.refs.empty()) {
+ return cls_cxx_remove(hctx);
+ }
+
+ ret = chunk_set_refcount(hctx, objr);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cls_rc_chunk_refcount_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_chunk_refcount_set_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_chunk_refcount_set(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ if (!op.refs.size()) {
+ return cls_cxx_remove(hctx);
+ }
+
+ chunk_obj_refcount objr;
+ objr.refs = op.refs;
+
+ int ret = chunk_set_refcount(hctx, objr);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cls_rc_chunk_refcount_read(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ chunk_obj_refcount objr;
+
+ cls_chunk_refcount_read_ret read_ret;
+ int ret = chunk_read_refcount(hctx, &objr);
+ if (ret < 0)
+ return ret;
+
+ for (auto &p : objr.refs) {
+ read_ret.refs.insert(p);
+ }
+
+ encode(read_ret, *out);
+
+ return 0;
+}
+
+static int cls_rc_write_or_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+ hobject_t src_obj;
+ bufferlist indata, outdata;
+ ceph_osd_op op;
+ try {
+ decode (op, in_iter);
+ decode(src_obj, in_iter);
+ in_iter.copy(op.extent.length, indata);
+ }
+ catch (buffer::error& e) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(10, " offset: %llu length: %llu \n",
+ static_cast<long long unsigned>(op.extent.offset),
+ static_cast<long long unsigned>(op.extent.length));
+ chunk_obj_refcount objr;
+ int ret = chunk_read_refcount(hctx, &objr);
+ if (ret == -ENOENT) {
+ objr.refs.insert(src_obj);
+ bufferlist set_bl;
+ encode(objr, set_bl);
+ ret = cls_cxx_chunk_write_and_set(hctx, op.extent.offset, op.extent.length, &indata, op.flags,
+ &set_bl, set_bl.length());
+ if (ret < 0)
+ return ret;
+
+ return 0;
+ }
+
+ objr.refs.insert(src_obj);
+ ret = chunk_set_refcount(hctx, objr);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+
+static int cls_rc_has_chunk(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+ string fp_oid;
+ bufferlist indata, outdata;
+ try {
+ decode (fp_oid, in_iter);
+ }
+ catch (buffer::error& e) {
+ return -EINVAL;
+ }
+ CLS_LOG(10, " fp_oid: %s \n", fp_oid.c_str());
+
+ bool ret = cls_has_chunk(hctx, fp_oid);
+ if (ret) {
+ return 0;
+ }
+ return -ENOENT;
+}
+
+CLS_INIT(cas)
+{
+ CLS_LOG(1, "Loaded cas class!");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_cas_write_or_get;
+ cls_method_handle_t h_chunk_refcount_get;
+ cls_method_handle_t h_chunk_refcount_put;
+ cls_method_handle_t h_chunk_refcount_set;
+ cls_method_handle_t h_chunk_refcount_read;
+ cls_method_handle_t h_chunk_has_chunk;
+
+ cls_register("cas", &h_class);
+
+ /* chunk refcount */
+ cls_register_cxx_method(h_class, "chunk_get", CLS_METHOD_RD | CLS_METHOD_WR, cls_rc_chunk_refcount_get,
+ &h_chunk_refcount_get);
+ cls_register_cxx_method(h_class, "chunk_put", CLS_METHOD_RD | CLS_METHOD_WR, cls_rc_chunk_refcount_put,
+ &h_chunk_refcount_put);
+ cls_register_cxx_method(h_class, "chunk_set", CLS_METHOD_RD | CLS_METHOD_WR, cls_rc_chunk_refcount_set,
+ &h_chunk_refcount_set);
+ cls_register_cxx_method(h_class, "chunk_read", CLS_METHOD_RD, cls_rc_chunk_refcount_read,
+ &h_chunk_refcount_read);
+ cls_register_cxx_method(h_class, "cas_write_or_get", CLS_METHOD_RD | CLS_METHOD_WR, cls_rc_write_or_get,
+ &h_cas_write_or_get);
+ cls_register_cxx_method(h_class, "has_chunk", CLS_METHOD_RD, cls_rc_has_chunk,
+ &h_chunk_has_chunk);
+
+ return;
+}
+
diff --git a/src/cls/cas/cls_cas_client.cc b/src/cls/cas/cls_cas_client.cc
new file mode 100644
index 00000000..b041641f
--- /dev/null
+++ b/src/cls/cas/cls_cas_client.cc
@@ -0,0 +1,62 @@
+#include <errno.h>
+
+#include "cls/cas/cls_cas_client.h"
+#include "cls/cas/cls_cas_ops.h"
+#include "include/rados/librados.hpp"
+
+using namespace librados;
+
+void cls_chunk_refcount_get(librados::ObjectWriteOperation& op, const hobject_t& soid)
+{
+ bufferlist in;
+ cls_chunk_refcount_get_op call;
+ call.source = soid;
+ encode(call, in);
+ op.exec("cas", "chunk_get", in);
+}
+
+void cls_chunk_refcount_put(librados::ObjectWriteOperation& op, const hobject_t& soid)
+{
+ bufferlist in;
+ cls_chunk_refcount_put_op call;
+ call.source = soid;
+ encode(call, in);
+ op.exec("cas", "chunk_put", in);
+}
+
+void cls_chunk_refcount_set(librados::ObjectWriteOperation& op, set<hobject_t>& refs)
+{
+ bufferlist in;
+ cls_chunk_refcount_set_op call;
+ call.refs = refs;
+ encode(call, in);
+ op.exec("cas", "chunk_set", in);
+}
+
+int cls_chunk_refcount_read(librados::IoCtx& io_ctx, string& oid, set<hobject_t> *refs)
+{
+ bufferlist in, out;
+ int r = io_ctx.exec(oid, "cas", "chunk_read", in, out);
+ if (r < 0)
+ return r;
+
+ cls_chunk_refcount_read_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+
+ *refs = ret.refs;
+
+ return r;
+}
+
+int cls_chunk_has_chunk(librados::IoCtx& io_ctx, string& oid, string& fp_oid)
+{
+ bufferlist in, out;
+ encode(fp_oid, in);
+ int r = io_ctx.exec(oid, "cas", "has_chunk", in, out);
+ return r;
+}
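
For reference, a usage sketch of the helpers above (the chunk oid "chunk_fingerprint", the cas_refcount_demo() wrapper and the referring hobject_t are illustrative): take a reference on an existing chunk object, read the reference set back, then drop the reference again.

    #include <set>
    #include <string>
    #include "include/rados/librados.hpp"
    #include "cls/cas/cls_cas_client.h"

    int cas_refcount_demo(librados::IoCtx& io_ctx, const hobject_t& referrer)
    {
      std::string chunk_oid = "chunk_fingerprint";  // illustrative chunk name

      // add a reference held by `referrer`
      librados::ObjectWriteOperation get_op;
      cls_chunk_refcount_get(get_op, referrer);
      int r = io_ctx.operate(chunk_oid, &get_op);
      if (r < 0)
        return r;

      // list the references currently recorded on the chunk
      std::set<hobject_t> refs;
      r = cls_chunk_refcount_read(io_ctx, chunk_oid, &refs);
      if (r < 0)
        return r;

      // drop the reference; the class removes the chunk object once its
      // reference set becomes empty
      librados::ObjectWriteOperation put_op;
      cls_chunk_refcount_put(put_op, referrer);
      return io_ctx.operate(chunk_oid, &put_op);
    }
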
diff --git a/src/cls/cas/cls_cas_client.h b/src/cls/cas/cls_cas_client.h
new file mode 100644
index 00000000..323be9eb
--- /dev/null
+++ b/src/cls/cas/cls_cas_client.h
@@ -0,0 +1,13 @@
+#ifndef CEPH_CLS_CAS_CLIENT_H
+#define CEPH_CLS_CAS_CLIENT_H
+
+#include "include/types.h"
+#include "include/rados/librados_fwd.hpp"
+#include "common/hobject.h"
+
+void cls_chunk_refcount_get(librados::ObjectWriteOperation& op, const hobject_t& soid);
+void cls_chunk_refcount_put(librados::ObjectWriteOperation& op, const hobject_t& soid);
+void cls_chunk_refcount_set(librados::ObjectWriteOperation& op, set<hobject_t>& refs);
+int cls_chunk_refcount_read(librados::IoCtx& io_ctx, string& oid, set<hobject_t> *refs);
+int cls_chunk_has_chunk(librados::IoCtx& io_ctx, string& oid, string& fp_oid);
+#endif
diff --git a/src/cls/cas/cls_cas_ops.h b/src/cls/cas/cls_cas_ops.h
new file mode 100644
index 00000000..35bc90df
--- /dev/null
+++ b/src/cls/cas/cls_cas_ops.h
@@ -0,0 +1,142 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_CAS_OPS_H
+#define CEPH_CLS_CAS_OPS_H
+
+#include "include/types.h"
+#include "common/hobject.h"
+
+#define CHUNK_REFCOUNT_ATTR "chunk_refcount"
+
+struct cls_chunk_refcount_get_op {
+ hobject_t source;
+
+ cls_chunk_refcount_get_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(source, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(source, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(list<cls_chunk_refcount_get_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_chunk_refcount_get_op)
+
+struct cls_chunk_refcount_put_op {
+ hobject_t source;
+
+ cls_chunk_refcount_put_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(source, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(source, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(list<cls_chunk_refcount_put_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_chunk_refcount_put_op)
+
+struct cls_chunk_refcount_set_op {
+ set<hobject_t> refs;
+
+ cls_chunk_refcount_set_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(refs, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(refs, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(list<cls_chunk_refcount_set_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_chunk_refcount_set_op)
+
+struct cls_chunk_refcount_read_ret {
+ set<hobject_t> refs;
+
+ cls_chunk_refcount_read_ret() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(refs, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(refs, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(list<cls_chunk_refcount_read_ret*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_chunk_refcount_read_ret)
+
+struct chunk_obj_refcount {
+ set<hobject_t> refs;
+
+ chunk_obj_refcount() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(refs, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(refs, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(chunk_obj_refcount)
+
+struct obj_refcount {
+ map<string, bool> refs;
+ set<string> retired_refs;
+
+ obj_refcount() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(refs, bl);
+ encode(retired_refs, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(refs, bl);
+ if (struct_v >= 2) {
+ decode(retired_refs, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(obj_refcount)
+
+#endif
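
All of the structures above use the ENCODE_START/DECODE_START macros, and obj_refcount shows the convention for evolving a type: bump the encoded version and guard the new field with struct_v so older decoders still accept the payload. A hypothetical sketch of that pattern (cls_chunk_example_op and its flags field are made up for illustration; they are not part of this changeset):

    #include "include/types.h"
    #include "common/hobject.h"

    // Hypothetical v2 request struct: the new "flags" field is appended after
    // the v1 payload and only decoded when the sender wrote version >= 2, so
    // old and new peers interoperate.
    struct cls_chunk_example_op {
      hobject_t source;
      uint64_t flags = 0;            // added in v2

      void encode(bufferlist& bl) const {
        ENCODE_START(2, 1, bl);      // version 2, compatible down to 1
        encode(source, bl);
        encode(flags, bl);
        ENCODE_FINISH(bl);
      }

      void decode(bufferlist::const_iterator& bl) {
        DECODE_START(2, bl);
        decode(source, bl);
        if (struct_v >= 2) {
          decode(flags, bl);
        }
        DECODE_FINISH(bl);
      }
    };
    WRITE_CLASS_ENCODER(cls_chunk_example_op)
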
diff --git a/src/cls/cephfs/cls_cephfs.cc b/src/cls/cephfs/cls_cephfs.cc
new file mode 100644
index 00000000..7e3214b9
--- /dev/null
+++ b/src/cls/cephfs/cls_cephfs.cc
@@ -0,0 +1,210 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2015 Red Hat
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+
+#include <string>
+#include <errno.h>
+
+#include "objclass/objclass.h"
+
+#include "cls_cephfs.h"
+
+CLS_VER(1,0)
+CLS_NAME(cephfs)
+
+
+std::ostream &operator<<(std::ostream &out, const ObjCeiling &in)
+{
+ out << "id: " << in.id << " size: " << in.size;
+ return out;
+}
+
+
+/**
+ * Set a named xattr to a given value, if and only if the xattr
+ * is not already set to a greater value.
+ *
+ * If the xattr is missing, then it is set to the input integer.
+ *
+ * @param xattr_name: name of xattr to compare against and set
+ * @param input_val: candidate new value, of encode()'able type
+ * @returns 0 on success (irrespective of whether our new value
+ * was used) else an error code
+ */
+template <typename A>
+static int set_if_greater(cls_method_context_t hctx,
+ const std::string &xattr_name, const A input_val)
+{
+ bufferlist existing_val_bl;
+
+ bool set_val = false;
+ int r = cls_cxx_getxattr(hctx, xattr_name.c_str(), &existing_val_bl);
+ if (r == -ENOENT || existing_val_bl.length() == 0) {
+ set_val = true;
+ } else if (r >= 0) {
+ auto existing_p = existing_val_bl.cbegin();
+ try {
+ A existing_val;
+ decode(existing_val, existing_p);
+ if (!existing_p.end()) {
+ // Trailing junk? Consider it invalid and overwrite
+ set_val = true;
+ } else {
+ // Valid existing value, do comparison
+ set_val = input_val > existing_val;
+ }
+ } catch (const buffer::error &err) {
+ // Corrupt or empty existing value, overwrite it
+ set_val = true;
+ }
+ } else {
+ return r;
+ }
+
+ // Conditionally set the new xattr
+ if (set_val) {
+ bufferlist set_bl;
+ encode(input_val, set_bl);
+ return cls_cxx_setxattr(hctx, xattr_name.c_str(), &set_bl);
+ } else {
+ return 0;
+ }
+}
+
+static int accumulate_inode_metadata(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ ceph_assert(in != NULL);
+ ceph_assert(out != NULL);
+
+ int r = 0;
+
+ // Decode `in`
+ auto q = in->cbegin();
+ AccumulateArgs args;
+ try {
+ args.decode(q);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ ObjCeiling ceiling(args.obj_index, args.obj_size);
+ r = set_if_greater(hctx, args.obj_xattr_name, ceiling);
+ if (r < 0) {
+ return r;
+ }
+
+ r = set_if_greater(hctx, args.mtime_xattr_name, args.mtime);
+ if (r < 0) {
+ return r;
+ }
+
+ r = set_if_greater(hctx, args.obj_size_xattr_name, args.obj_size);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+// I want to select objects that have a name ending 00000000
+// and an xattr (scrub_tag) not equal to a specific value.
+// This is such a special case that we can't really pretend it's
+// generic, so just fess up and call this the cephfs filter.
+class PGLSCephFSFilter : public PGLSFilter {
+protected:
+ std::string scrub_tag;
+public:
+ int init(bufferlist::const_iterator& params) override {
+ try {
+ InodeTagFilterArgs args;
+ args.decode(params);
+ scrub_tag = args.scrub_tag;
+ } catch (buffer::error &e) {
+ return -EINVAL;
+ }
+
+ if (scrub_tag.empty()) {
+ xattr = "";
+ } else {
+ xattr = "_scrub_tag";
+ }
+
+ return 0;
+ }
+
+ ~PGLSCephFSFilter() override {}
+ bool reject_empty_xattr() override { return false; }
+ bool filter(const hobject_t &obj, bufferlist& xattr_data,
+ bufferlist& outdata) override;
+};
+
+bool PGLSCephFSFilter::filter(const hobject_t &obj,
+ bufferlist& xattr_data, bufferlist& outdata)
+{
+ const std::string need_ending = ".00000000";
+ const std::string &obj_name = obj.oid.name;
+
+ if (obj_name.length() < need_ending.length()) {
+ return false;
+ }
+
+ const bool match = obj_name.compare (obj_name.length() - need_ending.length(), need_ending.length(), need_ending) == 0;
+ if (!match) {
+ return false;
+ }
+
+ if (!scrub_tag.empty() && xattr_data.length() > 0) {
+ std::string tag_ondisk;
+ auto q = xattr_data.cbegin();
+ try {
+ decode(tag_ondisk, q);
+ if (tag_ondisk == scrub_tag)
+ return false;
+ } catch (const buffer::error &err) {
+ }
+ }
+
+ return true;
+}
+
+PGLSFilter *inode_tag_filter()
+{
+ return new PGLSCephFSFilter();
+}
+
+/**
+ * initialize class
+ *
+ * We do two things here: we register the new class, and then register
+ * all of the class's methods.
+ */
+CLS_INIT(cephfs)
+{
+ // this log message, at level 0, will always appear in the ceph-osd
+ // log file.
+ CLS_LOG(0, "loading cephfs");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_accumulate_inode_metadata;
+
+ cls_register("cephfs", &h_class);
+ cls_register_cxx_method(h_class, "accumulate_inode_metadata",
+ CLS_METHOD_WR | CLS_METHOD_RD,
+ accumulate_inode_metadata, &h_accumulate_inode_metadata);
+
+ // A PGLS filter
+ cls_register_cxx_filter(h_class, "inode_tag", inode_tag_filter);
+}
+
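
A short illustration of the comparison set_if_greater() relies on (the ceiling_demo() function is made up for illustration): ObjCeiling::operator> compares only the object index, not the size, so the ceiling xattr ends up describing the highest-indexed object seen, while the largest object size is accumulated separately under its own xattr name.

    #include <cassert>
    #include "cls/cephfs/cls_cephfs.h"

    void ceiling_demo()
    {
      ObjCeiling seen_first(5, 1024);             // object index 5, 1 KiB
      ObjCeiling seen_later(3, 4 * 1024 * 1024);  // object index 3, 4 MiB
      assert(seen_first > seen_later);    // index 5 wins over index 3
      assert(!(seen_later > seen_first)); // a larger size alone does not win
    }
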
diff --git a/src/cls/cephfs/cls_cephfs.h b/src/cls/cephfs/cls_cephfs.h
new file mode 100644
index 00000000..89f4dab4
--- /dev/null
+++ b/src/cls/cephfs/cls_cephfs.h
@@ -0,0 +1,147 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2015 Red Hat
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "include/encoding.h"
+
+/**
+ * Value class for the xattr we'll use to accumulate
+ * the highest object seen for a given inode
+ */
+class ObjCeiling {
+ public:
+ uint64_t id;
+ uint64_t size;
+
+ ObjCeiling()
+ : id(0), size(0)
+ {}
+
+ ObjCeiling(uint64_t id_, uint64_t size_)
+ : id(id_), size(size_)
+ {}
+
+ bool operator >(ObjCeiling const &rhs) const
+ {
+ return id > rhs.id;
+ }
+
+ void encode(bufferlist &bl) const
+ {
+ ENCODE_START(1, 1, bl);
+ encode(id, bl);
+ encode(size, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator &p)
+ {
+ DECODE_START(1, p);
+ decode(id, p);
+ decode(size, p);
+ DECODE_FINISH(p);
+ }
+};
+WRITE_CLASS_ENCODER(ObjCeiling)
+
+class AccumulateArgs
+{
+public:
+ uint64_t obj_index;
+ uint64_t obj_size;
+ int64_t mtime;
+ std::string obj_xattr_name;
+ std::string mtime_xattr_name;
+ std::string obj_size_xattr_name;
+
+ AccumulateArgs(
+ uint64_t obj_index_,
+ uint64_t obj_size_,
+ time_t mtime_,
+ const std::string &obj_xattr_name_,
+ const std::string &mtime_xattr_name_,
+ const std::string &obj_size_xattr_name_)
+ : obj_index(obj_index_),
+ obj_size(obj_size_),
+ mtime(mtime_),
+ obj_xattr_name(obj_xattr_name_),
+ mtime_xattr_name(mtime_xattr_name_),
+ obj_size_xattr_name(obj_size_xattr_name_)
+ {}
+
+ AccumulateArgs()
+ : obj_index(0), obj_size(0), mtime(0)
+ {}
+
+ void encode(bufferlist &bl) const
+ {
+ ENCODE_START(1, 1, bl);
+ encode(obj_xattr_name, bl);
+ encode(mtime_xattr_name, bl);
+ encode(obj_size_xattr_name, bl);
+ encode(obj_index, bl);
+ encode(obj_size, bl);
+ encode(mtime, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator &bl)
+ {
+ DECODE_START(1, bl);
+ decode(obj_xattr_name, bl);
+ decode(mtime_xattr_name, bl);
+ decode(obj_size_xattr_name, bl);
+ decode(obj_index, bl);
+ decode(obj_size, bl);
+ decode(mtime, bl);
+ DECODE_FINISH(bl);
+ }
+};
+
+class InodeTagFilterArgs
+{
+ public:
+ std::string scrub_tag;
+
+ void encode(bufferlist &bl) const
+ {
+ ENCODE_START(1, 1, bl);
+ encode(scrub_tag, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator &bl)
+ {
+ DECODE_START(1, bl);
+ decode(scrub_tag, bl);
+ DECODE_FINISH(bl);
+ }
+};
+
+class AccumulateResult
+{
+public:
+ // Index of the highest-indexed object seen
+ uint64_t ceiling_obj_index;
+ // Size of the highest-index object seen
+ uint64_t ceiling_obj_size;
+ // Largest object seen
+ uint64_t max_obj_size;
+ // Highest mtime seen
+ int64_t max_mtime;
+
+ AccumulateResult()
+ : ceiling_obj_index(0), ceiling_obj_size(0), max_obj_size(0), max_mtime(0)
+ {}
+};
+
diff --git a/src/cls/cephfs/cls_cephfs_client.cc b/src/cls/cephfs/cls_cephfs_client.cc
new file mode 100644
index 00000000..988eab41
--- /dev/null
+++ b/src/cls/cephfs/cls_cephfs_client.cc
@@ -0,0 +1,177 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2015 Red Hat
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+
+
+#include "include/rados/librados.hpp"
+#include "mds/CInode.h"
+
+#include "cls_cephfs_client.h"
+
+#define XATTR_CEILING "scan_ceiling"
+#define XATTR_MAX_MTIME "scan_max_mtime"
+#define XATTR_MAX_SIZE "scan_max_size"
+
+int ClsCephFSClient::accumulate_inode_metadata(
+ librados::IoCtx &ctx,
+ inodeno_t inode_no,
+ const uint64_t obj_index,
+ const uint64_t obj_size,
+ const time_t mtime)
+{
+ AccumulateArgs args(
+ obj_index,
+ obj_size,
+ mtime,
+ XATTR_CEILING,
+ XATTR_MAX_MTIME,
+ XATTR_MAX_SIZE);
+
+ // Generate 0th object name, where we will accumulate sizes/mtimes
+ object_t zeroth_object = InodeStore::get_object_name(inode_no, frag_t(), "");
+
+ // Construct a librados operation invoking our class method
+ librados::ObjectReadOperation op;
+ bufferlist inbl;
+ args.encode(inbl);
+ op.exec("cephfs", "accumulate_inode_metadata", inbl);
+
+ // Execute op
+ bufferlist outbl;
+ return ctx.operate(zeroth_object.name, &op, &outbl);
+}
+
+int ClsCephFSClient::delete_inode_accumulate_result(
+ librados::IoCtx &ctx,
+ const std::string &oid)
+{
+ librados::ObjectWriteOperation op;
+
+ // Remove xattrs from object
+ //
+ op.rmxattr(XATTR_CEILING);
+ op.rmxattr(XATTR_MAX_SIZE);
+ op.rmxattr(XATTR_MAX_MTIME);
+
+ return (ctx.operate(oid, &op));
+}
+
+int ClsCephFSClient::fetch_inode_accumulate_result(
+ librados::IoCtx &ctx,
+ const std::string &oid,
+ inode_backtrace_t *backtrace,
+ file_layout_t *layout,
+ AccumulateResult *result)
+{
+ ceph_assert(backtrace != NULL);
+ ceph_assert(result != NULL);
+
+ librados::ObjectReadOperation op;
+
+ int scan_ceiling_r = 0;
+ bufferlist scan_ceiling_bl;
+ op.getxattr(XATTR_CEILING, &scan_ceiling_bl, &scan_ceiling_r);
+
+ int scan_max_size_r = 0;
+ bufferlist scan_max_size_bl;
+ op.getxattr(XATTR_MAX_SIZE, &scan_max_size_bl, &scan_max_size_r);
+
+ int scan_max_mtime_r = 0;
+ bufferlist scan_max_mtime_bl;
+ op.getxattr(XATTR_MAX_MTIME, &scan_max_mtime_bl, &scan_max_mtime_r);
+
+ int parent_r = 0;
+ bufferlist parent_bl;
+ op.getxattr("parent", &parent_bl, &parent_r);
+ op.set_op_flags2(librados::OP_FAILOK);
+
+ int layout_r = 0;
+ bufferlist layout_bl;
+ op.getxattr("layout", &layout_bl, &layout_r);
+ op.set_op_flags2(librados::OP_FAILOK);
+
+ bufferlist op_bl;
+ int r = ctx.operate(oid, &op, &op_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ // Load scan_ceiling
+ try {
+ auto scan_ceiling_bl_iter = scan_ceiling_bl.cbegin();
+ ObjCeiling ceiling;
+ ceiling.decode(scan_ceiling_bl_iter);
+ result->ceiling_obj_index = ceiling.id;
+ result->ceiling_obj_size = ceiling.size;
+ } catch (const buffer::error &err) {
+ //dout(4) << "Invalid size attr on '" << oid << "'" << dendl;
+ return -EINVAL;
+ }
+
+ // Load scan_max_size
+ try {
+ auto scan_max_size_bl_iter = scan_max_size_bl.cbegin();
+ decode(result->max_obj_size, scan_max_size_bl_iter);
+ } catch (const buffer::error &err) {
+ //dout(4) << "Invalid size attr on '" << oid << "'" << dendl;
+ return -EINVAL;
+ }
+
+ // Load scan_max_mtime
+ try {
+ auto scan_max_mtime_bl_iter = scan_max_mtime_bl.cbegin();
+ decode(result->max_mtime, scan_max_mtime_bl_iter);
+ } catch (const buffer::error &err) {
+ //dout(4) << "Invalid size attr on '" << oid << "'" << dendl;
+ return -EINVAL;
+ }
+
+ // Deserialize backtrace
+ if (parent_bl.length()) {
+ try {
+ auto q = parent_bl.cbegin();
+ backtrace->decode(q);
+ } catch (buffer::error &e) {
+ //dout(4) << "Corrupt backtrace on '" << oid << "': " << e << dendl;
+ return -EINVAL;
+ }
+ }
+
+ // Deserialize layout
+ if (layout_bl.length()) {
+ try {
+ auto q = layout_bl.cbegin();
+ decode(*layout, q);
+ } catch (buffer::error &e) {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void ClsCephFSClient::build_tag_filter(
+ const std::string &scrub_tag,
+ bufferlist *out_bl)
+{
+ ceph_assert(out_bl != NULL);
+
+ // Leading part of bl is un-versioned string naming the filter
+ encode(std::string("cephfs.inode_tag"), *out_bl);
+
+ // Filter-specific part of the bl: in our case this is a versioned structure
+ InodeTagFilterArgs args;
+ args.scrub_tag = scrub_tag;
+ args.encode(*out_bl);
+}
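
For reference, a sketch of how the filter payload built by build_tag_filter() can be used (the tag "tag-123" and list_unscrubbed() are illustrative, and this assumes the librados overload of nobjects_begin() that accepts a filter bufferlist): the filter runs server-side during listing, so only 0th objects whose scrub_tag xattr does not already equal the given tag are returned.

    #include "include/rados/librados.hpp"
    #include "cls/cephfs/cls_cephfs_client.h"

    void list_unscrubbed(librados::IoCtx& io_ctx)
    {
      librados::bufferlist filter_bl;
      ClsCephFSClient::build_tag_filter("tag-123", &filter_bl);

      for (auto it = io_ctx.nobjects_begin(filter_bl);
           it != io_ctx.nobjects_end(); ++it) {
        // it->get_oid() names a 0th object that still needs scrubbing
      }
    }
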
diff --git a/src/cls/cephfs/cls_cephfs_client.h b/src/cls/cephfs/cls_cephfs_client.h
new file mode 100644
index 00000000..744c0aed
--- /dev/null
+++ b/src/cls/cephfs/cls_cephfs_client.h
@@ -0,0 +1,33 @@
+
+#include "include/rados/librados_fwd.hpp"
+#include "mds/mdstypes.h"
+#include "cls_cephfs.h"
+
+class AccumulateArgs;
+
+class ClsCephFSClient
+{
+ public:
+ static int accumulate_inode_metadata(
+ librados::IoCtx &ctx,
+ inodeno_t inode_no,
+ const uint64_t obj_index,
+ const uint64_t obj_size,
+ const time_t mtime);
+
+ static int fetch_inode_accumulate_result(
+ librados::IoCtx &ctx,
+ const std::string &oid,
+ inode_backtrace_t *backtrace,
+ file_layout_t *layout,
+ AccumulateResult *result);
+
+ static int delete_inode_accumulate_result(
+ librados::IoCtx &ctx,
+ const std::string &oid);
+
+ static void build_tag_filter(
+ const std::string &scrub_tag,
+ bufferlist *out_bl);
+};
+
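
For reference, a sketch of the accumulate/fetch cycle driven through this interface (the inode number, object index, size, mtime and the name of the inode's 0th object are illustrative, as is scan_inode_demo(); the extra includes are approximate): every data object discovered during a scan reports its index, size and mtime, the class keeps only the per-inode maxima in xattrs on the 0th object, and those maxima are read back afterwards together with the backtrace and layout xattrs.

    #include <ctime>
    #include <string>
    #include "include/rados/librados.hpp"
    #include "mds/inode_backtrace.h"
    #include "cls/cephfs/cls_cephfs_client.h"

    int scan_inode_demo(librados::IoCtx& io_ctx, inodeno_t ino,
                        const std::string& zeroth_oid)
    {
      // report one data object of the inode: index 3, 4 MiB, mtime "now"
      int r = ClsCephFSClient::accumulate_inode_metadata(
          io_ctx, ino, 3, 4 * 1024 * 1024, time(nullptr));
      if (r < 0)
        return r;

      // once all objects have been reported, read the accumulated result
      // back off the inode's 0th object
      inode_backtrace_t backtrace;
      file_layout_t layout;
      AccumulateResult result;
      return ClsCephFSClient::fetch_inode_accumulate_result(
          io_ctx, zeroth_oid, &backtrace, &layout, &result);
    }
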
diff --git a/src/cls/hello/cls_hello.cc b/src/cls/hello/cls_hello.cc
new file mode 100644
index 00000000..accca898
--- /dev/null
+++ b/src/cls/hello/cls_hello.cc
@@ -0,0 +1,344 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+/*
+ * This is a simple example RADOS class, designed to be usable as a
+ * template for implementing new methods.
+ *
+ * Our goal here is to illustrate the interface between the OSD and
+ * the class and demonstrate what kinds of things a class can do.
+ *
+ * Note that any *real* class will probably have a much more
+ * sophisticated protocol dealing with the in and out data buffers.
+ * For an example of the model that we've settled on for handling that
+ * in a clean way, please refer to cls_lock or cls_version for
+ * relatively simple examples of how the parameter encoding can be
+ * encoded in a way that allows for forward and backward compatibility
+ * between client vs class revisions.
+ */
+
+/*
+ * A quick note about bufferlists:
+ *
+ * The bufferlist class allows memory buffers to be concatenated,
+ * truncated, spliced, "copied," encoded/embedded, and decoded. For
+ * most operations no actual data is ever copied, making bufferlists
+ * very convenient for efficiently passing data around.
+ *
+ * bufferlist is actually a typedef of buffer::list, and is defined in
+ * include/buffer.h (and implemented in common/buffer.cc).
+ */
+
+#include <algorithm>
+#include <string>
+#include <sstream>
+#include <errno.h>
+
+#include "objclass/objclass.h"
+
+CLS_VER(1,0)
+CLS_NAME(hello)
+
+/**
+ * say hello - a "read" method that does not depend on the object
+ *
+ * This is an example of a method that does some computation and
+ * returns data to the caller, without depending on the local object
+ * content.
+ */
+static int say_hello(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // see if the input data from the client matches what this method
+ // expects to receive. your class can fill this buffer with what it
+ // wants.
+ if (in->length() > 100)
+ return -EINVAL;
+
+ // we generate our reply
+ out->append("Hello, ");
+ if (in->length() == 0)
+ out->append("world");
+ else
+ out->append(*in);
+ out->append("!");
+
+ // this return value will be returned back to the librados caller
+ return 0;
+}
+
+/**
+ * record hello - a "write" method that creates an object
+ *
+ * This method modifies a local object (in this case, by creating it
+ * if it doesn't exist). We make multiple write calls (write,
+ * setxattr) which are accumulated and applied as an atomic
+ * transaction.
+ */
+static int record_hello(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // we can write arbitrary stuff to the ceph-osd debug log. each log
+ // message is accompanied by an integer log level. smaller is
+ // "louder". how much of this makes it into the log is controlled
+ // by the debug_cls option on the ceph-osd, similar to how other log
+ // levels are controlled. this message, at level 20, will generally
+ // not be seen by anyone unless debug_cls is set at 20 or higher.
+ CLS_LOG(20, "in record_hello");
+
+ // see if the input data from the client matches what this method
+ // expects to receive. your class can fill this buffer with what it
+ // wants.
+ if (in->length() > 100)
+ return -EINVAL;
+
+ // only say hello to non-existent objects
+ if (cls_cxx_stat(hctx, NULL, NULL) == 0)
+ return -EEXIST;
+
+ bufferlist content;
+ content.append("Hello, ");
+ if (in->length() == 0)
+ content.append("world");
+ else
+ content.append(*in);
+ content.append("!");
+
+ // create/write the object
+ int r = cls_cxx_write_full(hctx, &content);
+ if (r < 0)
+ return r;
+
+ // also make note of who said it
+ entity_inst_t origin;
+ cls_get_request_origin(hctx, &origin);
+ ostringstream ss;
+ ss << origin;
+ bufferlist attrbl;
+ attrbl.append(ss.str());
+ r = cls_cxx_setxattr(hctx, "said_by", &attrbl);
+ if (r < 0)
+ return r;
+
+ // For write operations, there are two possible outcomes:
+ //
+ // * For a failure, we return a negative error code. The out
+ // buffer can contain any data that we want, and that data will
+ // be returned to the caller. No change is made to the object.
+ //
+ // * For a success, we must return 0 and *no* data in the out
+  //    buffer.  This is because the OSD does not log write result
+ // codes or output buffers and we need a replayed/resent
+ // operation (e.g., after a TCP disconnect) to be idempotent.
+ //
+ // If a class returns a positive value or puts data in the out
+ // buffer, the OSD code will ignore it and return 0 to the
+ // client.
+ return 0;
+}
+
+static int writes_dont_return_data(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // make some change to the object
+ bufferlist attrbl;
+ attrbl.append("bar");
+ int r = cls_cxx_setxattr(hctx, "foo", &attrbl);
+ if (r < 0)
+ return r;
+
+ if (in->length() > 0) {
+ // note that if we return anything < 0 (an error), this
+ // operation/transaction will abort, and the setattr above will
+ // never happen. however, we *can* return data on error.
+ out->append("too much input data!");
+ return -EINVAL;
+ }
+
+ // try to return some data. note that this *won't* reach the
+ // client! see the matching test case in test_cls_hello.cc.
+ out->append("you will never see this");
+
+ // if we try to return anything > 0 here the client will see 0.
+ return 42;
+}
+
+
+/**
+ * replay - a "read" method to get a previously recorded hello
+ *
+ * This is a read method that will retrieve a previously recorded
+ * hello statement.
+ */
+static int replay(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // read contents out of the on-disk object. our behavior can be a
+ // function of either the request alone, or the request and the
+ // on-disk state, depending on whether the RD flag is specified when
+ // registering the method (see the __cls__init function below).
+ int r = cls_cxx_read(hctx, 0, 1100, out);
+ if (r < 0)
+ return r;
+
+ // note that our return value need not be the length of the returned
+ // data; it can be whatever value we want: positive, zero or
+ // negative (this is a read).
+ return 0;
+}
+
+/**
+ * turn_it_to_11 - a "write" method that mutates existing object data
+ *
+ * A write method can depend on previous object content (i.e., perform
+ * a read/modify/write operation). This atomically transitions the
+ * object state from the old content to the new content.
+ */
+static int turn_it_to_11(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // see if the input data from the client matches what this method
+ // expects to receive. your class can fill this buffer with what it
+ // wants.
+ if (in->length() != 0)
+ return -EINVAL;
+
+ bufferlist previous;
+ int r = cls_cxx_read(hctx, 0, 1100, &previous);
+ if (r < 0)
+ return r;
+
+ std::string str(previous.c_str(), previous.length());
+ std::transform(str.begin(), str.end(), str.begin(), ::toupper);
+ previous.clear();
+ previous.append(str);
+
+ // replace previous byte data content (write_full == truncate(0) + write)
+ r = cls_cxx_write_full(hctx, &previous);
+ if (r < 0)
+ return r;
+
+ // record who did it
+ entity_inst_t origin;
+ cls_get_request_origin(hctx, &origin);
+ ostringstream ss;
+ ss << origin;
+ bufferlist attrbl;
+ attrbl.append(ss.str());
+ r = cls_cxx_setxattr(hctx, "amplified_by", &attrbl);
+ if (r < 0)
+ return r;
+
+ // return value is 0 for success; out buffer is empty.
+ return 0;
+}
+
+/**
+ * example method that does not behave
+ *
+ * This method is registered as WR but tries to read
+ */
+static int bad_reader(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ return cls_cxx_read(hctx, 0, 100, out);
+}
+
+/**
+ * example method that does not behave
+ *
+ * This method is registered as RD but tries to write
+ */
+static int bad_writer(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ return cls_cxx_write_full(hctx, in);
+}
+
+
+class PGLSHelloFilter : public PGLSFilter {
+ string val;
+public:
+ int init(bufferlist::const_iterator& params) override {
+ try {
+ decode(xattr, params);
+ decode(val, params);
+ } catch (buffer::error &e) {
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ ~PGLSHelloFilter() override {}
+ bool filter(const hobject_t &obj, bufferlist& xattr_data,
+ bufferlist& outdata) override
+ {
+ if (val.size() != xattr_data.length())
+ return false;
+
+ if (memcmp(val.c_str(), xattr_data.c_str(), val.size()))
+ return false;
+
+ return true;
+ }
+};
+
+
+PGLSFilter *hello_filter()
+{
+ return new PGLSHelloFilter();
+}
+
+
+/**
+ * initialize class
+ *
+ * We do two things here: we register the new class, and then register
+ * all of the class's methods.
+ */
+CLS_INIT(hello)
+{
+ // this log message, at level 0, will always appear in the ceph-osd
+ // log file.
+ CLS_LOG(0, "loading cls_hello");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_say_hello;
+ cls_method_handle_t h_record_hello;
+ cls_method_handle_t h_replay;
+ cls_method_handle_t h_writes_dont_return_data;
+ cls_method_handle_t h_turn_it_to_11;
+ cls_method_handle_t h_bad_reader;
+ cls_method_handle_t h_bad_writer;
+
+ cls_register("hello", &h_class);
+
+ // There are two flags we specify for methods:
+ //
+ // RD : whether this method (may) read prior object state
+ // WR : whether this method (may) write or update the object
+ //
+ // A method can be RD, WR, neither, or both. If a method does
+ // neither, the data it returns to the caller is a function of the
+ // request and not the object contents.
+
+ cls_register_cxx_method(h_class, "say_hello",
+ CLS_METHOD_RD,
+ say_hello, &h_say_hello);
+ cls_register_cxx_method(h_class, "record_hello",
+ CLS_METHOD_WR | CLS_METHOD_PROMOTE,
+ record_hello, &h_record_hello);
+ cls_register_cxx_method(h_class, "writes_dont_return_data",
+ CLS_METHOD_WR,
+ writes_dont_return_data, &h_writes_dont_return_data);
+ cls_register_cxx_method(h_class, "replay",
+ CLS_METHOD_RD,
+ replay, &h_replay);
+
+ // RD | WR is a read-modify-write method.
+ cls_register_cxx_method(h_class, "turn_it_to_11",
+ CLS_METHOD_RD | CLS_METHOD_WR | CLS_METHOD_PROMOTE,
+ turn_it_to_11, &h_turn_it_to_11);
+
+ // counter-examples
+ cls_register_cxx_method(h_class, "bad_reader", CLS_METHOD_WR,
+ bad_reader, &h_bad_reader);
+ cls_register_cxx_method(h_class, "bad_writer", CLS_METHOD_RD,
+ bad_writer, &h_bad_writer);
+
+ // A PGLS filter
+ cls_register_cxx_filter(h_class, "hello", hello_filter);
+}
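
For reference, a caller-side sketch of the example class (the object name "hello_obj" and the hello_demo() wrapper are illustrative, not part of this changeset): record_hello is a WR method, so it is batched into a write operation, while replay is an RD method invoked through the synchronous exec() path (say_hello works the same way, as sketched after the CMakeLists.txt diff above).

    #include "include/rados/librados.hpp"

    int hello_demo(librados::IoCtx& io_ctx)
    {
      // WR path: creates "hello_obj" holding "Hello, world!" plus a said_by
      // xattr; fails with -EEXIST if the object already exists
      librados::ObjectWriteOperation wr;
      librados::bufferlist no_input;
      wr.exec("hello", "record_hello", no_input);
      int r = io_ctx.operate("hello_obj", &wr);
      if (r < 0)
        return r;

      // RD path: read back what record_hello stored
      librados::bufferlist in, out;
      r = io_ctx.exec("hello_obj", "hello", "replay", in, out);
      // out now holds "Hello, world!"; the method's return value need not be
      // the data length (see the comment in replay() above)
      return r;
    }
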
diff --git a/src/cls/journal/cls_journal.cc b/src/cls/journal/cls_journal.cc
new file mode 100644
index 00000000..ff909bea
--- /dev/null
+++ b/src/cls/journal/cls_journal.cc
@@ -0,0 +1,1230 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "include/int_types.h"
+#include "include/buffer.h"
+#include "include/encoding.h"
+#include "common/errno.h"
+#include "objclass/objclass.h"
+#include "cls/journal/cls_journal_types.h"
+#include <errno.h>
+#include <map>
+#include <string>
+#include <sstream>
+
+CLS_VER(1, 0)
+CLS_NAME(journal)
+
+namespace {
+
+static const uint64_t MAX_KEYS_READ = 64;
+
+static const std::string HEADER_KEY_ORDER = "order";
+static const std::string HEADER_KEY_SPLAY_WIDTH = "splay_width";
+static const std::string HEADER_KEY_POOL_ID = "pool_id";
+static const std::string HEADER_KEY_MINIMUM_SET = "minimum_set";
+static const std::string HEADER_KEY_ACTIVE_SET = "active_set";
+static const std::string HEADER_KEY_NEXT_TAG_TID = "next_tag_tid";
+static const std::string HEADER_KEY_NEXT_TAG_CLASS = "next_tag_class";
+static const std::string HEADER_KEY_CLIENT_PREFIX = "client_";
+static const std::string HEADER_KEY_TAG_PREFIX = "tag_";
+
+std::string to_hex(uint64_t value) {
+ std::ostringstream oss;
+ oss << std::setw(16) << std::setfill('0') << std::hex << value;
+ return oss.str();
+}
+
+std::string key_from_client_id(const std::string &client_id) {
+ return HEADER_KEY_CLIENT_PREFIX + client_id;
+}
+
+std::string key_from_tag_tid(uint64_t tag_tid) {
+ return HEADER_KEY_TAG_PREFIX + to_hex(tag_tid);
+}
+
+uint64_t tag_tid_from_key(const std::string &key) {
+ std::istringstream iss(key);
+ uint64_t id;
+ iss.ignore(HEADER_KEY_TAG_PREFIX.size()) >> std::hex >> id;
+ return id;
+}
+
+template <typename T>
+int read_key(cls_method_context_t hctx, const string &key, T *t,
+ bool ignore_enoent = false) {
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, key, &bl);
+ if (r == -ENOENT) {
+ if (ignore_enoent) {
+ r = 0;
+ }
+ return r;
+ } else if (r < 0) {
+ CLS_ERR("failed to get omap key: %s", key.c_str());
+ return r;
+ }
+
+ try {
+ auto iter = bl.cbegin();
+ decode(*t, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+ return 0;
+}
+
+template <typename T>
+int write_key(cls_method_context_t hctx, const string &key, const T &t) {
+ bufferlist bl;
+ encode(t, bl);
+
+ int r = cls_cxx_map_set_val(hctx, key, &bl);
+ if (r < 0) {
+ CLS_ERR("failed to set omap key: %s", key.c_str());
+ return r;
+ }
+ return 0;
+}
+
+int remove_key(cls_method_context_t hctx, const string &key) {
+ int r = cls_cxx_map_remove_key(hctx, key);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("failed to remove key: %s", key.c_str());
+ return r;
+ }
+ return 0;
+}
+
+int expire_tags(cls_method_context_t hctx, const std::string *skip_client_id) {
+
+ std::string skip_client_key;
+ if (skip_client_id != nullptr) {
+ skip_client_key = key_from_client_id(*skip_client_id);
+ }
+
+ uint64_t minimum_tag_tid = std::numeric_limits<uint64_t>::max();
+ std::string last_read = "";
+ bool more;
+ do {
+ std::map<std::string, bufferlist> vals;
+ int r = cls_cxx_map_get_vals(hctx, last_read, HEADER_KEY_CLIENT_PREFIX,
+ MAX_KEYS_READ, &vals, &more);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("failed to retrieve registered clients: %s",
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ for (auto &val : vals) {
+ // if we are removing a client, skip its commit positions
+ if (val.first == skip_client_key) {
+ continue;
+ }
+
+ cls::journal::Client client;
+ auto iter = val.second.cbegin();
+ try {
+ decode(client, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("error decoding registered client: %s",
+ val.first.c_str());
+ return -EIO;
+ }
+
+ if (client.state == cls::journal::CLIENT_STATE_DISCONNECTED) {
+ // don't allow a disconnected client to prevent pruning
+ continue;
+ } else if (client.commit_position.object_positions.empty()) {
+ // cannot prune if one or more clients has an empty commit history
+ return 0;
+ }
+
+ for (auto object_position : client.commit_position.object_positions) {
+ minimum_tag_tid = std::min(minimum_tag_tid, object_position.tag_tid);
+ }
+ }
+ if (!vals.empty()) {
+ last_read = vals.rbegin()->first;
+ }
+ } while (more);
+
+ // cannot expire tags if a client hasn't committed yet
+ if (minimum_tag_tid == std::numeric_limits<uint64_t>::max()) {
+ return 0;
+ }
+
+ // compute the minimum in-use tag for each class
+ std::map<uint64_t, uint64_t> minimum_tag_class_to_tids;
+ typedef enum { TAG_PASS_CALCULATE_MINIMUMS,
+ TAG_PASS_SCRUB,
+ TAG_PASS_DONE } TagPass;
+ int tag_pass = TAG_PASS_CALCULATE_MINIMUMS;
+ last_read = HEADER_KEY_TAG_PREFIX;
+ do {
+ std::map<std::string, bufferlist> vals;
+ int r = cls_cxx_map_get_vals(hctx, last_read, HEADER_KEY_TAG_PREFIX,
+ MAX_KEYS_READ, &vals, &more);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("failed to retrieve tags: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ for (auto &val : vals) {
+ cls::journal::Tag tag;
+ auto iter = val.second.cbegin();
+ try {
+ decode(tag, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("error decoding tag: %s", val.first.c_str());
+ return -EIO;
+ }
+
+ if (tag.tid != tag_tid_from_key(val.first)) {
+ CLS_ERR("tag tid mismatched: %s", val.first.c_str());
+ return -EINVAL;
+ }
+
+ if (tag_pass == TAG_PASS_CALCULATE_MINIMUMS) {
+ minimum_tag_class_to_tids[tag.tag_class] = tag.tid;
+ } else if (tag_pass == TAG_PASS_SCRUB &&
+ tag.tid < minimum_tag_class_to_tids[tag.tag_class]) {
+ r = remove_key(hctx, val.first);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ if (tag.tid >= minimum_tag_tid) {
+ // no need to check for tag classes beyond this point
+ vals.clear();
+ more = false;
+ break;
+ }
+ }
+
+ if (tag_pass != TAG_PASS_DONE && !more) {
+ last_read = HEADER_KEY_TAG_PREFIX;
+ ++tag_pass;
+ } else if (!vals.empty()) {
+ last_read = vals.rbegin()->first;
+ }
+ } while (tag_pass != TAG_PASS_DONE);
+ return 0;
+}
+
+int get_client_list_range(cls_method_context_t hctx,
+ std::set<cls::journal::Client> *clients,
+ std::string start_after, uint64_t max_return) {
+ std::string last_read;
+ if (!start_after.empty()) {
+ last_read = key_from_client_id(start_after);
+ }
+
+ std::map<std::string, bufferlist> vals;
+ bool more;
+ int r = cls_cxx_map_get_vals(hctx, last_read, HEADER_KEY_CLIENT_PREFIX,
+ max_return, &vals, &more);
+ if (r < 0) {
+ CLS_ERR("failed to retrieve omap values: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ for (std::map<std::string, bufferlist>::iterator it = vals.begin();
+ it != vals.end(); ++it) {
+ try {
+ auto iter = it->second.cbegin();
+
+ cls::journal::Client client;
+ decode(client, iter);
+ clients->insert(client);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode client '%s': %s", it->first.c_str(),
+ err.what());
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+int find_min_commit_position(cls_method_context_t hctx,
+ cls::journal::ObjectSetPosition *minset) {
+ int r;
+ bool valid = false;
+ std::string start_after = "";
+ uint64_t tag_tid = 0, entry_tid = 0;
+
+ while (true) {
+ std::set<cls::journal::Client> batch;
+
+ r = get_client_list_range(hctx, &batch, start_after, cls::journal::JOURNAL_MAX_RETURN);
+ if ((r < 0) || batch.empty()) {
+ break;
+ }
+
+ start_after = batch.rbegin()->id;
+
+ // update the (minimum) commit position from this batch of clients
+ for(std::set<cls::journal::Client>::iterator it = batch.begin();
+ it != batch.end(); ++it) {
+ cls::journal::ObjectSetPosition object_set_position = (*it).commit_position;
+ if (object_set_position.object_positions.empty()) {
+ *minset = cls::journal::ObjectSetPosition();
+ break;
+ }
+ cls::journal::ObjectPosition first = object_set_position.object_positions.front();
+
+ // least tag_tid (or least entry_tid for matching tag_tid)
+ if (!valid || (tag_tid > first.tag_tid) || ((tag_tid == first.tag_tid) && (entry_tid > first.entry_tid))) {
+ tag_tid = first.tag_tid;
+ entry_tid = first.entry_tid;
+ *minset = cls::journal::ObjectSetPosition(object_set_position);
+ valid = true;
+ }
+ }
+
+ // got the last batch, we're done
+ if (batch.size() < cls::journal::JOURNAL_MAX_RETURN) {
+ break;
+ }
+ }
+
+ return r;
+}
+
+} // anonymous namespace
+
+/**
+ * Input:
+ * @param order (uint8_t) - bits to shift to compute the object max size
+ * @param splay width (uint8_t) - number of active journal objects
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_create(cls_method_context_t hctx, bufferlist *in, bufferlist *out) {
+ uint8_t order;
+ uint8_t splay_width;
+ int64_t pool_id;
+ try {
+ auto iter = in->cbegin();
+ decode(order, iter);
+ decode(splay_width, iter);
+ decode(pool_id, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ bufferlist stored_orderbl;
+ int r = cls_cxx_map_get_val(hctx, HEADER_KEY_ORDER, &stored_orderbl);
+ if (r >= 0) {
+ CLS_ERR("journal already exists");
+ return -EEXIST;
+ } else if (r != -ENOENT) {
+ return r;
+ }
+
+ r = write_key(hctx, HEADER_KEY_ORDER, order);
+ if (r < 0) {
+ return r;
+ }
+
+ r = write_key(hctx, HEADER_KEY_SPLAY_WIDTH, splay_width);
+ if (r < 0) {
+ return r;
+ }
+
+ r = write_key(hctx, HEADER_KEY_POOL_ID, pool_id);
+ if (r < 0) {
+ return r;
+ }
+
+ uint64_t object_set = 0;
+ r = write_key(hctx, HEADER_KEY_ACTIVE_SET, object_set);
+ if (r < 0) {
+ return r;
+ }
+
+ r = write_key(hctx, HEADER_KEY_MINIMUM_SET, object_set);
+ if (r < 0) {
+ return r;
+ }
+
+ uint64_t tag_id = 0;
+ r = write_key(hctx, HEADER_KEY_NEXT_TAG_TID, tag_id);
+ if (r < 0) {
+ return r;
+ }
+
+ r = write_key(hctx, HEADER_KEY_NEXT_TAG_CLASS, tag_id);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
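+
+// Illustrative usage sketch only: callers normally drive this method through
+// the helpers declared in cls_journal_client.h later in this patch. The object
+// name "journal.1234" is hypothetical; order 24 yields 16MiB data objects and
+// the pool id is taken from the same pool the header lives in.
+//
+//   librados::IoCtx ioctx;   // assumed already opened on the journal pool
+//   int r = cls::journal::client::create(ioctx, "journal.1234", 24, 4,
+//                                        ioctx.get_id());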
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * order (uint8_t)
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_get_order(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint8_t order;
+ int r = read_key(hctx, HEADER_KEY_ORDER, &order);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(order, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * splay_width (uint8_t)
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_get_splay_width(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint8_t splay_width;
+ int r = read_key(hctx, HEADER_KEY_SPLAY_WIDTH, &splay_width);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(splay_width, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * pool_id (int64_t)
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_get_pool_id(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ int64_t pool_id = 0;
+ int r = read_key(hctx, HEADER_KEY_POOL_ID, &pool_id);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(pool_id, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * object set (uint64_t)
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_get_minimum_set(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint64_t minimum_set;
+ int r = read_key(hctx, HEADER_KEY_MINIMUM_SET, &minimum_set);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(minimum_set, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param object_set (uint64_t)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_set_minimum_set(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint64_t object_set;
+ try {
+ auto iter = in->cbegin();
+ decode(object_set, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ uint64_t current_active_set;
+ int r = read_key(hctx, HEADER_KEY_ACTIVE_SET, &current_active_set);
+ if (r < 0) {
+ return r;
+ }
+
+ if (current_active_set < object_set) {
+ CLS_LOG(10, "active object set earlier than minimum: %" PRIu64
+ " < %" PRIu64, current_active_set, object_set);
+ return -EINVAL;
+ }
+
+ uint64_t current_minimum_set;
+ r = read_key(hctx, HEADER_KEY_MINIMUM_SET, &current_minimum_set);
+ if (r < 0) {
+ return r;
+ }
+
+ if (object_set == current_minimum_set) {
+ return 0;
+ } else if (object_set < current_minimum_set) {
+ CLS_ERR("object number earlier than current object: %" PRIu64 " < %" PRIu64,
+ object_set, current_minimum_set);
+ return -ESTALE;
+ }
+
+ r = write_key(hctx, HEADER_KEY_MINIMUM_SET, object_set);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
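+
+// Illustrative usage sketch only: trimming advances the minimum set via the
+// set_minimum_set() helper from cls_journal_client.h; the object name is
+// hypothetical.
+//
+//   librados::ObjectWriteOperation op;
+//   cls::journal::client::set_minimum_set(&op, 3);  // sets 0-2 no longer needed
+//   int r = ioctx.operate("journal.1234", &op);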
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * object set (uint64_t)
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_get_active_set(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint64_t active_set;
+ int r = read_key(hctx, HEADER_KEY_ACTIVE_SET, &active_set);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(active_set, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param object_set (uint64_t)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_set_active_set(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint64_t object_set;
+ try {
+ auto iter = in->cbegin();
+ decode(object_set, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ uint64_t current_minimum_set;
+ int r = read_key(hctx, HEADER_KEY_MINIMUM_SET, &current_minimum_set);
+ if (r < 0) {
+ return r;
+ }
+
+ if (current_minimum_set > object_set) {
+ CLS_ERR("minimum object set later than active: %" PRIu64
+ " > %" PRIu64, current_minimum_set, object_set);
+ return -EINVAL;
+ }
+
+ uint64_t current_active_set;
+ r = read_key(hctx, HEADER_KEY_ACTIVE_SET, &current_active_set);
+ if (r < 0) {
+ return r;
+ }
+
+ if (object_set == current_active_set) {
+ return 0;
+ } else if (object_set < current_active_set) {
+ CLS_ERR("object number earlier than current object: %" PRIu64 " < %" PRIu64,
+ object_set, current_active_set);
+ return -ESTALE;
+ }
+
+ r = write_key(hctx, HEADER_KEY_ACTIVE_SET, object_set);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param id (string) - unique client id
+ *
+ * Output:
+ * cls::journal::Client
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_get_client(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string id;
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ std::string key(key_from_client_id(id));
+ cls::journal::Client client;
+ int r = read_key(hctx, key, &client);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(client, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param id (string) - unique client id
+ * @param data (bufferlist) - opaque data associated with the client
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_client_register(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string id;
+ bufferlist data;
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ decode(data, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ uint8_t order;
+ int r = read_key(hctx, HEADER_KEY_ORDER, &order);
+ if (r < 0) {
+ return r;
+ }
+
+ std::string key(key_from_client_id(id));
+ bufferlist stored_clientbl;
+ r = cls_cxx_map_get_val(hctx, key, &stored_clientbl);
+ if (r >= 0) {
+ CLS_ERR("duplicate client id: %s", id.c_str());
+ return -EEXIST;
+ } else if (r != -ENOENT) {
+ return r;
+ }
+
+ cls::journal::ObjectSetPosition minset;
+ r = find_min_commit_position(hctx, &minset);
+ if (r < 0)
+ return r;
+
+ cls::journal::Client client(id, data, minset);
+ r = write_key(hctx, key, client);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
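+
+// Illustrative usage sketch only, via the synchronous helper declared in
+// cls_journal_client.h; the client id "mirror-peer" and object name are
+// hypothetical.
+//
+//   bufferlist client_data;   // opaque payload, interpreted by the journal user
+//   int r = cls::journal::client::client_register(ioctx, "journal.1234",
+//                                                 "mirror-peer", client_data);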
+
+/**
+ * Input:
+ * @param id (string) - unique client id
+ * @param data (bufferlist) - opaque data associated with the client
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_client_update_data(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string id;
+ bufferlist data;
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ decode(data, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ std::string key(key_from_client_id(id));
+ cls::journal::Client client;
+ int r = read_key(hctx, key, &client);
+ if (r < 0) {
+ return r;
+ }
+
+ client.data = data;
+ r = write_key(hctx, key, client);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param id (string) - unique client id
+ * @param state (uint8_t) - client state
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_client_update_state(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string id;
+ cls::journal::ClientState state;
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ uint8_t state_raw;
+ decode(state_raw, iter);
+ state = static_cast<cls::journal::ClientState>(state_raw);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ std::string key(key_from_client_id(id));
+ cls::journal::Client client;
+ int r = read_key(hctx, key, &client);
+ if (r < 0) {
+ return r;
+ }
+
+ client.state = state;
+ r = write_key(hctx, key, client);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param id (string) - unique client id
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_client_unregister(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string id;
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ std::string key(key_from_client_id(id));
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, key, &bl);
+ if (r < 0) {
+ CLS_ERR("client is not registered: %s", id.c_str());
+ return r;
+ }
+
+ r = cls_cxx_map_remove_key(hctx, key);
+ if (r < 0) {
+ CLS_ERR("failed to remove omap key: %s", key.c_str());
+ return r;
+ }
+
+ // prune expired tags
+ r = expire_tags(hctx, &id);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param id (string) - unique client id
+ * @param commit_position (ObjectSetPosition)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_client_commit(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string id;
+ cls::journal::ObjectSetPosition commit_position;
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ decode(commit_position, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ uint8_t splay_width;
+ int r = read_key(hctx, HEADER_KEY_SPLAY_WIDTH, &splay_width);
+ if (r < 0) {
+ return r;
+ }
+ if (commit_position.object_positions.size() > splay_width) {
+ CLS_ERR("too many object positions");
+ return -EINVAL;
+ }
+
+ std::string key(key_from_client_id(id));
+ cls::journal::Client client;
+ r = read_key(hctx, key, &client);
+ if (r < 0) {
+ return r;
+ }
+
+ if (client.commit_position == commit_position) {
+ return 0;
+ }
+
+ client.commit_position = commit_position;
+ r = write_key(hctx, key, client);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
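+
+// Illustrative usage sketch only: object positions are supplied most-recent
+// first (see cls_journal_types.h) and may not exceed the splay width; the ids
+// below are hypothetical.
+//
+//   // object 3 (tag 1, entry 120) was committed after object 2 (tag 1, entry 119)
+//   cls::journal::ObjectSetPosition pos({{3, 1, 120}, {2, 1, 119}});
+//   librados::ObjectWriteOperation op;
+//   cls::journal::client::client_commit(&op, "mirror-peer", pos);
+//   int r = ioctx.operate("journal.1234", &op);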
+
+/**
+ * Input:
+ * @param start_after (string)
+ * @param max_return (uint64_t)
+ *
+ * Output:
+ * clients (set<cls::journal::Client>) - collection of registered clients
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_client_list(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string start_after;
+ uint64_t max_return;
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ std::set<cls::journal::Client> clients;
+ int r = get_client_list_range(hctx, &clients, start_after, max_return);
+ if (r < 0)
+ return r;
+
+ encode(clients, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * tag_tid (uint64_t)
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_get_next_tag_tid(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint64_t tag_tid;
+ int r = read_key(hctx, HEADER_KEY_NEXT_TAG_TID, &tag_tid);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(tag_tid, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param tag_tid (uint64_t)
+ *
+ * Output:
+ * cls::journal::Tag
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_get_tag(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint64_t tag_tid;
+ try {
+ auto iter = in->cbegin();
+ decode(tag_tid, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ std::string key(key_from_tag_tid(tag_tid));
+ cls::journal::Tag tag;
+ int r = read_key(hctx, key, &tag);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(tag, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param tag_tid (uint64_t)
+ * @param tag_class (uint64_t)
+ * @param data (bufferlist)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_tag_create(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint64_t tag_tid;
+ uint64_t tag_class;
+ bufferlist data;
+ try {
+ auto iter = in->cbegin();
+ decode(tag_tid, iter);
+ decode(tag_class, iter);
+ decode(data, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ std::string key(key_from_tag_tid(tag_tid));
+ bufferlist stored_tag_bl;
+ int r = cls_cxx_map_get_val(hctx, key, &stored_tag_bl);
+ if (r >= 0) {
+ CLS_ERR("duplicate tag id: %" PRIu64, tag_tid);
+ return -EEXIST;
+ } else if (r != -ENOENT) {
+ return r;
+ }
+
+ // verify tag tid ordering
+ uint64_t next_tag_tid;
+ r = read_key(hctx, HEADER_KEY_NEXT_TAG_TID, &next_tag_tid);
+ if (r < 0) {
+ return r;
+ }
+ if (tag_tid != next_tag_tid) {
+ CLS_LOG(5, "out-of-order tag sequence: %" PRIu64, tag_tid);
+ return -ESTALE;
+ }
+
+ uint64_t next_tag_class;
+ r = read_key(hctx, HEADER_KEY_NEXT_TAG_CLASS, &next_tag_class);
+ if (r < 0) {
+ return r;
+ }
+
+ if (tag_class == cls::journal::Tag::TAG_CLASS_NEW) {
+ // allocate a new tag class
+ tag_class = next_tag_class;
+ r = write_key(hctx, HEADER_KEY_NEXT_TAG_CLASS, tag_class + 1);
+ if (r < 0) {
+ return r;
+ }
+ } else {
+ // verify tag class range
+ if (tag_class >= next_tag_class) {
+ CLS_ERR("out-of-sequence tag class: %" PRIu64, tag_class);
+ return -EINVAL;
+ }
+ }
+
+ // prune expired tags
+ r = expire_tags(hctx, nullptr);
+ if (r < 0) {
+ return r;
+ }
+
+ // update tag tid sequence
+ r = write_key(hctx, HEADER_KEY_NEXT_TAG_TID, tag_tid + 1);
+ if (r < 0) {
+ return r;
+ }
+
+ // write tag structure
+ cls::journal::Tag tag(tag_tid, tag_class, data);
+ key = key_from_tag_tid(tag_tid);
+ r = write_key(hctx, key, tag);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
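+
+// Illustrative usage sketch only: a tag must use the next available tag tid,
+// so callers typically fetch it first; TAG_CLASS_NEW allocates a fresh tag
+// class. The object name is hypothetical.
+//
+//   uint64_t tag_tid;
+//   int r = cls::journal::client::get_next_tag_tid(ioctx, "journal.1234", &tag_tid);
+//   if (r == 0) {
+//     bufferlist tag_data;   // opaque tag payload
+//     r = cls::journal::client::tag_create(ioctx, "journal.1234", tag_tid,
+//                                          cls::journal::Tag::TAG_CLASS_NEW,
+//                                          tag_data);
+//   }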
+
+/**
+ * Input:
+ * @param start_after_tag_tid (uint64_t) - only return tags with tid greater than this (0 starts from the beginning)
+ * @param max_return (uint64_t) - max tags to return
+ * @param client_id (std::string) - client id filter
+ * @param tag_class (boost::optional<uint64_t>) - optional tag class filter
+ *
+ * Output:
+ * std::set<cls::journal::Tag> - collection of tags
+ * @returns 0 on success, negative error code on failure
+ */
+int journal_tag_list(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint64_t start_after_tag_tid;
+ uint64_t max_return;
+ std::string client_id;
+ boost::optional<uint64_t> tag_class(0);
+
+ // handle compiler false positive about use-before-init
+ tag_class = boost::none;
+ try {
+ auto iter = in->cbegin();
+ decode(start_after_tag_tid, iter);
+ decode(max_return, iter);
+ decode(client_id, iter);
+ decode(tag_class, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ // calculate the minimum tag within client's commit position
+ uint64_t minimum_tag_tid = std::numeric_limits<uint64_t>::max();
+ cls::journal::Client client;
+ int r = read_key(hctx, key_from_client_id(client_id), &client);
+ if (r < 0) {
+ return r;
+ }
+
+ for (auto object_position : client.commit_position.object_positions) {
+ minimum_tag_tid = std::min(minimum_tag_tid, object_position.tag_tid);
+ }
+
+ // compute minimum tags in use per-class
+ std::set<cls::journal::Tag> tags;
+ std::map<uint64_t, uint64_t> minimum_tag_class_to_tids;
+ typedef enum { TAG_PASS_CALCULATE_MINIMUMS,
+ TAG_PASS_LIST,
+ TAG_PASS_DONE } TagPass;
+ int tag_pass = (minimum_tag_tid == std::numeric_limits<uint64_t>::max() ?
+ TAG_PASS_LIST : TAG_PASS_CALCULATE_MINIMUMS);
+ std::string last_read = HEADER_KEY_TAG_PREFIX;
+ do {
+ std::map<std::string, bufferlist> vals;
+ bool more;
+ r = cls_cxx_map_get_vals(hctx, last_read, HEADER_KEY_TAG_PREFIX,
+ MAX_KEYS_READ, &vals, &more);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("failed to retrieve tags: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ for (auto &val : vals) {
+ cls::journal::Tag tag;
+ auto iter = val.second.cbegin();
+ try {
+ decode(tag, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("error decoding tag: %s", val.first.c_str());
+ return -EIO;
+ }
+
+ if (tag_pass == TAG_PASS_CALCULATE_MINIMUMS) {
+ minimum_tag_class_to_tids[tag.tag_class] = tag.tid;
+
+ // completed calculation of tag class minimums
+ if (tag.tid >= minimum_tag_tid) {
+ vals.clear();
+ more = false;
+ break;
+ }
+ } else if (tag_pass == TAG_PASS_LIST) {
+ if (start_after_tag_tid != 0 && tag.tid <= start_after_tag_tid) {
+ continue;
+ }
+
+ if (tag.tid >= minimum_tag_class_to_tids[tag.tag_class] &&
+ (!tag_class || *tag_class == tag.tag_class)) {
+ tags.insert(tag);
+ }
+ if (tags.size() >= max_return) {
+ tag_pass = TAG_PASS_DONE;
+ }
+ }
+ }
+
+ if (tag_pass != TAG_PASS_DONE && !more) {
+ last_read = HEADER_KEY_TAG_PREFIX;
+ ++tag_pass;
+ } else if (!vals.empty()) {
+ last_read = vals.rbegin()->first;
+ }
+ } while (tag_pass != TAG_PASS_DONE);
+
+ encode(tags, *out);
+ return 0;
+}
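+
+// Illustrative usage sketch only: the client-side tag_list() helper pages
+// through this method in JOURNAL_MAX_RETURN batches; boost::none disables the
+// tag class filter. Names are hypothetical.
+//
+//   std::set<cls::journal::Tag> tags;
+//   int r = cls::journal::client::tag_list(ioctx, "journal.1234", "mirror-peer",
+//                                          boost::none, &tags);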
+
+/**
+ * Input:
+ * @param soft_max_size (uint64_t)
+ *
+ * Output:
+ * @returns 0 if the object size is below the soft max, negative error code otherwise
+ */
+int journal_object_guard_append(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint64_t soft_max_size;
+ try {
+ auto iter = in->cbegin();
+ decode(soft_max_size, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode input parameters: %s", err.what());
+ return -EINVAL;
+ }
+
+ uint64_t size;
+ time_t mtime;
+ int r = cls_cxx_stat(hctx, &size, &mtime);
+ if (r == -ENOENT) {
+ return 0;
+ } else if (r < 0) {
+ CLS_ERR("failed to stat object: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ if (size >= soft_max_size) {
+ CLS_LOG(5, "journal object full: %" PRIu64 " >= %" PRIu64,
+ size, soft_max_size);
+ return -EOVERFLOW;
+ }
+ return 0;
+}
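+
+// Illustrative usage sketch only: appends are guarded so that a full object
+// returns -EOVERFLOW and the writer rolls over to the next object. The data
+// object name, "order", and "entry_bl" are hypothetical.
+//
+//   librados::ObjectWriteOperation op;
+//   cls::journal::client::guard_append(&op, 1ULL << order);  // soft max bytes
+//   op.append(entry_bl);   // pre-encoded journal entries
+//   int r = ioctx.operate("journal_data.1234.0", &op);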
+
+CLS_INIT(journal)
+{
+ CLS_LOG(20, "Loaded journal class!");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_journal_create;
+ cls_method_handle_t h_journal_get_order;
+ cls_method_handle_t h_journal_get_splay_width;
+ cls_method_handle_t h_journal_get_pool_id;
+ cls_method_handle_t h_journal_get_minimum_set;
+ cls_method_handle_t h_journal_set_minimum_set;
+ cls_method_handle_t h_journal_get_active_set;
+ cls_method_handle_t h_journal_set_active_set;
+ cls_method_handle_t h_journal_get_client;
+ cls_method_handle_t h_journal_client_register;
+ cls_method_handle_t h_journal_client_update_data;
+ cls_method_handle_t h_journal_client_update_state;
+ cls_method_handle_t h_journal_client_unregister;
+ cls_method_handle_t h_journal_client_commit;
+ cls_method_handle_t h_journal_client_list;
+ cls_method_handle_t h_journal_get_next_tag_tid;
+ cls_method_handle_t h_journal_get_tag;
+ cls_method_handle_t h_journal_tag_create;
+ cls_method_handle_t h_journal_tag_list;
+ cls_method_handle_t h_journal_object_guard_append;
+
+ cls_register("journal", &h_class);
+
+ /// methods for journal.$journal_id objects
+ cls_register_cxx_method(h_class, "create",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ journal_create, &h_journal_create);
+ cls_register_cxx_method(h_class, "get_order",
+ CLS_METHOD_RD,
+ journal_get_order, &h_journal_get_order);
+ cls_register_cxx_method(h_class, "get_splay_width",
+ CLS_METHOD_RD,
+ journal_get_splay_width, &h_journal_get_splay_width);
+ cls_register_cxx_method(h_class, "get_pool_id",
+ CLS_METHOD_RD,
+ journal_get_pool_id, &h_journal_get_pool_id);
+ cls_register_cxx_method(h_class, "get_minimum_set",
+ CLS_METHOD_RD,
+ journal_get_minimum_set,
+ &h_journal_get_minimum_set);
+ cls_register_cxx_method(h_class, "set_minimum_set",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ journal_set_minimum_set,
+ &h_journal_set_minimum_set);
+ cls_register_cxx_method(h_class, "get_active_set",
+ CLS_METHOD_RD,
+ journal_get_active_set,
+ &h_journal_get_active_set);
+ cls_register_cxx_method(h_class, "set_active_set",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ journal_set_active_set,
+ &h_journal_set_active_set);
+
+ cls_register_cxx_method(h_class, "get_client",
+ CLS_METHOD_RD,
+ journal_get_client, &h_journal_get_client);
+ cls_register_cxx_method(h_class, "client_register",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ journal_client_register, &h_journal_client_register);
+ cls_register_cxx_method(h_class, "client_update_data",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ journal_client_update_data,
+ &h_journal_client_update_data);
+ cls_register_cxx_method(h_class, "client_update_state",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ journal_client_update_state,
+ &h_journal_client_update_state);
+ cls_register_cxx_method(h_class, "client_unregister",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ journal_client_unregister,
+ &h_journal_client_unregister);
+ cls_register_cxx_method(h_class, "client_commit",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ journal_client_commit, &h_journal_client_commit);
+ cls_register_cxx_method(h_class, "client_list",
+ CLS_METHOD_RD,
+ journal_client_list, &h_journal_client_list);
+
+ cls_register_cxx_method(h_class, "get_next_tag_tid",
+ CLS_METHOD_RD,
+ journal_get_next_tag_tid,
+ &h_journal_get_next_tag_tid);
+ cls_register_cxx_method(h_class, "get_tag",
+ CLS_METHOD_RD,
+ journal_get_tag, &h_journal_get_tag);
+ cls_register_cxx_method(h_class, "tag_create",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ journal_tag_create, &h_journal_tag_create);
+ cls_register_cxx_method(h_class, "tag_list",
+ CLS_METHOD_RD,
+ journal_tag_list, &h_journal_tag_list);
+
+ /// methods for journal_data.$journal_id.$object_id objects
+ cls_register_cxx_method(h_class, "guard_append",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ journal_object_guard_append,
+ &h_journal_object_guard_append);
+}
diff --git a/src/cls/journal/cls_journal_client.cc b/src/cls/journal/cls_journal_client.cc
new file mode 100644
index 00000000..c22a32cf
--- /dev/null
+++ b/src/cls/journal/cls_journal_client.cc
@@ -0,0 +1,498 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "cls/journal/cls_journal_client.h"
+#include "include/rados/librados.hpp"
+#include "include/buffer.h"
+#include "include/Context.h"
+#include "common/Cond.h"
+#include <errno.h>
+
+namespace cls {
+namespace journal {
+namespace client {
+using ceph::encode;
+using ceph::decode;
+
+namespace {
+
+struct C_AioExec : public Context {
+ librados::IoCtx &ioctx;
+ std::string oid;
+
+ C_AioExec(librados::IoCtx &_ioctx, const std::string &_oid)
+ : ioctx(_ioctx), oid(_oid) {
+ }
+
+ static void rados_callback(rados_completion_t c, void *arg) {
+ Context *ctx = reinterpret_cast<Context *>(arg);
+ ctx->complete(rados_aio_get_return_value(c));
+ }
+};
+
+struct C_ClientList : public C_AioExec {
+ std::set<cls::journal::Client> *clients;
+ Context *on_finish;
+ bufferlist outbl;
+
+ C_ClientList(librados::IoCtx &_ioctx, const std::string &_oid,
+ std::set<cls::journal::Client> *_clients,
+ Context *_on_finish)
+ : C_AioExec(_ioctx, _oid), clients(_clients), on_finish(_on_finish) {}
+
+ void send(const std::string &start_after) {
+ bufferlist inbl;
+ encode(start_after, inbl);
+ encode(JOURNAL_MAX_RETURN, inbl);
+
+ librados::ObjectReadOperation op;
+ op.exec("journal", "client_list", inbl);
+
+ outbl.clear();
+ librados::AioCompletion *rados_completion =
+ librados::Rados::aio_create_completion(this, rados_callback, NULL);
+ int r = ioctx.aio_operate(oid, rados_completion, &op, &outbl);
+ ceph_assert(r == 0);
+ rados_completion->release();
+ }
+
+ void complete(int r) override {
+ if (r < 0) {
+ finish(r);
+ return;
+ }
+
+ try {
+ auto iter = outbl.cbegin();
+ std::set<cls::journal::Client> partial_clients;
+ decode(partial_clients, iter);
+
+ std::string start_after;
+ if (!partial_clients.empty()) {
+ start_after = partial_clients.rbegin()->id;
+ clients->insert(partial_clients.begin(), partial_clients.end());
+ }
+
+ if (partial_clients.size() < JOURNAL_MAX_RETURN) {
+ finish(0);
+ } else {
+ send(start_after);
+ }
+ } catch (const buffer::error &err) {
+ finish(-EBADMSG);
+ }
+ }
+
+ void finish(int r) override {
+ on_finish->complete(r);
+ delete this;
+ }
+};
+
+struct C_ImmutableMetadata : public C_AioExec {
+ uint8_t *order;
+ uint8_t *splay_width;
+ int64_t *pool_id;
+ Context *on_finish;
+ bufferlist outbl;
+
+ C_ImmutableMetadata(librados::IoCtx &_ioctx, const std::string &_oid,
+ uint8_t *_order, uint8_t *_splay_width,
+ int64_t *_pool_id, Context *_on_finish)
+ : C_AioExec(_ioctx, _oid), order(_order), splay_width(_splay_width),
+ pool_id(_pool_id), on_finish(_on_finish) {
+ }
+
+ void send() {
+ librados::ObjectReadOperation op;
+ bufferlist inbl;
+ op.exec("journal", "get_order", inbl);
+ op.exec("journal", "get_splay_width", inbl);
+ op.exec("journal", "get_pool_id", inbl);
+
+ librados::AioCompletion *rados_completion =
+ librados::Rados::aio_create_completion(this, rados_callback, NULL);
+ int r = ioctx.aio_operate(oid, rados_completion, &op, &outbl);
+ ceph_assert(r == 0);
+ rados_completion->release();
+ }
+
+ void finish(int r) override {
+ if (r == 0) {
+ try {
+ auto iter = outbl.cbegin();
+ decode(*order, iter);
+ decode(*splay_width, iter);
+ decode(*pool_id, iter);
+ } catch (const buffer::error &err) {
+ r = -EBADMSG;
+ }
+ }
+ on_finish->complete(r);
+ }
+};
+
+struct C_MutableMetadata : public C_AioExec {
+ uint64_t *minimum_set;
+ uint64_t *active_set;
+ C_ClientList *client_list;
+ bufferlist outbl;
+
+ C_MutableMetadata(librados::IoCtx &_ioctx, const std::string &_oid,
+ uint64_t *_minimum_set, uint64_t *_active_set,
+ C_ClientList *_client_list)
+ : C_AioExec(_ioctx, _oid), minimum_set(_minimum_set),
+ active_set(_active_set), client_list(_client_list) {}
+
+ void send() {
+ librados::ObjectReadOperation op;
+ bufferlist inbl;
+ op.exec("journal", "get_minimum_set", inbl);
+ op.exec("journal", "get_active_set", inbl);
+
+ librados::AioCompletion *rados_completion =
+ librados::Rados::aio_create_completion(this, rados_callback, NULL);
+ int r = ioctx.aio_operate(oid, rados_completion, &op, &outbl);
+ ceph_assert(r == 0);
+ rados_completion->release();
+ }
+
+ void finish(int r) override {
+ if (r == 0) {
+ try {
+ auto iter = outbl.cbegin();
+ decode(*minimum_set, iter);
+ decode(*active_set, iter);
+ client_list->send("");
+ } catch (const buffer::error &err) {
+ r = -EBADMSG;
+ }
+ }
+ if (r < 0) {
+ client_list->complete(r);
+ }
+ }
+};
+
+
+} // anonymous namespace
+
+void create(librados::ObjectWriteOperation *op,
+ uint8_t order, uint8_t splay, int64_t pool_id) {
+ bufferlist bl;
+ encode(order, bl);
+ encode(splay, bl);
+ encode(pool_id, bl);
+
+ op->exec("journal", "create", bl);
+}
+
+int create(librados::IoCtx &ioctx, const std::string &oid, uint8_t order,
+ uint8_t splay, int64_t pool_id) {
+ librados::ObjectWriteOperation op;
+ create(&op, order, splay, pool_id);
+
+ int r = ioctx.operate(oid, &op);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void get_immutable_metadata(librados::IoCtx &ioctx, const std::string &oid,
+ uint8_t *order, uint8_t *splay_width,
+ int64_t *pool_id, Context *on_finish) {
+ C_ImmutableMetadata *metadata = new C_ImmutableMetadata(ioctx, oid, order,
+ splay_width, pool_id,
+ on_finish);
+ metadata->send();
+}
+
+void get_mutable_metadata(librados::IoCtx &ioctx, const std::string &oid,
+ uint64_t *minimum_set, uint64_t *active_set,
+ std::set<cls::journal::Client> *clients,
+ Context *on_finish) {
+ C_ClientList *client_list = new C_ClientList(ioctx, oid, clients, on_finish);
+ C_MutableMetadata *metadata = new C_MutableMetadata(
+ ioctx, oid, minimum_set, active_set, client_list);
+ metadata->send();
+}
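+
+// Illustrative usage sketch only: the asynchronous helpers can be driven
+// synchronously with a C_SaferCond; the object name is hypothetical.
+//
+//   uint64_t minimum_set, active_set;
+//   std::set<cls::journal::Client> clients;
+//   C_SaferCond ctx;
+//   get_mutable_metadata(ioctx, "journal.1234", &minimum_set, &active_set,
+//                        &clients, &ctx);
+//   int r = ctx.wait();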
+
+void set_minimum_set(librados::ObjectWriteOperation *op, uint64_t object_set) {
+ bufferlist bl;
+ encode(object_set, bl);
+ op->exec("journal", "set_minimum_set", bl);
+}
+
+void set_active_set(librados::ObjectWriteOperation *op, uint64_t object_set) {
+ bufferlist bl;
+ encode(object_set, bl);
+ op->exec("journal", "set_active_set", bl);
+}
+
+int get_client(librados::IoCtx &ioctx, const std::string &oid,
+ const std::string &id, cls::journal::Client *client) {
+ librados::ObjectReadOperation op;
+ get_client_start(&op, id);
+
+ bufferlist out_bl;
+ int r = ioctx.operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ r = get_client_finish(&iter, client);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
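+
+// Illustrative usage sketch only; the object name and client id are
+// hypothetical.
+//
+//   cls::journal::Client client;
+//   int r = get_client(ioctx, "journal.1234", "mirror-peer", &client);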
+
+void get_client_start(librados::ObjectReadOperation *op,
+ const std::string &id) {
+ bufferlist bl;
+ encode(id, bl);
+ op->exec("journal", "get_client", bl);
+}
+
+int get_client_finish(bufferlist::const_iterator *iter,
+ cls::journal::Client *client) {
+ try {
+ decode(*client, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int client_register(librados::IoCtx &ioctx, const std::string &oid,
+ const std::string &id, const bufferlist &data) {
+ librados::ObjectWriteOperation op;
+ client_register(&op, id, data);
+ return ioctx.operate(oid, &op);
+}
+
+void client_register(librados::ObjectWriteOperation *op,
+ const std::string &id, const bufferlist &data) {
+ bufferlist bl;
+ encode(id, bl);
+ encode(data, bl);
+ op->exec("journal", "client_register", bl);
+}
+
+int client_update_data(librados::IoCtx &ioctx, const std::string &oid,
+ const std::string &id, const bufferlist &data) {
+ librados::ObjectWriteOperation op;
+ client_update_data(&op, id, data);
+ return ioctx.operate(oid, &op);
+}
+
+void client_update_data(librados::ObjectWriteOperation *op,
+ const std::string &id, const bufferlist &data) {
+ bufferlist bl;
+ encode(id, bl);
+ encode(data, bl);
+ op->exec("journal", "client_update_data", bl);
+}
+
+int client_update_state(librados::IoCtx &ioctx, const std::string &oid,
+ const std::string &id, cls::journal::ClientState state) {
+ librados::ObjectWriteOperation op;
+ client_update_state(&op, id, state);
+ return ioctx.operate(oid, &op);
+}
+
+void client_update_state(librados::ObjectWriteOperation *op,
+ const std::string &id,
+ cls::journal::ClientState state) {
+ bufferlist bl;
+ encode(id, bl);
+ encode(static_cast<uint8_t>(state), bl);
+ op->exec("journal", "client_update_state", bl);
+}
+
+int client_unregister(librados::IoCtx &ioctx, const std::string &oid,
+ const std::string &id) {
+ librados::ObjectWriteOperation op;
+ client_unregister(&op, id);
+ return ioctx.operate(oid, &op);
+}
+
+void client_unregister(librados::ObjectWriteOperation *op,
+ const std::string &id) {
+ bufferlist bl;
+ encode(id, bl);
+ op->exec("journal", "client_unregister", bl);
+}
+
+void client_commit(librados::ObjectWriteOperation *op, const std::string &id,
+ const cls::journal::ObjectSetPosition &commit_position) {
+ bufferlist bl;
+ encode(id, bl);
+ encode(commit_position, bl);
+ op->exec("journal", "client_commit", bl);
+}
+
+int client_list(librados::IoCtx &ioctx, const std::string &oid,
+ std::set<cls::journal::Client> *clients) {
+ C_SaferCond cond;
+ client_list(ioctx, oid, clients, &cond);
+ return cond.wait();
+}
+
+void client_list(librados::IoCtx &ioctx, const std::string &oid,
+ std::set<cls::journal::Client> *clients, Context *on_finish) {
+ C_ClientList *client_list = new C_ClientList(ioctx, oid, clients, on_finish);
+ client_list->send("");
+}
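+
+// Illustrative usage sketch only: synchronous listing of every registered
+// client for a hypothetical journal header object.
+//
+//   std::set<cls::journal::Client> clients;
+//   int r = client_list(ioctx, "journal.1234", &clients);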
+
+int get_next_tag_tid(librados::IoCtx &ioctx, const std::string &oid,
+ uint64_t *tag_tid) {
+ librados::ObjectReadOperation op;
+ get_next_tag_tid_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx.operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ r = get_next_tag_tid_finish(&iter, tag_tid);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void get_next_tag_tid_start(librados::ObjectReadOperation *op) {
+ bufferlist bl;
+ op->exec("journal", "get_next_tag_tid", bl);
+}
+
+int get_next_tag_tid_finish(bufferlist::const_iterator *iter,
+ uint64_t *tag_tid) {
+ try {
+ decode(*tag_tid, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_tag(librados::IoCtx &ioctx, const std::string &oid,
+ uint64_t tag_tid, cls::journal::Tag *tag) {
+ librados::ObjectReadOperation op;
+ get_tag_start(&op, tag_tid);
+
+ bufferlist out_bl;
+ int r = ioctx.operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ r = get_tag_finish(&iter, tag);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void get_tag_start(librados::ObjectReadOperation *op,
+ uint64_t tag_tid) {
+ bufferlist bl;
+ encode(tag_tid, bl);
+ op->exec("journal", "get_tag", bl);
+}
+
+int get_tag_finish(bufferlist::const_iterator *iter, cls::journal::Tag *tag) {
+ try {
+ decode(*tag, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int tag_create(librados::IoCtx &ioctx, const std::string &oid,
+ uint64_t tag_tid, uint64_t tag_class,
+ const bufferlist &data) {
+ librados::ObjectWriteOperation op;
+ tag_create(&op, tag_tid, tag_class, data);
+ return ioctx.operate(oid, &op);
+}
+
+void tag_create(librados::ObjectWriteOperation *op, uint64_t tag_tid,
+ uint64_t tag_class, const bufferlist &data) {
+ bufferlist bl;
+ encode(tag_tid, bl);
+ encode(tag_class, bl);
+ encode(data, bl);
+ op->exec("journal", "tag_create", bl);
+}
+
+int tag_list(librados::IoCtx &ioctx, const std::string &oid,
+ const std::string &client_id, boost::optional<uint64_t> tag_class,
+ std::set<cls::journal::Tag> *tags) {
+ tags->clear();
+ uint64_t start_after_tag_tid = 0;
+ while (true) {
+ librados::ObjectReadOperation op;
+ tag_list_start(&op, start_after_tag_tid, JOURNAL_MAX_RETURN, client_id,
+ tag_class);
+
+ bufferlist out_bl;
+ int r = ioctx.operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ std::set<cls::journal::Tag> decode_tags;
+ r = tag_list_finish(&iter, &decode_tags);
+ if (r < 0) {
+ return r;
+ }
+
+ tags->insert(decode_tags.begin(), decode_tags.end());
+ if (decode_tags.size() < JOURNAL_MAX_RETURN) {
+ break;
+ }
+ }
+ return 0;
+}
+
+void tag_list_start(librados::ObjectReadOperation *op,
+ uint64_t start_after_tag_tid, uint64_t max_return,
+ const std::string &client_id,
+ boost::optional<uint64_t> tag_class) {
+ bufferlist bl;
+ encode(start_after_tag_tid, bl);
+ encode(max_return, bl);
+ encode(client_id, bl);
+ encode(tag_class, bl);
+ op->exec("journal", "tag_list", bl);
+}
+
+int tag_list_finish(bufferlist::const_iterator *iter,
+ std::set<cls::journal::Tag> *tags) {
+ try {
+ decode(*tags, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+void guard_append(librados::ObjectWriteOperation *op, uint64_t soft_max_size) {
+ bufferlist bl;
+ encode(soft_max_size, bl);
+ op->exec("journal", "guard_append", bl);
+}
+
+} // namespace client
+} // namespace journal
+} // namespace cls
diff --git a/src/cls/journal/cls_journal_client.h b/src/cls/journal/cls_journal_client.h
new file mode 100644
index 00000000..50579cfc
--- /dev/null
+++ b/src/cls/journal/cls_journal_client.h
@@ -0,0 +1,107 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_JOURNAL_CLIENT_H
+#define CEPH_CLS_JOURNAL_CLIENT_H
+
+#include "include/rados/librados_fwd.hpp"
+#include "cls/journal/cls_journal_types.h"
+#include <set>
+#include <boost/optional.hpp>
+
+class Context;
+
+namespace cls {
+namespace journal {
+namespace client {
+
+void create(librados::ObjectWriteOperation *op,
+ uint8_t order, uint8_t splay, int64_t pool_id);
+int create(librados::IoCtx &ioctx, const std::string &oid, uint8_t order,
+ uint8_t splay, int64_t pool_id);
+
+void get_immutable_metadata(librados::IoCtx &ioctx, const std::string &oid,
+ uint8_t *order, uint8_t *splay_width,
+ int64_t *pool_id, Context *on_finish);
+void get_mutable_metadata(librados::IoCtx &ioctx, const std::string &oid,
+ uint64_t *minimum_set, uint64_t *active_set,
+ std::set<cls::journal::Client> *clients,
+ Context *on_finish);
+
+void set_minimum_set(librados::ObjectWriteOperation *op, uint64_t object_set);
+void set_active_set(librados::ObjectWriteOperation *op, uint64_t object_set);
+
+// journal client helpers
+int get_client(librados::IoCtx &ioctx, const std::string &oid,
+ const std::string &id, cls::journal::Client *client);
+void get_client_start(librados::ObjectReadOperation *op,
+ const std::string &id);
+int get_client_finish(bufferlist::const_iterator *iter,
+ cls::journal::Client *client);
+
+int client_register(librados::IoCtx &ioctx, const std::string &oid,
+ const std::string &id, const bufferlist &data);
+void client_register(librados::ObjectWriteOperation *op,
+ const std::string &id, const bufferlist &data);
+
+int client_update_data(librados::IoCtx &ioctx, const std::string &oid,
+ const std::string &id, const bufferlist &data);
+void client_update_data(librados::ObjectWriteOperation *op,
+ const std::string &id, const bufferlist &data);
+int client_update_state(librados::IoCtx &ioctx, const std::string &oid,
+ const std::string &id, cls::journal::ClientState state);
+void client_update_state(librados::ObjectWriteOperation *op,
+ const std::string &id,
+ cls::journal::ClientState state);
+
+int client_unregister(librados::IoCtx &ioctx, const std::string &oid,
+ const std::string &id);
+void client_unregister(librados::ObjectWriteOperation *op,
+ const std::string &id);
+
+void client_commit(librados::ObjectWriteOperation *op, const std::string &id,
+ const cls::journal::ObjectSetPosition &commit_position);
+
+int client_list(librados::IoCtx &ioctx, const std::string &oid,
+ std::set<cls::journal::Client> *clients);
+void client_list(librados::IoCtx &ioctx, const std::string &oid,
+ std::set<cls::journal::Client> *clients, Context *on_finish);
+
+// journal tag helpers
+int get_next_tag_tid(librados::IoCtx &ioctx, const std::string &oid,
+ uint64_t *tag_tid);
+void get_next_tag_tid_start(librados::ObjectReadOperation *op);
+int get_next_tag_tid_finish(bufferlist::const_iterator *iter,
+ uint64_t *tag_tid);
+
+int get_tag(librados::IoCtx &ioctx, const std::string &oid,
+ uint64_t tag_tid, cls::journal::Tag *tag);
+void get_tag_start(librados::ObjectReadOperation *op,
+ uint64_t tag_tid);
+int get_tag_finish(bufferlist::const_iterator *iter, cls::journal::Tag *tag);
+
+int tag_create(librados::IoCtx &ioctx, const std::string &oid,
+ uint64_t tag_tid, uint64_t tag_class,
+ const bufferlist &data);
+void tag_create(librados::ObjectWriteOperation *op,
+ uint64_t tag_tid, uint64_t tag_class,
+ const bufferlist &data);
+
+int tag_list(librados::IoCtx &ioctx, const std::string &oid,
+ const std::string &client_id, boost::optional<uint64_t> tag_class,
+ std::set<cls::journal::Tag> *tags);
+void tag_list_start(librados::ObjectReadOperation *op,
+ uint64_t start_after_tag_tid, uint64_t max_return,
+ const std::string &client_id,
+ boost::optional<uint64_t> tag_class);
+int tag_list_finish(bufferlist::const_iterator *iter,
+ std::set<cls::journal::Tag> *tags);
+
+// journal entry helpers
+void guard_append(librados::ObjectWriteOperation *op, uint64_t soft_max_size);
+
+} // namespace client
+} // namespace journal
+} // namespace cls
+
+#endif // CEPH_CLS_JOURNAL_CLIENT_H
diff --git a/src/cls/journal/cls_journal_types.cc b/src/cls/journal/cls_journal_types.cc
new file mode 100644
index 00000000..6976304d
--- /dev/null
+++ b/src/cls/journal/cls_journal_types.cc
@@ -0,0 +1,196 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "cls/journal/cls_journal_types.h"
+#include "include/stringify.h"
+#include "common/Formatter.h"
+
+namespace cls {
+namespace journal {
+
+void ObjectPosition::encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(object_number, bl);
+ encode(tag_tid, bl);
+ encode(entry_tid, bl);
+ ENCODE_FINISH(bl);
+}
+
+void ObjectPosition::decode(bufferlist::const_iterator& iter) {
+ DECODE_START(1, iter);
+ decode(object_number, iter);
+ decode(tag_tid, iter);
+ decode(entry_tid, iter);
+ DECODE_FINISH(iter);
+}
+
+void ObjectPosition::dump(Formatter *f) const {
+ f->dump_unsigned("object_number", object_number);
+ f->dump_unsigned("tag_tid", tag_tid);
+ f->dump_unsigned("entry_tid", entry_tid);
+}
+
+void ObjectPosition::generate_test_instances(std::list<ObjectPosition *> &o) {
+ o.push_back(new ObjectPosition());
+ o.push_back(new ObjectPosition(1, 2, 3));
+}
+
+void ObjectSetPosition::encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(object_positions, bl);
+ ENCODE_FINISH(bl);
+}
+
+void ObjectSetPosition::decode(bufferlist::const_iterator& iter) {
+ DECODE_START(1, iter);
+ decode(object_positions, iter);
+ DECODE_FINISH(iter);
+}
+
+void ObjectSetPosition::dump(Formatter *f) const {
+ f->open_array_section("object_positions");
+ for (auto &pos : object_positions) {
+ f->open_object_section("object_position");
+ pos.dump(f);
+ f->close_section();
+ }
+ f->close_section();
+}
+
+void ObjectSetPosition::generate_test_instances(
+ std::list<ObjectSetPosition *> &o) {
+ o.push_back(new ObjectSetPosition());
+ o.push_back(new ObjectSetPosition({{0, 1, 120}, {121, 2, 121}}));
+}
+
+void Client::encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(id, bl);
+ encode(data, bl);
+ encode(commit_position, bl);
+ encode(static_cast<uint8_t>(state), bl);
+ ENCODE_FINISH(bl);
+}
+
+void Client::decode(bufferlist::const_iterator& iter) {
+ DECODE_START(1, iter);
+ decode(id, iter);
+ decode(data, iter);
+ decode(commit_position, iter);
+
+ uint8_t state_raw;
+ decode(state_raw, iter);
+ state = static_cast<ClientState>(state_raw);
+ DECODE_FINISH(iter);
+}
+
+void Client::dump(Formatter *f) const {
+ f->dump_string("id", id);
+
+ std::stringstream data_ss;
+ data.hexdump(data_ss);
+ f->dump_string("data", data_ss.str());
+
+ f->open_object_section("commit_position");
+ commit_position.dump(f);
+ f->close_section();
+
+ f->dump_string("state", stringify(state));
+}
+
+void Client::generate_test_instances(std::list<Client *> &o) {
+ bufferlist data;
+ data.append(std::string(128, '1'));
+
+ o.push_back(new Client());
+ o.push_back(new Client("id", data));
+ o.push_back(new Client("id", data, {{{1, 2, 120}, {2, 3, 121}}}));
+}
+
+void Tag::encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(tid, bl);
+ encode(tag_class, bl);
+ encode(data, bl);
+ ENCODE_FINISH(bl);
+}
+
+void Tag::decode(bufferlist::const_iterator& iter) {
+ DECODE_START(1, iter);
+ decode(tid, iter);
+ decode(tag_class, iter);
+ decode(data, iter);
+ DECODE_FINISH(iter);
+}
+
+void Tag::dump(Formatter *f) const {
+ f->dump_unsigned("tid", tid);
+ f->dump_unsigned("tag_class", tag_class);
+
+ std::stringstream data_ss;
+ data.hexdump(data_ss);
+ f->dump_string("data", data_ss.str());
+}
+
+void Tag::generate_test_instances(std::list<Tag *> &o) {
+ o.push_back(new Tag());
+
+ bufferlist data;
+ data.append(std::string(128, '1'));
+ o.push_back(new Tag(123, 234, data));
+}
+
+std::ostream &operator<<(std::ostream &os, const ClientState &state) {
+ switch (state) {
+ case CLIENT_STATE_CONNECTED:
+ os << "connected";
+ break;
+ case CLIENT_STATE_DISCONNECTED:
+ os << "disconnected";
+ break;
+ default:
+ os << "unknown (" << static_cast<uint32_t>(state) << ")";
+ break;
+ }
+ return os;
+}
+
+std::ostream &operator<<(std::ostream &os,
+ const ObjectPosition &object_position) {
+ os << "["
+ << "object_number=" << object_position.object_number << ", "
+ << "tag_tid=" << object_position.tag_tid << ", "
+ << "entry_tid=" << object_position.entry_tid << "]";
+ return os;
+}
+
+std::ostream &operator<<(std::ostream &os,
+ const ObjectSetPosition &object_set_position) {
+ os << "[positions=[";
+ std::string delim;
+ for (auto &object_position : object_set_position.object_positions) {
+ os << delim << object_position;
+ delim = ", ";
+ }
+ os << "]]";
+ return os;
+}
+
+std::ostream &operator<<(std::ostream &os, const Client &client) {
+ os << "[id=" << client.id << ", "
+ << "commit_position=" << client.commit_position << ", "
+ << "state=" << client.state << "]";
+ return os;
+}
+
+std::ostream &operator<<(std::ostream &os, const Tag &tag) {
+ os << "[tid=" << tag.tid << ", "
+ << "tag_class=" << tag.tag_class << ", "
+ << "data=";
+ tag.data.hexdump(os);
+ os << "]";
+ return os;
+}
+
+} // namespace journal
+} // namespace cls
diff --git a/src/cls/journal/cls_journal_types.h b/src/cls/journal/cls_journal_types.h
new file mode 100644
index 00000000..2a617698
--- /dev/null
+++ b/src/cls/journal/cls_journal_types.h
@@ -0,0 +1,154 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_JOURNAL_TYPES_H
+#define CEPH_CLS_JOURNAL_TYPES_H
+
+#include "include/int_types.h"
+#include "include/buffer_fwd.h"
+#include "include/encoding.h"
+#include <iosfwd>
+#include <list>
+#include <string>
+
+namespace ceph {
+class Formatter;
+}
+
+namespace cls {
+namespace journal {
+
+static const uint64_t JOURNAL_MAX_RETURN = 256;
+
+struct ObjectPosition {
+ uint64_t object_number;
+ uint64_t tag_tid;
+ uint64_t entry_tid;
+
+ ObjectPosition() : object_number(0), tag_tid(0), entry_tid(0) {}
+ ObjectPosition(uint64_t _object_number, uint64_t _tag_tid,
+ uint64_t _entry_tid)
+ : object_number(_object_number), tag_tid(_tag_tid), entry_tid(_entry_tid) {}
+
+ inline bool operator==(const ObjectPosition& rhs) const {
+ return (object_number == rhs.object_number &&
+ tag_tid == rhs.tag_tid &&
+ entry_tid == rhs.entry_tid);
+ }
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& iter);
+ void dump(Formatter *f) const;
+
+ inline bool operator<(const ObjectPosition &rhs) const {
+ if (object_number != rhs.object_number) {
+ return object_number < rhs.object_number;
+ } else if (tag_tid != rhs.tag_tid) {
+ return tag_tid < rhs.tag_tid;
+ }
+ return entry_tid < rhs.entry_tid;
+ }
+
+ static void generate_test_instances(std::list<ObjectPosition *> &o);
+};
+
+typedef std::list<ObjectPosition> ObjectPositions;
+
+struct ObjectSetPosition {
+ // stored in most-recent -> least recent committed entry order
+ ObjectPositions object_positions;
+
+ ObjectSetPosition() {}
+ ObjectSetPosition(const ObjectPositions &_object_positions)
+ : object_positions(_object_positions) {}
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& iter);
+ void dump(Formatter *f) const;
+
+ inline bool operator==(const ObjectSetPosition &rhs) const {
+ return (object_positions == rhs.object_positions);
+ }
+
+ static void generate_test_instances(std::list<ObjectSetPosition *> &o);
+};
+
+enum ClientState {
+ CLIENT_STATE_CONNECTED = 0,
+ CLIENT_STATE_DISCONNECTED = 1
+};
+
+struct Client {
+ std::string id;
+ bufferlist data;
+ ObjectSetPosition commit_position;
+ ClientState state;
+
+ Client() : state(CLIENT_STATE_CONNECTED) {}
+ Client(const std::string& _id, const bufferlist &_data,
+ const ObjectSetPosition &_commit_position = ObjectSetPosition(),
+ ClientState _state = CLIENT_STATE_CONNECTED)
+ : id(_id), data(_data), commit_position(_commit_position), state(_state) {}
+
+ inline bool operator==(const Client &rhs) const {
+ return (id == rhs.id &&
+ data.contents_equal(rhs.data) &&
+ commit_position == rhs.commit_position &&
+ state == rhs.state);
+ }
+ inline bool operator<(const Client &rhs) const {
+ return (id < rhs.id);
+ }
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& iter);
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<Client *> &o);
+};
+
+struct Tag {
+ static const uint64_t TAG_CLASS_NEW = static_cast<uint64_t>(-1);
+
+ uint64_t tid;
+ uint64_t tag_class;
+ bufferlist data;
+
+ Tag() : tid(0), tag_class(0) {}
+ Tag(uint64_t tid, uint64_t tag_class, const bufferlist &data)
+ : tid(tid), tag_class(tag_class), data(data) {}
+
+ inline bool operator==(const Tag &rhs) const {
+ return (tid == rhs.tid &&
+ tag_class == rhs.tag_class &&
+ data.contents_equal(rhs.data));
+ }
+ inline bool operator<(const Tag &rhs) const {
+ return (tid < rhs.tid);
+ }
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& iter);
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<Tag *> &o);
+};
+
+WRITE_CLASS_ENCODER(ObjectPosition);
+WRITE_CLASS_ENCODER(ObjectSetPosition);
+WRITE_CLASS_ENCODER(Client);
+WRITE_CLASS_ENCODER(Tag);
+
+std::ostream &operator<<(std::ostream &os, const ClientState &state);
+std::ostream &operator<<(std::ostream &os,
+ const ObjectPosition &object_position);
+std::ostream &operator<<(std::ostream &os,
+ const ObjectSetPosition &object_set_position);
+std::ostream &operator<<(std::ostream &os,
+ const Client &client);
+std::ostream &operator<<(std::ostream &os, const Tag &tag);
+
+} // namespace journal
+} // namespace cls
+
+#endif // CEPH_CLS_JOURNAL_TYPES_H
diff --git a/src/cls/lock/cls_lock.cc b/src/cls/lock/cls_lock.cc
new file mode 100644
index 00000000..74880225
--- /dev/null
+++ b/src/cls/lock/cls_lock.cc
@@ -0,0 +1,647 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+/** \file
+ *
+ * This is an OSD class that implements methods for object
+ * advisory locking.
+ *
+ */
+
+#include <errno.h>
+#include <map>
+#include <sstream>
+
+#include "include/types.h"
+#include "include/utime.h"
+#include "objclass/objclass.h"
+
+#include "common/errno.h"
+#include "common/Clock.h"
+
+#include "cls/lock/cls_lock_types.h"
+#include "cls/lock/cls_lock_ops.h"
+
+#include "global/global_context.h"
+
+#include "include/compat.h"
+
+
+using namespace rados::cls::lock;
+
+
+CLS_VER(1,0)
+CLS_NAME(lock)
+
+#define LOCK_PREFIX "lock."
+
+static int clean_lock(cls_method_context_t hctx)
+{
+ int r = cls_cxx_remove(hctx);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static int read_lock(cls_method_context_t hctx,
+ const string& name,
+ lock_info_t *lock)
+{
+ bufferlist bl;
+ string key = LOCK_PREFIX;
+ key.append(name);
+
+ int r = cls_cxx_getxattr(hctx, key.c_str(), &bl);
+ if (r < 0) {
+ if (r == -ENODATA) {
+ *lock = lock_info_t();
+ return 0;
+ }
+ if (r != -ENOENT) {
+ CLS_ERR("error reading xattr %s: %d", key.c_str(), r);
+ }
+ return r;
+ }
+
+ try {
+ auto it = bl.cbegin();
+ decode(*lock, it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("error decoding %s", key.c_str());
+ return -EIO;
+ }
+
+ /* now trim expired locks */
+
+ utime_t now = ceph_clock_now();
+
+ map<locker_id_t, locker_info_t>::iterator iter = lock->lockers.begin();
+
+ while (iter != lock->lockers.end()) {
+ struct locker_info_t& info = iter->second;
+ if (!info.expiration.is_zero() && info.expiration < now) {
+ CLS_LOG(20, "expiring locker");
+ iter = lock->lockers.erase(iter);
+ } else {
+ ++iter;
+ }
+ }
+
+ if (lock->lockers.empty() && cls_lock_is_ephemeral(lock->lock_type)) {
+ r = clean_lock(hctx);
+ if (r < 0) {
+ CLS_ERR("error, on read, cleaning lock object %s", cpp_strerror(r).c_str());
+ }
+ }
+
+ return 0;
+}
+
+static int write_lock(cls_method_context_t hctx, const string& name, const lock_info_t& lock)
+{
+ using ceph::encode;
+ string key = LOCK_PREFIX;
+ key.append(name);
+
+ bufferlist lock_bl;
+ encode(lock, lock_bl, cls_get_client_features(hctx));
+
+ int r = cls_cxx_setxattr(hctx, key.c_str(), &lock_bl);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+/**
+ * helper function to add a lock and update disk state.
+ *
+ * Input:
+ * @param name Lock name
+ * @param lock_type Type of lock (exclusive / shared)
+ * @param duration Duration of lock (in seconds). Zero means it doesn't expire.
+ * @param description The locker description to store with the lock
+ * @param flags lock flags (LOCK_FLAG_MAY_RENEW / LOCK_FLAG_MUST_RENEW)
+ * @param cookie The cookie to set in the lock
+ * @param tag The tag to match with the lock (can only lock with matching tags)
+ *
+ * @return 0 on success, or -errno on failure
+ */
+static int lock_obj(cls_method_context_t hctx,
+ const string& name,
+ ClsLockType lock_type,
+ utime_t duration,
+ const string& description,
+ uint8_t flags,
+ const string& cookie,
+ const string& tag)
+{
+ bool exclusive = cls_lock_is_exclusive(lock_type);
+ lock_info_t linfo;
+ bool fail_if_exists = (flags & LOCK_FLAG_MAY_RENEW) == 0;
+ bool fail_if_does_not_exist = flags & LOCK_FLAG_MUST_RENEW;
+
+ CLS_LOG(20,
+ "requested lock_type=%s fail_if_exists=%d fail_if_does_not_exist=%d",
+ cls_lock_type_str(lock_type), fail_if_exists, fail_if_does_not_exist);
+ if (!cls_lock_is_valid(lock_type)) {
+ return -EINVAL;
+ }
+
+ if (name.empty())
+ return -EINVAL;
+
+ if (!fail_if_exists && fail_if_does_not_exist) {
+ // at most one of LOCK_FLAG_MAY_RENEW and LOCK_FLAG_MUST_RENEW may
+ // be set since they have different implications if the lock does
+ // not already exist
+ return -EINVAL;
+ }
+
+ // see if there's already a locker
+ int r = read_lock(hctx, name, &linfo);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("Could not read lock info: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ map<locker_id_t, locker_info_t>& lockers = linfo.lockers;
+ map<locker_id_t, locker_info_t>::iterator iter;
+
+ locker_id_t id;
+ id.cookie = cookie;
+ entity_inst_t inst;
+ r = cls_get_request_origin(hctx, &inst);
+ ceph_assert(r == 0);
+ id.locker = inst.name;
+
+ /* check this early, before we check fail_if_exists, otherwise we might
+ * remove the locker entry and not check it later */
+ if (lockers.size() && tag != linfo.tag) {
+ CLS_LOG(20, "cannot take lock on object, conflicting tag");
+ return -EBUSY;
+ }
+
+ ClsLockType existing_lock_type = linfo.lock_type;
+ CLS_LOG(20, "existing_lock_type=%s", cls_lock_type_str(existing_lock_type));
+ iter = lockers.find(id);
+ if (iter != lockers.end()) {
+ if (fail_if_exists && !fail_if_does_not_exist) {
+ return -EEXIST;
+ } else {
+ lockers.erase(iter); // remove old entry
+ }
+ } else if (fail_if_does_not_exist) {
+ return -ENOENT;
+ }
+
+ if (!lockers.empty()) {
+ if (exclusive) {
+ auto locker_lister =
+ [&lockers]() -> std::string {
+ std::stringstream locker_list;
+ locker_list << lockers;
+ return locker_list.str();
+ };
+ CLS_LOG(20, "could not exclusive-lock object, already locked by %s",
+ locker_lister().c_str());
+ return -EBUSY;
+ }
+
+ if (existing_lock_type != lock_type) {
+ CLS_LOG(20, "cannot take lock on object, conflicting lock type");
+ return -EBUSY;
+ }
+ }
+
+ linfo.lock_type = lock_type;
+ linfo.tag = tag;
+ utime_t expiration;
+ if (!duration.is_zero()) {
+ expiration = ceph_clock_now();
+ expiration += duration;
+
+ }
+ // make all addrs of type legacy, because v2 clients speak v2 or v1,
+ // even depending on which OSD they are talking to, and the type
+ // isn't what uniquely identifies them. also, storing a v1 addr
+ // here means that old clients who get this locker_info won't see an
+ // old "msgr2:" prefix.
+ inst.addr.set_type(entity_addr_t::TYPE_LEGACY);
+
+ struct locker_info_t info(expiration, inst.addr, description);
+
+ linfo.lockers[id] = info;
+
+ r = write_lock(hctx, name, linfo);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+/**
+ * Set an exclusive lock on an object for the activating client, if possible.
+ *
+ * Input:
+ * @param cls_lock_lock_op request input
+ *
+ * @returns 0 on success, -EINVAL if it can't decode the input,
+ * -EBUSY if the object is already locked, or -errno on (unexpected) failure.
+ */
+static int lock_op(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "lock_op");
+ cls_lock_lock_op op;
+ try {
+ auto iter = in->cbegin();
+ decode(op, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ return lock_obj(hctx,
+ op.name, op.type, op.duration, op.description,
+ op.flags, op.cookie, op.tag);
+}
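+
+// Illustrative usage sketch only, built from the fields consumed above. The
+// method is assumed to be registered as "lock" in the "lock" class, and
+// LOCK_EXCLUSIVE is assumed from cls_lock_types.h; the oid and cookie values
+// are hypothetical.
+//
+//   cls_lock_lock_op op;
+//   op.name = "internal-lock";
+//   op.type = LOCK_EXCLUSIVE;
+//   op.cookie = "client-cookie";
+//   op.duration = utime_t(30, 0);   // 30s lease; zero means no expiry
+//   bufferlist in, out;
+//   encode(op, in);
+//   int r = ioctx.exec("some-object", "lock", "lock", in, out);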
+
+/**
+ * helper function to remove a lock from on disk and clean up state.
+ *
+ * @param name The lock name
+ * @param locker The locker entity name
+ * @param cookie The user-defined cookie associated with the lock.
+ *
+ * @return 0 on success, -ENOENT if there is no such lock (either
+ * entity or cookie is wrong), or -errno on other error.
+ */
+static int remove_lock(cls_method_context_t hctx,
+ const string& name,
+ entity_name_t& locker,
+ const string& cookie)
+{
+ // get current lockers
+ lock_info_t linfo;
+ int r = read_lock(hctx, name, &linfo);
+ if (r < 0) {
+ CLS_ERR("Could not read list of current lockers off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ map<locker_id_t, locker_info_t>& lockers = linfo.lockers;
+ struct locker_id_t id(locker, cookie);
+
+ // remove named locker from set
+ map<locker_id_t, locker_info_t>::iterator iter = lockers.find(id);
+ if (iter == lockers.end()) { // no such key
+ return -ENOENT;
+ }
+ lockers.erase(iter);
+
+ if (cls_lock_is_ephemeral(linfo.lock_type)) {
+ ceph_assert(lockers.empty());
+ r = clean_lock(hctx);
+ } else {
+ r = write_lock(hctx, name, linfo);
+ }
+
+ return r;
+}
+
+/**
+ * Unlock an object which the activating client currently has locked.
+ *
+ * Input:
+ * @param cls_lock_unlock_op request input
+ *
+ * @return 0 on success, -EINVAL if it can't decode the cookie, -ENOENT
+ * if there is no such lock (either entity or cookie is wrong), or
+ * -errno on other (unexpected) error.
+ */
+static int unlock_op(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "unlock_op");
+ cls_lock_unlock_op op;
+ try {
+ auto iter = in->cbegin();
+ decode(op, iter);
+ } catch (const buffer::error& err) {
+ return -EINVAL;
+ }
+
+ entity_inst_t inst;
+ int r = cls_get_request_origin(hctx, &inst);
+ ceph_assert(r == 0);
+ return remove_lock(hctx, op.name, inst.name, op.cookie);
+}
+
+/**
+ * Break the lock on an object held by any client.
+ *
+ * Input:
+ * @param cls_lock_break_op request input
+ *
+ * @return 0 on success, -EINVAL if it can't decode the locker and
+ * cookie, -ENOENT if there is no such lock (either entity or cookie
+ * is wrong), or -errno on other (unexpected) error.
+ */
+static int break_lock(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "break_lock");
+ cls_lock_break_op op;
+ try {
+ auto iter = in->cbegin();
+ decode(op, iter);
+ } catch (const buffer::error& err) {
+ return -EINVAL;
+ }
+
+ return remove_lock(hctx, op.name, op.locker, op.cookie);
+}
+
+
+/**
+ * Retrieve lock info: lockers, tag, lock type
+ *
+ * Input:
+ * @param cls_lock_get_info_op request input
+ *
+ * Output:
+ * @param cls_lock_get_info_reply result
+ *
+ * @return 0 on success, -errno on failure.
+ */
+static int get_info(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "get_info");
+ cls_lock_get_info_op op;
+ try {
+ auto iter = in->cbegin();
+ decode(op, iter);
+ } catch (const buffer::error& err) {
+ return -EINVAL;
+ }
+
+ // get current lockers
+ lock_info_t linfo;
+ int r = read_lock(hctx, op.name, &linfo);
+ if (r < 0) {
+ CLS_ERR("Could not read lock info: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ struct cls_lock_get_info_reply ret;
+
+ map<locker_id_t, locker_info_t>::iterator iter;
+ for (iter = linfo.lockers.begin(); iter != linfo.lockers.end(); ++iter) {
+ ret.lockers[iter->first] = iter->second;
+ }
+ ret.lock_type = linfo.lock_type;
+ ret.tag = linfo.tag;
+
+ encode(ret, *out, cls_get_client_features(hctx));
+
+ return 0;
+}
+
+
+/**
+ * Retrieve a list of locks for this object
+ *
+ * Input:
+ * @param in is ignored.
+ *
+ * Output:
+ * @param out contains encoded cls_lock_list_locks_reply
+ *
+ * @return 0 on success, -errno on failure.
+ */
+static int list_locks(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "list_locks");
+
+ map<string, bufferlist> attrs;
+
+ int r = cls_cxx_getxattrs(hctx, &attrs);
+ if (r < 0)
+ return r;
+
+ cls_lock_list_locks_reply ret;
+
+ map<string, bufferlist>::iterator iter;
+ size_t pos = sizeof(LOCK_PREFIX) - 1;
+ for (iter = attrs.begin(); iter != attrs.end(); ++iter) {
+ const string& attr = iter->first;
+ if (attr.substr(0, pos).compare(LOCK_PREFIX) == 0) {
+ ret.locks.push_back(attr.substr(pos));
+ }
+ }
+
+ encode(ret, *out);
+
+ return 0;
+}
+
+/**
+ * Assert that the object is currently locked
+ *
+ * Input:
+ * @param cls_lock_assert_op request input
+ *
+ * Output:
+ * @param none
+ *
+ * @return 0 on success, -errno on failure.
+ */
+int assert_locked(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "assert_locked");
+
+ cls_lock_assert_op op;
+ try {
+ auto iter = in->cbegin();
+ decode(op, iter);
+ } catch (const buffer::error& err) {
+ return -EINVAL;
+ }
+
+ if (!cls_lock_is_valid(op.type)) {
+ return -EINVAL;
+ }
+
+ if (op.name.empty()) {
+ return -EINVAL;
+ }
+
+ // see if there's already a locker
+ lock_info_t linfo;
+ int r = read_lock(hctx, op.name, &linfo);
+ if (r < 0) {
+ CLS_ERR("Could not read lock info: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ if (linfo.lockers.empty()) {
+ CLS_LOG(20, "object not locked");
+ return -EBUSY;
+ }
+
+ if (linfo.lock_type != op.type) {
+ CLS_LOG(20, "lock type mismatch: current=%s, assert=%s",
+ cls_lock_type_str(linfo.lock_type), cls_lock_type_str(op.type));
+ return -EBUSY;
+ }
+
+ if (linfo.tag != op.tag) {
+ CLS_LOG(20, "lock tag mismatch: current=%s, assert=%s", linfo.tag.c_str(),
+ op.tag.c_str());
+ return -EBUSY;
+ }
+
+ entity_inst_t inst;
+ r = cls_get_request_origin(hctx, &inst);
+ ceph_assert(r == 0);
+
+ locker_id_t id;
+ id.cookie = op.cookie;
+ id.locker = inst.name;
+
+ map<locker_id_t, locker_info_t>::iterator iter = linfo.lockers.find(id);
+ if (iter == linfo.lockers.end()) {
+ CLS_LOG(20, "not locked by assert client");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * Update the cookie associated with an object lock
+ *
+ * Input:
+ * @param cls_lock_set_cookie_op request input
+ *
+ * Output:
+ * @param none
+ *
+ * @return 0 on success, -errno on failure.
+ */
+int set_cookie(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "set_cookie");
+
+ cls_lock_set_cookie_op op;
+ try {
+ auto iter = in->cbegin();
+ decode(op, iter);
+ } catch (const buffer::error& err) {
+ return -EINVAL;
+ }
+
+ if (!cls_lock_is_valid(op.type)) {
+ return -EINVAL;
+ }
+
+ if (op.name.empty()) {
+ return -EINVAL;
+ }
+
+ // see if there's already a locker
+ lock_info_t linfo;
+ int r = read_lock(hctx, op.name, &linfo);
+ if (r < 0) {
+ CLS_ERR("Could not read lock info: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ if (linfo.lockers.empty()) {
+ CLS_LOG(20, "object not locked");
+ return -EBUSY;
+ }
+
+ if (linfo.lock_type != op.type) {
+ CLS_LOG(20, "lock type mismatch: current=%s, assert=%s",
+ cls_lock_type_str(linfo.lock_type), cls_lock_type_str(op.type));
+ return -EBUSY;
+ }
+
+ if (linfo.tag != op.tag) {
+ CLS_LOG(20, "lock tag mismatch: current=%s, assert=%s", linfo.tag.c_str(),
+ op.tag.c_str());
+ return -EBUSY;
+ }
+
+ entity_inst_t inst;
+ r = cls_get_request_origin(hctx, &inst);
+ ceph_assert(r == 0);
+
+ locker_id_t id;
+ id.cookie = op.cookie;
+ id.locker = inst.name;
+
+ map<locker_id_t, locker_info_t>::iterator iter = linfo.lockers.find(id);
+ if (iter == linfo.lockers.end()) {
+ CLS_LOG(20, "not locked by client");
+ return -EBUSY;
+ }
+
+ id.cookie = op.new_cookie;
+ if (linfo.lockers.count(id) != 0) {
+ CLS_LOG(20, "lock cookie in-use");
+ return -EBUSY;
+ }
+
+ locker_info_t locker_info(iter->second);
+ linfo.lockers.erase(iter);
+
+ linfo.lockers[id] = locker_info;
+ r = write_lock(hctx, op.name, linfo);
+ if (r < 0) {
+ CLS_ERR("Could not update lock info: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ return 0;
+}
+
+CLS_INIT(lock)
+{
+ CLS_LOG(20, "Loaded lock class!");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_lock_op;
+ cls_method_handle_t h_unlock_op;
+ cls_method_handle_t h_break_lock;
+ cls_method_handle_t h_get_info;
+ cls_method_handle_t h_list_locks;
+ cls_method_handle_t h_assert_locked;
+ cls_method_handle_t h_set_cookie;
+
+ cls_register("lock", &h_class);
+ cls_register_cxx_method(h_class, "lock",
+ CLS_METHOD_RD | CLS_METHOD_WR | CLS_METHOD_PROMOTE,
+ lock_op, &h_lock_op);
+ cls_register_cxx_method(h_class, "unlock",
+ CLS_METHOD_RD | CLS_METHOD_WR | CLS_METHOD_PROMOTE,
+ unlock_op, &h_unlock_op);
+ cls_register_cxx_method(h_class, "break_lock",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ break_lock, &h_break_lock);
+ cls_register_cxx_method(h_class, "get_info",
+ CLS_METHOD_RD,
+ get_info, &h_get_info);
+ cls_register_cxx_method(h_class, "list_locks",
+ CLS_METHOD_RD,
+ list_locks, &h_list_locks);
+ cls_register_cxx_method(h_class, "assert_locked",
+ CLS_METHOD_RD | CLS_METHOD_PROMOTE,
+ assert_locked, &h_assert_locked);
+ cls_register_cxx_method(h_class, "set_cookie",
+ CLS_METHOD_RD | CLS_METHOD_WR | CLS_METHOD_PROMOTE,
+ set_cookie, &h_set_cookie);
+
+ return;
+}
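+
+/*
+ * Usage sketch (illustrative only; it assumes an open librados::IoCtx named
+ * ioctx and an object "foo"): the methods registered above are invoked from
+ * librados by encoding the matching op struct and calling exec() with the
+ * class name "lock" and the method name. Taking an exclusive lock by hand
+ * looks roughly like:
+ *
+ *   cls_lock_lock_op op;
+ *   op.name = "my-lock";
+ *   op.type = LOCK_EXCLUSIVE;
+ *   op.cookie = "my-cookie";
+ *   op.duration = utime_t(30, 0);        // auto-expire after 30 seconds
+ *   bufferlist in;
+ *   encode(op, in);
+ *   librados::ObjectWriteOperation wop;
+ *   wop.exec("lock", "lock", in);        // dispatches to lock_op() above
+ *   int r = ioctx.operate("foo", &wop);  // -EBUSY if already locked
+ *
+ * The client wrappers in cls_lock_client.cc below package exactly this.
+ */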
diff --git a/src/cls/lock/cls_lock_client.cc b/src/cls/lock/cls_lock_client.cc
new file mode 100644
index 00000000..498d573f
--- /dev/null
+++ b/src/cls/lock/cls_lock_client.cc
@@ -0,0 +1,284 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "include/types.h"
+#include "msg/msg_types.h"
+#include "include/rados/librados.hpp"
+#include "include/utime.h"
+
+using namespace librados;
+
+#include "cls/lock/cls_lock_ops.h"
+#include "cls/lock/cls_lock_client.h"
+
+namespace rados {
+ namespace cls {
+ namespace lock {
+
+ void lock(ObjectWriteOperation *rados_op,
+ const string& name, ClsLockType type,
+ const string& cookie, const string& tag,
+ const string& description,
+ const utime_t& duration, uint8_t flags)
+ {
+ cls_lock_lock_op op;
+ op.name = name;
+ op.type = type;
+ op.cookie = cookie;
+ op.tag = tag;
+ op.description = description;
+ op.duration = duration;
+ op.flags = flags;
+ bufferlist in;
+ encode(op, in);
+ rados_op->exec("lock", "lock", in);
+ }
+
+ int lock(IoCtx *ioctx,
+ const string& oid,
+ const string& name, ClsLockType type,
+ const string& cookie, const string& tag,
+ const string& description, const utime_t& duration,
+ uint8_t flags)
+ {
+ ObjectWriteOperation op;
+ lock(&op, name, type, cookie, tag, description, duration, flags);
+ return ioctx->operate(oid, &op);
+ }
+
+ void unlock(ObjectWriteOperation *rados_op,
+ const string& name, const string& cookie)
+ {
+ cls_lock_unlock_op op;
+ op.name = name;
+ op.cookie = cookie;
+ bufferlist in;
+ encode(op, in);
+
+ rados_op->exec("lock", "unlock", in);
+ }
+
+ int unlock(IoCtx *ioctx, const string& oid,
+ const string& name, const string& cookie)
+ {
+ ObjectWriteOperation op;
+ unlock(&op, name, cookie);
+ return ioctx->operate(oid, &op);
+ }
+
+ int aio_unlock(IoCtx *ioctx, const string& oid,
+ const string& name, const string& cookie,
+ librados::AioCompletion *completion)
+ {
+ ObjectWriteOperation op;
+ unlock(&op, name, cookie);
+ return ioctx->aio_operate(oid, completion, &op);
+ }
+
+ void break_lock(ObjectWriteOperation *rados_op,
+ const string& name, const string& cookie,
+ const entity_name_t& locker)
+ {
+ cls_lock_break_op op;
+ op.name = name;
+ op.cookie = cookie;
+ op.locker = locker;
+ bufferlist in;
+ encode(op, in);
+ rados_op->exec("lock", "break_lock", in);
+ }
+
+ int break_lock(IoCtx *ioctx, const string& oid,
+ const string& name, const string& cookie,
+ const entity_name_t& locker)
+ {
+ ObjectWriteOperation op;
+ break_lock(&op, name, cookie, locker);
+ return ioctx->operate(oid, &op);
+ }
+
+ int list_locks(IoCtx *ioctx, const string& oid, list<string> *locks)
+ {
+ bufferlist in, out;
+ int r = ioctx->exec(oid, "lock", "list_locks", in, out);
+ if (r < 0)
+ return r;
+
+ cls_lock_list_locks_reply ret;
+ auto iter = cbegin(out);
+ try {
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ return -EBADMSG;
+ }
+
+ *locks = ret.locks;
+
+ return 0;
+ }
+
+ void get_lock_info_start(ObjectReadOperation *rados_op,
+ const string& name)
+ {
+ bufferlist in;
+ cls_lock_get_info_op op;
+ op.name = name;
+ encode(op, in);
+ rados_op->exec("lock", "get_info", in);
+ }
+
+ int get_lock_info_finish(bufferlist::const_iterator *iter,
+ map<locker_id_t, locker_info_t> *lockers,
+ ClsLockType *type, string *tag)
+ {
+ cls_lock_get_info_reply ret;
+ try {
+ decode(ret, *iter);
+ } catch (buffer::error& err) {
+ return -EBADMSG;
+ }
+
+ if (lockers) {
+ *lockers = ret.lockers;
+ }
+
+ if (type) {
+ *type = ret.lock_type;
+ }
+
+ if (tag) {
+ *tag = ret.tag;
+ }
+
+ return 0;
+ }
+
+ int get_lock_info(IoCtx *ioctx, const string& oid, const string& name,
+ map<locker_id_t, locker_info_t> *lockers,
+ ClsLockType *type, string *tag)
+ {
+ ObjectReadOperation op;
+ get_lock_info_start(&op, name);
+ bufferlist out;
+ int r = ioctx->operate(oid, &op, &out);
+ if (r < 0)
+ return r;
+ auto it = std::cbegin(out);
+ return get_lock_info_finish(&it, lockers, type, tag);
+ }
+
+ void assert_locked(librados::ObjectOperation *rados_op,
+ const std::string& name, ClsLockType type,
+ const std::string& cookie, const std::string& tag)
+ {
+ cls_lock_assert_op op;
+ op.name = name;
+ op.type = type;
+ op.cookie = cookie;
+ op.tag = tag;
+ bufferlist in;
+ encode(op, in);
+ rados_op->exec("lock", "assert_locked", in);
+ }
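+
+ /*
+  * A common pattern (sketch, assuming an exclusive lock held with an empty
+  * tag, an open IoCtx named ioctx, and an arbitrary bufferlist data) is to
+  * prepend assert_locked to a compound write so the write only succeeds
+  * while the lock is still held:
+  *
+  *   librados::ObjectWriteOperation op;
+  *   rados::cls::lock::assert_locked(&op, "my-lock", LOCK_EXCLUSIVE,
+  *                                   "my-cookie", "");
+  *   op.write_full(data);               // rejected with -EBUSY if not held
+  *   int r = ioctx.operate(oid, &op);
+  */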
+
+ void set_cookie(librados::ObjectWriteOperation *rados_op,
+ const std::string& name, ClsLockType type,
+ const std::string& cookie, const std::string& tag,
+ const std::string& new_cookie)
+ {
+ cls_lock_set_cookie_op op;
+ op.name = name;
+ op.type = type;
+ op.cookie = cookie;
+ op.tag = tag;
+ op.new_cookie = new_cookie;
+ bufferlist in;
+ encode(op, in);
+ rados_op->exec("lock", "set_cookie", in);
+ }
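+
+ /*
+  * Cookie rotation sketch: a client that wants to hand a held lock over to
+  * a new cookie (e.g. after reconnecting) can do so without dropping the
+  * lock, assuming it still knows the old cookie and tag:
+  *
+  *   librados::ObjectWriteOperation op;
+  *   rados::cls::lock::set_cookie(&op, "my-lock", LOCK_EXCLUSIVE,
+  *                                "old-cookie", "", "new-cookie");
+  *   int r = ioctx.operate(oid, &op);   // -EBUSY if not held by this client
+  */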
+
+ void Lock::assert_locked_shared(ObjectOperation *op)
+ {
+ assert_locked(op, name, LOCK_SHARED, cookie, tag);
+ }
+
+ void Lock::assert_locked_exclusive(ObjectOperation *op)
+ {
+ assert_locked(op, name, LOCK_EXCLUSIVE, cookie, tag);
+ }
+
+ void Lock::assert_locked_exclusive_ephemeral(ObjectOperation *op)
+ {
+ assert_locked(op, name, LOCK_EXCLUSIVE_EPHEMERAL, cookie, tag);
+ }
+
+ void Lock::lock_shared(ObjectWriteOperation *op)
+ {
+ lock(op, name, LOCK_SHARED,
+ cookie, tag, description, duration, flags);
+ }
+
+ int Lock::lock_shared(IoCtx *ioctx, const string& oid)
+ {
+ return lock(ioctx, oid, name, LOCK_SHARED,
+ cookie, tag, description, duration, flags);
+ }
+
+ void Lock::lock_exclusive(ObjectWriteOperation *op)
+ {
+ lock(op, name, LOCK_EXCLUSIVE,
+ cookie, tag, description, duration, flags);
+ }
+
+ int Lock::lock_exclusive(IoCtx *ioctx, const string& oid)
+ {
+ return lock(ioctx, oid, name, LOCK_EXCLUSIVE,
+ cookie, tag, description, duration, flags);
+ }
+
+ void Lock::lock_exclusive_ephemeral(ObjectWriteOperation *op)
+ {
+ lock(op, name, LOCK_EXCLUSIVE_EPHEMERAL,
+ cookie, tag, description, duration, flags);
+ }
+
+ int Lock::lock_exclusive_ephemeral(IoCtx *ioctx, const string& oid)
+ {
+ return lock(ioctx, oid, name, LOCK_EXCLUSIVE_EPHEMERAL,
+ cookie, tag, description, duration, flags);
+ }
+
+ void Lock::unlock(ObjectWriteOperation *op)
+ {
+ rados::cls::lock::unlock(op, name, cookie);
+ }
+
+ int Lock::unlock(IoCtx *ioctx, const string& oid)
+ {
+ return rados::cls::lock::unlock(ioctx, oid, name, cookie);
+ }
+
+ void Lock::break_lock(ObjectWriteOperation *op, const entity_name_t& locker)
+ {
+ rados::cls::lock::break_lock(op, name, cookie, locker);
+ }
+
+ int Lock::break_lock(IoCtx *ioctx, const string& oid, const entity_name_t& locker)
+ {
+ return rados::cls::lock::break_lock(ioctx, oid, name, cookie, locker);
+ }
+ } // namespace lock
+ } // namespace cls
+} // namespace rados
+
diff --git a/src/cls/lock/cls_lock_client.h b/src/cls/lock/cls_lock_client.h
new file mode 100644
index 00000000..11c0e658
--- /dev/null
+++ b/src/cls/lock/cls_lock_client.h
@@ -0,0 +1,141 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_LOCK_CLIENT_H
+#define CEPH_CLS_LOCK_CLIENT_H
+
+#include <chrono>
+
+#include "include/rados/librados_fwd.hpp"
+#include "cls/lock/cls_lock_types.h"
+
+namespace rados {
+ namespace cls {
+ namespace lock {
+ extern void lock(librados::ObjectWriteOperation *rados_op,
+ const std::string& name, ClsLockType type,
+ const std::string& cookie, const std::string& tag,
+ const std::string& description, const utime_t& duration,
+ uint8_t flags);
+
+ extern int lock(librados::IoCtx *ioctx,
+ const std::string& oid,
+ const std::string& name, ClsLockType type,
+ const std::string& cookie, const std::string& tag,
+ const std::string& description, const utime_t& duration,
+ uint8_t flags);
+
+ extern void unlock(librados::ObjectWriteOperation *rados_op,
+ const std::string& name, const std::string& cookie);
+
+ extern int unlock(librados::IoCtx *ioctx, const std::string& oid,
+ const std::string& name, const std::string& cookie);
+
+ extern int aio_unlock(librados::IoCtx *ioctx, const std::string& oid,
+ const std::string& name, const std::string& cookie,
+ librados::AioCompletion *completion);
+
+ extern void break_lock(librados::ObjectWriteOperation *op,
+ const std::string& name, const std::string& cookie,
+ const entity_name_t& locker);
+
+ extern int break_lock(librados::IoCtx *ioctx, const std::string& oid,
+ const std::string& name, const std::string& cookie,
+ const entity_name_t& locker);
+
+ extern int list_locks(librados::IoCtx *ioctx, const std::string& oid,
+ list<std::string> *locks);
+ extern void get_lock_info_start(librados::ObjectReadOperation *rados_op,
+ const std::string& name);
+ extern int get_lock_info_finish(ceph::bufferlist::const_iterator *out,
+ map<locker_id_t, locker_info_t> *lockers,
+ ClsLockType *type, std::string *tag);
+
+ extern int get_lock_info(librados::IoCtx *ioctx, const std::string& oid,
+ const std::string& name,
+ map<locker_id_t, locker_info_t> *lockers,
+ ClsLockType *type, std::string *tag);
+
+ extern void assert_locked(librados::ObjectOperation *rados_op,
+ const std::string& name, ClsLockType type,
+ const std::string& cookie,
+ const std::string& tag);
+
+ extern void set_cookie(librados::ObjectWriteOperation *rados_op,
+ const std::string& name, ClsLockType type,
+ const std::string& cookie, const std::string& tag,
+ const std::string& new_cookie);
+
+ class Lock {
+ std::string name;
+ std::string cookie;
+ std::string tag;
+ std::string description;
+ utime_t duration;
+ uint8_t flags;
+
+ public:
+
+ Lock(const std::string& _n) : name(_n), flags(0) {}
+
+ void set_cookie(const std::string& c) { cookie = c; }
+ void set_tag(const std::string& t) { tag = t; }
+ void set_description(const std::string& desc) { description = desc; }
+ void set_duration(const utime_t& e) { duration = e; }
+ void set_duration(const ceph::timespan& d) {
+ duration = utime_t(ceph::real_clock::zero() + d);
+ }
+
+ void set_may_renew(bool renew) {
+ if (renew) {
+ flags |= LOCK_FLAG_MAY_RENEW;
+ flags &= ~LOCK_FLAG_MUST_RENEW; // if may then not must
+ } else {
+ flags &= ~LOCK_FLAG_MAY_RENEW;
+ }
+ }
+
+ void set_must_renew(bool renew) {
+ if (renew) {
+ flags |= LOCK_FLAG_MUST_RENEW;
+ flags &= ~LOCK_FLAG_MAY_RENEW; // if must then not may
+ } else {
+ flags &= ~LOCK_FLAG_MUST_RENEW;
+ }
+ }
+
+ void assert_locked_shared(librados::ObjectOperation *rados_op);
+ void assert_locked_exclusive(librados::ObjectOperation *rados_op);
+ void assert_locked_exclusive_ephemeral(librados::ObjectOperation *rados_op);
+
+ /* ObjectWriteOperation */
+ void lock_shared(librados::ObjectWriteOperation *ioctx);
+ void lock_exclusive(librados::ObjectWriteOperation *ioctx);
+
+ // Be careful when using an exclusive ephemeral lock; it is
+ // intended strictly for cases when a lock object exists
+ // solely for a lock in a given process and the object is no
+ // longer needed when the lock is unlocked or expired, as the
+ // cls back-end will make an effort to delete it.
+ void lock_exclusive_ephemeral(librados::ObjectWriteOperation *ioctx);
+ void unlock(librados::ObjectWriteOperation *ioctx);
+ void break_lock(librados::ObjectWriteOperation *ioctx,
+ const entity_name_t& locker);
+
+ /* IoCtx */
+ int lock_shared(librados::IoCtx *ioctx, const std::string& oid);
+ int lock_exclusive(librados::IoCtx *ioctx, const std::string& oid);
+
+ // NB: see above comment on exclusive ephemeral locks
+ int lock_exclusive_ephemeral(librados::IoCtx *ioctx,
+ const std::string& oid);
+ int unlock(librados::IoCtx *ioctx, const std::string& oid);
+ int break_lock(librados::IoCtx *ioctx, const std::string& oid,
+ const entity_name_t& locker);
+ };
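+
+ /*
+  * Typical use of the Lock helper (sketch, assuming an open IoCtx named
+  * ioctx and an object name oid):
+  *
+  *   rados::cls::lock::Lock l("my-lock");
+  *   l.set_cookie("my-cookie");
+  *   l.set_duration(utime_t(30, 0));
+  *   l.set_may_renew(true);                // re-acquiring refreshes expiration
+  *   int r = l.lock_exclusive(&ioctx, oid);
+  *   // ... do work while holding the lock ...
+  *   r = l.unlock(&ioctx, oid);
+  */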
+
+ } // namespace lock
+ } // namespace cls
+} // namespace rados
+
+#endif
diff --git a/src/cls/lock/cls_lock_ops.cc b/src/cls/lock/cls_lock_ops.cc
new file mode 100644
index 00000000..aa5f8245
--- /dev/null
+++ b/src/cls/lock/cls_lock_ops.cc
@@ -0,0 +1,210 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "msg/msg_types.h"
+#include "common/Formatter.h"
+
+#include "cls/lock/cls_lock_ops.h"
+
+using namespace rados::cls::lock;
+
+static void generate_lock_id(locker_id_t& i, int n, const string& cookie)
+{
+ i.locker = entity_name_t::CLIENT(n);
+ i.cookie = cookie;
+}
+
+void cls_lock_lock_op::dump(Formatter *f) const
+{
+ f->dump_string("name", name);
+ f->dump_string("type", cls_lock_type_str(type));
+ f->dump_string("cookie", cookie);
+ f->dump_string("tag", tag);
+ f->dump_string("description", description);
+ f->dump_stream("duration") << duration;
+ f->dump_int("flags", (int)flags);
+}
+
+void cls_lock_lock_op::generate_test_instances(list<cls_lock_lock_op*>& o)
+{
+ cls_lock_lock_op *i = new cls_lock_lock_op;
+ i->name = "name";
+ i->type = LOCK_SHARED;
+ i->cookie = "cookie";
+ i->tag = "tag";
+ i->description = "description";
+ i->duration = utime_t(5, 0);
+ i->flags = LOCK_FLAG_MAY_RENEW;
+ o.push_back(i);
+ o.push_back(new cls_lock_lock_op);
+}
+
+void cls_lock_unlock_op::dump(Formatter *f) const
+{
+ f->dump_string("name", name);
+ f->dump_string("cookie", cookie);
+}
+
+void cls_lock_unlock_op::generate_test_instances(list<cls_lock_unlock_op*>& o)
+{
+ cls_lock_unlock_op *i = new cls_lock_unlock_op;
+ i->name = "name";
+ i->cookie = "cookie";
+ o.push_back(i);
+ o.push_back(new cls_lock_unlock_op);
+}
+
+void cls_lock_break_op::dump(Formatter *f) const
+{
+ f->dump_string("name", name);
+ f->dump_string("cookie", cookie);
+ f->dump_stream("locker") << locker;
+}
+
+void cls_lock_break_op::generate_test_instances(list<cls_lock_break_op*>& o)
+{
+ cls_lock_break_op *i = new cls_lock_break_op;
+ i->name = "name";
+ i->cookie = "cookie";
+ i->locker = entity_name_t::CLIENT(1);
+ o.push_back(i);
+ o.push_back(new cls_lock_break_op);
+}
+
+void cls_lock_get_info_op::dump(Formatter *f) const
+{
+ f->dump_string("name", name);
+}
+
+void cls_lock_get_info_op::generate_test_instances(list<cls_lock_get_info_op*>& o)
+{
+ cls_lock_get_info_op *i = new cls_lock_get_info_op;
+ i->name = "name";
+ o.push_back(i);
+ o.push_back(new cls_lock_get_info_op);
+}
+
+static void generate_test_addr(entity_addr_t& a, int nonce, int port)
+{
+ a.set_type(entity_addr_t::TYPE_LEGACY);
+ a.set_nonce(nonce);
+ a.set_family(AF_INET);
+ a.set_in4_quad(0, 127);
+ a.set_in4_quad(1, 0);
+ a.set_in4_quad(2, 1);
+ a.set_in4_quad(3, 2);
+ a.set_port(port);
+}
+
+void cls_lock_get_info_reply::dump(Formatter *f) const
+{
+ f->dump_string("lock_type", cls_lock_type_str(lock_type));
+ f->dump_string("tag", tag);
+ f->open_array_section("lockers");
+ map<locker_id_t, locker_info_t>::const_iterator iter;
+ for (iter = lockers.begin(); iter != lockers.end(); ++iter) {
+ const locker_id_t& id = iter->first;
+ const locker_info_t& info = iter->second;
+ f->open_object_section("object");
+ f->dump_stream("locker") << id.locker;
+ f->dump_string("description", info.description);
+ f->dump_string("cookie", id.cookie);
+ f->dump_stream("expiration") << info.expiration;
+ f->dump_string("addr", info.addr.get_legacy_str());
+ f->close_section();
+ }
+ f->close_section();
+}
+
+void cls_lock_get_info_reply::generate_test_instances(list<cls_lock_get_info_reply*>& o)
+{
+ cls_lock_get_info_reply *i = new cls_lock_get_info_reply;
+ i->lock_type = LOCK_SHARED;
+ i->tag = "tag";
+ locker_id_t id1, id2;
+ entity_addr_t addr1, addr2;
+ generate_lock_id(id1, 1, "cookie1");
+ generate_test_addr(addr1, 10, 20);
+ i->lockers[id1] = locker_info_t(utime_t(10, 0), addr1, "description1");
+ generate_lock_id(id2, 2, "cookie2");
+ generate_test_addr(addr2, 30, 40);
+ i->lockers[id2] = locker_info_t(utime_t(20, 0), addr2, "description2");
+
+ o.push_back(i);
+ o.push_back(new cls_lock_get_info_reply);
+}
+
+void cls_lock_list_locks_reply::dump(Formatter *f) const
+{
+ list<string>::const_iterator iter;
+ f->open_array_section("locks");
+ for (iter = locks.begin(); iter != locks.end(); ++iter) {
+ f->open_array_section("object");
+ f->dump_string("lock", *iter);
+ f->close_section();
+ }
+ f->close_section();
+}
+
+void cls_lock_list_locks_reply::generate_test_instances(list<cls_lock_list_locks_reply*>& o)
+{
+ cls_lock_list_locks_reply *i = new cls_lock_list_locks_reply;
+ i->locks.push_back("lock1");
+ i->locks.push_back("lock2");
+ i->locks.push_back("lock3");
+
+ o.push_back(i);
+ o.push_back(new cls_lock_list_locks_reply);
+}
+
+void cls_lock_assert_op::dump(Formatter *f) const
+{
+ f->dump_string("name", name);
+ f->dump_string("type", cls_lock_type_str(type));
+ f->dump_string("cookie", cookie);
+ f->dump_string("tag", tag);
+}
+
+void cls_lock_assert_op::generate_test_instances(list<cls_lock_assert_op*>& o)
+{
+ cls_lock_assert_op *i = new cls_lock_assert_op;
+ i->name = "name";
+ i->type = LOCK_SHARED;
+ i->cookie = "cookie";
+ i->tag = "tag";
+ o.push_back(i);
+ o.push_back(new cls_lock_assert_op);
+}
+
+void cls_lock_set_cookie_op::dump(Formatter *f) const
+{
+ f->dump_string("name", name);
+ f->dump_string("type", cls_lock_type_str(type));
+ f->dump_string("cookie", cookie);
+ f->dump_string("tag", tag);
+ f->dump_string("new_cookie", new_cookie);
+}
+
+void cls_lock_set_cookie_op::generate_test_instances(list<cls_lock_set_cookie_op*>& o)
+{
+ cls_lock_set_cookie_op *i = new cls_lock_set_cookie_op;
+ i->name = "name";
+ i->type = LOCK_SHARED;
+ i->cookie = "cookie";
+ i->tag = "tag";
+ i->new_cookie = "new cookie";
+ o.push_back(i);
+ o.push_back(new cls_lock_set_cookie_op);
+}
+
diff --git a/src/cls/lock/cls_lock_ops.h b/src/cls/lock/cls_lock_ops.h
new file mode 100644
index 00000000..5d22452b
--- /dev/null
+++ b/src/cls/lock/cls_lock_ops.h
@@ -0,0 +1,245 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_LOCK_OPS_H
+#define CEPH_CLS_LOCK_OPS_H
+
+#include "include/types.h"
+#include "include/utime.h"
+#include "cls/lock/cls_lock_types.h"
+
+struct cls_lock_lock_op
+{
+ string name;
+ ClsLockType type;
+ string cookie;
+ string tag;
+ string description;
+ utime_t duration;
+ uint8_t flags;
+
+ cls_lock_lock_op() : type(LOCK_NONE), flags(0) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(name, bl);
+ uint8_t t = (uint8_t)type;
+ encode(t, bl);
+ encode(cookie, bl);
+ encode(tag, bl);
+ encode(description, bl);
+ encode(duration, bl);
+ encode(flags, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+ decode(name, bl);
+ uint8_t t;
+ decode(t, bl);
+ type = (ClsLockType)t;
+ decode(cookie, bl);
+ decode(tag, bl);
+ decode(description, bl);
+ decode(duration, bl);
+ decode(flags, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_lock_lock_op*>& o);
+};
+WRITE_CLASS_ENCODER(cls_lock_lock_op)
+
+struct cls_lock_unlock_op
+{
+ string name;
+ string cookie;
+
+ cls_lock_unlock_op() {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(name, bl);
+ encode(cookie, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+ decode(name, bl);
+ decode(cookie, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_lock_unlock_op*>& o);
+};
+WRITE_CLASS_ENCODER(cls_lock_unlock_op)
+
+struct cls_lock_break_op
+{
+ string name;
+ entity_name_t locker;
+ string cookie;
+
+ cls_lock_break_op() {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(name, bl);
+ encode(locker, bl);
+ encode(cookie, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+ decode(name, bl);
+ decode(locker, bl);
+ decode(cookie, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_lock_break_op*>& o);
+};
+WRITE_CLASS_ENCODER(cls_lock_break_op)
+
+struct cls_lock_get_info_op
+{
+ string name;
+
+ cls_lock_get_info_op() {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(name, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+ decode(name, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_lock_get_info_op*>& o);
+};
+WRITE_CLASS_ENCODER(cls_lock_get_info_op)
+
+struct cls_lock_get_info_reply
+{
+ map<rados::cls::lock::locker_id_t, rados::cls::lock::locker_info_t> lockers;
+ ClsLockType lock_type;
+ string tag;
+
+ cls_lock_get_info_reply() : lock_type(LOCK_NONE) {}
+
+ void encode(bufferlist &bl, uint64_t features) const {
+ ENCODE_START(1, 1, bl);
+ encode(lockers, bl, features);
+ uint8_t t = (uint8_t)lock_type;
+ encode(t, bl);
+ encode(tag, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+ decode(lockers, bl);
+ uint8_t t;
+ decode(t, bl);
+ lock_type = (ClsLockType)t;
+ decode(tag, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_lock_get_info_reply*>& o);
+};
+WRITE_CLASS_ENCODER_FEATURES(cls_lock_get_info_reply)
+
+struct cls_lock_list_locks_reply
+{
+ list<string> locks;
+
+ cls_lock_list_locks_reply() {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(locks, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+ decode(locks, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_lock_list_locks_reply*>& o);
+};
+WRITE_CLASS_ENCODER(cls_lock_list_locks_reply)
+
+struct cls_lock_assert_op
+{
+ string name;
+ ClsLockType type;
+ string cookie;
+ string tag;
+
+ cls_lock_assert_op() : type(LOCK_NONE) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(name, bl);
+ uint8_t t = (uint8_t)type;
+ encode(t, bl);
+ encode(cookie, bl);
+ encode(tag, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+ decode(name, bl);
+ uint8_t t;
+ decode(t, bl);
+ type = (ClsLockType)t;
+ decode(cookie, bl);
+ decode(tag, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_lock_assert_op*>& o);
+};
+WRITE_CLASS_ENCODER(cls_lock_assert_op)
+
+struct cls_lock_set_cookie_op
+{
+ string name;
+ ClsLockType type;
+ string cookie;
+ string tag;
+ string new_cookie;
+
+ cls_lock_set_cookie_op() : type(LOCK_NONE) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(name, bl);
+ uint8_t t = (uint8_t)type;
+ encode(t, bl);
+ encode(cookie, bl);
+ encode(tag, bl);
+ encode(new_cookie, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+ decode(name, bl);
+ uint8_t t;
+ decode(t, bl);
+ type = (ClsLockType)t;
+ decode(cookie, bl);
+ decode(tag, bl);
+ decode(new_cookie, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_lock_set_cookie_op*>& o);
+};
+WRITE_CLASS_ENCODER(cls_lock_set_cookie_op)
+
+#endif
diff --git a/src/cls/lock/cls_lock_types.cc b/src/cls/lock/cls_lock_types.cc
new file mode 100644
index 00000000..d1aa6334
--- /dev/null
+++ b/src/cls/lock/cls_lock_types.cc
@@ -0,0 +1,98 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "common/Formatter.h"
+
+#include "cls/lock/cls_lock_types.h"
+
+using namespace rados::cls::lock;
+
+static void generate_lock_id(locker_id_t& i, int n, const string& cookie)
+{
+ i.locker = entity_name_t::CLIENT(n);
+ i.cookie = cookie;
+}
+
+void locker_id_t::dump(Formatter *f) const
+{
+ f->dump_stream("locker") << locker;
+ f->dump_string("cookie", cookie);
+}
+
+void locker_id_t::generate_test_instances(list<locker_id_t*>& o)
+{
+ locker_id_t *i = new locker_id_t;
+ generate_lock_id(*i, 1, "cookie");
+ o.push_back(i);
+ o.push_back(new locker_id_t);
+}
+
+void locker_info_t::dump(Formatter *f) const
+{
+ f->dump_stream("expiration") << expiration;
+ f->dump_string("addr", addr.get_legacy_str());
+ f->dump_string("description", description);
+}
+
+static void generate_test_addr(entity_addr_t& a, int nonce, int port)
+{
+ a.set_type(entity_addr_t::TYPE_LEGACY);
+ a.set_nonce(nonce);
+ a.set_family(AF_INET);
+ a.set_in4_quad(0, 127);
+ a.set_in4_quad(1, 0);
+ a.set_in4_quad(2, 1);
+ a.set_in4_quad(3, 2);
+ a.set_port(port);
+}
+
+void locker_info_t::generate_test_instances(list<locker_info_t*>& o)
+{
+ locker_info_t *i = new locker_info_t;
+ i->expiration = utime_t(5, 0);
+ generate_test_addr(i->addr, 1, 2);
+ i->description = "description";
+ o.push_back(i);
+ o.push_back(new locker_info_t);
+}
+
+void lock_info_t::dump(Formatter *f) const
+{
+ f->dump_int("lock_type", lock_type);
+ f->dump_string("tag", tag);
+ f->open_array_section("lockers");
+ for (auto &i : lockers) {
+ f->open_object_section("locker");
+ f->dump_object("id", i.first);
+ f->dump_object("info", i.second);
+ f->close_section();
+ }
+ f->close_section();
+}
+
+void lock_info_t::generate_test_instances(list<lock_info_t *>& o)
+{
+ lock_info_t *i = new lock_info_t;
+ locker_id_t id;
+ locker_info_t info;
+ generate_lock_id(id, 1, "cookie");
+ info.expiration = utime_t(5, 0);
+ generate_test_addr(info.addr, 1, 2);
+ info.description = "description";
+ i->lockers[id] = info;
+ i->lock_type = LOCK_EXCLUSIVE;
+ i->tag = "tag";
+ o.push_back(i);
+ o.push_back(new lock_info_t);
+}
diff --git a/src/cls/lock/cls_lock_types.h b/src/cls/lock/cls_lock_types.h
new file mode 100644
index 00000000..fbd5b571
--- /dev/null
+++ b/src/cls/lock/cls_lock_types.h
@@ -0,0 +1,172 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_LOCK_TYPES_H
+#define CEPH_CLS_LOCK_TYPES_H
+
+#include "include/encoding.h"
+#include "include/types.h"
+#include "include/utime.h"
+#include "msg/msg_types.h"
+
+/* lock flags */
+#define LOCK_FLAG_MAY_RENEW 0x1 /* idempotent lock acquire */
+#define LOCK_FLAG_MUST_RENEW 0x2 /* lock must already be acquired */
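+
+/*
+ * In practice: a lock request with LOCK_FLAG_MAY_RENEW succeeds even if this
+ * locker (same entity and cookie) already holds the lock, refreshing its
+ * expiration; with LOCK_FLAG_MUST_RENEW the request only succeeds if the lock
+ * is already held by that locker, so it can extend a lock it still owns but
+ * never take a fresh one.
+ */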
+
+enum ClsLockType {
+ LOCK_NONE = 0,
+ LOCK_EXCLUSIVE = 1,
+ LOCK_SHARED = 2,
+ LOCK_EXCLUSIVE_EPHEMERAL = 3, /* lock object is removed @ unlock */
+};
+
+inline const char *cls_lock_type_str(ClsLockType type)
+{
+ switch (type) {
+ case LOCK_NONE:
+ return "none";
+ case LOCK_EXCLUSIVE:
+ return "exclusive";
+ case LOCK_SHARED:
+ return "shared";
+ case LOCK_EXCLUSIVE_EPHEMERAL:
+ return "exclusive-ephemeral";
+ default:
+ return "<unknown>";
+ }
+}
+
+inline bool cls_lock_is_exclusive(ClsLockType type) {
+ return LOCK_EXCLUSIVE == type || LOCK_EXCLUSIVE_EPHEMERAL == type;
+}
+
+inline bool cls_lock_is_ephemeral(ClsLockType type) {
+ return LOCK_EXCLUSIVE_EPHEMERAL == type;
+}
+
+inline bool cls_lock_is_valid(ClsLockType type) {
+ return LOCK_SHARED == type ||
+ LOCK_EXCLUSIVE == type ||
+ LOCK_EXCLUSIVE_EPHEMERAL == type;
+}
+
+namespace rados {
+ namespace cls {
+ namespace lock {
+
+ /*
+ * locker_id_t: the locker id; must be unique within a single lock
+ */
+ struct locker_id_t {
+ entity_name_t locker; // locker's client name
+ string cookie; // locker's cookie.
+
+ locker_id_t() {}
+ locker_id_t(entity_name_t& _n, const string& _c) : locker(_n), cookie(_c) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(locker, bl);
+ encode(cookie, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+ decode(locker, bl);
+ decode(cookie, bl);
+ DECODE_FINISH(bl);
+ }
+
+ bool operator<(const locker_id_t& rhs) const {
+ if (locker == rhs.locker)
+ return cookie.compare(rhs.cookie) < 0;
+ if (locker < rhs.locker)
+ return true;
+ return false;
+ }
+ void dump(Formatter *f) const;
+ friend std::ostream& operator<<(std::ostream& out,
+ const locker_id_t& data) {
+ out << data.locker;
+ return out;
+ }
+ static void generate_test_instances(list<locker_id_t*>& o);
+ };
+ WRITE_CLASS_ENCODER(locker_id_t)
+
+ struct locker_info_t
+ {
+ utime_t expiration; // expiration: non-zero means epoch of locker expiration
+ entity_addr_t addr; // addr: locker address
+ string description; // description: locker description, may be empty
+
+ locker_info_t() {}
+ locker_info_t(const utime_t& _e, const entity_addr_t& _a,
+ const string& _d) : expiration(_e), addr(_a), description(_d) {}
+
+ void encode(bufferlist &bl, uint64_t features) const {
+ ENCODE_START(1, 1, bl);
+ encode(expiration, bl);
+ encode(addr, bl, features);
+ encode(description, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+ decode(expiration, bl);
+ decode(addr, bl);
+ decode(description, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ friend std::ostream& operator<<(std::ostream& out,
+ const locker_info_t& data) {
+ out << "{addr:" << data.addr << ", exp:";
+
+ const auto& exp = data.expiration;
+ if (exp.is_zero()) {
+ out << "never}";
+ } else {
+ out << exp.to_real_time() << "}";
+ }
+
+ return out;
+ }
+ static void generate_test_instances(list<locker_info_t *>& o);
+ };
+ WRITE_CLASS_ENCODER_FEATURES(locker_info_t)
+
+ struct lock_info_t {
+ map<locker_id_t, locker_info_t> lockers; // map of lockers
+ ClsLockType lock_type; // lock type (exclusive / shared)
+ string tag; // tag: operations on the lock can only succeed with this tag
+ // as long as the set of non-expired lockers
+ // is non-empty.
+
+ void encode(bufferlist &bl, uint64_t features) const {
+ ENCODE_START(1, 1, bl);
+ encode(lockers, bl, features);
+ uint8_t t = (uint8_t)lock_type;
+ encode(t, bl);
+ encode(tag, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+ decode(lockers, bl);
+ uint8_t t;
+ decode(t, bl);
+ lock_type = (ClsLockType)t;
+ decode(tag, bl);
+ DECODE_FINISH(bl);
+ }
+ lock_info_t() : lock_type(LOCK_NONE) {}
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<lock_info_t *>& o);
+ };
+ WRITE_CLASS_ENCODER_FEATURES(lock_info_t);
+ }
+ }
+}
+
+#endif
diff --git a/src/cls/log/cls_log.cc b/src/cls/log/cls_log.cc
new file mode 100644
index 00000000..3de35484
--- /dev/null
+++ b/src/cls/log/cls_log.cc
@@ -0,0 +1,317 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "include/types.h"
+#include "include/utime.h"
+#include "objclass/objclass.h"
+
+#include "cls_log_types.h"
+#include "cls_log_ops.h"
+
+#include "global/global_context.h"
+#include "include/compat.h"
+
+CLS_VER(1,0)
+CLS_NAME(log)
+
+static string log_index_prefix = "1_";
+
+
+static int write_log_entry(cls_method_context_t hctx, string& index, cls_log_entry& entry)
+{
+ bufferlist bl;
+ encode(entry, bl);
+
+ int ret = cls_cxx_map_set_val(hctx, index, &bl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void get_index_time_prefix(utime_t& ts, string& index)
+{
+ char buf[32];
+ snprintf(buf, sizeof(buf), "%010ld.%06ld_", (long)ts.sec(), (long)ts.usec());
+
+ index = log_index_prefix + buf;
+}
+
+static int read_header(cls_method_context_t hctx, cls_log_header& header)
+{
+ bufferlist header_bl;
+
+ int ret = cls_cxx_map_read_header(hctx, &header_bl);
+ if (ret < 0)
+ return ret;
+
+ if (header_bl.length() == 0) {
+ header = cls_log_header();
+ return 0;
+ }
+
+ auto iter = header_bl.cbegin();
+ try {
+ decode(header, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: read_header(): failed to decode header");
+ }
+
+ return 0;
+}
+
+static int write_header(cls_method_context_t hctx, cls_log_header& header)
+{
+ bufferlist header_bl;
+ encode(header, header_bl);
+
+ int ret = cls_cxx_map_write_header(hctx, &header_bl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void get_index(cls_method_context_t hctx, utime_t& ts, string& index)
+{
+ get_index_time_prefix(ts, index);
+
+ string unique_id;
+
+ cls_cxx_subop_version(hctx, &unique_id);
+
+ index.append(unique_id);
+}
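+
+/*
+ * The resulting omap key sorts by time. For example (hypothetical values),
+ * an entry logged at utime_t(1581234567, 42) gets a key that looks roughly
+ * like
+ *   "1_1581234567.000042_" + <unique suffix>
+ * where the unique suffix comes from cls_cxx_subop_version().
+ */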
+
+static int cls_log_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_log_add_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_log_add_op(): failed to decode op");
+ return -EINVAL;
+ }
+
+ cls_log_header header;
+
+ int ret = read_header(hctx, header);
+ if (ret < 0)
+ return ret;
+
+ for (list<cls_log_entry>::iterator iter = op.entries.begin();
+ iter != op.entries.end(); ++iter) {
+ cls_log_entry& entry = *iter;
+
+ string index;
+
+ utime_t timestamp = entry.timestamp;
+ if (op.monotonic_inc && timestamp < header.max_time)
+ timestamp = header.max_time;
+ else if (timestamp > header.max_time)
+ header.max_time = timestamp;
+
+ if (entry.id.empty()) {
+ get_index(hctx, timestamp, index);
+ entry.id = index;
+ } else {
+ index = entry.id;
+ }
+
+ CLS_LOG(20, "storing entry at %s", index.c_str());
+
+
+ if (index > header.max_marker)
+ header.max_marker = index;
+
+ ret = write_log_entry(hctx, index, entry);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = write_header(hctx, header);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cls_log_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_log_list_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_log_list_op(): failed to decode op");
+ return -EINVAL;
+ }
+
+ map<string, bufferlist> keys;
+
+ string from_index;
+ string to_index;
+
+ if (op.marker.empty()) {
+ get_index_time_prefix(op.from_time, from_index);
+ } else {
+ from_index = op.marker;
+ }
+ bool use_time_boundary = (!op.from_time.is_zero() && (op.to_time >= op.from_time));
+
+ if (use_time_boundary)
+ get_index_time_prefix(op.to_time, to_index);
+
+#define MAX_ENTRIES 1000
+ size_t max_entries = op.max_entries;
+ if (!max_entries || max_entries > MAX_ENTRIES)
+ max_entries = MAX_ENTRIES;
+
+ cls_log_list_ret ret;
+
+ int rc = cls_cxx_map_get_vals(hctx, from_index, log_index_prefix, max_entries, &keys, &ret.truncated);
+ if (rc < 0)
+ return rc;
+
+ list<cls_log_entry>& entries = ret.entries;
+ map<string, bufferlist>::iterator iter = keys.begin();
+
+ string marker;
+
+ for (; iter != keys.end(); ++iter) {
+ const string& index = iter->first;
+ marker = index;
+ if (use_time_boundary && index.compare(0, to_index.size(), to_index) >= 0) {
+ ret.truncated = false;
+ break;
+ }
+
+ bufferlist& bl = iter->second;
+ auto biter = bl.cbegin();
+ try {
+ cls_log_entry e;
+ decode(e, biter);
+ entries.push_back(e);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: cls_log_list: could not decode entry, index=%s", index.c_str());
+ }
+ }
+
+ ret.marker = marker;
+
+ encode(ret, *out);
+
+ return 0;
+}
+
+
+static int cls_log_trim(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_log_trim_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: cls_log_list_op(): failed to decode entry");
+ return -EINVAL;
+ }
+
+ map<string, bufferlist> keys;
+
+ string from_index;
+ string to_index;
+
+ if (op.from_marker.empty()) {
+ get_index_time_prefix(op.from_time, from_index);
+ } else {
+ from_index = op.from_marker;
+ }
+ if (op.to_marker.empty()) {
+ get_index_time_prefix(op.to_time, to_index);
+ } else {
+ to_index = op.to_marker;
+ }
+
+#define MAX_TRIM_ENTRIES 1000
+ size_t max_entries = MAX_TRIM_ENTRIES;
+ bool more;
+
+ int rc = cls_cxx_map_get_vals(hctx, from_index, log_index_prefix, max_entries, &keys, &more);
+ if (rc < 0)
+ return rc;
+
+ map<string, bufferlist>::iterator iter = keys.begin();
+
+ bool removed = false;
+ for (; iter != keys.end(); ++iter) {
+ const string& index = iter->first;
+
+ CLS_LOG(20, "index=%s to_index=%s", index.c_str(), to_index.c_str());
+
+ if (index.compare(0, to_index.size(), to_index) > 0)
+ break;
+
+ CLS_LOG(20, "removing key: index=%s", index.c_str());
+
+ int rc = cls_cxx_map_remove_key(hctx, index);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: cls_cxx_map_remove_key failed rc=%d", rc);
+ return -EINVAL;
+ }
+ removed = true;
+ }
+
+ if (!removed)
+ return -ENODATA;
+
+ return 0;
+}
+
+static int cls_log_info(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_log_info_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_log_add_op(): failed to decode op");
+ return -EINVAL;
+ }
+
+ cls_log_info_ret ret;
+
+ int rc = read_header(hctx, ret.header);
+ if (rc < 0)
+ return rc;
+
+ encode(ret, *out);
+
+ return 0;
+}
+
+CLS_INIT(log)
+{
+ CLS_LOG(1, "Loaded log class!");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_log_add;
+ cls_method_handle_t h_log_list;
+ cls_method_handle_t h_log_trim;
+ cls_method_handle_t h_log_info;
+
+ cls_register("log", &h_class);
+
+ /* log */
+ cls_register_cxx_method(h_class, "add", CLS_METHOD_RD | CLS_METHOD_WR, cls_log_add, &h_log_add);
+ cls_register_cxx_method(h_class, "list", CLS_METHOD_RD, cls_log_list, &h_log_list);
+ cls_register_cxx_method(h_class, "trim", CLS_METHOD_RD | CLS_METHOD_WR, cls_log_trim, &h_log_trim);
+ cls_register_cxx_method(h_class, "info", CLS_METHOD_RD, cls_log_info, &h_log_info);
+
+ return;
+}
+
diff --git a/src/cls/log/cls_log_client.cc b/src/cls/log/cls_log_client.cc
new file mode 100644
index 00000000..accc9dd1
--- /dev/null
+++ b/src/cls/log/cls_log_client.cc
@@ -0,0 +1,154 @@
+#include <errno.h>
+
+#include "cls/log/cls_log_ops.h"
+#include "include/rados/librados.hpp"
+#include "include/compat.h"
+
+
+using namespace librados;
+
+
+
+void cls_log_add(librados::ObjectWriteOperation& op, list<cls_log_entry>& entries, bool monotonic_inc)
+{
+ bufferlist in;
+ cls_log_add_op call;
+ call.entries = entries;
+ call.monotonic_inc = monotonic_inc; // pass the caller's flag instead of always using the default
+ encode(call, in);
+ op.exec("log", "add", in);
+}
+
+void cls_log_add(librados::ObjectWriteOperation& op, cls_log_entry& entry)
+{
+ bufferlist in;
+ cls_log_add_op call;
+ call.entries.push_back(entry);
+ encode(call, in);
+ op.exec("log", "add", in);
+}
+
+void cls_log_add_prepare_entry(cls_log_entry& entry, const utime_t& timestamp,
+ const string& section, const string& name, bufferlist& bl)
+{
+ entry.timestamp = timestamp;
+ entry.section = section;
+ entry.name = name;
+ entry.data = bl;
+}
+
+void cls_log_add(librados::ObjectWriteOperation& op, const utime_t& timestamp,
+ const string& section, const string& name, bufferlist& bl)
+{
+ cls_log_entry entry;
+
+ cls_log_add_prepare_entry(entry, timestamp, section, name, bl);
+ cls_log_add(op, entry);
+}
+
+void cls_log_trim(librados::ObjectWriteOperation& op, const utime_t& from_time, const utime_t& to_time,
+ const string& from_marker, const string& to_marker)
+{
+ bufferlist in;
+ cls_log_trim_op call;
+ call.from_time = from_time;
+ call.to_time = to_time;
+ call.from_marker = from_marker;
+ call.to_marker = to_marker;
+ encode(call, in);
+ op.exec("log", "trim", in);
+}
+
+int cls_log_trim(librados::IoCtx& io_ctx, const string& oid, const utime_t& from_time, const utime_t& to_time,
+ const string& from_marker, const string& to_marker)
+{
+ bool done = false;
+
+ do {
+ ObjectWriteOperation op;
+
+ cls_log_trim(op, from_time, to_time, from_marker, to_marker);
+
+ int r = io_ctx.operate(oid, &op);
+ if (r == -ENODATA)
+ done = true;
+ else if (r < 0)
+ return r;
+
+ } while (!done);
+
+
+ return 0;
+}
+
+class LogListCtx : public ObjectOperationCompletion {
+ list<cls_log_entry> *entries;
+ string *marker;
+ bool *truncated;
+public:
+ LogListCtx(list<cls_log_entry> *_entries, string *_marker, bool *_truncated) :
+ entries(_entries), marker(_marker), truncated(_truncated) {}
+ void handle_completion(int r, bufferlist& outbl) override {
+ if (r >= 0) {
+ cls_log_list_ret ret;
+ try {
+ auto iter = outbl.cbegin();
+ decode(ret, iter);
+ if (entries)
+ *entries = std::move(ret.entries);
+ if (truncated)
+ *truncated = ret.truncated;
+ if (marker)
+ *marker = std::move(ret.marker);
+ } catch (buffer::error& err) {
+ // nothing we can do about it atm
+ }
+ }
+ }
+};
+
+void cls_log_list(librados::ObjectReadOperation& op, utime_t& from, utime_t& to,
+ const string& in_marker, int max_entries,
+ list<cls_log_entry>& entries,
+ string *out_marker, bool *truncated)
+{
+ bufferlist inbl;
+ cls_log_list_op call;
+ call.from_time = from;
+ call.to_time = to;
+ call.marker = in_marker;
+ call.max_entries = max_entries;
+
+ encode(call, inbl);
+
+ op.exec("log", "list", inbl, new LogListCtx(&entries, out_marker, truncated));
+}
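+
+/*
+ * Paging sketch (assuming an open IoCtx named ioctx and an object name oid):
+ * repeat the list call, feeding the returned marker back in, until the
+ * result is no longer truncated.
+ *
+ *   utime_t from, to;                     // zero = no time bound
+ *   string marker;
+ *   bool truncated = true;
+ *   while (truncated) {
+ *     list<cls_log_entry> entries;
+ *     librados::ObjectReadOperation op;
+ *     cls_log_list(op, from, to, marker, 100, entries, &marker, &truncated);
+ *     bufferlist out;
+ *     int r = ioctx.operate(oid, &op, &out);
+ *     if (r < 0)
+ *       break;
+ *     // consume entries...
+ *   }
+ */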
+
+class LogInfoCtx : public ObjectOperationCompletion {
+ cls_log_header *header;
+public:
+ explicit LogInfoCtx(cls_log_header *_header) : header(_header) {}
+ void handle_completion(int r, bufferlist& outbl) override {
+ if (r >= 0) {
+ cls_log_info_ret ret;
+ try {
+ auto iter = outbl.cbegin();
+ decode(ret, iter);
+ if (header)
+ *header = ret.header;
+ } catch (buffer::error& err) {
+ // nothing we can do about it atm
+ }
+ }
+ }
+};
+
+void cls_log_info(librados::ObjectReadOperation& op, cls_log_header *header)
+{
+ bufferlist inbl;
+ cls_log_info_op call;
+
+ encode(call, inbl);
+
+ op.exec("log", "info", inbl, new LogInfoCtx(header));
+}
+
diff --git a/src/cls/log/cls_log_client.h b/src/cls/log/cls_log_client.h
new file mode 100644
index 00000000..0e701ff9
--- /dev/null
+++ b/src/cls/log/cls_log_client.h
@@ -0,0 +1,31 @@
+#ifndef CEPH_CLS_LOG_CLIENT_H
+#define CEPH_CLS_LOG_CLIENT_H
+
+#include "include/rados/librados_fwd.hpp"
+#include "cls_log_types.h"
+
+/*
+ * log objclass
+ */
+
+void cls_log_add_prepare_entry(cls_log_entry& entry, const utime_t& timestamp,
+ const string& section, const string& name, bufferlist& bl);
+
+void cls_log_add(librados::ObjectWriteOperation& op, list<cls_log_entry>& entries, bool monotonic_inc);
+void cls_log_add(librados::ObjectWriteOperation& op, cls_log_entry& entry);
+void cls_log_add(librados::ObjectWriteOperation& op, const utime_t& timestamp,
+ const string& section, const string& name, bufferlist& bl);
+
+void cls_log_list(librados::ObjectReadOperation& op, utime_t& from, utime_t& to,
+ const string& in_marker, int max_entries,
+ list<cls_log_entry>& entries,
+ string *out_marker, bool *truncated);
+
+void cls_log_trim(librados::ObjectWriteOperation& op, const utime_t& from_time, const utime_t& to_time,
+ const string& from_marker, const string& to_marker);
+int cls_log_trim(librados::IoCtx& io_ctx, const string& oid, const utime_t& from_time, const utime_t& to_time,
+ const string& from_marker, const string& to_marker);
+
+void cls_log_info(librados::ObjectReadOperation& op, cls_log_header *header);
+
+#endif
diff --git a/src/cls/log/cls_log_ops.h b/src/cls/log/cls_log_ops.h
new file mode 100644
index 00000000..2967da6e
--- /dev/null
+++ b/src/cls/log/cls_log_ops.h
@@ -0,0 +1,156 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_LOG_OPS_H
+#define CEPH_CLS_LOG_OPS_H
+
+#include "cls_log_types.h"
+
+struct cls_log_add_op {
+ list<cls_log_entry> entries;
+ bool monotonic_inc;
+
+ cls_log_add_op() : monotonic_inc(true) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(entries, bl);
+ encode(monotonic_inc, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(entries, bl);
+ if (struct_v >= 2) {
+ decode(monotonic_inc, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_log_add_op)
+
+struct cls_log_list_op {
+ utime_t from_time;
+ string marker; /* if not empty, overrides from_time */
+ utime_t to_time; /* not inclusive */
+ int max_entries; /* upper bound on the number of returned entries;
+ fewer may be returned and the result may still be truncated */
+
+ cls_log_list_op() : max_entries(0) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(from_time, bl);
+ encode(marker, bl);
+ encode(to_time, bl);
+ encode(max_entries, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(from_time, bl);
+ decode(marker, bl);
+ decode(to_time, bl);
+ decode(max_entries, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_log_list_op)
+
+struct cls_log_list_ret {
+ list<cls_log_entry> entries;
+ string marker;
+ bool truncated;
+
+ cls_log_list_ret() : truncated(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ encode(marker, bl);
+ encode(truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ decode(marker, bl);
+ decode(truncated, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_log_list_ret)
+
+
+/*
+ * The trim operation returns 0 when entries were removed but more may remain,
+ * and -ENODATA once trimming is complete, so the caller needs to repeat the
+ * request until -ENODATA is returned.
+ */
+struct cls_log_trim_op {
+ utime_t from_time;
+ utime_t to_time; /* inclusive */
+ string from_marker;
+ string to_marker;
+
+ cls_log_trim_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(from_time, bl);
+ encode(to_time, bl);
+ encode(from_marker, bl);
+ encode(to_marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(from_time, bl);
+ decode(to_time, bl);
+ if (struct_v >= 2) {
+ decode(from_marker, bl);
+ decode(to_marker, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_log_trim_op)
+
+struct cls_log_info_op {
+ cls_log_info_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ // currently empty request
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ // currently empty request
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_log_info_op)
+
+struct cls_log_info_ret {
+ cls_log_header header;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(header, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(header, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_log_info_ret)
+
+#endif
diff --git a/src/cls/log/cls_log_types.h b/src/cls/log/cls_log_types.h
new file mode 100644
index 00000000..5ec8628f
--- /dev/null
+++ b/src/cls/log/cls_log_types.h
@@ -0,0 +1,65 @@
+#ifndef CEPH_CLS_LOG_TYPES_H
+#define CEPH_CLS_LOG_TYPES_H
+
+#include "include/encoding.h"
+#include "include/types.h"
+
+#include "include/utime.h"
+
+class JSONObj;
+
+
+struct cls_log_entry {
+ string id;
+ string section;
+ string name;
+ utime_t timestamp;
+ bufferlist data;
+
+ cls_log_entry() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(section, bl);
+ encode(name, bl);
+ encode(timestamp, bl);
+ encode(data, bl);
+ encode(id, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(section, bl);
+ decode(name, bl);
+ decode(timestamp, bl);
+ decode(data, bl);
+ if (struct_v >= 2)
+ decode(id, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_log_entry)
+
+struct cls_log_header {
+ string max_marker;
+ utime_t max_time;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(max_marker, bl);
+ encode(max_time, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(max_marker, bl);
+ decode(max_time, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_log_header)
+
+
+#endif
diff --git a/src/cls/lua/cls_lua.cc b/src/cls/lua/cls_lua.cc
new file mode 100644
index 00000000..ecc5417f
--- /dev/null
+++ b/src/cls/lua/cls_lua.cc
@@ -0,0 +1,1052 @@
+/*
+ * Lua Bindings for RADOS Object Class
+ */
+#include <errno.h>
+#include <setjmp.h>
+#include <string>
+#include <sstream>
+#include <lua.hpp>
+#include "include/types.h"
+#include "objclass/objclass.h"
+#include "json_spirit/json_spirit.h"
+#include "cls_lua.h"
+#include "cls_lua_ops.h"
+
+CLS_VER(1,0)
+CLS_NAME(lua)
+
+/*
+ * Jump point for recovering from Lua panic.
+ */
+static jmp_buf cls_lua_panic_jump;
+
+/*
+ * Handle Lua panic.
+ */
+static int cls_lua_atpanic(lua_State *lua)
+{
+ CLS_ERR("error: Lua panic: %s", lua_tostring(lua, -1));
+ longjmp(cls_lua_panic_jump, 1);
+ return 0;
+}
+
+struct clslua_err {
+ bool error;
+ int ret;
+};
+
+/*
+ * Input parameter encoding.
+ */
+enum InputEncoding {
+ JSON_ENC,
+ BUFFERLIST_ENC,
+};
+
+struct clslua_hctx {
+ struct clslua_err error;
+ InputEncoding in_enc;
+ int ret;
+
+ cls_method_context_t *hctx;
+ bufferlist *inbl; // raw cls input
+ bufferlist *outbl; // raw cls output
+
+ string script; // lua script
+ string handler; // lua handler
+ bufferlist input; // lua handler input
+};
+
+/* Lua registry key for method context */
+static char clslua_hctx_reg_key;
+
+/*
+ * Grabs the full method handler context
+ */
+static clslua_hctx *__clslua_get_hctx(lua_State *L)
+{
+ /* lookup registry value */
+ lua_pushlightuserdata(L, &clslua_hctx_reg_key);
+ lua_gettable(L, LUA_REGISTRYINDEX);
+
+ /* check cls_lua assumptions */
+ ceph_assert(!lua_isnil(L, -1));
+ ceph_assert(lua_type(L, -1) == LUA_TLIGHTUSERDATA);
+
+ /* cast and cleanup stack */
+ clslua_hctx *hctx = (struct clslua_hctx *)lua_touserdata(L, -1);
+ lua_pop(L, 1);
+
+ return hctx;
+}
+
+/*
+ * Get the method context out of the registry. This is called at the beginning
+ * of each cls_cxx_* wrapper, and the context must be set before there is any
+ * chance of a Lua script calling a 'cls' module function that requires it.
+ */
+static cls_method_context_t clslua_get_hctx(lua_State *L)
+{
+ struct clslua_hctx *hctx = __clslua_get_hctx(L);
+ return *hctx->hctx;
+}
+
+/*
+ * Returns a reference to cls_lua error state from registry.
+ */
+struct clslua_err *clslua_checkerr(lua_State *L)
+{
+ struct clslua_hctx *hctx = __clslua_get_hctx(L);
+ struct clslua_err *err = &hctx->error;
+ return err;
+}
+
+
+/* Registry key for real `pcall` function */
+static char clslua_pcall_reg_key;
+
+/*
+ * Wrap Lua pcall to check for errors thrown by cls_lua (e.g. I/O errors or
+ * bufferlist decoding errors). The global error is cleared before returning
+ * to the caller.
+ */
+static int clslua_pcall(lua_State *L)
+{
+ int nargs = lua_gettop(L);
+ lua_pushlightuserdata(L, &clslua_pcall_reg_key);
+ lua_gettable(L, LUA_REGISTRYINDEX);
+ lua_insert(L, 1);
+ lua_call(L, nargs, LUA_MULTRET);
+ struct clslua_err *err = clslua_checkerr(L);
+ ceph_assert(err);
+ if (err->error) {
+ err->error = false;
+ lua_pushinteger(L, err->ret);
+ lua_insert(L, -2);
+ }
+ return lua_gettop(L);
+}
+
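+/*
+ * Illustrative behaviour seen from a sandboxed script (a sketch; the exact
+ * message text comes from clslua_opresult() below):
+ *
+ *   local ok, ret, msg = pcall(objclass.read, 0, 100)
+ *   -- on a failed cls_cxx_read(): ok == false, ret == negative errno,
+ *   -- msg == the strerror() string pushed by clslua_opresult()
+ */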
+
+/*
+ * cls_log
+ */
+static int clslua_log(lua_State *L)
+{
+ int nargs = lua_gettop(L);
+
+ if (!nargs)
+ return 0;
+
+ int loglevel = LOG_LEVEL_DEFAULT;
+ bool custom_ll = false;
+
+ /* check if first arg can be a log level */
+ if (nargs > 1 && lua_isnumber(L, 1)) {
+ int ll = (int)lua_tonumber(L, 1);
+ if (ll >= 0) {
+ loglevel = ll;
+ custom_ll = true;
+ }
+ }
+
+ /* check space for args and separators (" ") */
+ int nelems = ((nargs - (custom_ll ? 1 : 0)) * 2) - 1;
+ luaL_checkstack(L, nelems, "rados.log(..)");
+
+ for (int i = custom_ll ? 2 : 1; i <= nargs; i++) {
+ const char *part = lua_tostring(L, i);
+ if (!part) {
+ if (lua_type(L, i) == LUA_TBOOLEAN)
+ part = lua_toboolean(L, i) ? "true" : "false";
+ else
+ part = luaL_typename(L, i);
+ }
+ lua_pushstring(L, part);
+ if ((i+1) <= nargs)
+ lua_pushstring(L, " ");
+ }
+
+ /* join string parts and send to Ceph/reply log */
+ lua_concat(L, nelems);
+ CLS_LOG(loglevel, "%s", lua_tostring(L, -1));
+
+ /* concat leaves result at top of stack */
+ return 1;
+}
+
+static char clslua_registered_handle_reg_key;
+
+/*
+ * Register a function to be used as a handler target
+ */
+static int clslua_register(lua_State *L)
+{
+ luaL_checktype(L, 1, LUA_TFUNCTION);
+
+ /* get table of registered handlers */
+ lua_pushlightuserdata(L, &clslua_registered_handle_reg_key);
+ lua_gettable(L, LUA_REGISTRYINDEX);
+ ceph_assert(lua_type(L, -1) == LUA_TTABLE);
+
+ /* lookup function argument */
+ lua_pushvalue(L, 1);
+ lua_gettable(L, -2);
+
+ if (lua_isnil(L, -1)) {
+ lua_pushvalue(L, 1);
+ lua_pushvalue(L, 1);
+ lua_settable(L, -4);
+ } else {
+ lua_pushstring(L, "Cannot register handler more than once");
+ return lua_error(L);
+ }
+
+ return 0;
+}
+
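+/*
+ * Illustrative script fragment (a sketch): handlers receive the input and
+ * output bufferlists and return an integer status (see clslua_eval() below).
+ *
+ *   function say_hello(input, output)
+ *     output:append("hello " .. input:str())
+ *     return 0
+ *   end
+ *   objclass.register(say_hello)
+ */
+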
+/*
+ * Check if a function is registered as a handler
+ */
+static void clslua_check_registered_handler(lua_State *L)
+{
+ luaL_checktype(L, -1, LUA_TFUNCTION);
+
+ /* get table of registered handlers */
+ lua_pushlightuserdata(L, &clslua_registered_handle_reg_key);
+ lua_gettable(L, LUA_REGISTRYINDEX);
+ ceph_assert(lua_type(L, -1) == LUA_TTABLE);
+
+ /* lookup function argument */
+ lua_pushvalue(L, -2);
+ lua_gettable(L, -2);
+
+ if (!lua_rawequal(L, -1, -3)) {
+ lua_pushstring(L, "Handler is not registered");
+ lua_error(L);
+ }
+
+ lua_pop(L, 2);
+}
+
+/*
+ * Handle result of a cls_cxx_* call. If @ok is non-zero then we return with
+ * the number of Lua return arguments on the stack. Otherwise we save error
+ * information in the registry and throw a Lua error.
+ */
+static int clslua_opresult(lua_State *L, int ok, int ret, int nargs,
+ bool error_on_stack = false)
+{
+ struct clslua_err *err = clslua_checkerr(L);
+
+ ceph_assert(err);
+ if (err->error) {
+ CLS_ERR("error: cls_lua state machine: unexpected error");
+ ceph_abort();
+ }
+
+ /* everything is cherry */
+ if (ok)
+ return nargs;
+
+ /* set error in registry */
+ err->error = true;
+ err->ret = ret;
+
+ /* push error message */
+ if (!error_on_stack)
+ lua_pushfstring(L, "%s", strerror(-ret));
+
+ return lua_error(L);
+}
+
+/*
+ * cls_cxx_create
+ */
+static int clslua_create(lua_State *lua)
+{
+ cls_method_context_t hctx = clslua_get_hctx(lua);
+ int exclusive = lua_toboolean(lua, 1);
+
+ int ret = cls_cxx_create(hctx, exclusive);
+ return clslua_opresult(lua, (ret == 0), ret, 0);
+}
+
+/*
+ * cls_cxx_remove
+ */
+static int clslua_remove(lua_State *lua)
+{
+ cls_method_context_t hctx = clslua_get_hctx(lua);
+
+ int ret = cls_cxx_remove(hctx);
+ return clslua_opresult(lua, (ret == 0), ret, 0);
+}
+
+/*
+ * cls_cxx_stat
+ */
+static int clslua_stat(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+
+ uint64_t size;
+ time_t mtime;
+ int ret = cls_cxx_stat(hctx, &size, &mtime);
+ if (!ret) {
+ lua_pushinteger(L, size);
+ lua_pushinteger(L, mtime);
+ }
+ return clslua_opresult(L, (ret == 0), ret, 2);
+}
+
+/*
+ * cls_cxx_read
+ */
+static int clslua_read(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ int offset = luaL_checkinteger(L, 1);
+ int length = luaL_checkinteger(L, 2);
+ bufferlist *bl = clslua_pushbufferlist(L, NULL);
+ int ret = cls_cxx_read(hctx, offset, length, bl);
+ return clslua_opresult(L, (ret >= 0), ret, 1);
+}
+
+/*
+ * cls_cxx_write
+ */
+static int clslua_write(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ int offset = luaL_checkinteger(L, 1);
+ int length = luaL_checkinteger(L, 2);
+ bufferlist *bl = clslua_checkbufferlist(L, 3);
+ int ret = cls_cxx_write(hctx, offset, length, bl);
+ return clslua_opresult(L, (ret == 0), ret, 0);
+}
+
+/*
+ * cls_cxx_write_full
+ */
+static int clslua_write_full(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ bufferlist *bl = clslua_checkbufferlist(L, 1);
+ int ret = cls_cxx_write_full(hctx, bl);
+ return clslua_opresult(L, (ret == 0), ret, 0);
+}
+
+/*
+ * cls_cxx_getxattr
+ */
+static int clslua_getxattr(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ const char *name = luaL_checkstring(L, 1);
+ bufferlist *bl = clslua_pushbufferlist(L, NULL);
+ int ret = cls_cxx_getxattr(hctx, name, bl);
+ return clslua_opresult(L, (ret >= 0), ret, 1);
+}
+
+/*
+ * cls_cxx_getxattrs
+ */
+static int clslua_getxattrs(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+
+ map<string, bufferlist> attrs;
+ int ret = cls_cxx_getxattrs(hctx, &attrs);
+ if (ret < 0)
+ return clslua_opresult(L, 0, ret, 0);
+
+ lua_createtable(L, 0, attrs.size());
+
+ for (auto it = attrs.cbegin(); it != attrs.cend(); it++) {
+ lua_pushstring(L, it->first.c_str());
+ bufferlist *bl = clslua_pushbufferlist(L, NULL);
+ *bl = it->second; // xfer ownership... will be GC'd
+ lua_settable(L, -3);
+ }
+
+ return clslua_opresult(L, 1, ret, 1);
+}
+
+/*
+ * cls_cxx_setxattr
+ */
+static int clslua_setxattr(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ const char *name = luaL_checkstring(L, 1);
+ bufferlist *bl = clslua_checkbufferlist(L, 2);
+ int ret = cls_cxx_setxattr(hctx, name, bl);
+ return clslua_opresult(L, (ret == 0), ret, 1);
+}
+
+/*
+ * cls_cxx_map_get_val
+ */
+static int clslua_map_get_val(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ const char *key = luaL_checkstring(L, 1);
+ bufferlist *bl = clslua_pushbufferlist(L, NULL);
+ int ret = cls_cxx_map_get_val(hctx, key, bl);
+ return clslua_opresult(L, (ret == 0), ret, 1);
+}
+
+/*
+ * cls_cxx_map_set_val
+ */
+static int clslua_map_set_val(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ const char *key = luaL_checkstring(L, 1);
+ bufferlist *val = clslua_checkbufferlist(L, 2);
+ int ret = cls_cxx_map_set_val(hctx, key, val);
+ return clslua_opresult(L, (ret == 0), ret, 0);
+}
+
+/*
+ * cls_cxx_map_clear
+ */
+static int clslua_map_clear(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ int ret = cls_cxx_map_clear(hctx);
+ return clslua_opresult(L, (ret == 0), ret, 0);
+}
+
+/*
+ * cls_cxx_map_get_keys
+ */
+static int clslua_map_get_keys(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ const char *start_after = luaL_checkstring(L, 1);
+ int max_to_get = luaL_checkinteger(L, 2);
+
+ std::set<string> keys;
+ bool more;
+ int ret = cls_cxx_map_get_keys(hctx, start_after, max_to_get, &keys, &more);
+ if (ret < 0)
+ return clslua_opresult(L, 0, ret, 0);
+
+ lua_createtable(L, 0, keys.size());
+
+ for (auto it = keys.cbegin(); it != keys.cend(); it++) {
+ const std::string& key = *it;
+ lua_pushstring(L, key.c_str());
+ lua_pushboolean(L, 1);
+ lua_settable(L, -3);
+ }
+
+ return clslua_opresult(L, 1, ret, 1);
+}
+
+/*
+ * cls_cxx_map_get_vals
+ */
+static int clslua_map_get_vals(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ const char *start_after = luaL_checkstring(L, 1);
+ const char *filter_prefix = luaL_checkstring(L, 2);
+ int max_to_get = luaL_checkinteger(L, 3);
+
+ map<string, bufferlist> kvpairs;
+ bool more;
+ int ret = cls_cxx_map_get_vals(hctx, start_after, filter_prefix,
+ max_to_get, &kvpairs, &more);
+ if (ret < 0)
+ return clslua_opresult(L, 0, ret, 0);
+
+ lua_createtable(L, 0, kvpairs.size());
+
+ for (auto it = kvpairs.cbegin(); it != kvpairs.cend(); it++) {
+ lua_pushstring(L, it->first.c_str());
+ bufferlist *bl = clslua_pushbufferlist(L, NULL);
+ *bl = it->second; // xfer ownership... will be GC'd
+ lua_settable(L, -3);
+ }
+
+ return clslua_opresult(L, 1, ret, 1);
+}
+
+/*
+ * cls_cxx_map_read_header
+ */
+static int clslua_map_read_header(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ bufferlist *bl = clslua_pushbufferlist(L, NULL);
+ int ret = cls_cxx_map_read_header(hctx, bl);
+ return clslua_opresult(L, (ret >= 0), ret, 1);
+}
+
+/*
+ * cls_cxx_map_write_header
+ */
+static int clslua_map_write_header(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ bufferlist *bl = clslua_checkbufferlist(L, 1);
+ int ret = cls_cxx_map_write_header(hctx, bl);
+ return clslua_opresult(L, (ret == 0), ret, 0);
+}
+
+/*
+ * cls_cxx_map_set_vals
+ */
+static int clslua_map_set_vals(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ luaL_checktype(L, 1, LUA_TTABLE);
+
+ map<string, bufferlist> kvpairs;
+
+ for (lua_pushnil(L); lua_next(L, 1); lua_pop(L, 1)) {
+ /*
+ * In the case of a numeric key a copy is made on the stack because
+ * converting to a string would otherwise manipulate the original key and
+ * cause problems for iteration.
+ */
+ string key;
+ int type_code = lua_type(L, -2);
+ switch (type_code) {
+ case LUA_TSTRING:
+ key.assign(lua_tolstring(L, -2, NULL));
+ break;
+
+ case LUA_TNUMBER:
+ lua_pushvalue(L, -2);
+ key.assign(lua_tolstring(L, -1, NULL));
+ lua_pop(L, 1);
+ break;
+
+ default:
+ lua_pushfstring(L, "map_set_vals: invalid key type (%s)",
+ lua_typename(L, type_code));
+ return clslua_opresult(L, 0, -EINVAL, 0, true);
+ }
+
+ bufferlist val;
+ type_code = lua_type(L, -1);
+ switch (type_code) {
+ case LUA_TSTRING:
+ {
+ size_t len;
+ const char *data = lua_tolstring(L, -1, &len);
+ val.append(data, len);
+ }
+ break;
+
+ default:
+ lua_pushfstring(L, "map_set_vals: invalid val type (%s) for key (%s)",
+ lua_typename(L, type_code), key.c_str());
+ return clslua_opresult(L, 0, -EINVAL, 0, true);
+ }
+
+ kvpairs[key] = val;
+ }
+
+ int ret = cls_cxx_map_set_vals(hctx, &kvpairs);
+
+ return clslua_opresult(L, (ret == 0), ret, 0);
+}
+
+/*
+ * cls_cxx_map_remove_key
+ */
+static int clslua_map_remove_key(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ const char *key = luaL_checkstring(L, 1);
+ int ret = cls_cxx_map_remove_key(hctx, key);
+ return clslua_opresult(L, (ret == 0), ret, 0);
+}
+
+/*
+ * cls_current_version
+ */
+static int clslua_current_version(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ uint64_t version = cls_current_version(hctx);
+ lua_pushinteger(L, version);
+ return clslua_opresult(L, 1, 0, 1);
+}
+
+/*
+ * cls_current_subop_num
+ */
+static int clslua_current_subop_num(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ int num = cls_current_subop_num(hctx);
+ lua_pushinteger(L, num);
+ return clslua_opresult(L, 1, 0, 1);
+}
+
+/*
+ * cls_current_subop_version
+ */
+static int clslua_current_subop_version(lua_State *L)
+{
+ cls_method_context_t hctx = clslua_get_hctx(L);
+ string s;
+ cls_cxx_subop_version(hctx, &s);
+ lua_pushstring(L, s.c_str());
+ return clslua_opresult(L, 1, 0, 1);
+}
+
+/*
+ * Functions registered in the 'cls' module.
+ */
+static const luaL_Reg clslua_lib[] = {
+ // mgmt
+ {"register", clslua_register},
+ {"log", clslua_log},
+
+ // data
+ {"create", clslua_create},
+ {"remove", clslua_remove},
+ {"stat", clslua_stat},
+ {"read", clslua_read},
+ {"write", clslua_write},
+ {"write_full", clslua_write_full},
+
+ // xattr
+ {"getxattr", clslua_getxattr},
+ {"getxattrs", clslua_getxattrs},
+ {"setxattr", clslua_setxattr},
+
+ // omap
+ {"map_clear", clslua_map_clear},
+ {"map_get_keys", clslua_map_get_keys},
+ {"map_get_vals", clslua_map_get_vals},
+ {"map_read_header", clslua_map_read_header},
+ {"map_write_header", clslua_map_write_header},
+ {"map_get_val", clslua_map_get_val},
+ {"map_set_val", clslua_map_set_val},
+ {"map_set_vals", clslua_map_set_vals},
+ {"map_remove_key", clslua_map_remove_key},
+
+ // env
+ {"current_version", clslua_current_version},
+ {"current_subop_num", clslua_current_subop_num},
+ {"current_subop_version", clslua_current_subop_version},
+
+ {NULL, NULL}
+};
+
+/*
+ * Set const int in table at top of stack
+ */
+#define SET_INT_CONST(var) do { \
+ lua_pushinteger(L, var); \
+ lua_setfield(L, -2, #var); \
+} while (0)
+
+/*
+ * Build the 'objclass' module table: the functions above plus a set of
+ * generic errno constants.
+ */
+static int luaopen_objclass(lua_State *L)
+{
+ lua_newtable(L);
+
+ /*
+ * Register cls functions (cls.log, etc...)
+ */
+ luaL_setfuncs(L, clslua_lib, 0);
+
+ /*
+ * Register generic errno values under 'cls'
+ */
+ SET_INT_CONST(EPERM);
+ SET_INT_CONST(ENOENT);
+ SET_INT_CONST(ESRCH);
+ SET_INT_CONST(EINTR);
+ SET_INT_CONST(EIO);
+ SET_INT_CONST(ENXIO);
+ SET_INT_CONST(E2BIG);
+ SET_INT_CONST(ENOEXEC);
+ SET_INT_CONST(EBADF);
+ SET_INT_CONST(ECHILD);
+ SET_INT_CONST(EAGAIN);
+ SET_INT_CONST(ENOMEM);
+ SET_INT_CONST(EACCES);
+ SET_INT_CONST(EFAULT);
+ SET_INT_CONST(EBUSY);
+ SET_INT_CONST(EEXIST);
+ SET_INT_CONST(EXDEV);
+ SET_INT_CONST(ENODEV);
+ SET_INT_CONST(ENOTDIR);
+ SET_INT_CONST(EISDIR);
+ SET_INT_CONST(EINVAL);
+ SET_INT_CONST(ENFILE);
+ SET_INT_CONST(EMFILE);
+ SET_INT_CONST(ENOTTY);
+ SET_INT_CONST(EFBIG);
+ SET_INT_CONST(ENOSPC);
+ SET_INT_CONST(ESPIPE);
+ SET_INT_CONST(EROFS);
+ SET_INT_CONST(EMLINK);
+ SET_INT_CONST(EPIPE);
+ SET_INT_CONST(EDOM);
+ SET_INT_CONST(ERANGE);
+
+ return 1;
+}
+
+/*
+ * Set up the execution environment. Our sandbox is currently not
+ * sophisticated. With a new Lua state per request we don't need to worry
+ * about users stepping on each other, but we do rip out access to the local
+ * file system. All this will change when/if we decide to use some shared Lua
+ * states, most likely for performance reasons.
+ */
+static void clslua_setup_env(lua_State *L)
+{
+ luaL_requiref(L, "_G", luaopen_base, 1);
+ lua_pop(L, 1);
+
+ /*
+ * Wrap `pcall` to intercept errors. First save a reference to the default
+ * Lua `pcall` function, and then replace `pcall` with our version.
+ */
+ lua_pushlightuserdata(L, &clslua_pcall_reg_key);
+ lua_getglobal(L, "pcall");
+ lua_settable(L, LUA_REGISTRYINDEX);
+
+ lua_pushcfunction(L, clslua_pcall);
+ lua_setglobal(L, "pcall");
+
+ /* mask unsafe */
+ lua_pushnil(L);
+ lua_setglobal(L, "loadfile");
+
+ /* mask unsafe */
+ lua_pushnil(L);
+ lua_setglobal(L, "dofile");
+
+ /* not integrated into our error handling */
+ lua_pushnil(L);
+ lua_setglobal(L, "xpcall");
+
+ luaL_requiref(L, LUA_TABLIBNAME, luaopen_table, 1);
+ lua_pop(L, 1);
+
+ luaL_requiref(L, LUA_STRLIBNAME, luaopen_string, 1);
+ lua_pop(L, 1);
+
+ luaL_requiref(L, LUA_MATHLIBNAME, luaopen_math, 1);
+ lua_pop(L, 1);
+
+ luaL_requiref(L, "objclass", luaopen_objclass, 1);
+ lua_pop(L, 1);
+
+ luaL_requiref(L, "bufferlist", luaopen_bufferlist, 1);
+ lua_pop(L, 1);
+}
+
+/*
+ * Schema:
+ * {
+ * "script": "...",
+ * "handler": "...",
+ * "input": "..." # optional
+ * }
+ */
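+/*
+ * A concrete command, for illustration only:
+ *
+ *   {
+ *     "script": "function f(input, output) return 0 end objclass.register(f)",
+ *     "handler": "f",
+ *     "input": "optional data handed to the handler"
+ *   }
+ */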
+static int unpack_json_command(lua_State *L, struct clslua_hctx *ctx,
+ std::string& script, std::string& handler, std::string& input,
+ size_t *input_len)
+{
+ std::string json_input(ctx->inbl->c_str());
+ json_spirit::mValue value;
+
+ if (!json_spirit::read(json_input, value)) {
+ CLS_ERR("error: unparseable JSON");
+ ctx->ret = -EINVAL;
+ return 1;
+ }
+
+ if (value.type() != json_spirit::obj_type) {
+ CLS_ERR("error: input not a JSON object");
+ ctx->ret = -EINVAL;
+ return 1;
+ }
+ json_spirit::mObject obj = value.get_obj();
+
+ // grab the script
+ std::map<std::string, json_spirit::mValue>::const_iterator it = obj.find("script");
+ if (it == obj.end()) {
+ CLS_ERR("error: 'script' field found in JSON object");
+ ctx->ret = -EINVAL;
+ return 1;
+ }
+
+ if (it->second.type() != json_spirit::str_type) {
+ CLS_ERR("error: script is not a string");
+ ctx->ret = -EINVAL;
+ return 1;
+ }
+ script = it->second.get_str();
+
+ // grab the target function/handler name
+ it = obj.find("handler");
+ if (it == obj.end()) {
+ CLS_ERR("error: no target handler found in JSON object");
+ ctx->ret = -EINVAL;
+ return 1;
+ }
+
+ if (it->second.type() != json_spirit::str_type) {
+ CLS_ERR("error: target handler is not a string");
+ ctx->ret = -EINVAL;
+ return 1;
+ }
+ handler = it->second.get_str();
+
+ // grab the input (optional)
+ it = obj.find("input");
+ if (it != obj.end()) {
+ if (it->second.type() != json_spirit::str_type) {
+ CLS_ERR("error: handler input is not a string");
+ ctx->ret = -EINVAL;
+ return 1;
+ }
+ input = it->second.get_str();
+ *input_len = input.size();
+ }
+
+ return 0;
+}
+
+/*
+ * Runs the script, and calls handler.
+ */
+static int clslua_eval(lua_State *L)
+{
+ struct clslua_hctx *ctx = __clslua_get_hctx(L);
+ ctx->ret = -EIO; /* assume failure */
+
+ /*
+ * Load modules, errno value constants, and other environment goodies. Must
+ * be done before loading/compiling the chunk.
+ */
+ clslua_setup_env(L);
+
+ /*
+ * Deserialize the input that contains the script, the name of the handler
+ * to call, and the handler input.
+ */
+ switch (ctx->in_enc) {
+ case JSON_ENC:
+ {
+ std::string input_str;
+ size_t input_str_len = 0;
+
+ // if there is an error decoding json then ctx->ret will be set and we
+ // return normally from this function.
+ if (unpack_json_command(L, ctx, ctx->script, ctx->handler, input_str,
+ &input_str_len))
+ return 0;
+
+ bufferptr bp(input_str.c_str(), input_str_len);
+ ctx->input.push_back(bp);
+ }
+ break;
+
+ case BUFFERLIST_ENC:
+ {
+ cls_lua_eval_op op;
+
+ try {
+ auto it = ctx->inbl->cbegin();
+ decode(op, it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("error: could not decode ceph encoded input");
+ ctx->ret = -EINVAL;
+ return 0;
+ }
+
+ ctx->script.swap(op.script);
+ ctx->handler.swap(op.handler);
+ ctx->input = op.input;
+ }
+ break;
+
+ default:
+ CLS_ERR("error: unknown encoding type");
+ ctx->ret = -EFAULT;
+ ceph_abort();
+ return 0;
+ }
+
+ /*
+ * Create table to hold registered (valid) handlers.
+ *
+ * Must be done before running the script for the first time because the
+ * script will immediately try to register one or more handlers using
+ * cls.register(function), which depends on this table.
+ */
+ lua_pushlightuserdata(L, &clslua_registered_handle_reg_key);
+ lua_newtable(L);
+ lua_settable(L, LUA_REGISTRYINDEX);
+
+ /* load and compile chunk */
+ if (luaL_loadstring(L, ctx->script.c_str()))
+ return lua_error(L);
+
+ /* execute chunk */
+ lua_call(L, 0, 0);
+
+ /* no error, but nothing left to do */
+ if (!ctx->handler.size()) {
+ CLS_LOG(10, "no handler name provided");
+ ctx->ret = 0; /* success */
+ return 0;
+ }
+
+ lua_getglobal(L, ctx->handler.c_str());
+ if (lua_type(L, -1) != LUA_TFUNCTION) {
+ CLS_ERR("error: unknown handler or not function: %s", ctx->handler.c_str());
+ ctx->ret = -EOPNOTSUPP;
+ return 0;
+ }
+
+ /* throw error if function is not registered */
+ clslua_check_registered_handler(L);
+
+ /* setup the input/output bufferlists */
+ clslua_pushbufferlist(L, &ctx->input);
+ clslua_pushbufferlist(L, ctx->outbl);
+
+ /*
+ * Call the target Lua object class handler. If the call is successful then
+ * we will examine the return value here and store it in the context. Errors
+ * that occur are handled in the top-level eval() function.
+ */
+ int top = lua_gettop(L);
+ lua_call(L, 2, LUA_MULTRET);
+
+ /* store return value in context */
+ if (!(lua_gettop(L) + 3 - top))
+ lua_pushinteger(L, 0);
+ ctx->ret = luaL_checkinteger(L, -1);
+
+ return 0;
+}
+
+/*
+ * Main handler. Proxies the Lua VM and the Lua-defined handler.
+ */
+static int eval_generic(cls_method_context_t hctx, bufferlist *in, bufferlist *out,
+ InputEncoding in_enc)
+{
+ struct clslua_hctx ctx;
+ lua_State *L = NULL;
+ int ret = -EIO;
+
+ /* stash context for use in Lua VM */
+ ctx.hctx = &hctx;
+ ctx.inbl = in;
+ ctx.in_enc = in_enc;
+ ctx.outbl = out;
+ ctx.error.error = false;
+
+ /* build lua vm state */
+ L = luaL_newstate();
+ if (!L) {
+ CLS_ERR("error creating new Lua state");
+ goto out;
+ }
+
+ /* panic handler for unhandled errors */
+ lua_atpanic(L, &cls_lua_atpanic);
+
+ if (setjmp(cls_lua_panic_jump) == 0) {
+
+ /*
+ * Stash the handler context in the registry. It contains the objclass
+ * method context, global error state, and the command and reply structs.
+ */
+ lua_pushlightuserdata(L, &clslua_hctx_reg_key);
+ lua_pushlightuserdata(L, &ctx);
+ lua_settable(L, LUA_REGISTRYINDEX);
+
+ /* Process the input and run the script */
+ lua_pushcfunction(L, clslua_eval);
+ ret = lua_pcall(L, 0, 0, 0);
+
+ /* Encountered an error? */
+ if (ret) {
+ struct clslua_err *err = clslua_checkerr(L);
+ if (!err) {
+ CLS_ERR("error: cls_lua state machine: unexpected error");
+ ceph_abort();
+ }
+
+ /* Error origin a cls_cxx_* method? */
+ if (err->error) {
+ ret = err->ret; /* cls_cxx_* return value */
+
+ /* Errors always abort. Fix up ret and log error */
+ if (ret >= 0) {
+ CLS_ERR("error: unexpected handler return value");
+ ret = -EFAULT;
+ }
+
+ } else
+ ret = -EIO; /* Generic error code */
+
+ CLS_ERR("error: %s", lua_tostring(L, -1));
+
+ } else {
+ /*
+ * No Lua error encountered while running the script, but the handler
+ * may still have returned an error code (e.g. an errno value).
+ */
+ ret = ctx.ret;
+ }
+
+ } else {
+ CLS_ERR("error: recovering from Lua panic");
+ ret = -EFAULT;
+ }
+
+out:
+ if (L)
+ lua_close(L);
+ return ret;
+}
+
+static int eval_json(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ return eval_generic(hctx, in, out, JSON_ENC);
+}
+
+static int eval_bufferlist(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ return eval_generic(hctx, in, out, BUFFERLIST_ENC);
+}
+
+CLS_INIT(lua)
+{
+ CLS_LOG(20, "Loaded lua class!");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_eval_json;
+ cls_method_handle_t h_eval_bufferlist;
+
+ cls_register("lua", &h_class);
+
+ cls_register_cxx_method(h_class, "eval_json",
+ CLS_METHOD_RD | CLS_METHOD_WR, eval_json, &h_eval_json);
+
+ cls_register_cxx_method(h_class, "eval_bufferlist",
+ CLS_METHOD_RD | CLS_METHOD_WR, eval_bufferlist, &h_eval_bufferlist);
+}
diff --git a/src/cls/lua/cls_lua.h b/src/cls/lua/cls_lua.h
new file mode 100644
index 00000000..70ce9a92
--- /dev/null
+++ b/src/cls/lua/cls_lua.h
@@ -0,0 +1,14 @@
+#ifndef CEPH_CLS_LUA_H
+#define CEPH_CLS_LUA_H
+
+#include <lua.hpp>
+#include "include/types.h"
+
+#define LOG_LEVEL_DEFAULT 10
+
+int luaopen_bufferlist(lua_State *L);
+
+bufferlist *clslua_checkbufferlist(lua_State *L, int pos = 1);
+bufferlist *clslua_pushbufferlist(lua_State *L, bufferlist *set);
+
+#endif
diff --git a/src/cls/lua/cls_lua_client.cc b/src/cls/lua/cls_lua_client.cc
new file mode 100644
index 00000000..0e6544a2
--- /dev/null
+++ b/src/cls/lua/cls_lua_client.cc
@@ -0,0 +1,34 @@
+#include <string>
+#include <vector>
+#include "include/encoding.h"
+#include "include/rados/librados.hpp" // for IoCtx
+#include "cls_lua_client.h"
+#include "cls_lua_ops.h"
+
+using std::string;
+using std::vector;
+using librados::IoCtx;
+using librados::bufferlist;
+
+namespace cls_lua_client {
+ /*
+ * Currently the return code and return bufferlist are not wrapped in a
+ * protocol that lets callers distinguish object class errors from Lua
+ * handler errors. For instance, -EOPNOTSUPP might mean that the cls_lua
+ * class is not loaded, but it is also returned when cls_lua is loaded and
+ * the named Lua handler is not found.
+ */
+ int exec(IoCtx& ioctx, const string& oid, const string& script,
+ const string& handler, bufferlist& input, bufferlist& output)
+ {
+ cls_lua_eval_op op;
+
+ op.script = script;
+ op.handler = handler;
+ op.input = input;
+
+ bufferlist inbl;
+ encode(op, inbl);
+
+ return ioctx.exec(oid, "lua", "eval_bufferlist", inbl, output);
+ }
+}
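+
+/*
+ * Illustrative caller (a sketch only; "obj" and ioctx are placeholders and
+ * error handling is elided):
+ *
+ *   bufferlist in, out;
+ *   in.append("ping");
+ *   std::string script =
+ *     "function echo(input, output)\n"
+ *     "  output:append(input:str())\n"
+ *     "  return 0\n"
+ *     "end\n"
+ *     "objclass.register(echo)\n";
+ *   int r = cls_lua_client::exec(ioctx, "obj", script, "echo", in, out);
+ */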
diff --git a/src/cls/lua/cls_lua_client.h b/src/cls/lua/cls_lua_client.h
new file mode 100644
index 00000000..7e7e164b
--- /dev/null
+++ b/src/cls/lua/cls_lua_client.h
@@ -0,0 +1,13 @@
+#ifndef CLS_LUA_CLIENT_HPP
+#define CLS_LUA_CLIENT_HPP
+#include <string>
+
+#include "include/rados/librados.hpp"
+
+namespace cls_lua_client {
+ int exec(librados::IoCtx& ioctx, const std::string& oid,
+ const std::string& script, const std::string& handler,
+ librados::bufferlist& inbl, librados::bufferlist& outbl);
+}
+
+#endif
diff --git a/src/cls/lua/cls_lua_ops.h b/src/cls/lua/cls_lua_ops.h
new file mode 100644
index 00000000..c4afbd8a
--- /dev/null
+++ b/src/cls/lua/cls_lua_ops.h
@@ -0,0 +1,31 @@
+#ifndef CEPH_CLS_LUA_OPS_H
+#define CEPH_CLS_LUA_OPS_H
+
+#include <string>
+
+#include "include/encoding.h"
+
+struct cls_lua_eval_op {
+ std::string script;
+ std::string handler;
+ bufferlist input;
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(script, bl);
+ encode(handler, bl);
+ encode(input, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(script, bl);
+ decode(handler, bl);
+ decode(input, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_lua_eval_op)
+
+#endif
diff --git a/src/cls/lua/lua_bufferlist.cc b/src/cls/lua/lua_bufferlist.cc
new file mode 100644
index 00000000..995b4082
--- /dev/null
+++ b/src/cls/lua/lua_bufferlist.cc
@@ -0,0 +1,180 @@
+/*
+ * Lua module wrapping librados::bufferlist
+ */
+#include <errno.h>
+#include <string>
+#include <sstream>
+#include <math.h>
+#include <lua.hpp>
+#include "include/types.h"
+#include "include/buffer.h"
+#include "objclass/objclass.h"
+#include "cls/lua/cls_lua.h"
+
+#define LUA_BUFFERLIST "ClsLua.Bufferlist"
+
+struct bufferlist_wrap {
+ bufferlist *bl;
+ int gc; /* do garbage collect? */
+};
+
+static inline struct bufferlist_wrap *to_blwrap(lua_State *L, int pos = 1)
+{
+ return (bufferlist_wrap *)luaL_checkudata(L, pos, LUA_BUFFERLIST);
+}
+
+bufferlist *clslua_checkbufferlist(lua_State *L, int pos)
+{
+ struct bufferlist_wrap *blw = to_blwrap(L, pos);
+ return blw->bl;
+}
+
+/*
+ * Pushes a new bufferlist userdata object onto the stack. If @set is non-null
+ * it is assumed to be a bufferlist that should not be garbage collected.
+ */
+bufferlist *clslua_pushbufferlist(lua_State *L, bufferlist *set)
+{
+ bufferlist_wrap *blw = static_cast<bufferlist_wrap *>(lua_newuserdata(L, sizeof(*blw)));
+ blw->bl = set ? set : new bufferlist();
+ blw->gc = set ? 0 : 1;
+ luaL_getmetatable(L, LUA_BUFFERLIST);
+ lua_setmetatable(L, -2);
+ return blw->bl;
+}
+
+/*
+ * Create a new bufferlist
+ */
+static int bl_new(lua_State *L)
+{
+ clslua_pushbufferlist(L, NULL);
+ return 1;
+}
+
+/*
+ * Convert bufferlist to Lua string
+ */
+static int bl_str(lua_State *L)
+{
+ bufferlist *bl = clslua_checkbufferlist(L);
+ lua_pushlstring(L, bl->c_str(), bl->length());
+ return 1;
+}
+
+/*
+ * Append a Lua string to bufferlist
+ */
+static int bl_append(lua_State *L)
+{
+ bufferlist *bl = clslua_checkbufferlist(L);
+ luaL_checktype(L, 2, LUA_TSTRING);
+
+ size_t len;
+ const char *data = lua_tolstring(L, 2, &len);
+ bl->append(data, len);
+
+ return 0;
+}
+
+/*
+ * Return the length in bytes of bufferlist
+ */
+static int bl_len(lua_State *L)
+{
+ bufferlist *bl = clslua_checkbufferlist(L);
+ lua_pushinteger(L, bl->length());
+ return 1;
+}
+
+/*
+ * Perform byte-for-byte bufferlist equality test
+ */
+static int bl_eq(lua_State *L)
+{
+ bufferlist *bl1 = clslua_checkbufferlist(L, 1);
+ bufferlist *bl2 = clslua_checkbufferlist(L, 2);
+ lua_pushboolean(L, *bl1 == *bl2 ? 1 : 0);
+ return 1;
+}
+
+/*
+ * Bufferlist < operator
+ */
+static int bl_lt(lua_State *L)
+{
+ bufferlist *bl1 = clslua_checkbufferlist(L, 1);
+ bufferlist *bl2 = clslua_checkbufferlist(L, 2);
+ lua_pushboolean(L, *bl1 < *bl2 ? 1 : 0);
+ return 1;
+}
+
+/*
+ * Bufferlist <= operator
+ */
+static int bl_le(lua_State *L)
+{
+ bufferlist *bl1 = clslua_checkbufferlist(L, 1);
+ bufferlist *bl2 = clslua_checkbufferlist(L, 2);
+ lua_pushboolean(L, *bl1 <= *bl2 ? 1 : 0);
+ return 1;
+}
+
+/*
+ * Bufferlist concatenation
+ */
+static int bl_concat(lua_State *L)
+{
+ bufferlist *bl1 = clslua_checkbufferlist(L, 1);
+ bufferlist *bl2 = clslua_checkbufferlist(L, 2);
+ bufferlist *ret = clslua_pushbufferlist(L, NULL);
+ ret->append(bl1->c_str(), bl1->length());
+ ret->append(bl2->c_str(), bl2->length());
+ return 1;
+}
+
+/*
+ * Garbage collect bufferlist
+ */
+static int bl_gc(lua_State *L)
+{
+ struct bufferlist_wrap *blw = to_blwrap(L);
+ ceph_assert(blw);
+ ceph_assert(blw->bl);
+ if (blw->gc)
+ delete blw->bl;
+ return 0;
+}
+
+static const struct luaL_Reg bufferlist_methods[] = {
+ {"str", bl_str},
+ {"append", bl_append},
+ {"__concat", bl_concat},
+ {"__len", bl_len},
+ {"__lt", bl_lt},
+ {"__le", bl_le},
+ {"__gc", bl_gc},
+ {"__eq", bl_eq},
+ {NULL, NULL}
+};
+
+static const struct luaL_Reg bllib_f[] = {
+ {"new", bl_new},
+ {NULL, NULL}
+};
+
+LUALIB_API int luaopen_bufferlist(lua_State *L)
+{
+ /* Setup bufferlist user-data type */
+ luaL_newmetatable(L, LUA_BUFFERLIST);
+ lua_pushvalue(L, -1);
+ lua_setfield(L, -2, "__index");
+
+ luaL_setfuncs(L, bufferlist_methods, 0);
+ lua_pop(L, 1);
+
+ lua_newtable(L);
+ luaL_setfuncs(L, bllib_f, 0);
+
+ return 1;
+}
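+
+/*
+ * Illustrative use from a script (a sketch; the module is made available as
+ * the global "bufferlist" by clslua_setup_env() in cls_lua.cc):
+ *
+ *   local bl = bufferlist.new()
+ *   bl:append("abc")
+ *   assert(#bl == 3)              -- length via the __len metamethod
+ *   objclass.log(bl:str())        -- contents as a Lua string
+ */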
diff --git a/src/cls/numops/cls_numops.cc b/src/cls/numops/cls_numops.cc
new file mode 100644
index 00000000..bccfd6d3
--- /dev/null
+++ b/src/cls/numops/cls_numops.cc
@@ -0,0 +1,161 @@
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2015 CERN
+ *
+ * Author: Joaquim Rocha <joaquim.rocha@cern.ch>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ */
+
+/** \file
+ *
+ * This is an OSD class that implements methods for numeric operations on an
+ * object's omap values.
+ *
+ */
+
+#include "objclass/objclass.h"
+#include <errno.h>
+#include <string>
+#include <sstream>
+#include <cstdio>
+#include <include/compat.h>
+
+#define DECIMAL_PRECISION 10
+
+CLS_VER(1,0)
+CLS_NAME(numops)
+
+static int add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string key, diff_str;
+
+ auto iter = in->cbegin();
+ try {
+ decode(key, iter);
+ decode(diff_str, iter);
+ } catch (const buffer::error &err) {
+ CLS_LOG(20, "add: invalid decode of input");
+ return -EINVAL;
+ }
+
+ char *end_ptr = 0;
+ double difference = strtod(diff_str.c_str(), &end_ptr);
+
+ if (end_ptr && *end_ptr != '\0') {
+ CLS_ERR("add: invalid input value: %s", diff_str.c_str());
+ return -EINVAL;
+ }
+
+ bufferlist bl;
+ int ret = cls_cxx_map_get_val(hctx, key, &bl);
+
+ double value;
+
+ if (ret == -ENODATA || bl.length() == 0) {
+ value = 0;
+ } else if (ret < 0) {
+ if (ret != -ENOENT) {
+ CLS_ERR("add: error reading omap key %s: %d", key.c_str(), ret);
+ }
+ return ret;
+ } else {
+ std::string stored_value(bl.c_str(), bl.length());
+ end_ptr = 0;
+ value = strtod(stored_value.c_str(), &end_ptr);
+
+ if (end_ptr && *end_ptr != '\0') {
+ CLS_ERR("add: invalid stored value: %s", stored_value.c_str());
+ return -EBADMSG;
+ }
+ }
+
+ value += difference;
+
+ std::stringstream stream;
+ stream << std::setprecision(DECIMAL_PRECISION) << value;
+
+ bufferlist new_value;
+ new_value.append(stream.str());
+
+ return cls_cxx_map_set_val(hctx, key, &new_value);
+}
+
+static int mul(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string key, diff_str;
+
+ auto iter = in->cbegin();
+ try {
+ decode(key, iter);
+ decode(diff_str, iter);
+ } catch (const buffer::error &err) {
+ CLS_LOG(20, "mul: invalid decode of input");
+ return -EINVAL;
+ }
+
+ char *end_ptr = 0;
+ double difference = strtod(diff_str.c_str(), &end_ptr);
+
+ if (end_ptr && *end_ptr != '\0') {
+ CLS_ERR("mul: invalid input value: %s", diff_str.c_str());
+ return -EINVAL;
+ }
+
+ bufferlist bl;
+ int ret = cls_cxx_map_get_val(hctx, key, &bl);
+
+ double value;
+
+ if (ret == -ENODATA || bl.length() == 0) {
+ value = 0;
+ } else if (ret < 0) {
+ if (ret != -ENOENT) {
+ CLS_ERR("mul: error reading omap key %s: %d", key.c_str(), ret);
+ }
+ return ret;
+ } else {
+ std::string stored_value(bl.c_str(), bl.length());
+ end_ptr = 0;
+ value = strtod(stored_value.c_str(), &end_ptr);
+
+ if (end_ptr && *end_ptr != '\0') {
+ CLS_ERR("mul: invalid stored value: %s", stored_value.c_str());
+ return -EBADMSG;
+ }
+ }
+
+ value *= difference;
+
+ std::stringstream stream;
+ stream << std::setprecision(DECIMAL_PRECISION) << value;
+
+ bufferlist new_value;
+ new_value.append(stream.str());
+
+ return cls_cxx_map_set_val(hctx, key, &new_value);
+}
+
+CLS_INIT(numops)
+{
+ CLS_LOG(20, "loading cls_numops");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_add;
+ cls_method_handle_t h_mul;
+
+ cls_register("numops", &h_class);
+
+ cls_register_cxx_method(h_class, "add",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ add, &h_add);
+
+ cls_register_cxx_method(h_class, "mul",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ mul, &h_mul);
+}
diff --git a/src/cls/numops/cls_numops_client.cc b/src/cls/numops/cls_numops_client.cc
new file mode 100644
index 00000000..fa1a69f2
--- /dev/null
+++ b/src/cls/numops/cls_numops_client.cc
@@ -0,0 +1,79 @@
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2015 CERN
+ *
+ * Author: Joaquim Rocha <joaquim.rocha@cern.ch>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ */
+
+#include "cls/numops/cls_numops_client.h"
+#include "include/encoding.h"
+#include "include/rados/librados.hpp"
+
+#include <errno.h>
+#include <sstream>
+
+namespace rados {
+ namespace cls {
+ namespace numops {
+
+ int add(librados::IoCtx *ioctx,
+ const std::string& oid,
+ const std::string& key,
+ double value_to_add)
+ {
+ bufferlist in, out;
+ encode(key, in);
+
+ std::stringstream stream;
+ stream << value_to_add;
+
+ encode(stream.str(), in);
+
+ return ioctx->exec(oid, "numops", "add", in, out);
+ }
+
+ int sub(librados::IoCtx *ioctx,
+ const std::string& oid,
+ const std::string& key,
+ double value_to_subtract)
+ {
+ return add(ioctx, oid, key, -value_to_subtract);
+ }
+
+ int mul(librados::IoCtx *ioctx,
+ const std::string& oid,
+ const std::string& key,
+ double value_to_multiply)
+ {
+ bufferlist in, out;
+ encode(key, in);
+
+ std::stringstream stream;
+ stream << value_to_multiply;
+
+ encode(stream.str(), in);
+
+ return ioctx->exec(oid, "numops", "mul", in, out);
+ }
+
+ int div(librados::IoCtx *ioctx,
+ const std::string& oid,
+ const std::string& key,
+ double value_to_divide)
+ {
+ if (value_to_divide == 0)
+ return -EINVAL;
+
+ return mul(ioctx, oid, key, 1 / value_to_divide);
+ }
+
+ } // namespace numops
+ } // namespace cls
+} // namespace rados
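+
+/*
+ * Illustrative use (a sketch; values are stored as decimal strings in the
+ * object's omap, "stats_obj"/"hits" are made-up names, and ioctx is an open
+ * IoCtx):
+ *
+ *   int r = rados::cls::numops::add(&ioctx, "stats_obj", "hits", 1);
+ *   if (r == 0)
+ *     r = rados::cls::numops::mul(&ioctx, "stats_obj", "hits", 0.5);
+ */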
diff --git a/src/cls/numops/cls_numops_client.h b/src/cls/numops/cls_numops_client.h
new file mode 100644
index 00000000..0b0ccbe5
--- /dev/null
+++ b/src/cls/numops/cls_numops_client.h
@@ -0,0 +1,50 @@
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2015 CERN
+ *
+ * Author: Joaquim Rocha <joaquim.rocha@cern.ch>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef CEPH_LIBRBD_CLS_NUMOPS_CLIENT_H
+#define CEPH_LIBRBD_CLS_NUMOPS_CLIENT_H
+
+#include "include/rados/librados_fwd.hpp"
+#include <string>
+
+namespace rados {
+ namespace cls {
+ namespace numops {
+
+ extern int add(librados::IoCtx *ioctx,
+ const std::string& oid,
+ const std::string& key,
+ double value_to_add);
+
+ extern int sub(librados::IoCtx *ioctx,
+ const std::string& oid,
+ const std::string& key,
+ double value_to_subtract);
+
+ extern int mul(librados::IoCtx *ioctx,
+ const std::string& oid,
+ const std::string& key,
+ double value_to_multiply);
+
+ extern int div(librados::IoCtx *ioctx,
+ const std::string& oid,
+ const std::string& key,
+ double value_to_divide);
+
+ } // namespace numops
+ } // namespace cls
+} // namespace rados
+
+#endif // CEPH_LIBRBD_CLS_NUMOPS_CLIENT_H
+
diff --git a/src/cls/otp/cls_otp.cc b/src/cls/otp/cls_otp.cc
new file mode 100644
index 00000000..355e14da
--- /dev/null
+++ b/src/cls/otp/cls_otp.cc
@@ -0,0 +1,571 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+/** \file
+ *
+ * This is an OSD class that implements methods for management
+ * and use of otp (one time password).
+ *
+ */
+
+#include <errno.h>
+#include <map>
+#include <list>
+
+#include <boost/range/adaptor/reversed.hpp>
+
+#include <liboath/oath.h>
+
+#include "include/types.h"
+#include "include/utime.h"
+#include "objclass/objclass.h"
+
+#include "common/errno.h"
+#include "common/Clock.h"
+
+#include "cls/otp/cls_otp_ops.h"
+#include "cls/otp/cls_otp_types.h"
+
+
+using namespace rados::cls::otp;
+
+
+CLS_VER(1,0)
+CLS_NAME(otp)
+
+#define ATTEMPTS_PER_WINDOW 5
+
+static string otp_header_key = "header";
+static string otp_key_prefix = "otp/";
+
+struct otp_header {
+ set<string> ids;
+
+ otp_header() {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(ids, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(ids, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(otp_header)
+
+struct otp_instance {
+ otp_info_t otp;
+
+ list<otp_check_t> last_checks;
+ uint64_t last_success{0}; /* otp counter/step of last successful check */
+
+ otp_instance() {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(otp, bl);
+ encode(last_checks, bl);
+ encode(last_success, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(otp, bl);
+ decode(last_checks, bl);
+ decode(last_success, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void trim_expired(const ceph::real_time& now);
+ void check(const string& token, const string& val, bool *update);
+ bool verify(const ceph::real_time& timestamp, const string& val);
+
+ void find(const string& token, otp_check_t *result);
+};
+WRITE_CLASS_ENCODER(otp_instance)
+
+
+void otp_instance::trim_expired(const ceph::real_time& now)
+{
+ ceph::real_time window_start = now - std::chrono::seconds(otp.step_size);
+
+ while (!last_checks.empty() &&
+ last_checks.front().timestamp < window_start) {
+ last_checks.pop_front();
+ }
+}
+
+void otp_instance::check(const string& token, const string& val, bool *update)
+{
+ ceph::real_time now = ceph::real_clock::now();
+ trim_expired(now);
+
+ if (last_checks.size() >= ATTEMPTS_PER_WINDOW) {
+ /* too many attempts */
+ *update = false;
+ return;
+ }
+
+ otp_check_t check;
+ check.token = token;
+ check.timestamp = now;
+ check.result = (verify(now, val) ? OTP_CHECK_SUCCESS : OTP_CHECK_FAIL);
+
+ last_checks.push_back(check);
+
+ *update = true;
+}
+
+bool otp_instance::verify(const ceph::real_time& timestamp, const string& val)
+{
+ uint64_t index;
+ uint32_t secs = (uint32_t)ceph::real_clock::to_time_t(timestamp);
+ int result = oath_totp_validate2(otp.seed_bin.c_str(), otp.seed_bin.length(),
+ secs, otp.step_size, otp.time_ofs, otp.window,
+ nullptr /* otp pos */,
+ val.c_str());
+ if (result == OATH_INVALID_OTP ||
+ result < 0) {
+ CLS_LOG(20, "otp check failed, result=%d", result);
+ return false;
+ }
+
+ index = result + (secs - otp.time_ofs) / otp.step_size;
+
+ if (index <= last_success) { /* already used value */
+ CLS_LOG(20, "otp, use of old token: index=%lld last_success=%lld", (long long)index, (long long)last_success);
+ return false;
+ }
+
+ last_success = index;
+
+ return true;
+}
+
+void otp_instance::find(const string& token, otp_check_t *result)
+{
+ ceph::real_time now = real_clock::now();
+ trim_expired(now);
+
+ for (auto& entry : boost::adaptors::reverse(last_checks)) {
+ if (entry.token == token) {
+ *result = entry;
+ return;
+ }
+ }
+ result->token = token;
+ result->result = OTP_CHECK_UNKNOWN;
+ result->timestamp = now;
+}
+
+static int get_otp_instance(cls_method_context_t hctx, const string& id, otp_instance *instance)
+{
+ bufferlist bl;
+ string key = otp_key_prefix + id;
+
+ int r = cls_cxx_map_get_val(hctx, key, &bl);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading key %s: %d", key.c_str(), r);
+ }
+ return r;
+ }
+
+ try {
+ auto it = bl.cbegin();
+ decode(*instance, it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("ERROR: failed to decode %s", key.c_str());
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int write_otp_instance(cls_method_context_t hctx, const otp_instance& instance)
+{
+ string key = otp_key_prefix + instance.otp.id;
+
+ bufferlist bl;
+ encode(instance, bl);
+
+ int r = cls_cxx_map_set_val(hctx, key, &bl);
+ if (r < 0) {
+ CLS_ERR("ERROR: %s(): failed to store key (otp id=%s, r=%d)", __func__, instance.otp.id.c_str(), r);
+ return r;
+ }
+
+ return 0;
+}
+
+static int remove_otp_instance(cls_method_context_t hctx, const string& id)
+{
+ string key = otp_key_prefix + id;
+
+ int r = cls_cxx_map_remove_key(hctx, key);
+ if (r < 0) {
+ CLS_ERR("ERROR: %s(): failed to remove key (otp id=%s, r=%d)", __func__, id.c_str(), r);
+ return r;
+ }
+
+ return 0;
+}
+
+static int read_header(cls_method_context_t hctx, otp_header *h)
+{
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, otp_header_key, &bl);
+ if (r == -ENOENT || r == -ENODATA) {
+ *h = otp_header();
+ return 0;
+ }
+ if (r < 0) {
+ CLS_ERR("ERROR: %s(): failed to read header (r=%d)", __func__, r);
+ return r;
+ }
+
+ if (bl.length() == 0) {
+ *h = otp_header();
+ return 0;
+ }
+
+ auto iter = bl.cbegin();
+ try {
+ decode(*h, iter);
+ } catch (buffer::error& err) {
+ CLS_ERR("failed to decode otp_header");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int write_header(cls_method_context_t hctx, const otp_header& h)
+{
+ bufferlist bl;
+ encode(h, bl);
+
+ int r = cls_cxx_map_set_val(hctx, otp_header_key, &bl);
+ if (r < 0) {
+ CLS_ERR("failed to store header (r=%d)", r);
+ return r;
+ }
+
+ return 0;
+}
+
+static int parse_seed(const string& seed, SeedType seed_type, bufferlist *seed_bin)
+{
+ size_t slen = seed.length();
+ char secret[seed.length()];
+ char *psecret = secret;
+ int result;
+ bool need_free = false;
+
+ seed_bin->clear();
+
+ switch (seed_type) {
+ case OTP_SEED_BASE32:
+ need_free = true; /* oath_base32_decode allocates dest buffer */
+ result = oath_base32_decode(seed.c_str(), seed.length(),
+ &psecret, &slen);
+ break;
+ default: /* just assume hex is the default */
+ result = oath_hex2bin(seed.c_str(), psecret, &slen);
+ }
+ if (result != OATH_OK) {
+ CLS_LOG(20, "failed to parse seed");
+ return -EINVAL;
+ }
+
+ seed_bin->append(psecret, slen);
+
+ if (need_free) {
+ free(psecret);
+ }
+
+ return 0;
+}
+
+static int otp_set_op(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "%s", __func__);
+ cls_otp_set_otp_op op;
+ try {
+ auto iter = in->cbegin();
+ decode(op, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("ERROR: %s(): failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ otp_header h;
+ int r = read_header(hctx, &h);
+ if (r < 0) {
+ return r;
+ }
+
+ for (auto entry : op.entries) {
+ otp_instance instance;
+ r = get_otp_instance(hctx, entry.id, &instance);
+ if (r < 0 &&
+ r != -ENOENT) {
+ return r;
+ }
+ instance.otp = entry;
+
+ r = parse_seed(instance.otp.seed, instance.otp.seed_type, &instance.otp.seed_bin);
+ if (r < 0) {
+ return r;
+ }
+
+ r = write_otp_instance(hctx, instance);
+ if (r < 0) {
+ return r;
+ }
+
+ h.ids.insert(entry.id);
+ }
+
+ r = write_header(hctx, h);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+static int otp_remove_op(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "%s", __func__);
+ cls_otp_remove_otp_op op;
+ try {
+ auto iter = in->cbegin();
+ decode(op, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("ERROR: %s(): failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ otp_header h;
+ bool removed_existing = false;
+ int r = read_header(hctx, &h);
+ if (r < 0) {
+ return r;
+ }
+
+ for (auto id : op.ids) {
+ bool existed = (h.ids.find(id) != h.ids.end());
+ removed_existing = (removed_existing || existed);
+
+ if (!existed) {
+ continue;
+ }
+
+ r = remove_otp_instance(hctx, id);
+ if (r < 0) {
+ return r;
+ }
+
+ h.ids.erase(id);
+ }
+
+ if (removed_existing) {
+ r = write_header(hctx, h);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static int otp_get_op(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "%s", __func__);
+ cls_otp_get_otp_op op;
+ try {
+ auto iter = in->cbegin();
+ decode(op, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("ERROR: %s(): failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ cls_otp_get_otp_reply result;
+
+ otp_header h;
+ int r;
+
+ r = read_header(hctx, &h);
+ if (r < 0) {
+ return r;
+ }
+
+ if (op.get_all) {
+ op.ids.clear();
+ for (auto id : h.ids) {
+ op.ids.push_back(id);
+ }
+ }
+
+ for (auto id : op.ids) {
+ bool exists = (h.ids.find(id) != h.ids.end());
+
+ if (!exists) {
+ continue;
+ }
+
+ otp_instance instance;
+ r = get_otp_instance(hctx, id, &instance);
+ if (r < 0) {
+ return r;
+ }
+
+ result.found_entries.push_back(instance.otp);
+ }
+
+ encode(result, *out);
+
+ return 0;
+}
+
+static int otp_check_op(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "%s", __func__);
+ cls_otp_check_otp_op op;
+ try {
+ auto iter = in->cbegin();
+ decode(op, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("ERROR: %s(): failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ otp_header h;
+ int r;
+
+ otp_instance instance;
+
+ r = get_otp_instance(hctx, op.id, &instance);
+ if (r < 0) {
+ return r;
+ }
+
+ bool update{false};
+ instance.check(op.token, op.val, &update);
+
+ if (update) {
+ r = write_otp_instance(hctx, instance);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static int otp_get_result(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "%s", __func__);
+ cls_otp_check_otp_op op;
+ try {
+ auto iter = in->cbegin();
+ decode(op, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("ERROR: %s(): failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ otp_header h;
+ int r;
+
+ otp_instance instance;
+
+ r = get_otp_instance(hctx, op.id, &instance);
+ if (r < 0) {
+ return r;
+ }
+
+ cls_otp_get_result_reply reply;
+ instance.find(op.token, &reply.result);
+ encode(reply, *out);
+
+ return 0;
+}
+
+static int otp_get_current_time_op(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "%s", __func__);
+ cls_otp_get_current_time_op op;
+ try {
+ auto iter = in->cbegin();
+ decode(op, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("ERROR: %s(): failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ cls_otp_get_current_time_reply reply;
+ reply.time = real_clock::now();
+ encode(reply, *out);
+
+ return 0;
+}
+
+CLS_INIT(otp)
+{
+ CLS_LOG(20, "Loaded otp class!");
+
+ oath_init();
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_set_otp_op;
+ cls_method_handle_t h_get_otp_op;
+ cls_method_handle_t h_check_otp_op;
+ cls_method_handle_t h_get_result_op; /*
+ * checking an OTP and fetching the check result happen
+ * in two phases. The reason is that we need to record a
+ * failure internally; however, there's no way to both
+ * return a failure and persist the update, because a
+ * failure would cancel the operation, and write
+ * operations do not return a value. So we return
+ * success, potentially updating the status internally,
+ * and a subsequent request can then fetch the status. If
+ * that status reports a failure, authentication failed.
+ */
+ cls_method_handle_t h_remove_otp_op;
+ cls_method_handle_t h_get_current_time_op;
+
+ cls_register("otp", &h_class);
+ cls_register_cxx_method(h_class, "otp_set",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ otp_set_op, &h_set_otp_op);
+ cls_register_cxx_method(h_class, "otp_get",
+ CLS_METHOD_RD,
+ otp_get_op, &h_get_otp_op);
+ cls_register_cxx_method(h_class, "otp_check",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ otp_check_op, &h_check_otp_op);
+ cls_register_cxx_method(h_class, "otp_get_result",
+ CLS_METHOD_RD,
+ otp_get_result, &h_get_result_op);
+ cls_register_cxx_method(h_class, "otp_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ otp_remove_op, &h_remove_otp_op);
+ cls_register_cxx_method(h_class, "get_current_time",
+ CLS_METHOD_RD,
+ otp_get_current_time_op, &h_get_current_time_op);
+
+ return;
+}
diff --git a/src/cls/otp/cls_otp_client.cc b/src/cls/otp/cls_otp_client.cc
new file mode 100644
index 00000000..dc1efab4
--- /dev/null
+++ b/src/cls/otp/cls_otp_client.cc
@@ -0,0 +1,190 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "include/types.h"
+#include "msg/msg_types.h"
+#include "include/rados/librados.hpp"
+#include "include/utime.h"
+
+using namespace librados;
+
+#include "cls/otp/cls_otp_ops.h"
+#include "cls/otp/cls_otp_client.h"
+
+#include "common/random_string.h" /* for gen_rand_alphanumeric */
+
+namespace rados {
+ namespace cls {
+ namespace otp {
+
+ void OTP::create(librados::ObjectWriteOperation *rados_op,
+ const otp_info_t& config) {
+ cls_otp_set_otp_op op;
+ op.entries.push_back(config);
+ bufferlist in;
+ encode(op, in);
+ rados_op->exec("otp", "otp_set", in);
+ }
+
+ void OTP::set(librados::ObjectWriteOperation *rados_op,
+ const list<otp_info_t>& entries) {
+ cls_otp_set_otp_op op;
+ op.entries = entries;
+ bufferlist in;
+ encode(op, in);
+ rados_op->exec("otp", "otp_set", in);
+ }
+
+ void OTP::remove(librados::ObjectWriteOperation *rados_op,
+ const string& id) {
+ cls_otp_remove_otp_op op;
+ op.ids.push_back(id);
+ bufferlist in;
+ encode(op, in);
+ rados_op->exec("otp", "otp_remove", in);
+ }
+
+ int OTP::check(CephContext *cct, librados::IoCtx& ioctx, const string& oid,
+ const string& id, const string& val, otp_check_t *result) {
+ cls_otp_check_otp_op op;
+ op.id = id;
+ op.val = val;
+#define TOKEN_LEN 16
+ op.token = gen_rand_alphanumeric(cct, TOKEN_LEN);
+
+ bufferlist in;
+ bufferlist out;
+ encode(op, in);
+ int r = ioctx.exec(oid, "otp", "otp_check", in, out);
+ if (r < 0) {
+ return r;
+ }
+
+ cls_otp_get_result_op op2;
+ op2.token = op.token;
+ bufferlist in2;
+ bufferlist out2;
+ encode(op2, in2);
+ r = ioctx.exec(oid, "otp", "otp_get_result", in, out);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out2.cbegin();
+ cls_otp_get_result_reply ret;
+ try {
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ return -EBADMSG;
+ }
+
+ *result = ret.result;
+
+ return 0;
+ }
+
+ int OTP::get(librados::ObjectReadOperation *rop,
+ librados::IoCtx& ioctx, const string& oid,
+ const list<string> *ids, bool get_all, list<otp_info_t> *result) {
+ librados::ObjectReadOperation _rop;
+ if (!rop) {
+ rop = &_rop;
+ }
+ cls_otp_get_otp_op op;
+ if (ids) {
+ op.ids = *ids;
+ }
+ op.get_all = get_all;
+ bufferlist in;
+ bufferlist out;
+ int op_ret;
+ encode(op, in);
+ rop->exec("otp", "otp_get", in, &out, &op_ret);
+ int r = ioctx.operate(oid, rop, nullptr);
+ if (r < 0) {
+ return r;
+ }
+ if (op_ret < 0) {
+ return op_ret;
+ }
+
+ cls_otp_get_otp_reply ret;
+ auto iter = out.cbegin();
+ try {
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ return -EBADMSG;
+ }
+
+ *result = ret.found_entries;
+
+ return 0;
+ }
+
+ int OTP::get(librados::ObjectReadOperation *op,
+ librados::IoCtx& ioctx, const string& oid,
+ const string& id, otp_info_t *result) {
+ list<string> ids{ id };
+ list<otp_info_t> ret;
+
+ int r = get(op, ioctx, oid, &ids, false, &ret);
+ if (r < 0) {
+ return r;
+ }
+ if (ret.empty()) {
+ return -ENOENT;
+ }
+ *result = ret.front();
+
+ return 0;
+ }
+
+ int OTP::get_all(librados::ObjectReadOperation *op, librados::IoCtx& ioctx, const string& oid,
+ list<otp_info_t> *result) {
+ return get(op, ioctx, oid, nullptr, true, result);
+ }
+
+ int OTP::get_current_time(librados::IoCtx& ioctx, const string& oid,
+ ceph::real_time *result) {
+ cls_otp_get_current_time_op op;
+ bufferlist in;
+ bufferlist out;
+ int op_ret;
+ encode(op, in);
+ ObjectReadOperation rop;
+ rop.exec("otp", "get_current_time", in, &out, &op_ret);
+ int r = ioctx.operate(oid, &rop, nullptr);
+ if (r < 0) {
+ return r;
+ }
+ if (op_ret < 0) {
+ return op_ret;
+ }
+
+ cls_otp_get_current_time_reply ret;
+ auto iter = out.cbegin();
+ try {
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ return -EBADMSG;
+ }
+
+ *result = ret.time;
+
+ return 0;
+ }
+ } // namespace otp
+ } // namespace cls
+} // namespace rados
+
diff --git a/src/cls/otp/cls_otp_client.h b/src/cls/otp/cls_otp_client.h
new file mode 100644
index 00000000..f19c9459
--- /dev/null
+++ b/src/cls/otp/cls_otp_client.h
@@ -0,0 +1,56 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_OTP_CLIENT_H
+#define CEPH_CLS_OTP_CLIENT_H
+
+#include "include/rados/librados_fwd.hpp"
+#include "cls/otp/cls_otp_types.h"
+
+namespace rados {
+ namespace cls {
+ namespace otp {
+
+ class OTP {
+ public:
+ static void create(librados::ObjectWriteOperation *op, const otp_info_t& config);
+ static void set(librados::ObjectWriteOperation *op, const list<otp_info_t>& entries);
+ static void remove(librados::ObjectWriteOperation *op, const string& id);
+ static int get(librados::ObjectReadOperation *op,
+ librados::IoCtx& ioctx, const string& oid,
+ const list<string> *ids, bool get_all, list<otp_info_t> *result);
+ static int get(librados::ObjectReadOperation *op,
+ librados::IoCtx& ioctx, const string& oid,
+ const string& id, otp_info_t *result);
+ static int get_all(librados::ObjectReadOperation *op,
+ librados::IoCtx& ioctx, const string& oid,
+ list<otp_info_t> *result);
+ static int check(CephContext *cct, librados::IoCtx& ioctx, const string& oid,
+ const string& id, const string& val, otp_check_t *result);
+ static int get_current_time(librados::IoCtx& ioctx, const string& oid,
+ ceph::real_time *result);
+ };
+
+ class TOTPConfig {
+ otp_info_t config;
+ public:
+ TOTPConfig(const string& id, const string& seed) {
+ config.type = OTP_TOTP;
+ config.id = id;
+ config.seed = seed;
+ }
+ void set_step_size(int step_size) {
+ config.step_size = step_size;
+ }
+ void set_window(int window) {
+ config.window = window;
+ }
+ void get_config(otp_info_t *conf) {
+ *conf = config;
+ }
+ };
+ } // namespace otp
+ } // namespace cls
+} // namespace rados
+
+#endif
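
To make the API above concrete, here is a minimal usage sketch. It is illustrative only: the object name "rgw.otp.user123", the token id "device-1", the seed value, and the error handling are assumptions, and an open librados::IoCtx plus a CephContext are taken for granted.

    // Hypothetical caller of the cls_otp client API declared above.
    #include "include/rados/librados.hpp"
    #include "cls/otp/cls_otp_client.h"

    int register_and_check(CephContext *cct, librados::IoCtx& ioctx)
    {
      using namespace rados::cls::otp;

      // Describe a TOTP device; id and seed are made-up example values.
      TOTPConfig totp("device-1", "23456723");
      totp.set_step_size(30);   // 30-second time steps (the default)
      totp.set_window(2);       // tolerate +/- 2 steps of clock skew

      otp_info_t config;
      totp.get_config(&config);

      // Queue the create op and submit it against the OTP object.
      librados::ObjectWriteOperation wop;
      OTP::create(&wop, config);
      int r = ioctx.operate("rgw.otp.user123", &wop);
      if (r < 0) {
        return r;
      }

      // Verify a code the user typed in; the verdict lands in result.result.
      otp_check_t result;
      return OTP::check(cct, ioctx, "rgw.otp.user123", "device-1", "123456",
                        &result);
    }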
diff --git a/src/cls/otp/cls_otp_ops.h b/src/cls/otp/cls_otp_ops.h
new file mode 100644
index 00000000..51cec3eb
--- /dev/null
+++ b/src/cls/otp/cls_otp_ops.h
@@ -0,0 +1,166 @@
+#ifndef CEPH_CLS_OTP_OPS_H
+#define CEPH_CLS_OTP_OPS_H
+
+#include "include/types.h"
+#include "include/utime.h"
+#include "cls/otp/cls_otp_types.h"
+
+struct cls_otp_set_otp_op
+{
+ list<rados::cls::otp::otp_info_t> entries;
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_otp_set_otp_op)
+
+struct cls_otp_check_otp_op
+{
+ string id;
+ string val;
+ string token;
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(id, bl);
+ encode(val, bl);
+ encode(token, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(id, bl);
+ decode(val, bl);
+ decode(token, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_otp_check_otp_op)
+
+struct cls_otp_get_result_op
+{
+ string token;
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(token, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(token, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_otp_get_result_op)
+
+struct cls_otp_get_result_reply
+{
+ rados::cls::otp::otp_check_t result;
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(result, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(result, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_otp_get_result_reply)
+
+struct cls_otp_remove_otp_op
+{
+ list<string> ids;
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(ids, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(ids, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_otp_remove_otp_op)
+
+struct cls_otp_get_otp_op
+{
+ bool get_all{false};
+ list<string> ids;
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(get_all, bl);
+ encode(ids, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(get_all, bl);
+ decode(ids, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_otp_get_otp_op)
+
+struct cls_otp_get_otp_reply
+{
+ list<rados::cls::otp::otp_info_t> found_entries;
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(found_entries, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(found_entries, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_otp_get_otp_reply)
+
+struct cls_otp_get_current_time_op
+{
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_otp_get_current_time_op)
+
+struct cls_otp_get_current_time_reply
+{
+ ceph::real_time time;
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(time, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(time, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_otp_get_current_time_reply)
+
+#endif
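
These op/reply structs are what actually crosses the wire: the client encodes a request struct into a bufferlist, the OSD-side class decodes it, and replies travel back the same way. A small round-trip sketch, illustrative only; it simply exercises the encode()/decode() overloads that WRITE_CLASS_ENCODER generates:

    #include "cls/otp/cls_otp_ops.h"

    void roundtrip_example()
    {
      cls_otp_remove_otp_op op;
      op.ids.push_back("device-1");     // hypothetical token id

      bufferlist in;
      encode(op, in);                   // client side: serialize the request

      cls_otp_remove_otp_op decoded;
      auto iter = in.cbegin();
      decode(decoded, iter);            // OSD side: throws buffer::error on bad input
    }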
diff --git a/src/cls/otp/cls_otp_types.cc b/src/cls/otp/cls_otp_types.cc
new file mode 100644
index 00000000..b8a6e012
--- /dev/null
+++ b/src/cls/otp/cls_otp_types.cc
@@ -0,0 +1,67 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "objclass/objclass.h"
+#include "common/Formatter.h"
+#include "common/Clock.h"
+#include "common/ceph_json.h"
+
+#include "include/utime.h"
+
+#include "cls/otp/cls_otp_types.h"
+
+using namespace rados::cls::otp;
+
+void otp_info_t::dump(Formatter *f) const
+{
+ encode_json("type", (int)type, f);
+ encode_json("id", id, f);
+ encode_json("seed", seed, f);
+ string st;
+ switch (seed_type) {
+ case rados::cls::otp::OTP_SEED_HEX:
+ st = "hex";
+ break;
+ case rados::cls::otp::OTP_SEED_BASE32:
+ st = "base32";
+ break;
+ default:
+ st = "unknown";
+ }
+ encode_json("seed_type", st, f);
+ encode_json("time_ofs", time_ofs, f);
+ encode_json("step_size", step_size, f);
+ encode_json("window", window, f);
+}
+
+void otp_info_t::decode_json(JSONObj *obj)
+{
+ int t{-1};
+ JSONDecoder::decode_json("type", t, obj);
+ type = (OTPType)t;
+ JSONDecoder::decode_json("id", id, obj);
+ JSONDecoder::decode_json("seed", seed, obj);
+ string st;
+ JSONDecoder::decode_json("seed_type", st, obj);
+ if (st == "hex") {
+ seed_type = OTP_SEED_HEX;
+ } else if (st == "base32") {
+ seed_type = OTP_SEED_BASE32;
+ } else {
+ seed_type = OTP_SEED_UNKNOWN;
+ }
+ JSONDecoder::decode_json("time_ofs", time_ofs, obj);
+ JSONDecoder::decode_json("step_size", step_size, obj);
+ JSONDecoder::decode_json("window", window, obj);
+}
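
dump() and decode_json() hook otp_info_t into the common JSON helpers, which is how tooling can show or import entries in human-readable form. A sketch of serializing one entry; this is illustrative and assumes the encode_json()/JSONFormatter helpers from common/ceph_json.h and common/Formatter.h behave here as they do elsewhere in the tree:

    #include <iostream>
    #include "common/Formatter.h"
    #include "common/ceph_json.h"
    #include "cls/otp/cls_otp_types.h"

    void dump_example()
    {
      rados::cls::otp::otp_info_t info;
      info.id = "device-1";                              // made-up example values
      info.seed = "23456723";
      info.seed_type = rados::cls::otp::OTP_SEED_BASE32;

      ceph::JSONFormatter f(true);      // pretty-printed output
      encode_json("otp", info, &f);     // calls otp_info_t::dump() internally
      f.flush(std::cout);               // e.g. {"type":2,"id":"device-1",...}
    }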
diff --git a/src/cls/otp/cls_otp_types.h b/src/cls/otp/cls_otp_types.h
new file mode 100644
index 00000000..b542b5cb
--- /dev/null
+++ b/src/cls/otp/cls_otp_types.h
@@ -0,0 +1,132 @@
+#ifndef CEPH_CLS_OTP_TYPES_H
+#define CEPH_CLS_OTP_TYPES_H
+
+#include "include/encoding.h"
+#include "include/types.h"
+
+
+#define CLS_OTP_MAX_REPO_SIZE 100
+
+class JSONObj;
+
+namespace rados {
+ namespace cls {
+ namespace otp {
+
+ enum OTPType {
+ OTP_UNKNOWN = 0,
+ OTP_HOTP = 1, /* unsupported */
+ OTP_TOTP = 2,
+ };
+
+ enum SeedType {
+ OTP_SEED_UNKNOWN = 0,
+ OTP_SEED_HEX = 1,
+ OTP_SEED_BASE32 = 2,
+ };
+
+ struct otp_info_t {
+ OTPType type{OTP_TOTP};
+ string id;
+ string seed;
+ SeedType seed_type{OTP_SEED_UNKNOWN};
+ bufferlist seed_bin; /* parsed seed, built automatically by otp_set_op,
+ * not being json encoded/decoded on purpose
+ */
+ int32_t time_ofs{0};
+ uint32_t step_size{30}; /* length of each OTP time step, in seconds */
+ uint32_t window{2}; /* number of steps before/after the current one to also test */
+
+ otp_info_t() {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode((uint8_t)type, bl);
+ /* if we ever implement anything other than TOTP
+ * then we'll need to branch here */
+ encode(id, bl);
+ encode(seed, bl);
+ encode((uint8_t)seed_type, bl);
+ encode(seed_bin, bl);
+ encode(time_ofs, bl);
+ encode(step_size, bl);
+ encode(window, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ uint8_t t;
+ decode(t, bl);
+ type = (OTPType)t;
+ decode(id, bl);
+ decode(seed, bl);
+ uint8_t st;
+ decode(st, bl);
+ seed_type = (SeedType)st;
+ decode(seed_bin, bl);
+ decode(time_ofs, bl);
+ decode(step_size, bl);
+ decode(window, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ void decode_json(JSONObj *obj);
+ };
+ WRITE_CLASS_ENCODER(rados::cls::otp::otp_info_t)
+
+ enum OTPCheckResult {
+ OTP_CHECK_UNKNOWN = 0,
+ OTP_CHECK_SUCCESS = 1,
+ OTP_CHECK_FAIL = 2,
+ };
+
+ struct otp_check_t {
+ string token;
+ ceph::real_time timestamp;
+ OTPCheckResult result{OTP_CHECK_UNKNOWN};
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(token, bl);
+ encode(timestamp, bl);
+ encode((char)result, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(token, bl);
+ decode(timestamp, bl);
+ uint8_t t;
+ decode(t, bl);
+ result = (OTPCheckResult)t;
+ DECODE_FINISH(bl);
+ }
+ };
+ WRITE_CLASS_ENCODER(rados::cls::otp::otp_check_t)
+
+ struct otp_repo_t {
+ map<string, otp_info_t> entries;
+
+ otp_repo_t() {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ DECODE_FINISH(bl);
+ }
+ };
+ WRITE_CLASS_ENCODER(rados::cls::otp::otp_repo_t)
+ }
+ }
+}
+
+WRITE_CLASS_ENCODER(rados::cls::otp::otp_info_t)
+WRITE_CLASS_ENCODER(rados::cls::otp::otp_check_t)
+WRITE_CLASS_ENCODER(rados::cls::otp::otp_repo_t)
+
+#endif
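
For completeness, a sketch of the repository type in use. This is illustrative only: it just shows that otp_repo_t is a plain id-to-entry map that round-trips through the cls encoders, with CLS_OTP_MAX_REPO_SIZE suggesting an upper bound of 100 entries per object.

    #include "cls/otp/cls_otp_types.h"

    void repo_example()
    {
      rados::cls::otp::otp_repo_t repo;

      rados::cls::otp::otp_info_t entry;
      entry.id = "device-1";           // hypothetical token id
      entry.seed = "23456723";
      entry.seed_type = rados::cls::otp::OTP_SEED_BASE32;
      repo.entries[entry.id] = entry;  // keyed by token id

      // The whole repository serializes like any other cls type.
      bufferlist bl;
      encode(repo, bl);
    }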
diff --git a/src/cls/rbd/cls_rbd.cc b/src/cls/rbd/cls_rbd.cc
new file mode 100644
index 00000000..09f2e790
--- /dev/null
+++ b/src/cls/rbd/cls_rbd.cc
@@ -0,0 +1,7964 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+/** \file
+ *
+ * This is an OSD class that implements methods for
+ * use with rbd.
+ *
+ * Most of these deal with the rbd header object. Methods prefixed
+ * with old_ deal with the original rbd design, in which clients read
+ * and interpreted the header object directly.
+ *
+ * The new format is meant to be opaque to clients - all their
+ * interactions with non-data objects should go through this
+ * class. The OSD class interface leaves the class to implement its
+ * own argument and payload serialization/deserialization, so for ease
+ * of implementation we use the existing ceph encoding/decoding
+ * methods. Something like json might be preferable, but the rbd
+ * kernel module has to be able to understand the format as well. The
+ * datatypes exposed to the clients are strings, unsigned integers,
+ * and vectors of those types. The on-wire format can be found in
+ * src/include/encoding.h.
+ *
+ * The methods for interacting with the new format document their
+ * parameters as the client sees them - it would be silly to mention
+ * in each one that they take an input and an output bufferlist.
+ */
+#include "include/types.h"
+
+#include <algorithm>
+#include <errno.h>
+#include <sstream>
+
+#include "include/uuid.h"
+#include "common/bit_vector.hpp"
+#include "common/errno.h"
+#include "objclass/objclass.h"
+#include "osd/osd_types.h"
+#include "include/rbd_types.h"
+#include "include/rbd/object_map_types.h"
+
+#include "cls/rbd/cls_rbd.h"
+#include "cls/rbd/cls_rbd_types.h"
+
+
+/*
+ * Object keys:
+ *
+ * <partial list>
+ *
+ * stripe_unit: size in bytes of the stripe unit. if not present,
+ * the stripe unit is assumed to match the object size (1 << order).
+ *
+ * stripe_count: number of objects to stripe over before looping back.
+ * if not present or 1, striping is disabled. this is the default.
+ *
+ */
+
+CLS_VER(2,0)
+CLS_NAME(rbd)
+
+#define RBD_MAX_KEYS_READ 64
+#define RBD_SNAP_KEY_PREFIX "snapshot_"
+#define RBD_SNAP_CHILDREN_KEY_PREFIX "snap_children_"
+#define RBD_DIR_ID_KEY_PREFIX "id_"
+#define RBD_DIR_NAME_KEY_PREFIX "name_"
+#define RBD_METADATA_KEY_PREFIX "metadata_"
+
+namespace {
+
+uint64_t get_encode_features(cls_method_context_t hctx) {
+ uint64_t features = 0;
+ int8_t require_osd_release = cls_get_required_osd_release(hctx);
+ if (require_osd_release >= CEPH_RELEASE_NAUTILUS) {
+ features |= CEPH_FEATURE_SERVER_NAUTILUS;
+ }
+ return features;
+}
+
+bool calc_sparse_extent(const bufferptr &bp, size_t sparse_size,
+ uint64_t length, size_t *write_offset,
+ size_t *write_length, size_t *offset) {
+ size_t extent_size;
+ if (*offset + sparse_size > length) {
+ extent_size = length - *offset;
+ } else {
+ extent_size = sparse_size;
+ }
+
+ bufferptr extent(bp, *offset, extent_size);
+ *offset += extent_size;
+
+ bool extent_is_zero = extent.is_zero();
+ if (!extent_is_zero) {
+ *write_length += extent_size;
+ }
+ if (extent_is_zero && *write_length == 0) {
+ *write_offset += extent_size;
+ }
+
+ if ((extent_is_zero || *offset == length) && *write_length != 0) {
+ return true;
+ }
+ return false;
+}
+
+} // anonymous namespace
+
+static int snap_read_header(cls_method_context_t hctx, bufferlist& bl)
+{
+ unsigned snap_count = 0;
+ uint64_t snap_names_len = 0;
+ struct rbd_obj_header_ondisk *header;
+
+ CLS_LOG(20, "snapshots_list");
+
+ while (1) {
+ int len = sizeof(*header) +
+ snap_count * sizeof(struct rbd_obj_snap_ondisk) +
+ snap_names_len;
+
+ int rc = cls_cxx_read(hctx, 0, len, &bl);
+ if (rc < 0)
+ return rc;
+
+ if (bl.length() < sizeof(*header))
+ return -EINVAL;
+
+ header = (struct rbd_obj_header_ondisk *)bl.c_str();
+ ceph_assert(header);
+
+ if ((snap_count != header->snap_count) ||
+ (snap_names_len != header->snap_names_len)) {
+ snap_count = header->snap_count;
+ snap_names_len = header->snap_names_len;
+ bl.clear();
+ continue;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static void key_from_snap_id(snapid_t snap_id, string *out)
+{
+ ostringstream oss;
+ oss << RBD_SNAP_KEY_PREFIX
+ << std::setw(16) << std::setfill('0') << std::hex << snap_id;
+ *out = oss.str();
+}
+
+static snapid_t snap_id_from_key(const string &key) {
+ istringstream iss(key);
+ uint64_t id;
+ iss.ignore(strlen(RBD_SNAP_KEY_PREFIX)) >> std::hex >> id;
+ return id;
+}
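
Because the id is rendered as fixed-width, zero-padded hex, lexicographic omap ordering matches numeric snapshot-id ordering. A quick illustration of the round trip:

    // Illustrative: snap id 5 becomes a fixed-width key under the snapshot prefix.
    std::string key;
    key_from_snap_id(5, &key);            // key == "snapshot_0000000000000005"
    snapid_t id = snap_id_from_key(key);  // id == 5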
+
+template<typename T>
+static int read_key(cls_method_context_t hctx, const string &key, T *out)
+{
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, key, &bl);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading omap key %s: %s", key.c_str(), cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ try {
+ auto it = bl.cbegin();
+ decode(*out, it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("error decoding %s", key.c_str());
+ return -EIO;
+ }
+
+ return 0;
+}
+
+template <typename T>
+static int write_key(cls_method_context_t hctx, const string &key, const T &t) {
+ bufferlist bl;
+ encode(t, bl);
+
+ int r = cls_cxx_map_set_val(hctx, key, &bl);
+ if (r < 0) {
+ CLS_ERR("failed to set omap key: %s", key.c_str());
+ return r;
+ }
+ return 0;
+}
+
+template <typename T>
+static int write_key(cls_method_context_t hctx, const string &key, const T &t,
+ uint64_t features) {
+ bufferlist bl;
+ encode(t, bl, features);
+
+ int r = cls_cxx_map_set_val(hctx, key, &bl);
+ if (r < 0) {
+ CLS_ERR("failed to set omap key: %s", key.c_str());
+ return r;
+ }
+ return 0;
+}
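
Nearly every method below goes through these two helpers rather than touching the omap directly; the typical pattern inside a cls method looks like this (illustrative fragment):

    // Illustrative: read a value, modify it, and write it back via the helpers.
    uint64_t size = 0;
    int r = read_key(hctx, "size", &size);   // -ENOENT if unset, -EIO if undecodable
    if (r < 0 && r != -ENOENT) {
      return r;
    }

    r = write_key(hctx, "size", size);       // encode() and store the omap value
    if (r < 0) {
      return r;
    }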
+
+static int remove_key(cls_method_context_t hctx, const string &key) {
+ int r = cls_cxx_map_remove_key(hctx, key);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("failed to remove key: %s", key.c_str());
+ return r;
+ }
+ return 0;
+}
+
+static bool is_valid_id(const string &id) {
+ if (!id.size())
+ return false;
+ for (size_t i = 0; i < id.size(); ++i) {
+ if (!isalnum(id[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * verify that the header object exists
+ *
+ * @return 0 if the object exists, -ENOENT if it does not, or other error
+ */
+static int check_exists(cls_method_context_t hctx)
+{
+ uint64_t size;
+ time_t mtime;
+ return cls_cxx_stat(hctx, &size, &mtime);
+}
+
+namespace image {
+
+/**
+ * check that given feature(s) are set
+ *
+ * @param hctx context
+ * @param need features needed
+ * @return 0 if features are set, negative error (like ENOEXEC) otherwise
+ */
+int require_feature(cls_method_context_t hctx, uint64_t need)
+{
+ uint64_t features;
+ int r = read_key(hctx, "features", &features);
+ if (r == -ENOENT) // this implies it's an old-style image with no features
+ return -ENOEXEC;
+ if (r < 0)
+ return r;
+ if ((features & need) != need) {
+ CLS_LOG(10, "require_feature missing feature %llx, have %llx",
+ (unsigned long long)need, (unsigned long long)features);
+ return -ENOEXEC;
+ }
+ return 0;
+}
+
+std::string snap_children_key_from_snap_id(snapid_t snap_id)
+{
+ ostringstream oss;
+ oss << RBD_SNAP_CHILDREN_KEY_PREFIX
+ << std::setw(16) << std::setfill('0') << std::hex << snap_id;
+ return oss.str();
+}
+
+int set_op_features(cls_method_context_t hctx, uint64_t op_features,
+ uint64_t mask) {
+ uint64_t orig_features;
+ int r = read_key(hctx, "features", &orig_features);
+ if (r < 0) {
+ CLS_ERR("failed to read features off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ uint64_t orig_op_features = 0;
+ r = read_key(hctx, "op_features", &orig_op_features);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("Could not read op features off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ op_features = (orig_op_features & ~mask) | (op_features & mask);
+ CLS_LOG(10, "op_features=%" PRIu64 " orig_op_features=%" PRIu64,
+ op_features, orig_op_features);
+ if (op_features == orig_op_features) {
+ return 0;
+ }
+
+ uint64_t features = orig_features;
+ if (op_features == 0ULL) {
+ features &= ~RBD_FEATURE_OPERATIONS;
+
+ r = cls_cxx_map_remove_key(hctx, "op_features");
+ if (r == -ENOENT) {
+ r = 0;
+ }
+ } else {
+ features |= RBD_FEATURE_OPERATIONS;
+
+ bufferlist bl;
+ encode(op_features, bl);
+ r = cls_cxx_map_set_val(hctx, "op_features", &bl);
+ }
+
+ if (r < 0) {
+ CLS_ERR("error updating op features: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ if (features != orig_features) {
+ bufferlist bl;
+ encode(features, bl);
+ r = cls_cxx_map_set_val(hctx, "features", &bl);
+ if (r < 0) {
+ CLS_ERR("error updating features: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+int set_migration(cls_method_context_t hctx,
+ const cls::rbd::MigrationSpec &migration_spec, bool init) {
+ if (init) {
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, "migration", &bl);
+ if (r != -ENOENT) {
+ if (r == 0) {
+ CLS_LOG(10, "migration already set");
+ return -EEXIST;
+ }
+ CLS_ERR("failed to read migration off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ uint64_t features = 0;
+ r = read_key(hctx, "features", &features);
+ if (r == -ENOENT) {
+ CLS_LOG(20, "no features, assuming v1 format");
+ bufferlist header;
+ r = cls_cxx_read(hctx, 0, sizeof(RBD_HEADER_TEXT), &header);
+ if (r < 0) {
+ CLS_ERR("failed to read v1 header: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ if (header.length() != sizeof(RBD_HEADER_TEXT)) {
+ CLS_ERR("unrecognized v1 header format");
+ return -ENXIO;
+ }
+ if (memcmp(RBD_HEADER_TEXT, header.c_str(), header.length()) != 0) {
+ if (memcmp(RBD_MIGRATE_HEADER_TEXT, header.c_str(),
+ header.length()) == 0) {
+ CLS_LOG(10, "migration already set");
+ return -EEXIST;
+ } else {
+ CLS_ERR("unrecognized v1 header format");
+ return -ENXIO;
+ }
+ }
+ if (migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_SRC) {
+ CLS_LOG(10, "v1 format image can only be migration source");
+ return -EINVAL;
+ }
+
+ header.clear();
+ header.append(RBD_MIGRATE_HEADER_TEXT);
+ r = cls_cxx_write(hctx, 0, header.length(), &header);
+ if (r < 0) {
+ CLS_ERR("error updating v1 header: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ } else if (r < 0) {
+ CLS_ERR("failed to read features off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ } else if ((features & RBD_FEATURE_MIGRATING) != 0ULL) {
+ if (migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_DST) {
+ CLS_LOG(10, "migrating feature already set");
+ return -EEXIST;
+ }
+ } else {
+ features |= RBD_FEATURE_MIGRATING;
+ bl.clear();
+ encode(features, bl);
+ r = cls_cxx_map_set_val(hctx, "features", &bl);
+ if (r < 0) {
+ CLS_ERR("error updating features: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ }
+ }
+
+ bufferlist bl;
+ encode(migration_spec, bl);
+ int r = cls_cxx_map_set_val(hctx, "migration", &bl);
+ if (r < 0) {
+ CLS_ERR("error setting migration: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+int read_migration(cls_method_context_t hctx,
+ cls::rbd::MigrationSpec *migration_spec) {
+ uint64_t features = 0;
+ int r = read_key(hctx, "features", &features);
+ if (r == -ENOENT) {
+ CLS_LOG(20, "no features, assuming v1 format");
+ bufferlist header;
+ r = cls_cxx_read(hctx, 0, sizeof(RBD_HEADER_TEXT), &header);
+ if (r < 0) {
+ CLS_ERR("failed to read v1 header: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ if (header.length() != sizeof(RBD_HEADER_TEXT)) {
+ CLS_ERR("unrecognized v1 header format");
+ return -ENXIO;
+ }
+ if (memcmp(RBD_MIGRATE_HEADER_TEXT, header.c_str(), header.length()) != 0) {
+ if (memcmp(RBD_HEADER_TEXT, header.c_str(), header.length()) == 0) {
+ CLS_LOG(10, "migration feature not set");
+ return -EINVAL;
+ } else {
+ CLS_ERR("unrecognized v1 header format");
+ return -ENXIO;
+ }
+ }
+ if (migration_spec->header_type != cls::rbd::MIGRATION_HEADER_TYPE_SRC) {
+ CLS_LOG(10, "v1 format image can only be migration source");
+ return -EINVAL;
+ }
+ } else if (r < 0) {
+ CLS_ERR("failed to read features off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ } else if ((features & RBD_FEATURE_MIGRATING) == 0ULL) {
+ CLS_LOG(10, "migration feature not set");
+ return -EINVAL;
+ }
+
+ r = read_key(hctx, "migration", migration_spec);
+ if (r < 0) {
+ CLS_ERR("failed to read migration off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+int remove_migration(cls_method_context_t hctx) {
+ int r = remove_key(hctx, "migration");
+ if (r < 0) {
+ return r;
+ }
+
+ uint64_t features = 0;
+ r = read_key(hctx, "features", &features);
+ if (r == -ENOENT) {
+ CLS_LOG(20, "no features, assuming v1 format");
+ bufferlist header;
+ r = cls_cxx_read(hctx, 0, sizeof(RBD_MIGRATE_HEADER_TEXT), &header);
+ if (header.length() != sizeof(RBD_MIGRATE_HEADER_TEXT)) {
+ CLS_ERR("unrecognized v1 header format");
+ return -ENXIO;
+ }
+ if (memcmp(RBD_MIGRATE_HEADER_TEXT, header.c_str(), header.length()) != 0) {
+ if (memcmp(RBD_HEADER_TEXT, header.c_str(), header.length()) == 0) {
+ CLS_LOG(10, "migration feature not set");
+ return -EINVAL;
+ } else {
+ CLS_ERR("unrecognized v1 header format");
+ return -ENXIO;
+ }
+ }
+ header.clear();
+ header.append(RBD_HEADER_TEXT);
+ r = cls_cxx_write(hctx, 0, header.length(), &header);
+ if (r < 0) {
+ CLS_ERR("error updating v1 header: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ } else if (r < 0) {
+ CLS_ERR("failed to read features off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ } else if ((features & RBD_FEATURE_MIGRATING) == 0ULL) {
+ CLS_LOG(10, "migrating feature not set");
+ } else {
+ features &= ~RBD_FEATURE_MIGRATING;
+ bufferlist bl;
+ encode(features, bl);
+ r = cls_cxx_map_set_val(hctx, "features", &bl);
+ if (r < 0) {
+ CLS_ERR("error updating features: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+namespace snapshot {
+
+template<typename L>
+int iterate(cls_method_context_t hctx, L& lambda) {
+ int max_read = RBD_MAX_KEYS_READ;
+ string last_read = RBD_SNAP_KEY_PREFIX;
+ bool more = false;
+ do {
+ map<string, bufferlist> vals;
+ int r = cls_cxx_map_get_vals(hctx, last_read, RBD_SNAP_KEY_PREFIX,
+ max_read, &vals, &more);
+ if (r < 0) {
+ return r;
+ }
+
+ cls_rbd_snap snap_meta;
+ for (auto& val : vals) {
+ auto iter = val.second.cbegin();
+ try {
+ decode(snap_meta, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("error decoding snapshot metadata for snap : %s",
+ val.first.c_str());
+ return -EIO;
+ }
+
+ r = lambda(snap_meta);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ if (!vals.empty()) {
+ last_read = vals.rbegin()->first;
+ }
+ } while (more);
+
+ return 0;
+}
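
Callers pass a lambda that is invoked once per decoded cls_rbd_snap; returning a negative value aborts the walk (parent::detach below uses -EEXIST that way). A minimal illustrative caller:

    // Illustrative: count the image's snapshots with the iterate() helper above.
    uint64_t snap_count = 0;
    auto count_lambda = [&snap_count](const cls_rbd_snap& snap_meta) {
      ++snap_count;
      return 0;
    };
    int r = image::snapshot::iterate(hctx, count_lambda);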
+
+int write(cls_method_context_t hctx, const std::string& snap_key,
+ cls_rbd_snap&& snap) {
+ int r;
+ uint64_t encode_features = get_encode_features(hctx);
+ if (snap.migrate_parent_format(encode_features)) {
+ // ensure the normalized parent link exists before removing it from the
+ // snapshot record
+ cls_rbd_parent on_disk_parent;
+ r = read_key(hctx, "parent", &on_disk_parent);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+
+ if (!on_disk_parent.exists()) {
+ on_disk_parent = snap.parent;
+ on_disk_parent.head_overlap = std::nullopt;
+
+ r = write_key(hctx, "parent", on_disk_parent, encode_features);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ // only store the parent overlap in the snapshot
+ snap.parent_overlap = snap.parent.head_overlap;
+ snap.parent = {};
+ }
+
+ r = write_key(hctx, snap_key, snap, encode_features);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+} // namespace snapshot
+
+namespace parent {
+
+int attach(cls_method_context_t hctx, cls_rbd_parent parent,
+ bool reattach) {
+ int r = check_exists(hctx);
+ if (r < 0) {
+ CLS_LOG(20, "cls_rbd::image::parent::attach: child doesn't exist");
+ return r;
+ }
+
+ r = image::require_feature(hctx, RBD_FEATURE_LAYERING);
+ if (r < 0) {
+ CLS_LOG(20, "cls_rbd::image::parent::attach: child does not support "
+ "layering");
+ return r;
+ }
+
+ CLS_LOG(20, "cls_rbd::image::parent::attach: pool=%" PRIi64 ", ns=%s, id=%s, "
+ "snapid=%" PRIu64 ", size=%" PRIu64,
+ parent.pool_id, parent.pool_namespace.c_str(),
+ parent.image_id.c_str(), parent.snap_id.val,
+ parent.head_overlap.value_or(0ULL));
+ if (!parent.exists() || parent.head_overlap.value_or(0ULL) == 0ULL) {
+ return -EINVAL;
+ }
+
+ // make sure there isn't already a parent
+ cls_rbd_parent on_disk_parent;
+ r = read_key(hctx, "parent", &on_disk_parent);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+
+ auto on_disk_parent_without_overlap{on_disk_parent};
+ on_disk_parent_without_overlap.head_overlap = parent.head_overlap;
+
+ if (r == 0 &&
+ (on_disk_parent.head_overlap ||
+ on_disk_parent_without_overlap != parent) &&
+ !reattach) {
+ CLS_LOG(20, "cls_rbd::parent::attach: existing legacy parent "
+ "pool=%" PRIi64 ", ns=%s, id=%s, snapid=%" PRIu64 ", "
+ "overlap=%" PRIu64,
+ on_disk_parent.pool_id, on_disk_parent.pool_namespace.c_str(),
+ on_disk_parent.image_id.c_str(), on_disk_parent.snap_id.val,
+ on_disk_parent.head_overlap.value_or(0ULL));
+ return -EEXIST;
+ }
+
+ // our overlap is the min of our size and the parent's size.
+ uint64_t our_size;
+ r = read_key(hctx, "size", &our_size);
+ if (r < 0) {
+ return r;
+ }
+
+ parent.head_overlap = std::min(*parent.head_overlap, our_size);
+
+ r = write_key(hctx, "parent", parent, get_encode_features(hctx));
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+int detach(cls_method_context_t hctx, bool legacy_api) {
+ int r = check_exists(hctx);
+ if (r < 0) {
+ CLS_LOG(20, "cls_rbd::parent::detach: child doesn't exist");
+ return r;
+ }
+
+ uint64_t features;
+ r = read_key(hctx, "features", &features);
+ if (r == -ENOENT || ((features & RBD_FEATURE_LAYERING) == 0)) {
+ CLS_LOG(20, "cls_rbd::image::parent::detach: child does not support "
+ "layering");
+ return -ENOEXEC;
+ } else if (r < 0) {
+ return r;
+ }
+
+ cls_rbd_parent on_disk_parent;
+ r = read_key(hctx, "parent", &on_disk_parent);
+ if (r < 0) {
+ return r;
+ } else if (legacy_api && !on_disk_parent.pool_namespace.empty()) {
+ return -EXDEV;
+ } else if (!on_disk_parent.head_overlap) {
+ return -ENOENT;
+ }
+
+ auto detach_lambda = [hctx, features](const cls_rbd_snap& snap_meta) {
+ if (snap_meta.parent.pool_id != -1 || snap_meta.parent_overlap) {
+ if ((features & RBD_FEATURE_DEEP_FLATTEN) != 0ULL) {
+ // remove parent reference from snapshot
+ cls_rbd_snap snap_meta_copy = snap_meta;
+ snap_meta_copy.parent = {};
+ snap_meta_copy.parent_overlap = std::nullopt;
+
+ std::string snap_key;
+ key_from_snap_id(snap_meta_copy.id, &snap_key);
+ int r = snapshot::write(hctx, snap_key, std::move(snap_meta_copy));
+ if (r < 0) {
+ return r;
+ }
+ } else {
+ return -EEXIST;
+ }
+ }
+ return 0;
+ };
+
+ r = snapshot::iterate(hctx, detach_lambda);
+ bool has_child_snaps = (r == -EEXIST);
+ if (r < 0 && r != -EEXIST) {
+ return r;
+ }
+
+ int8_t require_osd_release = cls_get_required_osd_release(hctx);
+ if (has_child_snaps && require_osd_release >= CEPH_RELEASE_NAUTILUS) {
+ // remove overlap from HEAD revision but keep spec for snapshots
+ on_disk_parent.head_overlap = std::nullopt;
+ r = write_key(hctx, "parent", on_disk_parent, get_encode_features(hctx));
+ if (r < 0) {
+ return r;
+ }
+ } else {
+ r = remove_key(hctx, "parent");
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+ }
+
+ if (!has_child_snaps) {
+ // disable clone child op feature if no longer associated
+ r = set_op_features(hctx, 0, RBD_OPERATION_FEATURE_CLONE_CHILD);
+ if (r < 0) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+} // namespace parent
+} // namespace image
+
+/**
+ * Initialize the header with basic metadata.
+ * Extra features may initialize more fields in the future.
+ * Everything is stored as key/value pairs as omaps in the header object.
+ *
+ * If features the OSD does not understand are requested, -ENOSYS is
+ * returned.
+ *
+ * Input:
+ * @param size number of bytes in the image (uint64_t)
+ * @param order bits to shift to determine the size of data objects (uint8_t)
+ * @param features what optional things this image will use (uint64_t)
+ * @param object_prefix a prefix for all the data objects
+ * @param data_pool_id pool id where data objects are stored (int64_t)
+ *
+ * Output:
+ * @return 0 on success, negative error code on failure
+ */
+int create(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string object_prefix;
+ uint64_t features, size;
+ uint8_t order;
+ int64_t data_pool_id = -1;
+
+ try {
+ auto iter = in->cbegin();
+ decode(size, iter);
+ decode(order, iter);
+ decode(features, iter);
+ decode(object_prefix, iter);
+ if (!iter.end()) {
+ decode(data_pool_id, iter);
+ }
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "create object_prefix=%s size=%llu order=%u features=%llu",
+ object_prefix.c_str(), (unsigned long long)size, order,
+ (unsigned long long)features);
+
+ if (features & ~RBD_FEATURES_ALL) {
+ return -ENOSYS;
+ }
+
+ if (!object_prefix.size()) {
+ return -EINVAL;
+ }
+
+ bufferlist stored_prefixbl;
+ int r = cls_cxx_map_get_val(hctx, "object_prefix", &stored_prefixbl);
+ if (r != -ENOENT) {
+ CLS_ERR("reading object_prefix returned %d", r);
+ return -EEXIST;
+ }
+
+ bufferlist sizebl;
+ bufferlist orderbl;
+ bufferlist featuresbl;
+ bufferlist object_prefixbl;
+ bufferlist snap_seqbl;
+ bufferlist timestampbl;
+ uint64_t snap_seq = 0;
+ utime_t timestamp = ceph_clock_now();
+ encode(size, sizebl);
+ encode(order, orderbl);
+ encode(features, featuresbl);
+ encode(object_prefix, object_prefixbl);
+ encode(snap_seq, snap_seqbl);
+ encode(timestamp, timestampbl);
+
+ map<string, bufferlist> omap_vals;
+ omap_vals["size"] = sizebl;
+ omap_vals["order"] = orderbl;
+ omap_vals["features"] = featuresbl;
+ omap_vals["object_prefix"] = object_prefixbl;
+ omap_vals["snap_seq"] = snap_seqbl;
+ omap_vals["create_timestamp"] = timestampbl;
+ omap_vals["access_timestamp"] = timestampbl;
+ omap_vals["modify_timestamp"] = timestampbl;
+
+ if ((features & RBD_FEATURE_OPERATIONS) != 0ULL) {
+ CLS_ERR("Attempting to set internal feature: operations");
+ return -EINVAL;
+ }
+
+ if (features & RBD_FEATURE_DATA_POOL) {
+ if (data_pool_id == -1) {
+ CLS_ERR("data pool not provided with feature enabled");
+ return -EINVAL;
+ }
+
+ bufferlist data_pool_id_bl;
+ encode(data_pool_id, data_pool_id_bl);
+ omap_vals["data_pool_id"] = data_pool_id_bl;
+ } else if (data_pool_id != -1) {
+ CLS_ERR("data pool provided with feature disabled");
+ return -EINVAL;
+ }
+
+ r = cls_cxx_map_set_vals(hctx, &omap_vals);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
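
On the client side, the input bufferlist for this method is built by encoding the fields in the same order they are decoded above. A rough sketch; the values are made up and this mirrors the decode order rather than quoting librbd's actual helper:

    // Illustrative: building the input for an "rbd.create" call by hand.
    bufferlist in, out;
    uint64_t size = 1ull << 30;                       // 1 GiB image
    uint8_t order = 22;                               // 4 MiB objects
    uint64_t features = 0;
    std::string object_prefix = "rbd_data.10abcdef";  // hypothetical prefix
    int64_t data_pool_id = -1;                        // no separate data pool

    encode(size, in);
    encode(order, in);
    encode(features, in);
    encode(object_prefix, in);
    encode(data_pool_id, in);                         // optional trailing field

    // then, from a librados client: ioctx.exec(header_oid, "rbd", "create", in, out);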
+
+/**
+ * Input:
+ * @param snap_id which snapshot to query, or CEPH_NOSNAP (uint64_t) (deprecated)
+ * @param read_only true if the image will be used read-only (bool)
+ *
+ * Output:
+ * @param features bitmask of enabled features for the given snapshot (uint64_t)
+ * @param incompatible incompatible feature bits
+ * @returns 0 on success, negative error code on failure
+ */
+int get_features(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ bool read_only = false;
+
+ auto iter = in->cbegin();
+ try {
+ uint64_t snap_id;
+ decode(snap_id, iter);
+ if (!iter.end()) {
+ decode(read_only, iter);
+ }
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "get_features read_only=%d", read_only);
+
+ uint64_t features;
+ int r = read_key(hctx, "features", &features);
+ if (r < 0) {
+ CLS_ERR("failed to read features off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ uint64_t incompatible = (read_only ? features & RBD_FEATURES_INCOMPATIBLE :
+ features & RBD_FEATURES_RW_INCOMPATIBLE);
+ encode(features, *out);
+ encode(incompatible, *out);
+ return 0;
+}
+
+/**
+ * set the image features
+ *
+ * Input:
+ * @param features image features
+ * @param mask image feature mask
+ *
+ * Output:
+ * none
+ *
+ * @returns 0 on success, negative error code upon failure
+ */
+int set_features(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t features;
+ uint64_t mask;
+ auto iter = in->cbegin();
+ try {
+ decode(features, iter);
+ decode(mask, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ // check that features exists to make sure this is a header object
+ // that was created correctly
+ uint64_t orig_features = 0;
+ int r = read_key(hctx, "features", &orig_features);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("Could not read image's features off disk: %s",
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ if ((mask & RBD_FEATURES_INTERNAL) != 0ULL) {
+ CLS_ERR("Attempting to set internal feature: %" PRIu64,
+ static_cast<uint64_t>(mask & RBD_FEATURES_INTERNAL));
+ return -EINVAL;
+ }
+
+ // newer clients might attempt to mask off features we don't support
+ mask &= RBD_FEATURES_ALL;
+
+ uint64_t enabled_features = features & mask;
+ if ((enabled_features & RBD_FEATURES_MUTABLE) != enabled_features) {
+ CLS_ERR("Attempting to enable immutable feature: %" PRIu64,
+ static_cast<uint64_t>(enabled_features & ~RBD_FEATURES_MUTABLE));
+ return -EINVAL;
+ }
+
+ uint64_t disabled_features = ~features & mask;
+ uint64_t disable_mask = (RBD_FEATURES_MUTABLE | RBD_FEATURES_DISABLE_ONLY);
+ if ((disabled_features & disable_mask) != disabled_features) {
+ CLS_ERR("Attempting to disable immutable feature: %" PRIu64,
+ enabled_features & ~disable_mask);
+ return -EINVAL;
+ }
+
+ features = (orig_features & ~mask) | (features & mask);
+ CLS_LOG(10, "set_features features=%" PRIu64 " orig_features=%" PRIu64,
+ features, orig_features);
+
+ bufferlist bl;
+ encode(features, bl);
+ r = cls_cxx_map_set_val(hctx, "features", &bl);
+ if (r < 0) {
+ CLS_ERR("error updating features: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param snap_id which snapshot to query, or CEPH_NOSNAP (uint64_t)
+ *
+ * Output:
+ * @param order bits to shift to get the size of data objects (uint8_t)
+ * @param size size of the image in bytes for the given snapshot (uint64_t)
+ * @returns 0 on success, negative error code on failure
+ */
+int get_size(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t snap_id, size;
+ uint8_t order;
+
+ auto iter = in->cbegin();
+ try {
+ decode(snap_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "get_size snap_id=%llu", (unsigned long long)snap_id);
+
+ int r = read_key(hctx, "order", &order);
+ if (r < 0) {
+ CLS_ERR("failed to read the order off of disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ if (snap_id == CEPH_NOSNAP) {
+ r = read_key(hctx, "size", &size);
+ if (r < 0) {
+ CLS_ERR("failed to read the image's size off of disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ } else {
+ cls_rbd_snap snap;
+ string snapshot_key;
+ key_from_snap_id(snap_id, &snapshot_key);
+ int r = read_key(hctx, snapshot_key, &snap);
+ if (r < 0)
+ return r;
+
+ size = snap.image_size;
+ }
+
+ encode(order, *out);
+ encode(size, *out);
+
+ return 0;
+}
+
+/**
+ * Input:
+ * @param size new capacity of the image in bytes (uint64_t)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int set_size(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t size;
+
+ auto iter = in->cbegin();
+ try {
+ decode(size, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ // check that size exists to make sure this is a header object
+ // that was created correctly
+ uint64_t orig_size;
+ int r = read_key(hctx, "size", &orig_size);
+ if (r < 0) {
+ CLS_ERR("Could not read image's size off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ CLS_LOG(20, "set_size size=%llu orig_size=%llu", (unsigned long long)size,
+ (unsigned long long)orig_size);
+
+ bufferlist sizebl;
+ encode(size, sizebl);
+ r = cls_cxx_map_set_val(hctx, "size", &sizebl);
+ if (r < 0) {
+ CLS_ERR("error writing snapshot metadata: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ // if we are shrinking, and have a parent, shrink our overlap with
+ // the parent, too.
+ if (size < orig_size) {
+ cls_rbd_parent parent;
+ r = read_key(hctx, "parent", &parent);
+ if (r == -ENOENT)
+ r = 0;
+ if (r < 0)
+ return r;
+ if (parent.exists() && parent.head_overlap.value_or(0ULL) > size) {
+ parent.head_overlap = size;
+ r = write_key(hctx, "parent", parent, get_encode_features(hctx));
+ if (r < 0) {
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * get the current protection status of the specified snapshot
+ *
+ * Input:
+ * @param snap_id (uint64_t) which snapshot to get the status of
+ *
+ * Output:
+ * @param status (uint8_t) one of:
+ * RBD_PROTECTION_STATUS_{PROTECTED, UNPROTECTED, UNPROTECTING}
+ *
+ * @returns 0 on success, negative error code on failure
+ * @returns -EINVAL if snapid is CEPH_NOSNAP
+ */
+int get_protection_status(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out)
+{
+ snapid_t snap_id;
+
+ auto iter = in->cbegin();
+ try {
+ decode(snap_id, iter);
+ } catch (const buffer::error &err) {
+ CLS_LOG(20, "get_protection_status: invalid decode");
+ return -EINVAL;
+ }
+
+ int r = check_exists(hctx);
+ if (r < 0)
+ return r;
+
+ CLS_LOG(20, "get_protection_status snap_id=%llu",
+ (unsigned long long)snap_id.val);
+
+ if (snap_id == CEPH_NOSNAP)
+ return -EINVAL;
+
+ cls_rbd_snap snap;
+ string snapshot_key;
+ key_from_snap_id(snap_id.val, &snapshot_key);
+ r = read_key(hctx, snapshot_key, &snap);
+ if (r < 0) {
+ CLS_ERR("could not read key for snapshot id %" PRIu64, snap_id.val);
+ return r;
+ }
+
+ if (snap.protection_status >= RBD_PROTECTION_STATUS_LAST) {
+ CLS_ERR("invalid protection status for snap id %llu: %u",
+ (unsigned long long)snap_id.val, snap.protection_status);
+ return -EIO;
+ }
+
+ encode(snap.protection_status, *out);
+ return 0;
+}
+
+/**
+ * set the protection status of a snapshot
+ *
+ * Input:
+ * @param snapid (uint64_t) which snapshot to set the status of
+ * @param status (uint8_t) one of:
+ * RBD_PROTECTION_STATUS_{PROTECTED, UNPROTECTED, UNPROTECTING}
+ *
+ * @returns 0 on success, negative error code on failure
+ * @returns -EINVAL if snapid is CEPH_NOSNAP
+ */
+int set_protection_status(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out)
+{
+ snapid_t snap_id;
+ uint8_t status;
+
+ auto iter = in->cbegin();
+ try {
+ decode(snap_id, iter);
+ decode(status, iter);
+ } catch (const buffer::error &err) {
+ CLS_LOG(20, "set_protection_status: invalid decode");
+ return -EINVAL;
+ }
+
+ int r = check_exists(hctx);
+ if (r < 0)
+ return r;
+
+ r = image::require_feature(hctx, RBD_FEATURE_LAYERING);
+ if (r < 0) {
+ CLS_LOG(20, "image does not support layering");
+ return r;
+ }
+
+ CLS_LOG(20, "set_protection_status snapid=%llu status=%u",
+ (unsigned long long)snap_id.val, status);
+
+ if (snap_id == CEPH_NOSNAP)
+ return -EINVAL;
+
+ if (status >= RBD_PROTECTION_STATUS_LAST) {
+ CLS_LOG(10, "invalid protection status for snap id %llu: %u",
+ (unsigned long long)snap_id.val, status);
+ return -EINVAL;
+ }
+
+ cls_rbd_snap snap;
+ string snapshot_key;
+ key_from_snap_id(snap_id.val, &snapshot_key);
+ r = read_key(hctx, snapshot_key, &snap);
+ if (r < 0) {
+ CLS_ERR("could not read key for snapshot id %" PRIu64, snap_id.val);
+ return r;
+ }
+
+ snap.protection_status = status;
+ r = image::snapshot::write(hctx, snapshot_key, std::move(snap));
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * get striping parameters
+ *
+ * Input:
+ * none
+ *
+ * Output:
+ * @param stripe unit (bytes)
+ * @param stripe count (num objects)
+ *
+ * @returns 0 on success
+ */
+int get_stripe_unit_count(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ int r = check_exists(hctx);
+ if (r < 0)
+ return r;
+
+ CLS_LOG(20, "get_stripe_unit_count");
+
+ r = image::require_feature(hctx, RBD_FEATURE_STRIPINGV2);
+ if (r < 0)
+ return r;
+
+ uint64_t stripe_unit = 0, stripe_count = 0;
+ r = read_key(hctx, "stripe_unit", &stripe_unit);
+ if (r == -ENOENT) {
+ // default to object size
+ uint8_t order;
+ r = read_key(hctx, "order", &order);
+ if (r < 0) {
+ CLS_ERR("failed to read the order off of disk: %s", cpp_strerror(r).c_str());
+ return -EIO;
+ }
+ stripe_unit = 1ull << order;
+ }
+ if (r < 0)
+ return r;
+ r = read_key(hctx, "stripe_count", &stripe_count);
+ if (r == -ENOENT) {
+ // default to 1
+ stripe_count = 1;
+ r = 0;
+ }
+ if (r < 0)
+ return r;
+
+ encode(stripe_unit, *out);
+ encode(stripe_count, *out);
+ return 0;
+}
+
+/**
+ * set striping parameters
+ *
+ * Input:
+ * @param stripe unit (bytes)
+ * @param stripe count (num objects)
+ *
+ * @returns 0 on success
+ */
+int set_stripe_unit_count(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t stripe_unit, stripe_count;
+
+ auto iter = in->cbegin();
+ try {
+ decode(stripe_unit, iter);
+ decode(stripe_count, iter);
+ } catch (const buffer::error &err) {
+ CLS_LOG(20, "set_stripe_unit_count: invalid decode");
+ return -EINVAL;
+ }
+
+ if (!stripe_count || !stripe_unit)
+ return -EINVAL;
+
+ int r = check_exists(hctx);
+ if (r < 0)
+ return r;
+
+ CLS_LOG(20, "set_stripe_unit_count");
+
+ r = image::require_feature(hctx, RBD_FEATURE_STRIPINGV2);
+ if (r < 0)
+ return r;
+
+ uint8_t order;
+ r = read_key(hctx, "order", &order);
+ if (r < 0) {
+ CLS_ERR("failed to read the order off of disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ if ((1ull << order) % stripe_unit || stripe_unit > (1ull << order)) {
+ CLS_ERR("stripe unit %llu is not a factor of the object size %llu",
+ (unsigned long long)stripe_unit, 1ull << order);
+ return -EINVAL;
+ }
+
+ bufferlist bl, bl2;
+ encode(stripe_unit, bl);
+ r = cls_cxx_map_set_val(hctx, "stripe_unit", &bl);
+ if (r < 0) {
+ CLS_ERR("error writing stripe_unit metadata: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ encode(stripe_count, bl2);
+ r = cls_cxx_map_set_val(hctx, "stripe_count", &bl2);
+ if (r < 0) {
+ CLS_ERR("error writing stripe_count metadata: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+int get_create_timestamp(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "get_create_timestamp");
+
+ utime_t timestamp;
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, "create_timestamp", &bl);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading create_timestamp: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ } else {
+ try {
+ auto it = bl.cbegin();
+ decode(timestamp, it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode create_timestamp");
+ return -EIO;
+ }
+ }
+
+ encode(timestamp, *out);
+ return 0;
+}
+
+/**
+ * get the image access timestamp
+ *
+ * Input:
+ * @param none
+ *
+ * Output:
+ * @param timestamp the image access timestamp
+ *
+ * @returns 0 on success, negative error code upon failure
+ */
+int get_access_timestamp(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "get_access_timestamp");
+
+ utime_t timestamp;
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, "access_timestamp", &bl);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading access_timestamp: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ } else {
+ try {
+ auto it = bl.cbegin();
+ decode(timestamp, it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode access_timestamp");
+ return -EIO;
+ }
+ }
+
+ encode(timestamp, *out);
+ return 0;
+}
+
+/**
+ * get the image modify timestamp
+ *
+ * Input:
+ * @param none
+ *
+ * Output:
+ * @param timestamp the image modify timestamp
+ *
+ * @returns 0 on success, negative error code upon failure
+ */
+int get_modify_timestamp(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "get_modify_timestamp");
+
+ utime_t timestamp;
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, "modify_timestamp", &bl);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading modify_timestamp: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ } else {
+ try {
+ auto it = bl.cbegin();
+ decode(timestamp, it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode modify_timestamp");
+ return -EIO;
+ }
+ }
+
+ encode(timestamp, *out);
+ return 0;
+}
+
+
+/**
+ * get the image flags
+ *
+ * Input:
+ * @param snap_id which snapshot to query, or CEPH_NOSNAP (uint64_t)
+ *
+ * Output:
+ * @param flags image flags
+ *
+ * @returns 0 on success, negative error code upon failure
+ */
+int get_flags(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t snap_id;
+ auto iter = in->cbegin();
+ try {
+ decode(snap_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "get_flags snap_id=%llu", (unsigned long long)snap_id);
+
+ uint64_t flags = 0;
+ if (snap_id == CEPH_NOSNAP) {
+ int r = read_key(hctx, "flags", &flags);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("failed to read flags off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ } else {
+ cls_rbd_snap snap;
+ string snapshot_key;
+ key_from_snap_id(snap_id, &snapshot_key);
+ int r = read_key(hctx, snapshot_key, &snap);
+ if (r < 0) {
+ return r;
+ }
+ flags = snap.flags;
+ }
+
+ encode(flags, *out);
+ return 0;
+}
+
+/**
+ * set the image flags
+ *
+ * Input:
+ * @param flags image flags
+ * @param mask image flag mask
+ * @param snap_id which snapshot to update, or CEPH_NOSNAP (uint64_t)
+ *
+ * Output:
+ * none
+ *
+ * @returns 0 on success, negative error code upon failure
+ */
+int set_flags(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t flags;
+ uint64_t mask;
+ uint64_t snap_id = CEPH_NOSNAP;
+ auto iter = in->cbegin();
+ try {
+ decode(flags, iter);
+ decode(mask, iter);
+ if (!iter.end()) {
+ decode(snap_id, iter);
+ }
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ // read the existing flags (from the image header or the given snapshot)
+ // so the mask can be applied on top of them
+ int r;
+ uint64_t orig_flags = 0;
+ cls_rbd_snap snap_meta;
+ string snap_meta_key;
+ if (snap_id == CEPH_NOSNAP) {
+ r = read_key(hctx, "flags", &orig_flags);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("Could not read image's flags off disk: %s",
+ cpp_strerror(r).c_str());
+ return r;
+ }
+ } else {
+ key_from_snap_id(snap_id, &snap_meta_key);
+ r = read_key(hctx, snap_meta_key, &snap_meta);
+ if (r < 0) {
+ CLS_ERR("Could not read snapshot: snap_id=%" PRIu64 ": %s",
+ snap_id, cpp_strerror(r).c_str());
+ return r;
+ }
+ orig_flags = snap_meta.flags;
+ }
+
+ flags = (orig_flags & ~mask) | (flags & mask);
+ CLS_LOG(20, "set_flags snap_id=%" PRIu64 ", orig_flags=%" PRIu64 ", "
+ "new_flags=%" PRIu64 ", mask=%" PRIu64, snap_id, orig_flags,
+ flags, mask);
+
+ if (snap_id == CEPH_NOSNAP) {
+ r = write_key(hctx, "flags", flags);
+ } else {
+ snap_meta.flags = flags;
+ r = image::snapshot::write(hctx, snap_meta_key, std::move(snap_meta));
+ }
+
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Get the operation-based image features
+ *
+ * Input:
+ *
+ * Output:
+ * @param bitmask of enabled op features (uint64_t)
+ * @returns 0 on success, negative error code on failure
+ */
+int op_features_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "op_features_get");
+
+ uint64_t op_features = 0;
+ int r = read_key(hctx, "op_features", &op_features);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("failed to read op features off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ encode(op_features, *out);
+ return 0;
+}
+
+/**
+ * Set the operation-based image features
+ *
+ * Input:
+ * @param op_features image op features
+ * @param mask image op feature mask
+ *
+ * Output:
+ * none
+ *
+ * @returns 0 on success, negative error code upon failure
+ */
+int op_features_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t op_features;
+ uint64_t mask;
+ auto iter = in->cbegin();
+ try {
+ decode(op_features, iter);
+ decode(mask, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ uint64_t unsupported_op_features = (mask & ~RBD_OPERATION_FEATURES_ALL);
+ if (unsupported_op_features != 0ULL) {
+ CLS_ERR("unsupported op features: %" PRIu64, unsupported_op_features);
+ return -EINVAL;
+ }
+
+ return image::set_op_features(hctx, op_features, mask);
+}
+
+/**
+ * get the current parent, if any
+ *
+ * Input:
+ * @param snap_id which snapshot to query, or CEPH_NOSNAP (uint64_t)
+ *
+ * Output:
+ * @param pool parent pool id (-1 if parent does not exist)
+ * @param image parent image id
+ * @param snapid parent snapid
+ * @param size portion of parent mapped under the child
+ *
+ * @returns 0 on success (including when no parent exists), negative error code on failure
+ */
+int get_parent(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t snap_id;
+
+ auto iter = in->cbegin();
+ try {
+ decode(snap_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = check_exists(hctx);
+ if (r < 0) {
+ return r;
+ }
+
+ CLS_LOG(20, "get_parent snap_id=%" PRIu64, snap_id);
+
+ cls_rbd_parent parent;
+ r = image::require_feature(hctx, RBD_FEATURE_LAYERING);
+ if (r == 0) {
+ r = read_key(hctx, "parent", &parent);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ } else if (!parent.pool_namespace.empty()) {
+ return -EXDEV;
+ }
+
+ if (snap_id != CEPH_NOSNAP) {
+ cls_rbd_snap snap;
+ std::string snapshot_key;
+ key_from_snap_id(snap_id, &snapshot_key);
+ r = read_key(hctx, snapshot_key, &snap);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+
+ if (snap.parent.exists()) {
+ // legacy format where full parent spec is written within
+ // each snapshot record
+ parent = snap.parent;
+ } else if (snap.parent_overlap) {
+ // normalized parent reference
+ if (!parent.exists()) {
+ CLS_ERR("get_parent: snap_id=%" PRIu64 ": invalid parent spec",
+ snap_id);
+ return -EINVAL;
+ }
+ parent.head_overlap = *snap.parent_overlap;
+ } else {
+ // snapshot doesn't have associated parent
+ parent = {};
+ }
+ }
+ }
+
+ encode(parent.pool_id, *out);
+ encode(parent.image_id, *out);
+ encode(parent.snap_id, *out);
+ encode(parent.head_overlap.value_or(0ULL), *out);
+ return 0;
+}
+
+/**
+ * set the image parent
+ *
+ * Input:
+ * @param pool parent pool
+ * @param id parent image id
+ * @param snapid parent snapid
+ * @param size parent size
+ *
+ * @returns 0 on success, or negative error code
+ */
+int set_parent(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ cls_rbd_parent parent;
+ auto iter = in->cbegin();
+ try {
+ decode(parent.pool_id, iter);
+ decode(parent.image_id, iter);
+ decode(parent.snap_id, iter);
+
+ uint64_t overlap;
+ decode(overlap, iter);
+ parent.head_overlap = overlap;
+ } catch (const buffer::error &err) {
+ CLS_LOG(20, "cls_rbd::set_parent: invalid decode");
+ return -EINVAL;
+ }
+
+ int r = image::parent::attach(hctx, parent, false);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+
+/**
+ * remove the parent pointer
+ *
+ * This can only happen on the head, not on a snapshot. No arguments.
+ *
+ * @returns 0 on success, negative error code on failure.
+ */
+int remove_parent(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ int r = image::parent::detach(hctx, true);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * @param parent spec (cls::rbd::ParentImageSpec)
+ * @returns 0 on success, negative error code on failure
+ */
+int parent_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out) {
+ int r = check_exists(hctx);
+ if (r < 0) {
+ return r;
+ }
+
+ CLS_LOG(20, "parent_get");
+
+ cls_rbd_parent parent;
+ r = image::require_feature(hctx, RBD_FEATURE_LAYERING);
+ if (r == 0) {
+ r = read_key(hctx, "parent", &parent);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ } else if (r == -ENOENT) {
+ // examine oldest snapshot to see if it has a denormalized parent
+ auto parent_lambda = [hctx, &parent](const cls_rbd_snap& snap_meta) {
+ if (snap_meta.parent.exists()) {
+ parent = snap_meta.parent;
+ }
+ return 0;
+ };
+
+ r = image::snapshot::iterate(hctx, parent_lambda);
+ if (r < 0) {
+ return r;
+ }
+ }
+ }
+
+ cls::rbd::ParentImageSpec parent_image_spec{
+ parent.pool_id, parent.pool_namespace, parent.image_id,
+ parent.snap_id};
+ encode(parent_image_spec, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param snap id (uint64_t) parent snapshot id
+ *
+ * Output:
+ * @param byte overlap of parent image (std::optional<uint64_t>)
+ * @returns 0 on success, negative error code on failure
+ */
+int parent_overlap_get(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint64_t snap_id;
+ auto iter = in->cbegin();
+ try {
+ decode(snap_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = check_exists(hctx);
+ if (r < 0) {
+ return r;
+ }
+
+ CLS_LOG(20, "parent_overlap_get");
+
+ std::optional<uint64_t> parent_overlap = std::nullopt;
+ r = image::require_feature(hctx, RBD_FEATURE_LAYERING);
+ if (r == 0) {
+ if (snap_id == CEPH_NOSNAP) {
+ cls_rbd_parent parent;
+ r = read_key(hctx, "parent", &parent);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ } else if (r == 0) {
+ parent_overlap = parent.head_overlap;
+ }
+ } else {
+ cls_rbd_snap snap;
+ std::string snapshot_key;
+ key_from_snap_id(snap_id, &snapshot_key);
+ r = read_key(hctx, snapshot_key, &snap);
+ if (r < 0) {
+ return r;
+ }
+
+ if (snap.parent_overlap) {
+ parent_overlap = snap.parent_overlap;
+ } else if (snap.parent.exists()) {
+ // legacy format where full parent spec is written within
+ // each snapshot record
+ parent_overlap = snap.parent.head_overlap;
+ }
+ }
+ }
+
+ encode(parent_overlap, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param parent spec (cls::rbd::ParentImageSpec)
+ * @param size parent size (uint64_t)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int parent_attach(cls_method_context_t hctx, bufferlist *in, bufferlist *out) {
+ cls::rbd::ParentImageSpec parent_image_spec;
+ uint64_t parent_overlap;
+ bool reattach = false;
+
+ auto iter = in->cbegin();
+ try {
+ decode(parent_image_spec, iter);
+ decode(parent_overlap, iter);
+ if (!iter.end()) {
+ decode(reattach, iter);
+ }
+ } catch (const buffer::error &err) {
+ CLS_LOG(20, "cls_rbd::parent_attach: invalid decode");
+ return -EINVAL;
+ }
+
+ int r = image::parent::attach(hctx, {parent_image_spec, parent_overlap},
+ reattach);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int parent_detach(cls_method_context_t hctx, bufferlist *in, bufferlist *out) {
+ int r = image::parent::detach(hctx, false);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+
+/**
+ * methods for dealing with rbd_children object
+ */
+
+static int decode_parent_common(bufferlist::const_iterator& it, uint64_t *pool_id,
+ string *image_id, snapid_t *snap_id)
+{
+ try {
+ decode(*pool_id, it);
+ decode(*image_id, it);
+ decode(*snap_id, it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("error decoding parent spec");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int decode_parent(bufferlist *in, uint64_t *pool_id,
+ string *image_id, snapid_t *snap_id)
+{
+ auto it = in->cbegin();
+ return decode_parent_common(it, pool_id, image_id, snap_id);
+}
+
+static int decode_parent_and_child(bufferlist *in, uint64_t *pool_id,
+ string *image_id, snapid_t *snap_id,
+ string *c_image_id)
+{
+ auto it = in->cbegin();
+ int r = decode_parent_common(it, pool_id, image_id, snap_id);
+ if (r < 0)
+ return r;
+ try {
+ decode(*c_image_id, it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("error decoding child image id");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static string parent_key(uint64_t pool_id, string image_id, snapid_t snap_id)
+{
+ bufferlist key_bl;
+ encode(pool_id, key_bl);
+ encode(image_id, key_bl);
+ encode(snap_id, key_bl);
+ return string(key_bl.c_str(), key_bl.length());
+}
+
+/**
+ * add child to rbd_children directory object
+ *
+ * rbd_children is a map of (p_pool_id, p_image_id, p_snap_id) to
+ * a list of child image ids [c_image_id, c_image_id, ...]
+ *
+ * Input:
+ * @param p_pool_id parent pool id
+ * @param p_image_id parent image oid
+ * @param p_snap_id parent snapshot id
+ * @param c_image_id new child image oid to add
+ *
+ * @returns 0 on success, negative error on failure
+ */
+
+int add_child(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ int r;
+
+ uint64_t p_pool_id;
+ snapid_t p_snap_id;
+ string p_image_id, c_image_id;
+ // Use set for ease of erase() for remove_child()
+ std::set<string> children;
+
+ r = decode_parent_and_child(in, &p_pool_id, &p_image_id, &p_snap_id,
+ &c_image_id);
+ if (r < 0)
+ return r;
+
+ CLS_LOG(20, "add_child %s to (%" PRIu64 ", %s, %" PRIu64 ")", c_image_id.c_str(),
+ p_pool_id, p_image_id.c_str(), p_snap_id.val);
+
+ string key = parent_key(p_pool_id, p_image_id, p_snap_id);
+
+ // get current child list for parent, if any
+ r = read_key(hctx, key, &children);
+ if ((r < 0) && (r != -ENOENT)) {
+ CLS_LOG(20, "add_child: omap read failed: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ if (children.find(c_image_id) != children.end()) {
+ CLS_LOG(20, "add_child: child already exists: %s", c_image_id.c_str());
+ return -EEXIST;
+ }
+ // add new child
+ children.insert(c_image_id);
+
+ // write back
+ bufferlist childbl;
+ encode(children, childbl);
+ r = cls_cxx_map_set_val(hctx, key, &childbl);
+ if (r < 0)
+ CLS_LOG(20, "add_child: omap write failed: %s", cpp_strerror(r).c_str());
+ return r;
+}
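+
+/*
+ * Illustrative client-side sketch (not part of this class): registering a
+ * clone in the pool's legacy children directory. The directory object name
+ * ("rbd_children") and the plain uint64_t encoding of the snapshot id are
+ * assumptions for the example only.
+ *
+ *   bufferlist in;
+ *   encode(parent_pool_id, in);        // uint64_t pool id
+ *   encode(parent_image_id, in);       // std::string image id
+ *   encode(parent_snap_id, in);        // uint64_t snapshot id
+ *   encode(child_image_id, in);        // std::string image id
+ *   librados::ObjectWriteOperation op;
+ *   op.exec("rbd", "add_child", in);
+ *   int r = ioctx.operate("rbd_children", &op);  // -EEXIST if already present
+ */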
+
+/**
+ * remove child from rbd_children directory object
+ *
+ * Input:
+ * @param p_pool_id parent pool id
+ * @param p_image_id parent image oid
+ * @param p_snap_id parent snapshot id
+ * @param c_image_id child image oid to remove
+ *
+ * @returns 0 on success, negative error on failure
+ */
+
+int remove_child(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ int r;
+
+ uint64_t p_pool_id;
+ snapid_t p_snap_id;
+ string p_image_id, c_image_id;
+ std::set<string> children;
+
+ r = decode_parent_and_child(in, &p_pool_id, &p_image_id, &p_snap_id,
+ &c_image_id);
+ if (r < 0)
+ return r;
+
+ CLS_LOG(20, "remove_child %s from (%" PRIu64 ", %s, %" PRIu64 ")",
+ c_image_id.c_str(), p_pool_id, p_image_id.c_str(),
+ p_snap_id.val);
+
+ string key = parent_key(p_pool_id, p_image_id, p_snap_id);
+
+ // get current child list for parent. Unlike add_child(), an empty list
+ // is an error (how can we remove something that doesn't exist?)
+ r = read_key(hctx, key, &children);
+ if (r < 0) {
+ CLS_LOG(20, "remove_child: read omap failed: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ if (children.find(c_image_id) == children.end()) {
+ CLS_LOG(20, "remove_child: child not found: %s", c_image_id.c_str());
+ return -ENOENT;
+ }
+ // find and remove child
+ children.erase(c_image_id);
+
+ // now empty? remove key altogether
+ if (children.empty()) {
+ r = cls_cxx_map_remove_key(hctx, key);
+ if (r < 0)
+ CLS_LOG(20, "remove_child: remove key failed: %s", cpp_strerror(r).c_str());
+ } else {
+ // write back shortened children list
+ bufferlist childbl;
+ encode(children, childbl);
+ r = cls_cxx_map_set_val(hctx, key, &childbl);
+ if (r < 0)
+ CLS_LOG(20, "remove_child: write omap failed: %s", cpp_strerror(r).c_str());
+ }
+ return r;
+}
+
+/**
+ * Input:
+ * @param p_pool_id parent pool id
+ * @param p_image_id parent image oid
+ * @param p_snap_id parent snapshot id
+ *
+ * Output:
+ * @param children set<string> of children
+ *
+ * @returns 0 on success, negative error on failure
+ */
+int get_children(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ int r;
+ uint64_t p_pool_id;
+ snapid_t p_snap_id;
+ string p_image_id;
+ std::set<string> children;
+
+ r = decode_parent(in, &p_pool_id, &p_image_id, &p_snap_id);
+ if (r < 0)
+ return r;
+
+ CLS_LOG(20, "get_children of (%" PRIu64 ", %s, %" PRIu64 ")",
+ p_pool_id, p_image_id.c_str(), p_snap_id.val);
+
+ string key = parent_key(p_pool_id, p_image_id, p_snap_id);
+
+ r = read_key(hctx, key, &children);
+ if (r < 0) {
+ if (r != -ENOENT)
+ CLS_LOG(20, "get_children: read omap failed: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ encode(children, *out);
+ return 0;
+}
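+
+/*
+ * Illustrative client-side sketch (not part of this class): listing the
+ * clones registered for a parent snapshot. The directory object name is an
+ * assumption for the example only.
+ *
+ *   bufferlist in, out;
+ *   encode(parent_pool_id, in);
+ *   encode(parent_image_id, in);
+ *   encode(parent_snap_id, in);
+ *   int r = ioctx.exec("rbd_children", "rbd", "get_children", in, out);
+ *   if (r >= 0) {
+ *     std::set<std::string> children;
+ *     auto it = out.cbegin();
+ *     decode(children, it);            // child image ids
+ *   }
+ */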
+
+
+/**
+ * Get the information needed to create a rados snap context for doing
+ * I/O to the data objects. This must include all snapshots.
+ *
+ * Output:
+ * @param snap_seq the highest snapshot id ever associated with the image (uint64_t)
+ * @param snap_ids existing snapshot ids in descending order (vector<uint64_t>)
+ * @returns 0 on success, negative error code on failure
+ */
+int get_snapcontext(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "get_snapcontext");
+
+ int r;
+ int max_read = RBD_MAX_KEYS_READ;
+ vector<snapid_t> snap_ids;
+ string last_read = RBD_SNAP_KEY_PREFIX;
+ bool more;
+
+ do {
+ set<string> keys;
+ r = cls_cxx_map_get_keys(hctx, last_read, max_read, &keys, &more);
+ if (r < 0)
+ return r;
+
+ for (set<string>::const_iterator it = keys.begin();
+ it != keys.end(); ++it) {
+ if ((*it).find(RBD_SNAP_KEY_PREFIX) != 0)
+ break;
+ snapid_t snap_id = snap_id_from_key(*it);
+ snap_ids.push_back(snap_id);
+ }
+ if (!keys.empty())
+ last_read = *(keys.rbegin());
+ } while (more);
+
+ uint64_t snap_seq;
+ r = read_key(hctx, "snap_seq", &snap_seq);
+ if (r < 0) {
+ CLS_ERR("could not read the image's snap_seq off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ // snap_ids must be descending in a snap context
+ std::reverse(snap_ids.begin(), snap_ids.end());
+
+ encode(snap_seq, *out);
+ encode(snap_ids, *out);
+
+ return 0;
+}
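+
+/*
+ * Illustrative client-side sketch (not part of this class): fetching the
+ * snap context and installing it on the io context so that subsequent
+ * data-object writes carry the correct snapshot history. Decoding the snap
+ * ids as plain uint64_t values (wire-compatible with snapid_t) and the
+ * header object name are assumptions for the example only.
+ *
+ *   bufferlist in, out;
+ *   int r = ioctx.exec(header_oid, "rbd", "get_snapcontext", in, out);
+ *   if (r >= 0) {
+ *     uint64_t snap_seq;
+ *     std::vector<uint64_t> snap_ids;   // descending order
+ *     auto it = out.cbegin();
+ *     decode(snap_seq, it);
+ *     decode(snap_ids, it);
+ *     ioctx.selfmanaged_snap_set_write_ctx(snap_seq, snap_ids);
+ *   }
+ */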
+
+/**
+ * Output:
+ * @param object_prefix prefix for data object names (string)
+ * @returns 0 on success, negative error code on failure
+ */
+int get_object_prefix(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "get_object_prefix");
+
+ string object_prefix;
+ int r = read_key(hctx, "object_prefix", &object_prefix);
+ if (r < 0) {
+ CLS_ERR("failed to read the image's object prefix off of disk: %s",
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ encode(object_prefix, *out);
+
+ return 0;
+}
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * @param pool_id (int64_t) of data pool or -1 if none
+ * @returns 0 on success, negative error code on failure
+ */
+int get_data_pool(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "get_data_pool");
+
+ int64_t data_pool_id = -1;
+ int r = read_key(hctx, "data_pool_id", &data_pool_id);
+ if (r == -ENOENT) {
+ data_pool_id = -1;
+ } else if (r < 0) {
+ CLS_ERR("error reading image data pool id: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ encode(data_pool_id, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param snap_id which snapshot to query
+ *
+ * Output:
+ * @param name (string) of the snapshot
+ * @returns 0 on success, negative error code on failure
+ */
+int get_snapshot_name(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t snap_id;
+
+ auto iter = in->cbegin();
+ try {
+ decode(snap_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "get_snapshot_name snap_id=%llu", (unsigned long long)snap_id);
+
+ if (snap_id == CEPH_NOSNAP)
+ return -EINVAL;
+
+ cls_rbd_snap snap;
+ string snapshot_key;
+ key_from_snap_id(snap_id, &snapshot_key);
+ int r = read_key(hctx, snapshot_key, &snap);
+ if (r < 0)
+ return r;
+
+ encode(snap.name, *out);
+
+ return 0;
+}
+
+/**
+ * Input:
+ * @param snap_id which snapshot to query
+ *
+ * Output:
+ * @param timestamp (utime_t) of the snapshot
+ * @returns 0 on success, negative error code on failure
+ *
+ * NOTE: deprecated - remove this method after Luminous is unsupported
+ */
+int get_snapshot_timestamp(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t snap_id;
+
+ auto iter = in->cbegin();
+ try {
+ decode(snap_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "get_snapshot_timestamp snap_id=%llu", (unsigned long long)snap_id);
+
+ if (snap_id == CEPH_NOSNAP) {
+ return -EINVAL;
+ }
+
+ cls_rbd_snap snap;
+ string snapshot_key;
+ key_from_snap_id(snap_id, &snapshot_key);
+ int r = read_key(hctx, snapshot_key, &snap);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(snap.timestamp, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param snap_id which snapshot to query
+ *
+ * Output:
+ * @param snapshot (cls::rbd::SnapshotInfo)
+ * @returns 0 on success, negative error code on failure
+ */
+int snapshot_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t snap_id;
+
+ auto iter = in->cbegin();
+ try {
+ decode(snap_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "snapshot_get snap_id=%llu", (unsigned long long)snap_id);
+ if (snap_id == CEPH_NOSNAP) {
+ return -EINVAL;
+ }
+
+ cls_rbd_snap snap;
+ string snapshot_key;
+ key_from_snap_id(snap_id, &snapshot_key);
+ int r = read_key(hctx, snapshot_key, &snap);
+ if (r < 0) {
+ return r;
+ }
+
+ cls::rbd::SnapshotInfo snapshot_info{snap.id, snap.snapshot_namespace,
+ snap.name, snap.image_size,
+ snap.timestamp, snap.child_count};
+ encode(snapshot_info, *out);
+ return 0;
+}
+
+/**
+ * Adds a snapshot to an rbd header. Ensures the id and name are unique.
+ *
+ * Input:
+ * @param snap_name name of the snapshot (string)
+ * @param snap_id id of the snapshot (uint64_t)
+ * @param snap_namespace namespace of the snapshot (cls::rbd::SnapshotNamespace)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure.
+ * @returns -ESTALE if the input snap_id is less than the image's snap_seq
+ * @returns -EEXIST if the id or name are already used by another snapshot
+ */
+int snapshot_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ bufferlist snap_namebl, snap_idbl;
+ cls_rbd_snap snap_meta;
+ uint64_t snap_limit;
+
+ try {
+ auto iter = in->cbegin();
+ decode(snap_meta.name, iter);
+ decode(snap_meta.id, iter);
+ if (!iter.end()) {
+ decode(snap_meta.snapshot_namespace, iter);
+ }
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ if (boost::get<cls::rbd::UnknownSnapshotNamespace>(
+ &snap_meta.snapshot_namespace) != nullptr) {
+ CLS_ERR("Unknown snapshot namespace provided");
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "snapshot_add name=%s id=%llu", snap_meta.name.c_str(),
+ (unsigned long long)snap_meta.id.val);
+
+ if (snap_meta.id > CEPH_MAXSNAP)
+ return -EINVAL;
+
+ uint64_t cur_snap_seq;
+ int r = read_key(hctx, "snap_seq", &cur_snap_seq);
+ if (r < 0) {
+ CLS_ERR("Could not read image's snap_seq off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ // client lost a race with another snapshot creation.
+ // snap_seq must be monotonically increasing.
+ if (snap_meta.id < cur_snap_seq)
+ return -ESTALE;
+
+ r = read_key(hctx, "size", &snap_meta.image_size);
+ if (r < 0) {
+ CLS_ERR("Could not read image's size off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ r = read_key(hctx, "flags", &snap_meta.flags);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("Could not read image's flags off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ r = read_key(hctx, "snap_limit", &snap_limit);
+ if (r == -ENOENT) {
+ snap_limit = UINT64_MAX;
+ } else if (r < 0) {
+ CLS_ERR("Could not read snapshot limit off disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ snap_meta.timestamp = ceph_clock_now();
+
+ uint64_t total_read = 0;
+ auto pre_check_lambda =
+ [&snap_meta, &total_read, snap_limit](const cls_rbd_snap& old_meta) {
+ ++total_read;
+ if (total_read >= snap_limit) {
+ CLS_ERR("Attempt to create snapshot over limit of %" PRIu64,
+ snap_limit);
+ return -EDQUOT;
+ }
+
+ if ((snap_meta.name == old_meta.name &&
+ snap_meta.snapshot_namespace == old_meta.snapshot_namespace) ||
+ snap_meta.id == old_meta.id) {
+ CLS_LOG(20, "snap_name %s or snap_id %" PRIu64 " matches existing snap "
+ "%s %" PRIu64, snap_meta.name.c_str(), snap_meta.id.val,
+ old_meta.name.c_str(), old_meta.id.val);
+ return -EEXIST;
+ }
+ return 0;
+ };
+
+ r = image::snapshot::iterate(hctx, pre_check_lambda);
+ if (r < 0) {
+ return r;
+ }
+
+ // snapshot inherits parent, if any
+ cls_rbd_parent parent;
+ r = read_key(hctx, "parent", &parent);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+ if (r == 0) {
+ // write helper method will convert to normalized format if required
+ snap_meta.parent = parent;
+ }
+
+ if (cls::rbd::get_snap_namespace_type(snap_meta.snapshot_namespace) ==
+ cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) {
+ // add snap_trash feature bit if not already enabled
+ r = image::set_op_features(hctx, RBD_OPERATION_FEATURE_SNAP_TRASH,
+ RBD_OPERATION_FEATURE_SNAP_TRASH);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ r = write_key(hctx, "snap_seq", snap_meta.id);
+ if (r < 0) {
+ return r;
+ }
+
+ std::string snapshot_key;
+ key_from_snap_id(snap_meta.id, &snapshot_key);
+ r = image::snapshot::write(hctx, snapshot_key, std::move(snap_meta));
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
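+
+/*
+ * Illustrative client-side sketch (not part of this class): creating a user
+ * snapshot by allocating a self-managed snap id from the pool and then
+ * registering it in the header. The header object name and snapshot name
+ * are assumptions for the example only.
+ *
+ *   uint64_t snap_id;
+ *   int r = ioctx.selfmanaged_snap_create(&snap_id);
+ *   if (r == 0) {
+ *     bufferlist in;
+ *     encode(std::string("mysnap"), in);   // snapshot name
+ *     encode(snap_id, in);                 // snapshot id
+ *     librados::ObjectWriteOperation op;
+ *     op.exec("rbd", "snapshot_add", in);
+ *     r = ioctx.operate(header_oid, &op);  // -EEXIST / -ESTALE on races
+ *   }
+ */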
+
+/**
+ * Rename a snapshot.
+ *
+ * Input:
+ * @param src_snap_id old snap id of the snapshot (snapid_t)
+ * @param dst_snap_name new name of the snapshot (string)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure.
+ */
+int snapshot_rename(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ bufferlist snap_namebl, snap_idbl;
+ snapid_t src_snap_id;
+ string dst_snap_name;
+ cls_rbd_snap snap_meta;
+ int r;
+
+ try {
+ auto iter = in->cbegin();
+ decode(src_snap_id, iter);
+ decode(dst_snap_name, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "snapshot_rename id=%" PRIu64 ", dst_name=%s",
+ src_snap_id.val, dst_snap_name.c_str());
+
+ auto duplicate_name_lambda = [&dst_snap_name](const cls_rbd_snap& snap_meta) {
+ if (cls::rbd::get_snap_namespace_type(snap_meta.snapshot_namespace) ==
+ cls::rbd::SNAPSHOT_NAMESPACE_TYPE_USER &&
+ snap_meta.name == dst_snap_name) {
+ CLS_LOG(20, "snap_name %s matches existing snap with snap id %" PRIu64,
+ dst_snap_name.c_str(), snap_meta.id.val);
+ return -EEXIST;
+ }
+ return 0;
+ };
+ r = image::snapshot::iterate(hctx, duplicate_name_lambda);
+ if (r < 0) {
+ return r;
+ }
+
+ std::string src_snap_key;
+ key_from_snap_id(src_snap_id, &src_snap_key);
+ r = read_key(hctx, src_snap_key, &snap_meta);
+ if (r == -ENOENT) {
+ CLS_LOG(20, "cannot find existing snap with snap id = %" PRIu64,
+ src_snap_id.val);
+ return r;
+ }
+
+ if (cls::rbd::get_snap_namespace_type(snap_meta.snapshot_namespace) !=
+ cls::rbd::SNAPSHOT_NAMESPACE_TYPE_USER) {
+ // can only rename user snapshots
+ return -EINVAL;
+ }
+
+ snap_meta.name = dst_snap_name;
+ r = image::snapshot::write(hctx, src_snap_key, std::move(snap_meta));
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Removes a snapshot from an rbd header.
+ *
+ * Input:
+ * @param snap_id the id of the snapshot to remove (uint64_t)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int snapshot_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ snapid_t snap_id;
+
+ try {
+ auto iter = in->cbegin();
+ decode(snap_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "snapshot_remove id=%llu", (unsigned long long)snap_id.val);
+
+ // check if the key exists. we can't rely on remove_key doing this for
+ // us, since OMAPRMKEYS returns success if the key is not there.
+ // bug or feature? sounds like a bug, since tmap did not have this
+ // behavior, but cls_rgw may rely on it...
+ cls_rbd_snap snap;
+ string snapshot_key;
+ key_from_snap_id(snap_id, &snapshot_key);
+ int r = read_key(hctx, snapshot_key, &snap);
+ if (r == -ENOENT) {
+ return -ENOENT;
+ }
+
+ if (snap.protection_status != RBD_PROTECTION_STATUS_UNPROTECTED) {
+ return -EBUSY;
+ }
+
+ // snapshot is in-use by clone v2 child
+ if (snap.child_count > 0) {
+ return -EBUSY;
+ }
+
+ r = remove_key(hctx, snapshot_key);
+ if (r < 0) {
+ return r;
+ }
+
+ bool has_child_snaps = false;
+ bool has_trash_snaps = false;
+ auto remove_lambda = [snap_id, &has_child_snaps, &has_trash_snaps](
+ const cls_rbd_snap& snap_meta) {
+ if (snap_meta.id != snap_id) {
+ if (snap_meta.parent.pool_id != -1 || snap_meta.parent_overlap) {
+ has_child_snaps = true;
+ }
+
+ if (cls::rbd::get_snap_namespace_type(snap_meta.snapshot_namespace) ==
+ cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) {
+ has_trash_snaps = true;
+ }
+ }
+ return 0;
+ };
+
+ r = image::snapshot::iterate(hctx, remove_lambda);
+ if (r < 0) {
+ return r;
+ }
+
+ cls_rbd_parent parent;
+ r = read_key(hctx, "parent", &parent);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+
+ bool has_parent = (r >= 0 && parent.exists());
+ bool is_head_child = (has_parent && parent.head_overlap);
+ int8_t require_osd_release = cls_get_required_osd_release(hctx);
+ if (has_parent && !is_head_child && !has_child_snaps &&
+ require_osd_release >= CEPH_RELEASE_NAUTILUS) {
+ // remove the unused parent image spec
+ r = remove_key(hctx, "parent");
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+ }
+
+ uint64_t op_features_mask = 0ULL;
+ if (!has_child_snaps && !is_head_child) {
+ // disable clone child op feature if no longer associated
+ op_features_mask |= RBD_OPERATION_FEATURE_CLONE_CHILD;
+ }
+ if (!has_trash_snaps) {
+ // remove the snap_trash op feature if not in-use by any other snapshots
+ op_features_mask |= RBD_OPERATION_FEATURE_SNAP_TRASH;
+ }
+
+ if (op_features_mask != 0ULL) {
+ r = image::set_op_features(hctx, 0, op_features_mask);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Moves a snapshot to the trash namespace.
+ *
+ * Input:
+ * @param snap_id the id of the snapshot to move to the trash (uint64_t)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int snapshot_trash_add(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out)
+{
+ snapid_t snap_id;
+
+ try {
+ auto iter = in->cbegin();
+ decode(snap_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "snapshot_trash_add id=%" PRIu64, snap_id.val);
+
+ cls_rbd_snap snap;
+ std::string snapshot_key;
+ key_from_snap_id(snap_id, &snapshot_key);
+ int r = read_key(hctx, snapshot_key, &snap);
+ if (r == -ENOENT) {
+ return r;
+ }
+
+ if (snap.protection_status != RBD_PROTECTION_STATUS_UNPROTECTED) {
+ return -EBUSY;
+ }
+
+ auto snap_type = cls::rbd::get_snap_namespace_type(snap.snapshot_namespace);
+ if (snap_type == cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) {
+ return -EEXIST;
+ }
+
+ // add snap_trash feature bit if not already enabled
+ r = image::set_op_features(hctx, RBD_OPERATION_FEATURE_SNAP_TRASH,
+ RBD_OPERATION_FEATURE_SNAP_TRASH);
+ if (r < 0) {
+ return r;
+ }
+
+ snap.snapshot_namespace = cls::rbd::TrashSnapshotNamespace{snap_type,
+ snap.name};
+ uuid_d uuid_gen;
+ uuid_gen.generate_random();
+ snap.name = uuid_gen.to_string();
+
+ r = image::snapshot::write(hctx, snapshot_key, std::move(snap));
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Returns a uint64_t of all the features supported by this class.
+ */
+int get_all_features(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t all_features = RBD_FEATURES_ALL;
+ encode(all_features, *out);
+ return 0;
+}
+
+/**
+ * "Copy up" data from the parent of a clone to the clone's object(s).
+ * Used for implementing copy-on-write for a clone image. Client
+ * will pass down a chunk of data that fits completely within one
+ * clone block (one object), and is aligned (starts at beginning of block),
+ * but may be shorter (for non-full parent blocks). The class method
+ * can't know the object size to validate the requested length,
+ * so it just writes the data as given if the child object doesn't
+ * already exist, and returns success if it does.
+ *
+ * Input:
+ * @param in bufferlist of data to write
+ *
+ * Output:
+ * @returns 0 on success, or if block already exists in child
+ * negative error code on other error
+ */
+
+int copyup(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // check for existence; if child object exists, just return success
+ if (cls_cxx_stat(hctx, NULL, NULL) == 0)
+ return 0;
+ CLS_LOG(20, "copyup: writing length %d\n", in->length());
+ return cls_cxx_write(hctx, 0, in->length(), in);
+}
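+
+/*
+ * Illustrative client-side sketch (not part of this class): performing the
+ * copy-on-write step for one clone object. `parent_data` stands in for the
+ * bytes read from the corresponding parent extent; the child data object
+ * name is an assumption for the example only.
+ *
+ *   librados::ObjectWriteOperation op;
+ *   op.exec("rbd", "copyup", parent_data);      // bufferlist of parent bytes
+ *   int r = ioctx.operate(child_data_oid, &op);
+ *   // r == 0 both when the data was written and when the child object
+ *   // already existed, matching the semantics described above
+ */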
+
+
+/************************ rbd_id object methods **************************/
+
+/**
+ * Input:
+ * @param in ignored
+ *
+ * Output:
+ * @param id the id stored in the object
+ * @returns 0 on success, negative error code on failure
+ */
+int get_id(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t size;
+ int r = cls_cxx_stat(hctx, &size, NULL);
+ if (r < 0)
+ return r;
+
+ if (size == 0)
+ return -ENOENT;
+
+ bufferlist read_bl;
+ r = cls_cxx_read(hctx, 0, size, &read_bl);
+ if (r < 0) {
+ CLS_ERR("get_id: could not read id: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ string id;
+ try {
+ auto iter = read_bl.cbegin();
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ return -EIO;
+ }
+
+ encode(id, *out);
+ return 0;
+}
+
+/**
+ * Set the id of an image. The object must already exist.
+ *
+ * Input:
+ * @param id the id of the image, as an alpha-numeric string
+ *
+ * Output:
+ * @returns 0 on success, -EEXIST if an id has already been set,
+ * negative error code on other error
+ */
+int set_id(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ int r = check_exists(hctx);
+ if (r < 0)
+ return r;
+
+ string id;
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ if (!is_valid_id(id)) {
+ CLS_ERR("set_id: invalid id '%s'", id.c_str());
+ return -EINVAL;
+ }
+
+ uint64_t size;
+ r = cls_cxx_stat(hctx, &size, NULL);
+ if (r < 0)
+ return r;
+ if (size != 0)
+ return -EEXIST;
+
+ CLS_LOG(20, "set_id: id=%s", id.c_str());
+
+ bufferlist write_bl;
+ encode(id, write_bl);
+ return cls_cxx_write(hctx, 0, write_bl.length(), &write_bl);
+}
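+
+/*
+ * Illustrative client-side sketch (not part of this class): creating the
+ * per-image id object and recording the image id in it. The "rbd_id.<name>"
+ * object naming convention and the id value are assumptions for the example
+ * only.
+ *
+ *   int r = ioctx.create("rbd_id.myimage", true);   // exclusive create
+ *   if (r == 0) {
+ *     bufferlist in;
+ *     encode(std::string("10516b8b4567"), in);      // alphanumeric image id
+ *     librados::ObjectWriteOperation op;
+ *     op.exec("rbd", "set_id", in);
+ *     r = ioctx.operate("rbd_id.myimage", &op);
+ *   }
+ */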
+
+/**
+ * Update the access timestamp of an image
+ *
+ * Input:
+ * @param none
+ *
+ * Output:
+ * @returns 0 on success, negative error code on other error
+ */
+int set_access_timestamp(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ int r = check_exists(hctx);
+ if(r < 0)
+ return r;
+
+ utime_t timestamp = ceph_clock_now();
+ r = write_key(hctx, "access_timestamp", timestamp);
+ if(r < 0) {
+ CLS_ERR("error setting access_timestamp");
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Update the modify timestamp of an image
+ *
+ * Input:
+ * @param none
+ *
+ * Output:
+ * @returns 0 on success, negative error code on other error
+ */
+
+int set_modify_timestamp(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ int r = check_exists(hctx);
+ if(r < 0)
+ return r;
+
+ utime_t timestamp = ceph_clock_now();
+ r = write_key(hctx, "modify_timestamp", timestamp);
+ if(r < 0) {
+ CLS_ERR("error setting modify_timestamp");
+ return r;
+ }
+
+ return 0;
+}
+
+
+
+/*********************** methods for rbd_directory ***********************/
+
+static const string dir_key_for_id(const string &id)
+{
+ return RBD_DIR_ID_KEY_PREFIX + id;
+}
+
+static const string dir_key_for_name(const string &name)
+{
+ return RBD_DIR_NAME_KEY_PREFIX + name;
+}
+
+static const string dir_name_from_key(const string &key)
+{
+ return key.substr(strlen(RBD_DIR_NAME_KEY_PREFIX));
+}
+
+static int dir_add_image_helper(cls_method_context_t hctx,
+ const string &name, const string &id,
+ bool check_for_unique_id)
+{
+ if (!name.size() || !is_valid_id(id)) {
+ CLS_ERR("dir_add_image_helper: invalid name '%s' or id '%s'",
+ name.c_str(), id.c_str());
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "dir_add_image_helper name=%s id=%s", name.c_str(), id.c_str());
+
+ string tmp;
+ string name_key = dir_key_for_name(name);
+ string id_key = dir_key_for_id(id);
+ int r = read_key(hctx, name_key, &tmp);
+ if (r != -ENOENT) {
+ CLS_LOG(10, "name already exists");
+ return -EEXIST;
+ }
+ r = read_key(hctx, id_key, &tmp);
+ if (r != -ENOENT && check_for_unique_id) {
+ CLS_LOG(10, "id already exists");
+ return -EBADF;
+ }
+ bufferlist id_bl, name_bl;
+ encode(id, id_bl);
+ encode(name, name_bl);
+ map<string, bufferlist> omap_vals;
+ omap_vals[name_key] = id_bl;
+ omap_vals[id_key] = name_bl;
+ return cls_cxx_map_set_vals(hctx, &omap_vals);
+}
+
+static int dir_remove_image_helper(cls_method_context_t hctx,
+ const string &name, const string &id)
+{
+ CLS_LOG(20, "dir_remove_image_helper name=%s id=%s",
+ name.c_str(), id.c_str());
+
+ string stored_name, stored_id;
+ string name_key = dir_key_for_name(name);
+ string id_key = dir_key_for_id(id);
+ int r = read_key(hctx, name_key, &stored_id);
+ if (r < 0) {
+ if (r != -ENOENT)
+ CLS_ERR("error reading name to id mapping: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ r = read_key(hctx, id_key, &stored_name);
+ if (r < 0) {
+ CLS_ERR("error reading id to name mapping: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ // check if this op raced with a rename
+ if (stored_name != name || stored_id != id) {
+ CLS_ERR("stored name '%s' and id '%s' do not match args '%s' and '%s'",
+ stored_name.c_str(), stored_id.c_str(), name.c_str(), id.c_str());
+ return -ESTALE;
+ }
+
+ r = cls_cxx_map_remove_key(hctx, name_key);
+ if (r < 0) {
+ CLS_ERR("error removing name: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ r = cls_cxx_map_remove_key(hctx, id_key);
+ if (r < 0) {
+ CLS_ERR("error removing id: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Rename an image in the directory, updating both indexes
+ * atomically. This can't be done from the client calling
+ * dir_add_image and dir_remove_image in one transaction because the
+ * results of the first method are not visible to later steps.
+ *
+ * Input:
+ * @param src original name of the image
+ * @param dest new name of the image
+ * @param id the id of the image
+ *
+ * Output:
+ * @returns -ESTALE if src and id do not map to each other
+ * @returns -ENOENT if src or id are not in the directory
+ * @returns -EEXIST if dest already exists
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_rename_image(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string src, dest, id;
+ try {
+ auto iter = in->cbegin();
+ decode(src, iter);
+ decode(dest, iter);
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = dir_remove_image_helper(hctx, src, id);
+ if (r < 0)
+ return r;
+ // ignore duplicate id because the result of
+ // remove_image_helper is not visible yet
+ return dir_add_image_helper(hctx, dest, id, false);
+}
+
+/**
+ * Get the id of an image given its name.
+ *
+ * Input:
+ * @param name the name of the image
+ *
+ * Output:
+ * @param id the id of the image
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_get_id(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string name;
+
+ try {
+ auto iter = in->cbegin();
+ decode(name, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "dir_get_id: name=%s", name.c_str());
+
+ string id;
+ int r = read_key(hctx, dir_key_for_name(name), &id);
+ if (r < 0) {
+ if (r != -ENOENT)
+ CLS_ERR("error reading id for name '%s': %s", name.c_str(), cpp_strerror(r).c_str());
+ return r;
+ }
+ encode(id, *out);
+ return 0;
+}
+
+/**
+ * Get the name of an image given its id.
+ *
+ * Input:
+ * @param id the id of the image
+ *
+ * Output:
+ * @param name the name of the image
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_get_name(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string id;
+
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "dir_get_name: id=%s", id.c_str());
+
+ string name;
+ int r = read_key(hctx, dir_key_for_id(id), &name);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading name for id '%s': %s", id.c_str(),
+ cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+ encode(name, *out);
+ return 0;
+}
+
+/**
+ * List the names and ids of the images in the directory, sorted by
+ * name.
+ *
+ * Input:
+ * @param start_after which name to begin listing after
+ * (use the empty string to start at the beginning)
+ * @param max_return the maximum number of names to list
+ *
+ * Output:
+ * @param images map from name to id of up to max_return images
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string start_after;
+ uint64_t max_return;
+
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int max_read = RBD_MAX_KEYS_READ;
+ map<string, string> images;
+ string last_read = dir_key_for_name(start_after);
+ bool more = true;
+
+ while (more && images.size() < max_return) {
+ map<string, bufferlist> vals;
+ CLS_LOG(20, "last_read = '%s'", last_read.c_str());
+ int r = cls_cxx_map_get_vals(hctx, last_read, RBD_DIR_NAME_KEY_PREFIX,
+ max_read, &vals, &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading directory by name: %s", cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ for (map<string, bufferlist>::iterator it = vals.begin();
+ it != vals.end(); ++it) {
+ string id;
+ auto iter = it->second.cbegin();
+ try {
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode id of image '%s'", it->first.c_str());
+ return -EIO;
+ }
+ CLS_LOG(20, "adding '%s' -> '%s'", dir_name_from_key(it->first).c_str(), id.c_str());
+ images[dir_name_from_key(it->first)] = id;
+ if (images.size() >= max_return)
+ break;
+ }
+ if (!vals.empty()) {
+ last_read = dir_key_for_name(images.rbegin()->first);
+ }
+ }
+
+ encode(images, *out);
+
+ return 0;
+}
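+
+/*
+ * Illustrative client-side sketch (not part of this class): paging through
+ * the pool's image directory. The directory object name ("rbd_directory")
+ * and the page size are assumptions for the example only.
+ *
+ *   std::map<std::string, std::string> all_images;   // name -> id
+ *   std::string start_after;
+ *   while (true) {
+ *     bufferlist in, out;
+ *     encode(start_after, in);
+ *     encode(static_cast<uint64_t>(64), in);         // max_return per call
+ *     int r = ioctx.exec("rbd_directory", "rbd", "dir_list", in, out);
+ *     if (r < 0) {
+ *       break;                                       // propagate/handle error
+ *     }
+ *     std::map<std::string, std::string> page;
+ *     auto it = out.cbegin();
+ *     decode(page, it);
+ *     all_images.insert(page.begin(), page.end());
+ *     if (page.size() < 64) {
+ *       break;                                       // last page reached
+ *     }
+ *     start_after = page.rbegin()->first;
+ *   }
+ */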
+
+/**
+ * Add an image to the rbd directory. Creates the directory object if
+ * needed, and updates the index from id to name and name to id.
+ *
+ * Input:
+ * @param name the name of the image
+ * @param id the id of the image
+ *
+ * Output:
+ * @returns -EEXIST if the image name is already in the directory
+ * @returns -EBADF if the image id is already in the directory
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_add_image(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ int r = cls_cxx_create(hctx, false);
+ if (r < 0) {
+ CLS_ERR("could not create directory: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ string name, id;
+ try {
+ auto iter = in->cbegin();
+ decode(name, iter);
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ return dir_add_image_helper(hctx, name, id, true);
+}
+
+/**
+ * Remove an image from the rbd directory.
+ *
+ * Input:
+ * @param name the name of the image
+ * @param id the id of the image
+ *
+ * Output:
+ * @returns -ESTALE if the name and id do not map to each other
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_remove_image(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string name, id;
+ try {
+ auto iter = in->cbegin();
+ decode(name, iter);
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ return dir_remove_image_helper(hctx, name, id);
+}
+
+/**
+ * Verify the current state of the directory
+ *
+ * Input:
+ * @param state the DirectoryState of the directory
+ *
+ * Output:
+ * @returns -ENOENT if the state does not match
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_state_assert(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ cls::rbd::DirectoryState directory_state = cls::rbd::DIRECTORY_STATE_READY;
+ try {
+ auto iter = in->cbegin();
+ decode(directory_state, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ cls::rbd::DirectoryState on_disk_directory_state = directory_state;
+ int r = read_key(hctx, "state", &on_disk_directory_state);
+ if (r < 0) {
+ return r;
+ }
+
+ if (directory_state != on_disk_directory_state) {
+ return -ENOENT;
+ }
+ return 0;
+}
+
+/**
+ * Set the current state of the directory
+ *
+ * Input:
+ * @param state the DirectoryState of the directory
+ *
+ * Output:
+ * @returns -ENOENT if the directory does not exist when disabling image adds
+ * @returns -EBUSY if the directory is not empty when disabling image adds
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_state_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ cls::rbd::DirectoryState directory_state;
+ try {
+ auto iter = in->cbegin();
+ decode(directory_state, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = check_exists(hctx);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+
+ switch (directory_state) {
+ case cls::rbd::DIRECTORY_STATE_READY:
+ break;
+ case cls::rbd::DIRECTORY_STATE_ADD_DISABLED:
+ {
+ if (r == -ENOENT) {
+ return r;
+ }
+
+ // verify that the directory is empty
+ std::map<std::string, bufferlist> vals;
+ bool more;
+ r = cls_cxx_map_get_vals(hctx, RBD_DIR_NAME_KEY_PREFIX,
+ RBD_DIR_NAME_KEY_PREFIX, 1, &vals, &more);
+ if (r < 0) {
+ return r;
+ } else if (!vals.empty()) {
+ return -EBUSY;
+ }
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ r = write_key(hctx, "state", directory_state);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+int object_map_read(cls_method_context_t hctx, BitVector<2> &object_map)
+{
+ uint64_t size;
+ int r = cls_cxx_stat(hctx, &size, NULL);
+ if (r < 0) {
+ return r;
+ }
+ if (size == 0) {
+ return -ENOENT;
+ }
+
+ bufferlist bl;
+ r = cls_cxx_read(hctx, 0, size, &bl);
+ if (r < 0) {
+ return r;
+ }
+
+ try {
+ auto iter = bl.cbegin();
+ decode(object_map, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode object map: %s", err.what());
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * Load an rbd image's object map
+ *
+ * Input:
+ * none
+ *
+ * Output:
+ * @param object map bit vector
+ * @returns 0 on success, negative error code on failure
+ */
+int object_map_load(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ BitVector<2> object_map;
+ int r = object_map_read(hctx, object_map);
+ if (r < 0) {
+ return r;
+ }
+
+ object_map.set_crc_enabled(false);
+ encode(object_map, *out);
+ return 0;
+}
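+
+/*
+ * Illustrative client-side sketch (not part of this class): loading an
+ * object map and counting allocated objects. The object-map object naming
+ * convention ("rbd_object_map.<image id>", with a ".<snap id>" suffix for
+ * snapshots) is an assumption for the example only.
+ *
+ *   bufferlist in, out;
+ *   int r = ioctx.exec(object_map_oid, "rbd", "object_map_load", in, out);
+ *   if (r >= 0) {
+ *     ceph::BitVector<2> object_map;                 // common/bit_vector.hpp
+ *     auto it = out.cbegin();
+ *     decode(object_map, it);
+ *     uint64_t in_use = 0;
+ *     for (uint64_t i = 0; i < object_map.size(); ++i) {
+ *       if (object_map[i] == OBJECT_EXISTS ||
+ *           object_map[i] == OBJECT_EXISTS_CLEAN) {
+ *         ++in_use;
+ *       }
+ *     }
+ *   }
+ */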
+
+/**
+ * Save an rbd image's object map
+ *
+ * Input:
+ * @param object map bit vector
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int object_map_save(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ BitVector<2> object_map;
+ try {
+ auto iter = in->cbegin();
+ decode(object_map, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ object_map.set_crc_enabled(true);
+
+ bufferlist bl;
+ encode(object_map, bl);
+ CLS_LOG(20, "object_map_save: object size=%" PRIu64 ", byte size=%u",
+ object_map.size(), bl.length());
+ return cls_cxx_write_full(hctx, &bl);
+}
+
+/**
+ * Resize an rbd image's object map
+ *
+ * Input:
+ * @param object_count the max number of objects in the image
+ * @param default_state the default state of newly created objects
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int object_map_resize(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t object_count;
+ uint8_t default_state;
+ try {
+ auto iter = in->cbegin();
+ decode(object_count, iter);
+ decode(default_state, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ // protect against excessive memory requirements
+ if (object_count > cls::rbd::MAX_OBJECT_MAP_OBJECT_COUNT) {
+ CLS_ERR("object map too large: %" PRIu64, object_count);
+ return -EINVAL;
+ }
+
+ BitVector<2> object_map;
+ int r = object_map_read(hctx, object_map);
+ if ((r < 0) && (r != -ENOENT)) {
+ return r;
+ }
+
+ size_t orig_object_map_size = object_map.size();
+ if (object_count < orig_object_map_size) {
+ auto it = object_map.begin() + object_count;
+ auto end_it = object_map.end();
+ uint64_t i = object_count;
+ for (; it != end_it; ++it, ++i) {
+ if (*it != default_state) {
+ CLS_ERR("object map indicates object still exists: %" PRIu64, i);
+ return -ESTALE;
+ }
+ }
+ object_map.resize(object_count);
+ } else if (object_count > orig_object_map_size) {
+ object_map.resize(object_count);
+ auto it = object_map.begin() + orig_object_map_size;
+ auto end_it = object_map.end();
+ for (; it != end_it; ++it) {
+ *it = default_state;
+ }
+ }
+
+ bufferlist map;
+ encode(object_map, map);
+ CLS_LOG(20, "object_map_resize: object size=%" PRIu64 ", byte size=%u",
+ object_count, map.length());
+ return cls_cxx_write_full(hctx, &map);
+}
+
+/**
+ * Update an rbd image's object map
+ *
+ * Input:
+ * @param start_object_no the start object iterator
+ * @param end_object_no the end object iterator
+ * @param new_object_state the new object state
+ * @param current_object_state optional current object state filter
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int object_map_update(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t start_object_no;
+ uint64_t end_object_no;
+ uint8_t new_object_state;
+ boost::optional<uint8_t> current_object_state;
+ try {
+ auto iter = in->cbegin();
+ decode(start_object_no, iter);
+ decode(end_object_no, iter);
+ decode(new_object_state, iter);
+ decode(current_object_state, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode message");
+ return -EINVAL;
+ }
+
+ uint64_t size;
+ int r = cls_cxx_stat(hctx, &size, NULL);
+ if (r < 0) {
+ return r;
+ }
+
+ BitVector<2> object_map;
+ bufferlist header_bl;
+ r = cls_cxx_read2(hctx, 0, object_map.get_header_length(), &header_bl,
+ CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
+ if (r < 0) {
+ CLS_ERR("object map header read failed");
+ return r;
+ }
+
+ try {
+ auto it = header_bl.cbegin();
+ object_map.decode_header(it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode object map header: %s", err.what());
+ return -EINVAL;
+ }
+
+ uint64_t object_byte_offset;
+ uint64_t byte_length;
+ object_map.get_header_crc_extents(&object_byte_offset, &byte_length);
+
+ bufferlist footer_bl;
+ r = cls_cxx_read2(hctx, object_byte_offset, byte_length, &footer_bl,
+ CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
+ if (r < 0) {
+ CLS_ERR("object map footer read header CRC failed");
+ return r;
+ }
+
+ try {
+ auto it = footer_bl.cbegin();
+ object_map.decode_header_crc(it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode object map header CRC: %s", err.what());
+ }
+
+ if (start_object_no >= end_object_no || end_object_no > object_map.size()) {
+ return -ERANGE;
+ }
+
+ uint64_t object_count = end_object_no - start_object_no;
+ object_map.get_data_crcs_extents(start_object_no, object_count,
+ &object_byte_offset, &byte_length);
+ const auto footer_object_offset = object_byte_offset;
+
+ footer_bl.clear();
+ r = cls_cxx_read2(hctx, object_byte_offset, byte_length, &footer_bl,
+ CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
+ if (r < 0) {
+ CLS_ERR("object map footer read data CRCs failed");
+ return r;
+ }
+
+ try {
+ auto it = footer_bl.cbegin();
+ object_map.decode_data_crcs(it, start_object_no);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode object map data CRCs: %s", err.what());
+ }
+
+ uint64_t data_byte_offset;
+ object_map.get_data_extents(start_object_no, object_count,
+ &data_byte_offset, &object_byte_offset,
+ &byte_length);
+
+ bufferlist data_bl;
+ r = cls_cxx_read2(hctx, object_byte_offset, byte_length, &data_bl,
+ CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
+ if (r < 0) {
+ CLS_ERR("object map data read failed");
+ return r;
+ }
+
+ try {
+ auto it = data_bl.cbegin();
+ object_map.decode_data(it, data_byte_offset);
+ } catch (const buffer::error &err) {
+ CLS_ERR("failed to decode data chunk [%" PRIu64 "]: %s",
+ data_byte_offset, err.what());
+ return -EINVAL;
+ }
+
+ bool updated = false;
+ auto it = object_map.begin() + start_object_no;
+ auto end_it = object_map.begin() + end_object_no;
+ for (; it != end_it; ++it) {
+ uint8_t state = *it;
+ if ((!current_object_state || state == *current_object_state ||
+ (*current_object_state == OBJECT_EXISTS &&
+ state == OBJECT_EXISTS_CLEAN)) && state != new_object_state) {
+ *it = new_object_state;
+ updated = true;
+ }
+ }
+
+ if (updated) {
+ CLS_LOG(20, "object_map_update: %" PRIu64 "~%" PRIu64 " -> %" PRIu64,
+ data_byte_offset, byte_length, object_byte_offset);
+
+ bufferlist data_bl;
+ object_map.encode_data(data_bl, data_byte_offset, byte_length);
+ r = cls_cxx_write2(hctx, object_byte_offset, data_bl.length(), &data_bl,
+ CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
+ if (r < 0) {
+ CLS_ERR("failed to write object map header: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ footer_bl.clear();
+ object_map.encode_data_crcs(footer_bl, start_object_no, object_count);
+ r = cls_cxx_write2(hctx, footer_object_offset, footer_bl.length(),
+ &footer_bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
+ if (r < 0) {
+ CLS_ERR("failed to write object map footer: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ } else {
+ CLS_LOG(20, "object_map_update: no update necessary");
+ }
+
+ return 0;
+}
+
+/**
+ * Mark all _EXISTS objects as _EXISTS_CLEAN so future writes to the
+ * image HEAD can be tracked.
+ *
+ * Input:
+ * none
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int object_map_snap_add(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out)
+{
+ BitVector<2> object_map;
+ int r = object_map_read(hctx, object_map);
+ if (r < 0) {
+ return r;
+ }
+
+ bool updated = false;
+ auto it = object_map.begin();
+ auto end_it = object_map.end();
+ for (; it != end_it; ++it) {
+ if (*it == OBJECT_EXISTS) {
+ *it = OBJECT_EXISTS_CLEAN;
+ updated = true;
+ }
+ }
+
+ if (updated) {
+ bufferlist bl;
+ encode(object_map, bl);
+ r = cls_cxx_write_full(hctx, &bl);
+ }
+ return r;
+}
+
+/**
+ * Mark all _EXISTS_CLEAN objects as _EXISTS in the current object map
+ * if the provided snapshot object map object is marked as _EXISTS.
+ *
+ * Input:
+ * @param snapshot object map bit vector
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int object_map_snap_remove(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out)
+{
+ BitVector<2> src_object_map;
+ try {
+ auto iter = in->cbegin();
+ decode(src_object_map, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ BitVector<2> dst_object_map;
+ int r = object_map_read(hctx, dst_object_map);
+ if (r < 0) {
+ return r;
+ }
+
+ bool updated = false;
+ auto src_it = src_object_map.begin();
+ auto dst_it = dst_object_map.begin();
+ auto dst_it_end = dst_object_map.end();
+ uint64_t i = 0;
+ for (; dst_it != dst_it_end; ++dst_it) {
+ if (*dst_it == OBJECT_EXISTS_CLEAN &&
+ (i >= src_object_map.size() || *src_it == OBJECT_EXISTS)) {
+ *dst_it = OBJECT_EXISTS;
+ updated = true;
+ }
+ if (i < src_object_map.size())
+ ++src_it;
+ ++i;
+ }
+
+ if (updated) {
+ bufferlist bl;
+ encode(dst_object_map, bl);
+ r = cls_cxx_write_full(hctx, &bl);
+ }
+ return r;
+}
+
+static const string metadata_key_for_name(const string &name)
+{
+ return RBD_METADATA_KEY_PREFIX + name;
+}
+
+static const string metadata_name_from_key(const string &key)
+{
+ return key.substr(strlen(RBD_METADATA_KEY_PREFIX));
+}
+
+/**
+ * Input:
+ * @param start_after which name to begin listing after
+ * (use the empty string to start at the beginning)
+ * @param max_return the maximum number of names to list
+ *
+ * Output:
+ * @param value
+ * @returns 0 on success, negative error code on failure
+ */
+int metadata_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string start_after;
+ uint64_t max_return;
+
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ // TODO remove implicit support for zero during the N-release
+ if (max_return == 0) {
+ max_return = RBD_MAX_KEYS_READ;
+ }
+
+ map<string, bufferlist> data;
+ string last_read = metadata_key_for_name(start_after);
+ bool more = true;
+
+ while (more && data.size() < max_return) {
+ map<string, bufferlist> raw_data;
+ int max_read = std::min<uint64_t>(RBD_MAX_KEYS_READ, max_return - data.size());
+ int r = cls_cxx_map_get_vals(hctx, last_read, RBD_METADATA_KEY_PREFIX,
+ max_read, &raw_data, &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("failed to read the vals off of disk: %s",
+ cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ for (auto& kv : raw_data) {
+ data[metadata_name_from_key(kv.first)].swap(kv.second);
+ }
+
+ if (!raw_data.empty()) {
+ last_read = raw_data.rbegin()->first;
+ }
+ }
+
+ encode(data, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param data <map(key, value)>
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int metadata_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ map<string, bufferlist> data, raw_data;
+
+ auto iter = in->cbegin();
+ try {
+ decode(data, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ for (map<string, bufferlist>::iterator it = data.begin();
+ it != data.end(); ++it) {
+ CLS_LOG(20, "metadata_set key=%s value=%.*s", it->first.c_str(),
+ it->second.length(), it->second.c_str());
+ raw_data[metadata_key_for_name(it->first)].swap(it->second);
+ }
+ int r = cls_cxx_map_set_vals(hctx, &raw_data);
+ if (r < 0) {
+ CLS_ERR("error writing metadata: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Input:
+ * @param key
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int metadata_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string key;
+
+ auto iter = in->cbegin();
+ try {
+ decode(key, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "metadata_remove key=%s", key.c_str());
+
+ int r = cls_cxx_map_remove_key(hctx, metadata_key_for_name(key));
+ if (r < 0) {
+ CLS_ERR("error removing metadata: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Input:
+ * @param key
+ *
+ * Output:
+ * @param metadata value associated with the key
+ * @returns 0 on success, negative error code on failure
+ */
+int metadata_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string key;
+ bufferlist value;
+
+ auto iter = in->cbegin();
+ try {
+ decode(key, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "metadata_get key=%s", key.c_str());
+
+ int r = cls_cxx_map_get_val(hctx, metadata_key_for_name(key), &value);
+ if (r < 0) {
+ if (r != -ENOENT)
+ CLS_ERR("error getting metadata: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ encode(value, *out);
+ return 0;
+}
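+
+/*
+ * Illustrative client-side sketch (not part of this class): a set/get round
+ * trip for image metadata. Values are stored as opaque bytes; the header
+ * object name and key are assumptions for the example only.
+ *
+ *   std::map<std::string, bufferlist> pairs;
+ *   pairs["conf_rbd_cache"].append("false");
+ *   bufferlist set_in;
+ *   encode(pairs, set_in);
+ *   librados::ObjectWriteOperation op;
+ *   op.exec("rbd", "metadata_set", set_in);
+ *   int r = ioctx.operate(header_oid, &op);
+ *
+ *   bufferlist get_in, get_out;
+ *   encode(std::string("conf_rbd_cache"), get_in);
+ *   r = ioctx.exec(header_oid, "rbd", "metadata_get", get_in, get_out);
+ *   if (r >= 0) {
+ *     bufferlist value;
+ *     auto it = get_out.cbegin();
+ *     decode(value, it);           // raw value bytes stored by metadata_set
+ *   }
+ */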
+
+int snapshot_get_limit(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out)
+{
+ uint64_t snap_limit;
+ int r = read_key(hctx, "snap_limit", &snap_limit);
+ if (r == -ENOENT) {
+ snap_limit = UINT64_MAX;
+ } else if (r < 0) {
+ CLS_ERR("error retrieving snapshot limit: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ CLS_LOG(20, "read snapshot limit %" PRIu64, snap_limit);
+ encode(snap_limit, *out);
+
+ return 0;
+}
+
+int snapshot_set_limit(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out)
+{
+ int rc;
+ uint64_t new_limit;
+ bufferlist bl;
+ size_t snap_count = 0;
+
+ try {
+ auto iter = in->cbegin();
+ decode(new_limit, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ if (new_limit == UINT64_MAX) {
+ CLS_LOG(20, "remove snapshot limit\n");
+ rc = cls_cxx_map_remove_key(hctx, "snap_limit");
+ return rc;
+ }
+
+ //try to read header as v1 format
+ rc = snap_read_header(hctx, bl);
+
+ // error when reading header
+ if (rc < 0 && rc != -EINVAL) {
+ return rc;
+ } else if (rc >= 0) {
+ // success, the image is v1 format
+ struct rbd_obj_header_ondisk *header;
+ header = (struct rbd_obj_header_ondisk *)bl.c_str();
+ snap_count = header->snap_count;
+ } else {
+ // else, the image is v2 format
+ int max_read = RBD_MAX_KEYS_READ;
+ string last_read = RBD_SNAP_KEY_PREFIX;
+ bool more;
+
+ do {
+ set<string> keys;
+ rc = cls_cxx_map_get_keys(hctx, last_read, max_read, &keys, &more);
+ if (rc < 0) {
+ CLS_ERR("error retrieving snapshots: %s", cpp_strerror(rc).c_str());
+ return rc;
+ }
+ for (auto& key : keys) {
+ if (key.find(RBD_SNAP_KEY_PREFIX) != 0)
+ break;
+ snap_count++;
+ }
+ if (!keys.empty())
+ last_read = *(keys.rbegin());
+ } while (more);
+ }
+
+ if (new_limit < snap_count) {
+ rc = -ERANGE;
+ CLS_LOG(10, "snapshot limit is less than the number of snapshots.\n");
+ } else {
+ CLS_LOG(20, "set snapshot limit to %" PRIu64 "\n", new_limit);
+ bl.clear();
+ encode(new_limit, bl);
+ rc = cls_cxx_map_set_val(hctx, "snap_limit", &bl);
+ }
+
+ return rc;
+}
+
+
+/**
+ * Input:
+ * @param snap id (uint64_t) parent snapshot id
+ * @param child spec (cls::rbd::ChildImageSpec) child image
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int child_attach(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t snap_id;
+ cls::rbd::ChildImageSpec child_image;
+ try {
+ auto it = in->cbegin();
+ decode(snap_id, it);
+ decode(child_image, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "child_attach snap_id=%" PRIu64 ", child_pool_id=%" PRIi64 ", "
+ "child_image_id=%s", snap_id, child_image.pool_id,
+ child_image.image_id.c_str());
+
+ cls_rbd_snap snap;
+ std::string snapshot_key;
+ key_from_snap_id(snap_id, &snapshot_key);
+ int r = read_key(hctx, snapshot_key, &snap);
+ if (r < 0) {
+ return r;
+ }
+
+ if (cls::rbd::get_snap_namespace_type(snap.snapshot_namespace) ==
+ cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) {
+ // cannot attach to a deleted snapshot
+ return -ENOENT;
+ }
+
+ auto children_key = image::snap_children_key_from_snap_id(snap_id);
+ cls::rbd::ChildImageSpecs child_images;
+ r = read_key(hctx, children_key, &child_images);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("error reading snapshot children: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ auto it = child_images.insert(child_image);
+ if (!it.second) {
+ // child already attached to the snapshot
+ return -EEXIST;
+ }
+
+ r = write_key(hctx, children_key, child_images);
+ if (r < 0) {
+ CLS_ERR("error writing snapshot children: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ ++snap.child_count;
+ r = image::snapshot::write(hctx, snapshot_key, std::move(snap));
+ if (r < 0) {
+ return r;
+ }
+
+ r = image::set_op_features(hctx, RBD_OPERATION_FEATURE_CLONE_PARENT,
+ RBD_OPERATION_FEATURE_CLONE_PARENT);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Input:
+ * @param snap id (uint64_t) parent snapshot id
+ * @param child spec (cls::rbd::ChildImageSpec) child image
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int child_detach(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t snap_id;
+ cls::rbd::ChildImageSpec child_image;
+ try {
+ auto it = in->cbegin();
+ decode(snap_id, it);
+ decode(child_image, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "child_detach snap_id=%" PRIu64 ", child_pool_id=%" PRIi64 ", "
+ "child_image_id=%s", snap_id, child_image.pool_id,
+ child_image.image_id.c_str());
+
+ cls_rbd_snap snap;
+ std::string snapshot_key;
+ key_from_snap_id(snap_id, &snapshot_key);
+ int r = read_key(hctx, snapshot_key, &snap);
+ if (r < 0) {
+ return r;
+ }
+
+ auto children_key = image::snap_children_key_from_snap_id(snap_id);
+ cls::rbd::ChildImageSpecs child_images;
+ r = read_key(hctx, children_key, &child_images);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("error reading snapshot children: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ if (snap.child_count != child_images.size()) {
+ // children and reference count don't match
+ CLS_ERR("children reference count mismatch: %" PRIu64, snap_id);
+ return -EINVAL;
+ }
+
+ if (child_images.erase(child_image) == 0) {
+ // child not attached to the snapshot
+ return -ENOENT;
+ }
+
+ if (child_images.empty()) {
+ r = remove_key(hctx, children_key);
+ } else {
+ r = write_key(hctx, children_key, child_images);
+ if (r < 0) {
+ CLS_ERR("error writing snapshot children: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ }
+
+ --snap.child_count;
+ r = image::snapshot::write(hctx, snapshot_key, std::move(snap));
+ if (r < 0) {
+ return r;
+ }
+
+ if (snap.child_count == 0) {
+ auto clone_in_use_lambda = [snap_id](const cls_rbd_snap& snap_meta) {
+ if (snap_meta.id != snap_id && snap_meta.child_count > 0) {
+ return -EEXIST;
+ }
+ return 0;
+ };
+
+ r = image::snapshot::iterate(hctx, clone_in_use_lambda);
+ if (r < 0 && r != -EEXIST) {
+ return r;
+ }
+
+ if (r != -EEXIST) {
+ // remove the clone_v2 op feature if not in-use by any other snapshots
+ r = image::set_op_features(hctx, 0, RBD_OPERATION_FEATURE_CLONE_PARENT);
+ if (r < 0) {
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Input:
+ * @param snap id (uint64_t) parent snapshot id
+ *
+ * Output:
+ * @param (cls::rbd::ChildImageSpecs) child images
+ * @returns 0 on success, negative error code on failure
+ */
+int children_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ uint64_t snap_id;
+ try {
+ auto it = in->cbegin();
+ decode(snap_id, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "children_list snap_id=%" PRIu64, snap_id);
+
+ cls_rbd_snap snap;
+ std::string snapshot_key;
+ key_from_snap_id(snap_id, &snapshot_key);
+ int r = read_key(hctx, snapshot_key, &snap);
+ if (r < 0) {
+ return r;
+ }
+
+ auto children_key = image::snap_children_key_from_snap_id(snap_id);
+ cls::rbd::ChildImageSpecs child_images;
+ r = read_key(hctx, children_key, &child_images);
+ if (r == -ENOENT) {
+ return r;
+ } else if (r < 0) {
+ CLS_ERR("error reading snapshot children: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ encode(child_images, *out);
+ return 0;
+}
+
+/**
+ * Set image migration.
+ *
+ * Input:
+ * @param migration_spec (cls::rbd::MigrationSpec) image migration spec
+ *
+ * Output:
+ *
+ * @returns 0 on success, negative error code on failure
+ */
+int migration_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out) {
+ cls::rbd::MigrationSpec migration_spec;
+ try {
+ auto it = in->cbegin();
+ decode(migration_spec, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = image::set_migration(hctx, migration_spec, true);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Set image migration state.
+ *
+ * Input:
+ * @param state (cls::rbd::MigrationState) migration state
+ * @param description (std::string) migration state description
+ *
+ * Output:
+ *
+ * @returns 0 on success, negative error code on failure
+ */
+int migration_set_state(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ cls::rbd::MigrationState state;
+ std::string description;
+ try {
+ auto it = in->cbegin();
+ decode(state, it);
+ decode(description, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ cls::rbd::MigrationSpec migration_spec;
+ int r = image::read_migration(hctx, &migration_spec);
+ if (r < 0) {
+ return r;
+ }
+
+ migration_spec.state = state;
+ migration_spec.state_description = description;
+
+ r = image::set_migration(hctx, migration_spec, false);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Get image migration spec.
+ *
+ * Input:
+ *
+ * Output:
+ * @param migration_spec (cls::rbd::MigrationSpec) image migration spec
+ *
+ * @returns 0 on success, negative error code on failure
+ */
+int migration_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out) {
+ cls::rbd::MigrationSpec migration_spec;
+ int r = image::read_migration(hctx, &migration_spec);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(migration_spec, *out);
+
+ return 0;
+}
+
+/**
+ * Remove image migration spec.
+ *
+ * Input:
+ *
+ * Output:
+ *
+ * @returns 0 on success, negative error code on failure
+ */
+int migration_remove(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ int r = image::remove_migration(hctx);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
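+
+// Illustrative client-side sketch of the migration methods above (method
+// names assumed from the usual cls registration; header_oid and ioctx stand
+// for the image header object and a connected librados::IoCtx):
+//
+//   bufferlist in, out;
+//   encode(migration_spec, in);            // cls::rbd::MigrationSpec
+//   ioctx.exec(header_oid, "rbd", "migration_set", in, out);
+//
+//   in.clear(); out.clear();
+//   cls::rbd::MigrationState state = ...;  // new state
+//   std::string description = "...";
+//   encode(state, in);
+//   encode(description, in);
+//   ioctx.exec(header_oid, "rbd", "migration_set_state", in, out);
+//
+//   in.clear(); out.clear();
+//   ioctx.exec(header_oid, "rbd", "migration_get", in, out);
+//   cls::rbd::MigrationSpec current;
+//   auto it = out.cbegin();
+//   decode(current, it);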
+
+/**
+ * Assert the object's snap context (snapc) sequence state.
+ *
+ * Input:
+ * @param snapc_seq (uint64_t) snap context sequence id
+ * @param state (cls::rbd::AssertSnapcSeqState) snap context state
+ *
+ * Output:
+ * @returns -ERANGE if assertion fails
+ * @returns 0 on success, negative error code on failure
+ */
+int assert_snapc_seq(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out)
+{
+ uint64_t snapc_seq;
+ cls::rbd::AssertSnapcSeqState state;
+ try {
+ auto it = in->cbegin();
+ decode(snapc_seq, it);
+ decode(state, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ uint64_t snapset_seq;
+ int r = cls_get_snapset_seq(hctx, &snapset_seq);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+
+ switch (state) {
+ case cls::rbd::ASSERT_SNAPC_SEQ_GT_SNAPSET_SEQ:
+ return (r == -ENOENT || snapc_seq > snapset_seq) ? 0 : -ERANGE;
+ case cls::rbd::ASSERT_SNAPC_SEQ_LE_SNAPSET_SEQ:
+ return (r == -ENOENT || snapc_seq > snapset_seq) ? -ERANGE : 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
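+
+// Illustrative client-side sketch (method name "assert_snapc_seq" assumed
+// from the usual cls registration): a writer can assert that the object has
+// not yet seen its snap context sequence before issuing a write:
+//
+//   bufferlist in, out;
+//   encode(snapc_seq, in);
+//   cls::rbd::AssertSnapcSeqState state =
+//     cls::rbd::ASSERT_SNAPC_SEQ_GT_SNAPSET_SEQ;
+//   encode(state, in);
+//   int r = ioctx.exec(oid, "rbd", "assert_snapc_seq", in, out);
+//   // r == -ERANGE: the object's snapset sequence is already >= snapc_seq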
+
+/****************************** Old format *******************************/
+
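+// On-disk layout assumed by the old-format (v1) handlers below, inferred
+// from the offset arithmetic they share: a fixed-size rbd_obj_header_ondisk
+// struct, immediately followed by header->snap_count rbd_obj_snap_ondisk
+// entries (snap id and image size), followed by header->snap_names_len bytes
+// of NUL-terminated snapshot names in the same order.  Every mutation below
+// rebuilds the whole object and rewrites it with cls_cxx_write_full().
+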
+int old_snapshots_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ bufferlist bl;
+ struct rbd_obj_header_ondisk *header;
+ int rc = snap_read_header(hctx, bl);
+ if (rc < 0)
+ return rc;
+
+ header = (struct rbd_obj_header_ondisk *)bl.c_str();
+ bufferptr p(header->snap_names_len);
+ char *buf = (char *)header;
+ char *name = buf + sizeof(*header) + header->snap_count * sizeof(struct rbd_obj_snap_ondisk);
+ char *end = name + header->snap_names_len;
+ memcpy(p.c_str(),
+ buf + sizeof(*header) + header->snap_count * sizeof(struct rbd_obj_snap_ondisk),
+ header->snap_names_len);
+
+ encode(header->snap_seq, *out);
+ encode(header->snap_count, *out);
+
+ for (unsigned i = 0; i < header->snap_count; i++) {
+ string s = name;
+ encode(header->snaps[i].id, *out);
+ encode(header->snaps[i].image_size, *out);
+ encode(s, *out);
+
+ name += strlen(name) + 1;
+ if (name > end)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int old_snapshot_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ bufferlist bl;
+ struct rbd_obj_header_ondisk *header;
+ bufferlist newbl;
+ bufferptr header_bp(sizeof(*header));
+ struct rbd_obj_snap_ondisk *new_snaps;
+
+ int rc = snap_read_header(hctx, bl);
+ if (rc < 0)
+ return rc;
+
+ header = (struct rbd_obj_header_ondisk *)bl.c_str();
+
+ int snaps_id_ofs = sizeof(*header);
+ int names_ofs = snaps_id_ofs + sizeof(*new_snaps) * header->snap_count;
+ const char *snap_name;
+ const char *snap_names = ((char *)header) + names_ofs;
+ const char *end = snap_names + header->snap_names_len;
+ auto iter = in->cbegin();
+ string s;
+ uint64_t snap_id;
+
+ try {
+ decode(s, iter);
+ decode(snap_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+ snap_name = s.c_str();
+
+ if (header->snap_seq > snap_id)
+ return -ESTALE;
+
+ uint64_t snap_limit;
+ rc = read_key(hctx, "snap_limit", &snap_limit);
+ if (rc == -ENOENT) {
+ snap_limit = UINT64_MAX;
+ } else if (rc < 0) {
+ return rc;
+ }
+
+ if (header->snap_count >= snap_limit)
+ return -EDQUOT;
+
+ const char *cur_snap_name;
+ for (cur_snap_name = snap_names; cur_snap_name < end; cur_snap_name += strlen(cur_snap_name) + 1) {
+ if (strncmp(cur_snap_name, snap_name, end - cur_snap_name) == 0)
+ return -EEXIST;
+ }
+ if (cur_snap_name > end)
+ return -EIO;
+
+ int snap_name_len = strlen(snap_name);
+
+ bufferptr new_names_bp(header->snap_names_len + snap_name_len + 1);
+ bufferptr new_snaps_bp(sizeof(*new_snaps) * (header->snap_count + 1));
+
+  /* prepend the new snap name, then copy the existing names after it */
+ char *new_snap_names = new_names_bp.c_str();
+ strcpy(new_snap_names, snap_name);
+ memcpy(new_snap_names + snap_name_len + 1, snap_names, header->snap_names_len);
+
+  /* prepend the new snap entry; the existing entries follow it */
+ new_snaps = (struct rbd_obj_snap_ondisk *)new_snaps_bp.c_str();
+ memcpy(new_snaps + 1, header->snaps, sizeof(*new_snaps) * header->snap_count);
+
+ header->snap_count = header->snap_count + 1;
+ header->snap_names_len = header->snap_names_len + snap_name_len + 1;
+ header->snap_seq = snap_id;
+
+ new_snaps[0].id = snap_id;
+ new_snaps[0].image_size = header->image_size;
+
+ memcpy(header_bp.c_str(), header, sizeof(*header));
+
+ newbl.push_back(header_bp);
+ newbl.push_back(new_snaps_bp);
+ newbl.push_back(new_names_bp);
+
+ rc = cls_cxx_write_full(hctx, &newbl);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int old_snapshot_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ bufferlist bl;
+ struct rbd_obj_header_ondisk *header;
+ bufferlist newbl;
+ bufferptr header_bp(sizeof(*header));
+
+ int rc = snap_read_header(hctx, bl);
+ if (rc < 0)
+ return rc;
+
+ header = (struct rbd_obj_header_ondisk *)bl.c_str();
+
+ int snaps_id_ofs = sizeof(*header);
+ int names_ofs = snaps_id_ofs + sizeof(struct rbd_obj_snap_ondisk) * header->snap_count;
+ const char *snap_name;
+ const char *snap_names = ((char *)header) + names_ofs;
+ const char *orig_names = snap_names;
+ const char *end = snap_names + header->snap_names_len;
+ auto iter = in->cbegin();
+ string s;
+ unsigned i;
+ bool found = false;
+ struct rbd_obj_snap_ondisk snap;
+
+ try {
+ decode(s, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+ snap_name = s.c_str();
+
+ for (i = 0; snap_names < end; i++) {
+ if (strcmp(snap_names, snap_name) == 0) {
+ snap = header->snaps[i];
+ found = true;
+ break;
+ }
+ snap_names += strlen(snap_names) + 1;
+ }
+ if (!found) {
+ CLS_ERR("couldn't find snap %s\n", snap_name);
+ return -ENOENT;
+ }
+
+ header->snap_names_len = header->snap_names_len - (s.length() + 1);
+ header->snap_count = header->snap_count - 1;
+
+ bufferptr new_names_bp(header->snap_names_len);
+ bufferptr new_snaps_bp(sizeof(header->snaps[0]) * header->snap_count);
+
+ memcpy(header_bp.c_str(), header, sizeof(*header));
+ newbl.push_back(header_bp);
+
+ if (header->snap_count) {
+ int snaps_len = 0;
+ int names_len = 0;
+ CLS_LOG(20, "i=%u\n", i);
+ if (i > 0) {
+ snaps_len = sizeof(header->snaps[0]) * i;
+ names_len = snap_names - orig_names;
+ memcpy(new_snaps_bp.c_str(), header->snaps, snaps_len);
+ memcpy(new_names_bp.c_str(), orig_names, names_len);
+ }
+ snap_names += s.length() + 1;
+
+ if (i < header->snap_count) {
+ memcpy(new_snaps_bp.c_str() + snaps_len,
+ header->snaps + i + 1,
+ sizeof(header->snaps[0]) * (header->snap_count - i));
+ memcpy(new_names_bp.c_str() + names_len, snap_names , end - snap_names);
+ }
+ newbl.push_back(new_snaps_bp);
+ newbl.push_back(new_names_bp);
+ }
+
+ rc = cls_cxx_write_full(hctx, &newbl);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * Rename a snapshot of an old-format image.
+ *
+ * Input:
+ * @param src_snap_id old snap id of the snapshot (snapid_t)
+ * @param dst_snap_name new name of the snapshot (string)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure.
+*/
+int old_snapshot_rename(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ bufferlist bl;
+ struct rbd_obj_header_ondisk *header;
+ bufferlist newbl;
+ bufferptr header_bp(sizeof(*header));
+ snapid_t src_snap_id;
+ const char *dst_snap_name;
+ string dst;
+
+ int rc = snap_read_header(hctx, bl);
+ if (rc < 0)
+ return rc;
+
+ header = (struct rbd_obj_header_ondisk *)bl.c_str();
+
+ int snaps_id_ofs = sizeof(*header);
+ int names_ofs = snaps_id_ofs + sizeof(rbd_obj_snap_ondisk) * header->snap_count;
+ const char *snap_names = ((char *)header) + names_ofs;
+ const char *orig_names = snap_names;
+ const char *end = snap_names + header->snap_names_len;
+ auto iter = in->cbegin();
+ unsigned i;
+ bool found = false;
+
+ try {
+ decode(src_snap_id, iter);
+ decode(dst, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+ dst_snap_name = dst.c_str();
+
+ const char *cur_snap_name;
+ for (cur_snap_name = snap_names; cur_snap_name < end;
+ cur_snap_name += strlen(cur_snap_name) + 1) {
+ if (strcmp(cur_snap_name, dst_snap_name) == 0)
+ return -EEXIST;
+ }
+ if (cur_snap_name > end)
+ return -EIO;
+ for (i = 0; i < header->snap_count; i++) {
+ if (src_snap_id == header->snaps[i].id) {
+ found = true;
+ break;
+ }
+ snap_names += strlen(snap_names) + 1;
+ }
+ if (!found) {
+ CLS_ERR("couldn't find snap %llu\n", (unsigned long long)src_snap_id.val);
+ return -ENOENT;
+ }
+
+ CLS_LOG(20, "rename snap with snap id %llu to dest name %s", (unsigned long long)src_snap_id.val, dst_snap_name);
+ header->snap_names_len = header->snap_names_len - strlen(snap_names) + dst.length();
+
+ bufferptr new_names_bp(header->snap_names_len);
+ bufferptr new_snaps_bp(sizeof(header->snaps[0]) * header->snap_count);
+
+ if (header->snap_count) {
+ int names_len = 0;
+ CLS_LOG(20, "i=%u\n", i);
+ if (i > 0) {
+ names_len = snap_names - orig_names;
+ memcpy(new_names_bp.c_str(), orig_names, names_len);
+ }
+ strcpy(new_names_bp.c_str() + names_len, dst_snap_name);
+ names_len += strlen(dst_snap_name) + 1;
+ snap_names += strlen(snap_names) + 1;
+ if (i < header->snap_count) {
+ memcpy(new_names_bp.c_str() + names_len, snap_names , end - snap_names);
+ }
+ memcpy(new_snaps_bp.c_str(), header->snaps, sizeof(header->snaps[0]) * header->snap_count);
+ }
+
+ memcpy(header_bp.c_str(), header, sizeof(*header));
+ newbl.push_back(header_bp);
+ newbl.push_back(new_snaps_bp);
+ newbl.push_back(new_names_bp);
+
+ rc = cls_cxx_write_full(hctx, &newbl);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+
+
+namespace mirror {
+
+static const std::string UUID("mirror_uuid");
+static const std::string MODE("mirror_mode");
+static const std::string PEER_KEY_PREFIX("mirror_peer_");
+static const std::string IMAGE_KEY_PREFIX("image_");
+static const std::string GLOBAL_KEY_PREFIX("global_");
+static const std::string STATUS_GLOBAL_KEY_PREFIX("status_global_");
+static const std::string INSTANCE_KEY_PREFIX("instance_");
+static const std::string MIRROR_IMAGE_MAP_KEY_PREFIX("image_map_");
+
+std::string peer_key(const std::string &uuid) {
+ return PEER_KEY_PREFIX + uuid;
+}
+
+std::string image_key(const string &image_id) {
+ return IMAGE_KEY_PREFIX + image_id;
+}
+
+std::string global_key(const string &global_id) {
+ return GLOBAL_KEY_PREFIX + global_id;
+}
+
+std::string status_global_key(const string &global_id) {
+ return STATUS_GLOBAL_KEY_PREFIX + global_id;
+}
+
+std::string instance_key(const string &instance_id) {
+ return INSTANCE_KEY_PREFIX + instance_id;
+}
+
+std::string mirror_image_map_key(const string& global_image_id) {
+ return MIRROR_IMAGE_MAP_KEY_PREFIX + global_image_id;
+}
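+
+// omap key layout for the pool-level mirroring metadata object, as implied
+// by the helpers above:
+//   mirror_uuid               -> local pool mirror uuid (raw string)
+//   mirror_mode               -> cls::rbd::MirrorMode (uint32_t)
+//   mirror_peer_<uuid>        -> cls::rbd::MirrorPeer
+//   image_<image id>          -> cls::rbd::MirrorImage
+//   global_<global image id>  -> local image id (encoded string)
+//   status_global_<global id> -> MirrorImageStatusOnDisk
+//   instance_<instance id>    -> empty value (presence only)
+//   image_map_<global id>     -> cls::rbd::MirrorImageMap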
+
+int uuid_get(cls_method_context_t hctx, std::string *mirror_uuid) {
+ bufferlist mirror_uuid_bl;
+ int r = cls_cxx_map_get_val(hctx, mirror::UUID, &mirror_uuid_bl);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading mirror uuid: %s", cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ *mirror_uuid = std::string(mirror_uuid_bl.c_str(), mirror_uuid_bl.length());
+ return 0;
+}
+
+void sanitize_entity_inst(entity_inst_t* entity_inst) {
+  // make all addrs of type ANY because the type isn't what uniquely
+  // identifies them, and clients and on-disk formats can be encoded
+  // with different backwards-compatibility settings.
+ entity_inst->addr.set_type(entity_addr_t::TYPE_ANY);
+}
+
+int list_watchers(cls_method_context_t hctx,
+ std::set<entity_inst_t> *entities) {
+ obj_list_watch_response_t watchers;
+ int r = cls_cxx_list_watchers(hctx, &watchers);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("error listing watchers: '%s'", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ entities->clear();
+ for (auto &w : watchers.entries) {
+ entity_inst_t entity_inst{w.name, w.addr};
+ sanitize_entity_inst(&entity_inst);
+
+ entities->insert(entity_inst);
+ }
+ return 0;
+}
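+
+// The watcher set collected here is the liveness signal for mirror status:
+// image_status_get() reports a status as "up" only if its recorded origin
+// still holds a watch on this object, and image_status_remove_down() prunes
+// status entries whose origin is no longer watching.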
+
+int read_peers(cls_method_context_t hctx,
+ std::vector<cls::rbd::MirrorPeer> *peers) {
+ std::string last_read = PEER_KEY_PREFIX;
+ int max_read = RBD_MAX_KEYS_READ;
+ bool more = true;
+ while (more) {
+ std::map<std::string, bufferlist> vals;
+ int r = cls_cxx_map_get_vals(hctx, last_read, PEER_KEY_PREFIX.c_str(),
+ max_read, &vals, &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading peers: %s", cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ for (auto &it : vals) {
+ try {
+ auto bl_it = it.second.cbegin();
+ cls::rbd::MirrorPeer peer;
+ decode(peer, bl_it);
+ peers->push_back(peer);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode peer '%s'", it.first.c_str());
+ return -EIO;
+ }
+ }
+
+ if (!vals.empty()) {
+ last_read = vals.rbegin()->first;
+ }
+ }
+ return 0;
+}
+
+int read_peer(cls_method_context_t hctx, const std::string &id,
+ cls::rbd::MirrorPeer *peer) {
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, peer_key(id), &bl);
+ if (r < 0) {
+ CLS_ERR("error reading peer '%s': %s", id.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ try {
+ auto bl_it = bl.cbegin();
+ decode(*peer, bl_it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode peer '%s'", id.c_str());
+ return -EIO;
+ }
+ return 0;
+}
+
+int write_peer(cls_method_context_t hctx, const std::string &id,
+ const cls::rbd::MirrorPeer &peer) {
+ bufferlist bl;
+ encode(peer, bl);
+
+ int r = cls_cxx_map_set_val(hctx, peer_key(id), &bl);
+ if (r < 0) {
+ CLS_ERR("error writing peer '%s': %s", id.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+ return 0;
+}
+
+int image_get(cls_method_context_t hctx, const string &image_id,
+ cls::rbd::MirrorImage *mirror_image) {
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, image_key(image_id), &bl);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading mirrored image '%s': '%s'", image_id.c_str(),
+ cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ try {
+ auto it = bl.cbegin();
+ decode(*mirror_image, it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode mirrored image '%s'", image_id.c_str());
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int image_set(cls_method_context_t hctx, const string &image_id,
+ const cls::rbd::MirrorImage &mirror_image) {
+ bufferlist bl;
+ encode(mirror_image, bl);
+
+ cls::rbd::MirrorImage existing_mirror_image;
+ int r = image_get(hctx, image_id, &existing_mirror_image);
+ if (r == -ENOENT) {
+ // make sure global id doesn't already exist
+ std::string global_id_key = global_key(mirror_image.global_image_id);
+    std::string stored_image_id;
+    r = read_key(hctx, global_id_key, &stored_image_id);
+ if (r >= 0) {
+ return -EEXIST;
+ } else if (r != -ENOENT) {
+ CLS_ERR("error reading global image id: '%s': '%s'", image_id.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ // make sure this was not a race for disabling
+ if (mirror_image.state == cls::rbd::MIRROR_IMAGE_STATE_DISABLING) {
+ CLS_ERR("image '%s' is already disabled", image_id.c_str());
+ return r;
+ }
+ } else if (r < 0) {
+ CLS_ERR("error reading mirrored image '%s': '%s'", image_id.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ } else if (existing_mirror_image.global_image_id !=
+ mirror_image.global_image_id) {
+ // cannot change the global id
+ return -EINVAL;
+ }
+
+ r = cls_cxx_map_set_val(hctx, image_key(image_id), &bl);
+ if (r < 0) {
+ CLS_ERR("error adding mirrored image '%s': %s", image_id.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ bufferlist image_id_bl;
+ encode(image_id, image_id_bl);
+ r = cls_cxx_map_set_val(hctx, global_key(mirror_image.global_image_id),
+ &image_id_bl);
+ if (r < 0) {
+ CLS_ERR("error adding global id for image '%s': %s", image_id.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+ return 0;
+}
+
+int image_remove(cls_method_context_t hctx, const string &image_id) {
+ bufferlist bl;
+ cls::rbd::MirrorImage mirror_image;
+ int r = image_get(hctx, image_id, &mirror_image);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading mirrored image '%s': '%s'", image_id.c_str(),
+ cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ if (mirror_image.state != cls::rbd::MIRROR_IMAGE_STATE_DISABLING) {
+ return -EBUSY;
+ }
+
+ r = cls_cxx_map_remove_key(hctx, image_key(image_id));
+ if (r < 0) {
+ CLS_ERR("error removing mirrored image '%s': %s", image_id.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ r = cls_cxx_map_remove_key(hctx, global_key(mirror_image.global_image_id));
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("error removing global id for image '%s': %s", image_id.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ r = cls_cxx_map_remove_key(hctx,
+ status_global_key(mirror_image.global_image_id));
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("error removing global status for image '%s': %s", image_id.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+struct MirrorImageStatusOnDisk : cls::rbd::MirrorImageStatus {
+ entity_inst_t origin;
+
+ MirrorImageStatusOnDisk() {
+ }
+ MirrorImageStatusOnDisk(const cls::rbd::MirrorImageStatus &status) :
+ cls::rbd::MirrorImageStatus(status) {
+ }
+
+ void encode_meta(bufferlist &bl, uint64_t features) const {
+ ENCODE_START(1, 1, bl);
+ auto sanitized_origin = origin;
+ sanitize_entity_inst(&sanitized_origin);
+ encode(sanitized_origin, bl, features);
+ ENCODE_FINISH(bl);
+ }
+
+ void encode(bufferlist &bl, uint64_t features) const {
+ encode_meta(bl, features);
+ cls::rbd::MirrorImageStatus::encode(bl);
+ }
+
+ void decode_meta(bufferlist::const_iterator &it) {
+ DECODE_START(1, it);
+ decode(origin, it);
+ sanitize_entity_inst(&origin);
+ DECODE_FINISH(it);
+ }
+
+ void decode(bufferlist::const_iterator &it) {
+ decode_meta(it);
+ cls::rbd::MirrorImageStatus::decode(it);
+ }
+};
+WRITE_CLASS_ENCODER_FEATURES(MirrorImageStatusOnDisk)
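+
+// The reporter's origin is encoded in a separate, versioned "meta" blob in
+// front of the MirrorImageStatus payload so that scanners such as
+// image_status_remove_down() can extract just the origin via decode_meta()
+// without decoding the full status.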
+
+int image_status_set(cls_method_context_t hctx, const string &global_image_id,
+ const cls::rbd::MirrorImageStatus &status) {
+ MirrorImageStatusOnDisk ondisk_status(status);
+ ondisk_status.up = false;
+ ondisk_status.last_update = ceph_clock_now();
+
+ int r = cls_get_request_origin(hctx, &ondisk_status.origin);
+ ceph_assert(r == 0);
+
+ bufferlist bl;
+ encode(ondisk_status, bl, cls_get_features(hctx));
+
+ r = cls_cxx_map_set_val(hctx, status_global_key(global_image_id), &bl);
+ if (r < 0) {
+ CLS_ERR("error setting status for mirrored image, global id '%s': %s",
+ global_image_id.c_str(), cpp_strerror(r).c_str());
+ return r;
+ }
+ return 0;
+}
+
+int image_status_remove(cls_method_context_t hctx,
+ const string &global_image_id) {
+
+ int r = cls_cxx_map_remove_key(hctx, status_global_key(global_image_id));
+ if (r < 0) {
+ CLS_ERR("error removing status for mirrored image, global id '%s': %s",
+ global_image_id.c_str(), cpp_strerror(r).c_str());
+ return r;
+ }
+ return 0;
+}
+
+int image_status_get(cls_method_context_t hctx, const string &global_image_id,
+ const std::set<entity_inst_t> &watchers,
+ cls::rbd::MirrorImageStatus *status) {
+
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, status_global_key(global_image_id), &bl);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading status for mirrored image, global id '%s': '%s'",
+ global_image_id.c_str(), cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ MirrorImageStatusOnDisk ondisk_status;
+ try {
+ auto it = bl.cbegin();
+ decode(ondisk_status, it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode status for mirrored image, global id '%s'",
+ global_image_id.c_str());
+ return -EIO;
+ }
+
+
+ *status = static_cast<cls::rbd::MirrorImageStatus>(ondisk_status);
+ status->up = (watchers.find(ondisk_status.origin) != watchers.end());
+ return 0;
+}
+
+int image_status_list(cls_method_context_t hctx,
+ const std::string &start_after, uint64_t max_return,
+ map<std::string, cls::rbd::MirrorImage> *mirror_images,
+ map<std::string, cls::rbd::MirrorImageStatus> *mirror_statuses) {
+ std::string last_read = image_key(start_after);
+ int max_read = RBD_MAX_KEYS_READ;
+ bool more = true;
+
+ std::set<entity_inst_t> watchers;
+ int r = list_watchers(hctx, &watchers);
+ if (r < 0) {
+ return r;
+ }
+
+ while (more && mirror_images->size() < max_return) {
+ std::map<std::string, bufferlist> vals;
+ CLS_LOG(20, "last_read = '%s'", last_read.c_str());
+ r = cls_cxx_map_get_vals(hctx, last_read, IMAGE_KEY_PREFIX, max_read, &vals,
+ &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading mirror image directory by name: %s",
+ cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ for (auto it = vals.begin(); it != vals.end() &&
+ mirror_images->size() < max_return; ++it) {
+ const std::string &image_id = it->first.substr(IMAGE_KEY_PREFIX.size());
+ cls::rbd::MirrorImage mirror_image;
+ auto iter = it->second.cbegin();
+ try {
+ decode(mirror_image, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode mirror image payload of image '%s'",
+ image_id.c_str());
+ return -EIO;
+ }
+
+ (*mirror_images)[image_id] = mirror_image;
+
+ cls::rbd::MirrorImageStatus status;
+ int r1 = image_status_get(hctx, mirror_image.global_image_id, watchers,
+ &status);
+ if (r1 < 0) {
+ continue;
+ }
+
+ (*mirror_statuses)[image_id] = status;
+ }
+ if (!vals.empty()) {
+ last_read = image_key(mirror_images->rbegin()->first);
+ }
+ }
+
+ return 0;
+}
+
+int image_status_get_summary(
+ cls_method_context_t hctx,
+ std::map<cls::rbd::MirrorImageStatusState, int> *states) {
+ std::set<entity_inst_t> watchers;
+ int r = list_watchers(hctx, &watchers);
+ if (r < 0) {
+ return r;
+ }
+
+ states->clear();
+
+ string last_read = IMAGE_KEY_PREFIX;
+ int max_read = RBD_MAX_KEYS_READ;
+ bool more = true;
+ while (more) {
+ map<string, bufferlist> vals;
+ r = cls_cxx_map_get_vals(hctx, last_read, IMAGE_KEY_PREFIX,
+ max_read, &vals, &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading mirrored images: %s", cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ for (auto &list_it : vals) {
+ const string &key = list_it.first;
+
+ if (0 != key.compare(0, IMAGE_KEY_PREFIX.size(), IMAGE_KEY_PREFIX)) {
+ break;
+ }
+
+ cls::rbd::MirrorImage mirror_image;
+ auto iter = list_it.second.cbegin();
+ try {
+ decode(mirror_image, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode mirror image payload for key '%s'",
+ key.c_str());
+ return -EIO;
+ }
+
+ cls::rbd::MirrorImageStatus status;
+ image_status_get(hctx, mirror_image.global_image_id, watchers, &status);
+
+ cls::rbd::MirrorImageStatusState state = status.up ? status.state :
+ cls::rbd::MIRROR_IMAGE_STATUS_STATE_UNKNOWN;
+ (*states)[state]++;
+ }
+
+ if (!vals.empty()) {
+ last_read = vals.rbegin()->first;
+ }
+ }
+
+ return 0;
+}
+
+int image_status_remove_down(cls_method_context_t hctx) {
+ std::set<entity_inst_t> watchers;
+ int r = list_watchers(hctx, &watchers);
+ if (r < 0) {
+ return r;
+ }
+
+ string last_read = STATUS_GLOBAL_KEY_PREFIX;
+ int max_read = RBD_MAX_KEYS_READ;
+ bool more = true;
+ while (more) {
+ map<string, bufferlist> vals;
+ r = cls_cxx_map_get_vals(hctx, last_read, STATUS_GLOBAL_KEY_PREFIX,
+ max_read, &vals, &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading mirrored images: %s", cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ for (auto &list_it : vals) {
+ const string &key = list_it.first;
+
+ if (0 != key.compare(0, STATUS_GLOBAL_KEY_PREFIX.size(),
+ STATUS_GLOBAL_KEY_PREFIX)) {
+ break;
+ }
+
+ MirrorImageStatusOnDisk status;
+ try {
+ auto it = list_it.second.cbegin();
+ status.decode_meta(it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode status metadata for mirrored image '%s'",
+ key.c_str());
+ return -EIO;
+ }
+
+ if (watchers.find(status.origin) == watchers.end()) {
+ CLS_LOG(20, "removing stale status object for key %s",
+ key.c_str());
+ int r1 = cls_cxx_map_remove_key(hctx, key);
+ if (r1 < 0) {
+ CLS_ERR("error removing stale status for key '%s': %s",
+ key.c_str(), cpp_strerror(r1).c_str());
+ return r1;
+ }
+ }
+ }
+
+ if (!vals.empty()) {
+ last_read = vals.rbegin()->first;
+ }
+ }
+
+ return 0;
+}
+
+int image_instance_get(cls_method_context_t hctx,
+ const string &global_image_id,
+ const std::set<entity_inst_t> &watchers,
+ entity_inst_t *instance) {
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, status_global_key(global_image_id), &bl);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading status for mirrored image, global id '%s': '%s'",
+ global_image_id.c_str(), cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ MirrorImageStatusOnDisk ondisk_status;
+ try {
+ auto it = bl.cbegin();
+ decode(ondisk_status, it);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode status for mirrored image, global id '%s'",
+ global_image_id.c_str());
+ return -EIO;
+ }
+
+ if (watchers.find(ondisk_status.origin) == watchers.end()) {
+ return -ESTALE;
+ }
+
+ *instance = ondisk_status.origin;
+ return 0;
+}
+
+int image_instance_list(cls_method_context_t hctx,
+ const std::string &start_after,
+ uint64_t max_return,
+ map<std::string, entity_inst_t> *instances) {
+ std::string last_read = image_key(start_after);
+ int max_read = RBD_MAX_KEYS_READ;
+ bool more = true;
+
+ std::set<entity_inst_t> watchers;
+ int r = list_watchers(hctx, &watchers);
+ if (r < 0) {
+ return r;
+ }
+
+ while (more && instances->size() < max_return) {
+ std::map<std::string, bufferlist> vals;
+ CLS_LOG(20, "last_read = '%s'", last_read.c_str());
+ r = cls_cxx_map_get_vals(hctx, last_read, IMAGE_KEY_PREFIX, max_read, &vals,
+ &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading mirror image directory by name: %s",
+ cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ for (auto it = vals.begin(); it != vals.end() &&
+ instances->size() < max_return; ++it) {
+ const std::string &image_id = it->first.substr(IMAGE_KEY_PREFIX.size());
+ cls::rbd::MirrorImage mirror_image;
+ auto iter = it->second.cbegin();
+ try {
+ decode(mirror_image, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode mirror image payload of image '%s'",
+ image_id.c_str());
+ return -EIO;
+ }
+
+ entity_inst_t instance;
+ r = image_instance_get(hctx, mirror_image.global_image_id, watchers,
+ &instance);
+ if (r < 0) {
+ continue;
+ }
+
+ (*instances)[image_id] = instance;
+ }
+ if (!vals.empty()) {
+ last_read = vals.rbegin()->first;
+ }
+ }
+
+ return 0;
+}
+
+int instances_list(cls_method_context_t hctx,
+ std::vector<std::string> *instance_ids) {
+ std::string last_read = INSTANCE_KEY_PREFIX;
+ int max_read = RBD_MAX_KEYS_READ;
+ bool more = true;
+ while (more) {
+ std::map<std::string, bufferlist> vals;
+ int r = cls_cxx_map_get_vals(hctx, last_read, INSTANCE_KEY_PREFIX.c_str(),
+ max_read, &vals, &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading mirror instances: %s", cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ for (auto &it : vals) {
+ instance_ids->push_back(it.first.substr(INSTANCE_KEY_PREFIX.size()));
+ }
+
+ if (!vals.empty()) {
+ last_read = vals.rbegin()->first;
+ }
+ }
+ return 0;
+}
+
+int instances_add(cls_method_context_t hctx, const string &instance_id) {
+ bufferlist bl;
+
+ int r = cls_cxx_map_set_val(hctx, instance_key(instance_id), &bl);
+ if (r < 0) {
+ CLS_ERR("error setting mirror instance %s: %s", instance_id.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+ return 0;
+}
+
+int instances_remove(cls_method_context_t hctx, const string &instance_id) {
+
+ int r = cls_cxx_map_remove_key(hctx, instance_key(instance_id));
+ if (r < 0) {
+ CLS_ERR("error removing mirror instance %s: %s", instance_id.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+ return 0;
+}
+
+int mirror_image_map_list(cls_method_context_t hctx,
+ const std::string &start_after,
+ uint64_t max_return,
+ std::map<std::string, cls::rbd::MirrorImageMap> *image_mapping) {
+ bool more = true;
+ std::string last_read = mirror_image_map_key(start_after);
+
+ while (more && image_mapping->size() < max_return) {
+ std::map<std::string, bufferlist> vals;
+ CLS_LOG(20, "last read: '%s'", last_read.c_str());
+
+ int max_read = std::min<uint64_t>(RBD_MAX_KEYS_READ, max_return - image_mapping->size());
+ int r = cls_cxx_map_get_vals(hctx, last_read, MIRROR_IMAGE_MAP_KEY_PREFIX,
+ max_read, &vals, &more);
+ if (r < 0) {
+ CLS_ERR("error reading image map: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ if (vals.empty()) {
+ return 0;
+ }
+
+ for (auto it = vals.begin(); it != vals.end(); ++it) {
+ const std::string &global_image_id =
+ it->first.substr(MIRROR_IMAGE_MAP_KEY_PREFIX.size());
+
+ cls::rbd::MirrorImageMap mirror_image_map;
+ auto iter = it->second.cbegin();
+ try {
+ decode(mirror_image_map, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode image map payload: %s",
+ cpp_strerror(r).c_str());
+ return -EINVAL;
+ }
+
+ image_mapping->insert(std::make_pair(global_image_id, mirror_image_map));
+ }
+
+ if (!vals.empty()) {
+ last_read = vals.rbegin()->first;
+ }
+ }
+
+ return 0;
+}
+
+} // namespace mirror
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * @param uuid (std::string)
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_uuid_get(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string mirror_uuid;
+ int r = mirror::uuid_get(hctx, &mirror_uuid);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(mirror_uuid, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param mirror_uuid (std::string)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_uuid_set(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string mirror_uuid;
+ try {
+ auto bl_it = in->cbegin();
+ decode(mirror_uuid, bl_it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ if (mirror_uuid.empty()) {
+ CLS_ERR("cannot set empty mirror uuid");
+ return -EINVAL;
+ }
+
+ uint32_t mirror_mode;
+ int r = read_key(hctx, mirror::MODE, &mirror_mode);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ } else if (r == 0 && mirror_mode != cls::rbd::MIRROR_MODE_DISABLED) {
+ CLS_ERR("cannot set mirror uuid while mirroring enabled");
+ return -EINVAL;
+ }
+
+ bufferlist mirror_uuid_bl;
+ mirror_uuid_bl.append(mirror_uuid);
+ r = cls_cxx_map_set_val(hctx, mirror::UUID, &mirror_uuid_bl);
+ if (r < 0) {
+ CLS_ERR("failed to set mirror uuid");
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * @param cls::rbd::MirrorMode (uint32_t)
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_mode_get(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint32_t mirror_mode_decode;
+ int r = read_key(hctx, mirror::MODE, &mirror_mode_decode);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(mirror_mode_decode, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param mirror_mode (cls::rbd::MirrorMode) (uint32_t)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_mode_set(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint32_t mirror_mode_decode;
+ try {
+ auto bl_it = in->cbegin();
+ decode(mirror_mode_decode, bl_it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ bool enabled;
+ switch (static_cast<cls::rbd::MirrorMode>(mirror_mode_decode)) {
+ case cls::rbd::MIRROR_MODE_DISABLED:
+ enabled = false;
+ break;
+ case cls::rbd::MIRROR_MODE_IMAGE:
+ case cls::rbd::MIRROR_MODE_POOL:
+ enabled = true;
+ break;
+ default:
+ CLS_ERR("invalid mirror mode: %d", mirror_mode_decode);
+ return -EINVAL;
+ }
+
+ int r;
+ if (enabled) {
+ std::string mirror_uuid;
+ r = mirror::uuid_get(hctx, &mirror_uuid);
+ if (r == -ENOENT) {
+ return -EINVAL;
+ } else if (r < 0) {
+ return r;
+ }
+
+ bufferlist bl;
+ encode(mirror_mode_decode, bl);
+
+ r = cls_cxx_map_set_val(hctx, mirror::MODE, &bl);
+ if (r < 0) {
+ CLS_ERR("error enabling mirroring: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ } else {
+ std::vector<cls::rbd::MirrorPeer> peers;
+ r = mirror::read_peers(hctx, &peers);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+
+ if (!peers.empty()) {
+ CLS_ERR("mirroring peers still registered");
+ return -EBUSY;
+ }
+
+ r = remove_key(hctx, mirror::MODE);
+ if (r < 0) {
+ return r;
+ }
+
+ r = remove_key(hctx, mirror::UUID);
+ if (r < 0) {
+ return r;
+ }
+ }
+ return 0;
+}
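+
+// Illustrative client-side sketch of enabling pool mirroring (method names
+// assumed from the usual cls registration; mirroring_oid stands for the
+// pool's mirroring metadata object):
+//
+//   bufferlist in, out;
+//   encode(std::string("11111111-2222-3333-4444-555555555555"), in);
+//   ioctx.exec(mirroring_oid, "rbd", "mirror_uuid_set", in, out);
+//
+//   in.clear(); out.clear();
+//   encode(static_cast<uint32_t>(cls::rbd::MIRROR_MODE_POOL), in);
+//   ioctx.exec(mirroring_oid, "rbd", "mirror_mode_set", in, out);
+//
+// The uuid must be set first: mirror_mode_set() refuses to enable mirroring
+// while no uuid is stored, and mirror_uuid_set() refuses to change the uuid
+// once mirroring is enabled.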
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * @param std::vector<cls::rbd::MirrorPeer>: collection of peers
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_peer_list(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::vector<cls::rbd::MirrorPeer> peers;
+ int r = mirror::read_peers(hctx, &peers);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+
+ encode(peers, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param mirror_peer (cls::rbd::MirrorPeer)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_peer_add(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ cls::rbd::MirrorPeer mirror_peer;
+ try {
+ auto it = in->cbegin();
+ decode(mirror_peer, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ uint32_t mirror_mode_decode;
+ int r = read_key(hctx, mirror::MODE, &mirror_mode_decode);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ } else if (r == -ENOENT ||
+ mirror_mode_decode == cls::rbd::MIRROR_MODE_DISABLED) {
+ CLS_ERR("mirroring must be enabled on the pool");
+ return -EINVAL;
+ } else if (!mirror_peer.is_valid()) {
+ CLS_ERR("mirror peer is not valid");
+ return -EINVAL;
+ }
+
+ std::string mirror_uuid;
+ r = mirror::uuid_get(hctx, &mirror_uuid);
+ if (r < 0) {
+ CLS_ERR("error retrieving mirroring uuid: %s", cpp_strerror(r).c_str());
+ return r;
+ } else if (mirror_peer.uuid == mirror_uuid) {
+ CLS_ERR("peer uuid '%s' matches pool mirroring uuid",
+ mirror_uuid.c_str());
+ return -EINVAL;
+ }
+
+ std::vector<cls::rbd::MirrorPeer> peers;
+ r = mirror::read_peers(hctx, &peers);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+
+ for (auto const &peer : peers) {
+ if (peer.uuid == mirror_peer.uuid) {
+ CLS_ERR("peer uuid '%s' already exists",
+ peer.uuid.c_str());
+ return -ESTALE;
+ } else if (peer.cluster_name == mirror_peer.cluster_name &&
+ (peer.pool_id == -1 || mirror_peer.pool_id == -1 ||
+ peer.pool_id == mirror_peer.pool_id)) {
+ CLS_ERR("peer cluster name '%s' already exists",
+ peer.cluster_name.c_str());
+ return -EEXIST;
+ }
+ }
+
+ bufferlist bl;
+ encode(mirror_peer, bl);
+ r = cls_cxx_map_set_val(hctx, mirror::peer_key(mirror_peer.uuid),
+ &bl);
+ if (r < 0) {
+ CLS_ERR("error adding peer: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ return 0;
+}
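+
+// Illustrative client-side sketch (method name "mirror_peer_add" assumed;
+// the MirrorPeer field names are taken from their uses above):
+//
+//   cls::rbd::MirrorPeer peer;
+//   peer.uuid = "...";              // unique peer uuid
+//   peer.cluster_name = "remote";   // peer cluster name
+//   peer.client_name = "client.rbd-mirror-peer";
+//   bufferlist in, out;
+//   encode(peer, in);
+//   int r = ioctx.exec(mirroring_oid, "rbd", "mirror_peer_add", in, out);
+//   // -EINVAL: mirroring disabled or peer invalid; -EEXIST: duplicate cluster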
+
+/**
+ * Input:
+ * @param uuid (std::string)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_peer_remove(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string uuid;
+ try {
+ auto it = in->cbegin();
+ decode(uuid, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = cls_cxx_map_remove_key(hctx, mirror::peer_key(uuid));
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("error removing peer: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param uuid (std::string)
+ * @param client_name (std::string)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_peer_set_client(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string uuid;
+ std::string client_name;
+ try {
+ auto it = in->cbegin();
+ decode(uuid, it);
+ decode(client_name, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ cls::rbd::MirrorPeer peer;
+ int r = mirror::read_peer(hctx, uuid, &peer);
+ if (r < 0) {
+ return r;
+ }
+
+ peer.client_name = client_name;
+ r = mirror::write_peer(hctx, uuid, peer);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param uuid (std::string)
+ * @param cluster_name (std::string)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_peer_set_cluster(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string uuid;
+ std::string cluster_name;
+ try {
+ auto it = in->cbegin();
+ decode(uuid, it);
+ decode(cluster_name, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ cls::rbd::MirrorPeer peer;
+ int r = mirror::read_peer(hctx, uuid, &peer);
+ if (r < 0) {
+ return r;
+ }
+
+ peer.cluster_name = cluster_name;
+ r = mirror::write_peer(hctx, uuid, peer);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param start_after which name to begin listing after
+ * (use the empty string to start at the beginning)
+ * @param max_return the maximum number of names to list
+ *
+ * Output:
+ * @param std::map<std::string, std::string>: local id to global id map
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_list(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string start_after;
+ uint64_t max_return;
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int max_read = RBD_MAX_KEYS_READ;
+ bool more = true;
+ std::map<std::string, std::string> mirror_images;
+ std::string last_read = mirror::image_key(start_after);
+
+ while (more && mirror_images.size() < max_return) {
+ std::map<std::string, bufferlist> vals;
+ CLS_LOG(20, "last_read = '%s'", last_read.c_str());
+ int r = cls_cxx_map_get_vals(hctx, last_read, mirror::IMAGE_KEY_PREFIX,
+ max_read, &vals, &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading mirror image directory by name: %s",
+ cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ for (auto it = vals.begin(); it != vals.end(); ++it) {
+ const std::string &image_id =
+ it->first.substr(mirror::IMAGE_KEY_PREFIX.size());
+ cls::rbd::MirrorImage mirror_image;
+ auto iter = it->second.cbegin();
+ try {
+ decode(mirror_image, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode mirror image payload of image '%s'",
+ image_id.c_str());
+ return -EIO;
+ }
+
+ mirror_images[image_id] = mirror_image.global_image_id;
+ if (mirror_images.size() >= max_return) {
+ break;
+ }
+ }
+ if (!vals.empty()) {
+ last_read = mirror::image_key(mirror_images.rbegin()->first);
+ }
+ }
+
+ encode(mirror_images, *out);
+ return 0;
+}
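+
+// Illustrative client-side pagination sketch: pass the last image id of the
+// previous page as start_after until a short page comes back (method name
+// "mirror_image_list" assumed from the usual cls registration):
+//
+//   std::string start_after;
+//   for (;;) {
+//     bufferlist in, out;
+//     encode(start_after, in);
+//     encode(uint64_t(64), in);     // max_return
+//     int r = ioctx.exec(mirroring_oid, "rbd", "mirror_image_list", in, out);
+//     if (r < 0) {
+//       break;
+//     }
+//     std::map<std::string, std::string> page;   // image id -> global id
+//     auto it = out.cbegin();
+//     decode(page, it);
+//     if (page.empty()) {
+//       break;
+//     }
+//     // ... consume page ...
+//     if (page.size() < 64) {
+//       break;
+//     }
+//     start_after = page.rbegin()->first;
+//   }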
+
+/**
+ * Input:
+ * @param global_id (std::string)
+ *
+ * Output:
+ * @param std::string - image id
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_get_image_id(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string global_id;
+ try {
+ auto it = in->cbegin();
+ decode(global_id, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ std::string image_id;
+ int r = read_key(hctx, mirror::global_key(global_id), &image_id);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error retrieving image id for global id '%s': %s",
+ global_id.c_str(), cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ encode(image_id, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param image_id (std::string)
+ *
+ * Output:
+ * @param cls::rbd::MirrorImage - metadata associated with the image_id
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_get(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ string image_id;
+ try {
+ auto it = in->cbegin();
+ decode(image_id, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ cls::rbd::MirrorImage mirror_image;
+ int r = mirror::image_get(hctx, image_id, &mirror_image);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(mirror_image, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param image_id (std::string)
+ * @param mirror_image (cls::rbd::MirrorImage)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ * @returns -EEXIST if there's an existing image_id with a different global_image_id
+ */
+int mirror_image_set(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ string image_id;
+ cls::rbd::MirrorImage mirror_image;
+ try {
+ auto it = in->cbegin();
+ decode(image_id, it);
+ decode(mirror_image, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = mirror::image_set(hctx, image_id, mirror_image);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param image_id (std::string)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_remove(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ string image_id;
+ try {
+ auto it = in->cbegin();
+ decode(image_id, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = mirror::image_remove(hctx, image_id);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param global_image_id (std::string)
+ * @param status (cls::rbd::MirrorImageStatus)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_status_set(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ string global_image_id;
+ cls::rbd::MirrorImageStatus status;
+ try {
+ auto it = in->cbegin();
+ decode(global_image_id, it);
+ decode(status, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = mirror::image_status_set(hctx, global_image_id, status);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param global_image_id (std::string)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_status_remove(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ string global_image_id;
+ try {
+ auto it = in->cbegin();
+ decode(global_image_id, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = mirror::image_status_remove(hctx, global_image_id);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param global_image_id (std::string)
+ *
+ * Output:
+ * @param cls::rbd::MirrorImageStatus - metadata associated with the global_image_id
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_status_get(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ string global_image_id;
+ try {
+ auto it = in->cbegin();
+ decode(global_image_id, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ std::set<entity_inst_t> watchers;
+ int r = mirror::list_watchers(hctx, &watchers);
+ if (r < 0) {
+ return r;
+ }
+
+ cls::rbd::MirrorImageStatus status;
+ r = mirror::image_status_get(hctx, global_image_id, watchers, &status);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(status, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param start_after which name to begin listing after
+ * (use the empty string to start at the beginning)
+ * @param max_return the maximum number of names to list
+ *
+ * Output:
+ * @param std::map<std::string, cls::rbd::MirrorImage>: image id to image map
+ * @param std::map<std::string, cls::rbd::MirrorImageStatus>: image it to status map
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_status_list(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string start_after;
+ uint64_t max_return;
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ map<std::string, cls::rbd::MirrorImage> images;
+ map<std::string, cls::rbd::MirrorImageStatus> statuses;
+ int r = mirror::image_status_list(hctx, start_after, max_return, &images,
+ &statuses);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(images, *out);
+ encode(statuses, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * @param std::map<cls::rbd::MirrorImageStatusState, int>: states counts
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_status_get_summary(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::map<cls::rbd::MirrorImageStatusState, int> states;
+
+ int r = mirror::image_status_get_summary(hctx, &states);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(states, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_status_remove_down(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ int r = mirror::image_status_remove_down(hctx);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param global_image_id (std::string)
+ *
+ * Output:
+ * @param entity_inst_t - instance
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_instance_get(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ string global_image_id;
+ try {
+ auto it = in->cbegin();
+ decode(global_image_id, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ std::set<entity_inst_t> watchers;
+ int r = mirror::list_watchers(hctx, &watchers);
+ if (r < 0) {
+ return r;
+ }
+
+ entity_inst_t instance;
+ r = mirror::image_instance_get(hctx, global_image_id, watchers, &instance);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(instance, *out, cls_get_features(hctx));
+ return 0;
+}
+
+/**
+ * Input:
+ * @param start_after which name to begin listing after
+ * (use the empty string to start at the beginning)
+ * @param max_return the maximum number of names to list
+ *
+ * Output:
+ * @param std::map<std::string, entity_inst_t>: image id to instance map
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_instance_list(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string start_after;
+ uint64_t max_return;
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ map<std::string, entity_inst_t> instances;
+ int r = mirror::image_instance_list(hctx, start_after, max_return,
+ &instances);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(instances, *out, cls_get_features(hctx));
+ return 0;
+}
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * @param std::vector<std::string>: instance ids
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_instances_list(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::vector<std::string> instance_ids;
+
+ int r = mirror::instances_list(hctx, &instance_ids);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(instance_ids, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param instance_id (std::string)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_instances_add(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string instance_id;
+ try {
+ auto iter = in->cbegin();
+ decode(instance_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = mirror::instances_add(hctx, instance_id);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param instance_id (std::string)
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_instances_remove(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string instance_id;
+ try {
+ auto iter = in->cbegin();
+ decode(instance_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = mirror::instances_remove(hctx, instance_id);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param start_after: key to start after
+ * @param max_return: max return items
+ *
+ * Output:
+ * @param std::map<std::string, cls::rbd::MirrorImageMap>: image mapping
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_map_list(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string start_after;
+ uint64_t max_return;
+ try {
+ auto it = in->cbegin();
+ decode(start_after, it);
+ decode(max_return, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ std::map<std::string, cls::rbd::MirrorImageMap> image_mapping;
+ int r = mirror::mirror_image_map_list(hctx, start_after, max_return, &image_mapping);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(image_mapping, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param global_image_id: global image id
+ * @param image_map: image map
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_map_update(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string global_image_id;
+ cls::rbd::MirrorImageMap image_map;
+
+ try {
+ auto it = in->cbegin();
+ decode(global_image_id, it);
+ decode(image_map, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ bufferlist bl;
+ encode(image_map, bl);
+
+ const std::string key = mirror::mirror_image_map_key(global_image_id);
+ int r = cls_cxx_map_set_val(hctx, key, &bl);
+ if (r < 0) {
+ CLS_ERR("error updating image map %s: %s", key.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Input:
+ * @param global_image_id: global image id
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_map_remove(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string global_image_id;
+
+ try {
+ auto it = in->cbegin();
+ decode(global_image_id, it);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ const std::string key = mirror::mirror_image_map_key(global_image_id);
+ int r = cls_cxx_map_remove_key(hctx, key);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("error removing image map %s: %s", key.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+namespace group {
+
+/********************** methods for rbd_group_directory ***********************/
+
+int dir_add(cls_method_context_t hctx,
+ const string &name, const string &id,
+ bool check_for_unique_id)
+{
+ if (!name.size() || !is_valid_id(id)) {
+ CLS_ERR("invalid group name '%s' or id '%s'",
+ name.c_str(), id.c_str());
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "dir_add name=%s id=%s", name.c_str(), id.c_str());
+
+ string name_key = dir_key_for_name(name);
+ string id_key = dir_key_for_id(id);
+ string tmp;
+ int r = read_key(hctx, name_key, &tmp);
+ if (r != -ENOENT) {
+ CLS_LOG(10, "name already exists");
+ return -EEXIST;
+ }
+ r = read_key(hctx, id_key, &tmp);
+ if (r != -ENOENT && check_for_unique_id) {
+ CLS_LOG(10, "id already exists");
+ return -EBADF;
+ }
+ bufferlist id_bl, name_bl;
+ encode(id, id_bl);
+ encode(name, name_bl);
+ map<string, bufferlist> omap_vals;
+ omap_vals[name_key] = id_bl;
+ omap_vals[id_key] = name_bl;
+ return cls_cxx_map_set_vals(hctx, &omap_vals);
+}
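+
+// dir_add() maintains a double index in the group directory omap: the
+// name-keyed entry maps the user-visible name to the id, and the id-keyed
+// entry maps the id back to the name.  dir_remove() below re-reads both and
+// cross-checks them so that a removal racing with a rename fails with
+// -ESTALE instead of deleting a mismatched pair.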
+
+int dir_remove(cls_method_context_t hctx,
+ const string &name, const string &id)
+{
+ CLS_LOG(20, "dir_remove name=%s id=%s", name.c_str(), id.c_str());
+
+ string name_key = dir_key_for_name(name);
+ string id_key = dir_key_for_id(id);
+ string stored_name, stored_id;
+
+ int r = read_key(hctx, name_key, &stored_id);
+ if (r < 0) {
+ if (r != -ENOENT)
+ CLS_ERR("error reading name to id mapping: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ r = read_key(hctx, id_key, &stored_name);
+ if (r < 0) {
+ if (r != -ENOENT)
+ CLS_ERR("error reading id to name mapping: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ // check if this op raced with a rename
+ if (stored_name != name || stored_id != id) {
+ CLS_ERR("stored name '%s' and id '%s' do not match args '%s' and '%s'",
+ stored_name.c_str(), stored_id.c_str(), name.c_str(), id.c_str());
+ return -ESTALE;
+ }
+
+ r = cls_cxx_map_remove_key(hctx, name_key);
+ if (r < 0) {
+ CLS_ERR("error removing name: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ r = cls_cxx_map_remove_key(hctx, id_key);
+ if (r < 0) {
+ CLS_ERR("error removing id: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+static const string RBD_GROUP_SNAP_KEY_PREFIX = "snapshot_";
+
+std::string snap_key(const std::string &snap_id) {
+ ostringstream oss;
+ oss << RBD_GROUP_SNAP_KEY_PREFIX << snap_id;
+ return oss.str();
+}
+
+int snap_list(cls_method_context_t hctx, cls::rbd::GroupSnapshot start_after,
+ uint64_t max_return,
+ std::vector<cls::rbd::GroupSnapshot> *group_snaps)
+{
+ int max_read = RBD_MAX_KEYS_READ;
+ std::map<string, bufferlist> vals;
+ string last_read = snap_key(start_after.id);
+
+ group_snaps->clear();
+
+ bool more;
+ do {
+ int r = cls_cxx_map_get_vals(hctx, last_read,
+ RBD_GROUP_SNAP_KEY_PREFIX,
+ max_read, &vals, &more);
+ if (r < 0)
+ return r;
+
+ for (map<string, bufferlist>::iterator it = vals.begin();
+ it != vals.end() && group_snaps->size() < max_return; ++it) {
+
+ auto iter = it->second.cbegin();
+ cls::rbd::GroupSnapshot snap;
+ try {
+ decode(snap, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("error decoding snapshot: %s", it->first.c_str());
+ return -EIO;
+ }
+ CLS_LOG(20, "Discovered snapshot %s %s",
+ snap.name.c_str(),
+ snap.id.c_str());
+ group_snaps->push_back(snap);
+ }
+
+ } while (more && (group_snaps->size() < max_return));
+
+ return 0;
+}
+
+static int check_duplicate_snap_name(cls_method_context_t hctx,
+ const std::string &snap_name,
+ const std::string &snap_id)
+{
+ const int max_read = 1024;
+ cls::rbd::GroupSnapshot snap_last;
+ std::vector<cls::rbd::GroupSnapshot> page;
+
+ for (;;) {
+ int r = snap_list(hctx, snap_last, max_read, &page);
+ if (r < 0) {
+ return r;
+ }
+ for (auto& snap: page) {
+ if (snap.name == snap_name && snap.id != snap_id) {
+ return -EEXIST;
+ }
+ }
+
+ if (page.size() < max_read) {
+ break;
+ }
+
+ snap_last = *page.rbegin();
+ }
+
+ return 0;
+}
+
+} // namespace group
+
+/**
+ * List groups from the directory.
+ *
+ * Input:
+ * @param start_after (std::string)
+ * @param max_return (int64_t)
+ *
+ * Output:
+ * @param map of groups (name, id)
+ * @return 0 on success, negative error code on failure
+ */
+int group_dir_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string start_after;
+ uint64_t max_return;
+
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int max_read = RBD_MAX_KEYS_READ;
+ bool more = true;
+ map<string, string> groups;
+ string last_read = dir_key_for_name(start_after);
+
+ while (more && groups.size() < max_return) {
+ map<string, bufferlist> vals;
+ CLS_LOG(20, "last_read = '%s'", last_read.c_str());
+ int r = cls_cxx_map_get_vals(hctx, last_read, RBD_DIR_NAME_KEY_PREFIX,
+ max_read, &vals, &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading directory by name: %s", cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ for (pair<string, bufferlist> val: vals) {
+ string id;
+ auto iter = val.second.cbegin();
+ try {
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("could not decode id of group '%s'", val.first.c_str());
+ return -EIO;
+ }
+ CLS_LOG(20, "adding '%s' -> '%s'", dir_name_from_key(val.first).c_str(), id.c_str());
+ groups[dir_name_from_key(val.first)] = id;
+ if (groups.size() >= max_return)
+ break;
+ }
+ if (!vals.empty()) {
+ last_read = dir_key_for_name(groups.rbegin()->first);
+ }
+ }
+
+ encode(groups, *out);
+
+ return 0;
+}
+
+/**
+ * Add a group to the directory.
+ *
+ * Input:
+ * @param name (std::string)
+ * @param id (std::string)
+ *
+ * Output:
+ * @return 0 on success, negative error code on failure
+ */
+int group_dir_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ int r = cls_cxx_create(hctx, false);
+
+ if (r < 0) {
+ CLS_ERR("could not create group directory: %s",
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ string name, id;
+ try {
+ auto iter = in->cbegin();
+ decode(name, iter);
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ return group::dir_add(hctx, name, id, true);
+}
+
+/**
+ * Rename a group in the directory.
+ *
+ * Input:
+ * @param src original name of the group (std::string)
+ * @param dest new name of the group (std::string)
+ * @param id the id of the group (std::string)
+ *
+ * Output:
+ * @return 0 on success, negative error code on failure
+ */
+int group_dir_rename(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string src, dest, id;
+ try {
+ auto iter = in->cbegin();
+ decode(src, iter);
+ decode(dest, iter);
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = group::dir_remove(hctx, src, id);
+ if (r < 0)
+ return r;
+
+ return group::dir_add(hctx, dest, id, false);
+}
+
+/**
+ * Remove a group from the directory.
+ *
+ * Input:
+ * @param name (std::string)
+ * @param id (std::string)
+ *
+ * Output:
+ * @return 0 on success, negative error code on failure
+ */
+int group_dir_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string name, id;
+ try {
+ auto iter = in->cbegin();
+ decode(name, iter);
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ return group::dir_remove(hctx, name, id);
+}
+
+/**
+ * Set state of an image in the group.
+ *
+ * Input:
+ * @param image_status (cls::rbd::GroupImageStatus)
+ *
+ * Output:
+ * @return 0 on success, negative error code on failure
+ */
+int group_image_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "group_image_set");
+
+ cls::rbd::GroupImageStatus st;
+ try {
+ auto iter = in->cbegin();
+ decode(st, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ string image_key = st.spec.image_key();
+
+ bufferlist image_val_bl;
+ encode(st.state, image_val_bl);
+ int r = cls_cxx_map_set_val(hctx, image_key, &image_val_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Remove reference to an image from the group.
+ *
+ * Input:
+ * @param spec (cls::rbd::GroupImageSpec)
+ *
+ * Output:
+ * @return 0 on success, negative error code on failure
+ */
+int group_image_remove(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "group_image_remove");
+ cls::rbd::GroupImageSpec spec;
+ try {
+ auto iter = in->cbegin();
+ decode(spec, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ string image_key = spec.image_key();
+
+ int r = cls_cxx_map_remove_key(hctx, image_key);
+ if (r < 0) {
+ CLS_ERR("error removing image from group: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * List images in the group.
+ *
+ * Input:
+ * @param start_after which name to begin listing after
+ * (use the empty string to start at the beginning)
+ * @param max_return the maximum number of names to list
+ *
+ * Output:
+ * @param tuples of descriptions of the images: image_id, pool_id, image reference state.
+ * @return 0 on success, negative error code on failure
+ */
+int group_image_list(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "group_image_list");
+ cls::rbd::GroupImageSpec start_after;
+ uint64_t max_return;
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int max_read = RBD_MAX_KEYS_READ;
+ std::map<string, bufferlist> vals;
+ string last_read = start_after.image_key();
+ std::vector<cls::rbd::GroupImageStatus> res;
+ bool more;
+ do {
+ int r = cls_cxx_map_get_vals(hctx, last_read,
+ cls::rbd::RBD_GROUP_IMAGE_KEY_PREFIX,
+ max_read, &vals, &more);
+ if (r < 0)
+ return r;
+
+ for (map<string, bufferlist>::iterator it = vals.begin();
+ it != vals.end() && res.size() < max_return; ++it) {
+
+ auto iter = it->second.cbegin();
+ cls::rbd::GroupImageLinkState state;
+ try {
+ decode(state, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("error decoding state for image: %s", it->first.c_str());
+ return -EIO;
+ }
+ cls::rbd::GroupImageSpec spec;
+ int r = cls::rbd::GroupImageSpec::from_key(it->first, &spec);
+ if (r < 0)
+ return r;
+
+ CLS_LOG(20, "Discovered image %s %" PRId64 " %d", spec.image_id.c_str(),
+ spec.pool_id,
+ (int)state);
+ res.push_back(cls::rbd::GroupImageStatus(spec, state));
+ }
+ if (res.size() > 0) {
+ last_read = res.rbegin()->spec.image_key();
+ }
+
+ } while (more && (res.size() < max_return));
+ encode(res, *out);
+
+ return 0;
+}
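+
+// Illustrative paging sketch (an assumption about typical caller behaviour,
+// not something mandated by this method): feed the last returned spec back in
+// as start_after until a short page comes back.  ioctx and group_oid are
+// placeholders for this example.
+//
+//   cls::rbd::GroupImageSpec start_after;       // default: start at the beginning
+//   std::vector<cls::rbd::GroupImageStatus> page;
+//   do {
+//     bufferlist in, out;
+//     encode(start_after, in);
+//     encode(static_cast<uint64_t>(64), in);    // max_return
+//     if (ioctx.exec(group_oid, "rbd", "group_image_list", in, out) < 0) {
+//       break;
+//     }
+//     page.clear();
+//     auto it = out.cbegin();
+//     decode(page, it);
+//     if (!page.empty()) {
+//       start_after = page.back().spec;
+//     }
+//   } while (page.size() == 64);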
+
+/**
+ * Link this image to a group (record the group reference on the image).
+ *
+ * Input:
+ * @param group_id (std::string)
+ * @param pool_id (int64_t)
+ *
+ * Output:
+ * @return 0 on success, negative error code on failure
+ */
+int image_group_add(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "image_group_add");
+ cls::rbd::GroupSpec new_group;
+ try {
+ auto iter = in->cbegin();
+ decode(new_group, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ bufferlist existing_refbl;
+
+ int r = cls_cxx_map_get_val(hctx, RBD_GROUP_REF, &existing_refbl);
+ if (r == 0) {
+ // If we are trying to link this image to the same group then return
+ // success. If this image already belongs to another group then abort.
+ cls::rbd::GroupSpec old_group;
+ try {
+ auto iter = existing_refbl.cbegin();
+ decode(old_group, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ if ((old_group.group_id != new_group.group_id) ||
+ (old_group.pool_id != new_group.pool_id)) {
+ return -EEXIST;
+ } else {
+ return 0; // In this case the values are already correct
+ }
+  } else if (r < 0 && r != -ENOENT) {
+    return r;
+  }
+  // a missing RBD_GROUP_REF entry (-ENOENT) means this image is not yet a
+  // member of any group, so the link can proceed
+
+ r = image::set_op_features(hctx, RBD_OPERATION_FEATURE_GROUP,
+ RBD_OPERATION_FEATURE_GROUP);
+ if (r < 0) {
+ return r;
+ }
+
+ bufferlist refbl;
+ encode(new_group, refbl);
+ r = cls_cxx_map_set_val(hctx, RBD_GROUP_REF, &refbl);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
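+
+// Expected outcomes, summarized from the checks above (pseudo-call notation;
+// group_a and group_b are placeholder GroupSpecs used only for illustration):
+//
+//   image_group_add(group_a)  -> 0        (link created, group op feature set)
+//   image_group_add(group_a)  -> 0        (same group: already correct, no-op)
+//   image_group_add(group_b)  -> -EEXIST  (image already belongs to group_a)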
+
+/**
+ * Remove the image's reference to the group.
+ *
+ * Input:
+ * @param group_id (std::string)
+ * @param pool_id (int64_t)
+ *
+ * Output:
+ * @return 0 on success, negative error code on failure
+ */
+int image_group_remove(cls_method_context_t hctx,
+ bufferlist *in,
+ bufferlist *out)
+{
+ CLS_LOG(20, "image_group_remove");
+ cls::rbd::GroupSpec spec;
+ try {
+ auto iter = in->cbegin();
+ decode(spec, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ bufferlist refbl;
+ int r = cls_cxx_map_get_val(hctx, RBD_GROUP_REF, &refbl);
+ if (r < 0) {
+ return r;
+ }
+
+ cls::rbd::GroupSpec ref_spec;
+ auto iter = refbl.cbegin();
+ try {
+ decode(ref_spec, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ if (ref_spec.pool_id != spec.pool_id || ref_spec.group_id != spec.group_id) {
+ return -EBADF;
+ }
+
+ r = cls_cxx_map_remove_key(hctx, RBD_GROUP_REF);
+ if (r < 0) {
+ return r;
+ }
+
+ r = image::set_op_features(hctx, 0, RBD_OPERATION_FEATURE_GROUP);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Retrieve the id and pool of the group this image belongs to.
+ *
+ * Input:
+ * none
+ *
+ * Output:
+ * @param GroupSpec
+ * @return 0 on success, negative error code on failure
+ */
+int image_group_get(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "image_group_get");
+ bufferlist refbl;
+ int r = cls_cxx_map_get_val(hctx, RBD_GROUP_REF, &refbl);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+
+ cls::rbd::GroupSpec spec;
+
+ if (r != -ENOENT) {
+ auto iter = refbl.cbegin();
+ try {
+ decode(spec, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+ }
+
+ encode(spec, *out);
+ return 0;
+}
+
+/**
+ * Save or update a group snapshot record.
+ *
+ * Input:
+ * @param GroupSnapshot
+ *
+ * Output:
+ * @return 0 on success, negative error code on failure
+ */
+int group_snap_set(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "group_snap_set");
+ cls::rbd::GroupSnapshot group_snap;
+ try {
+ auto iter = in->cbegin();
+ decode(group_snap, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ if (group_snap.name.empty()) {
+ CLS_ERR("group snapshot name is empty");
+ return -EINVAL;
+ }
+ if (group_snap.id.empty()) {
+ CLS_ERR("group snapshot id is empty");
+ return -EINVAL;
+ }
+
+ int r = group::check_duplicate_snap_name(hctx, group_snap.name,
+ group_snap.id);
+ if (r < 0) {
+ return r;
+ }
+
+ std::string key = group::snap_key(group_snap.id);
+ if (group_snap.state == cls::rbd::GROUP_SNAPSHOT_STATE_INCOMPLETE) {
+ bufferlist snap_bl;
+ r = cls_cxx_map_get_val(hctx, key, &snap_bl);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ } else if (r >= 0) {
+ return -EEXIST;
+ }
+ }
+
+ bufferlist obl;
+ encode(group_snap, obl);
+ r = cls_cxx_map_set_val(hctx, key, &obl);
+ return r;
+}
+
+/**
+ * Remove snapshot record.
+ *
+ * Input:
+ * @param id Snapshot id
+ *
+ * Output:
+ * @return 0 on success, negative error code on failure
+ */
+int group_snap_remove(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "group_snap_remove");
+ std::string snap_id;
+ try {
+ auto iter = in->cbegin();
+ decode(snap_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ std::string snap_key = group::snap_key(snap_id);
+
+ CLS_LOG(20, "removing snapshot with key %s", snap_key.c_str());
+ int r = cls_cxx_map_remove_key(hctx, snap_key);
+ return r;
+}
+
+/**
+ * Get group's snapshot by id.
+ *
+ * Input:
+ * @param snapshot_id the id of the snapshot to look for.
+ *
+ * Output:
+ * @param GroupSnapshot the requested snapshot
+ * @return 0 on success, negative error code on failure
+ */
+int group_snap_get_by_id(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "group_snap_get_by_id");
+
+ std::string snap_id;
+ try {
+ auto iter = in->cbegin();
+ decode(snap_id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ bufferlist snapbl;
+
+ int r = cls_cxx_map_get_val(hctx, group::snap_key(snap_id), &snapbl);
+ if (r < 0) {
+ return r;
+ }
+
+ cls::rbd::GroupSnapshot group_snap;
+ auto iter = snapbl.cbegin();
+ try {
+ decode(group_snap, iter);
+ } catch (const buffer::error &err) {
+ CLS_ERR("error decoding snapshot: %s", snap_id.c_str());
+ return -EIO;
+ }
+
+ encode(group_snap, *out);
+
+ return 0;
+}
+
+/**
+ * List group's snapshots.
+ *
+ * Input:
+ * @param start_after which name to begin listing after
+ * (use the empty string to start at the beginning)
+ * @param max_return the maximum number of snapshots to list
+ *
+ * Output:
+ * @param list of snapshots
+ * @return 0 on success, negative error code on failure
+ */
+int group_snap_list(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "group_snap_list");
+
+ cls::rbd::GroupSnapshot start_after;
+ uint64_t max_return;
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+ std::vector<cls::rbd::GroupSnapshot> group_snaps;
+  int r = group::snap_list(hctx, start_after, max_return, &group_snaps);
+  if (r < 0) {
+    return r;
+  }
+
+ encode(group_snaps, *out);
+
+ return 0;
+}
+
+namespace trash {
+
+static const std::string IMAGE_KEY_PREFIX("id_");
+
+std::string image_key(const std::string &image_id) {
+ return IMAGE_KEY_PREFIX + image_id;
+}
+
+std::string image_id_from_key(const std::string &key) {
+ return key.substr(IMAGE_KEY_PREFIX.size());
+}
+
+} // namespace trash
+
+/**
+ * Add an image entry to the rbd trash. Creates the trash object if
+ * needed, and stores the trash spec information of the deleted image.
+ *
+ * Input:
+ * @param id the id of the image
+ * @param trash_spec the spec info of the deleted image
+ *
+ * Output:
+ * @returns -EEXIST if the image id is already in the trash
+ * @returns 0 on success, negative error code on failure
+ */
+int trash_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ int r = cls_cxx_create(hctx, false);
+ if (r < 0) {
+ CLS_ERR("could not create trash: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ string id;
+ cls::rbd::TrashImageSpec trash_spec;
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ decode(trash_spec, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ if (!is_valid_id(id)) {
+ CLS_ERR("trash_add: invalid id '%s'", id.c_str());
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "trash_add id=%s", id.c_str());
+
+ string key = trash::image_key(id);
+ cls::rbd::TrashImageSpec tmp;
+ r = read_key(hctx, key, &tmp);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("could not read key %s entry from trash: %s", key.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ } else if (r == 0) {
+ CLS_LOG(10, "id already exists");
+ return -EEXIST;
+ }
+
+ map<string, bufferlist> omap_vals;
+ encode(trash_spec, omap_vals[key]);
+ return cls_cxx_map_set_vals(hctx, &omap_vals);
+}
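+
+// Illustrative usage sketch (the trash object name "rbd_trash", the ioctx
+// handle and the variables image_id/trash_spec are assumptions for the
+// example only):
+//
+//   bufferlist in, out;
+//   encode(image_id, in);                 // std::string
+//   encode(trash_spec, in);               // cls::rbd::TrashImageSpec
+//   int r = ioctx.exec("rbd_trash", "rbd", "trash_add", in, out);
+//   // r == -EEXIST if the id is already registered in the trash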
+
+/**
+ * Removes an image entry from the rbd trash object.
+ *
+ * Input:
+ * @param id the id of the image
+ *
+ * Output:
+ * @returns -ENOENT if the image id does not exist in the trash
+ * @returns 0 on success, negative error code on failure
+ */
+int trash_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string id;
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "trash_remove id=%s", id.c_str());
+
+ string key = trash::image_key(id);
+ bufferlist tmp;
+ int r = cls_cxx_map_get_val(hctx, key, &tmp);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading entry key %s: %s", key.c_str(), cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ r = cls_cxx_map_remove_key(hctx, key);
+ if (r < 0) {
+ CLS_ERR("error removing entry: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Returns the list of trash spec entries registered in the rbd_trash
+ * object.
+ *
+ * Input:
+ * @param start_after which name to begin listing after
+ * (use the empty string to start at the beginning)
+ * @param max_return the maximum number of names to list
+ *
+ * Output:
+ * @param data the map between image id and trash spec info
+ *
+ * @returns 0 on success, negative error code on failure
+ */
+int trash_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string start_after;
+ uint64_t max_return;
+
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ map<string, cls::rbd::TrashImageSpec> data;
+ string last_read = trash::image_key(start_after);
+ bool more = true;
+
+ CLS_LOG(20, "trash_get_images");
+ while (data.size() < max_return) {
+ map<string, bufferlist> raw_data;
+ int max_read = std::min<int32_t>(RBD_MAX_KEYS_READ,
+ max_return - data.size());
+ int r = cls_cxx_map_get_vals(hctx, last_read, trash::IMAGE_KEY_PREFIX,
+ max_read, &raw_data, &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("failed to read the vals off of disk: %s",
+ cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+ if (raw_data.empty()) {
+ break;
+ }
+
+ map<string, bufferlist>::iterator it = raw_data.begin();
+ for (; it != raw_data.end(); ++it) {
+ decode(data[trash::image_id_from_key(it->first)], it->second);
+ }
+
+ if (!more) {
+ break;
+ }
+
+ last_read = raw_data.rbegin()->first;
+ }
+
+ encode(data, *out);
+ return 0;
+}
+
+/**
+ * Returns the trash spec entry of an image registered in the rbd_trash
+ * object.
+ *
+ * Input:
+ * @param id the id of the image
+ *
+ * Output:
+ * @param out the trash spec entry
+ *
+ * @returns 0 on success, negative error code on failure
+ */
+int trash_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string id;
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "trash_get_image id=%s", id.c_str());
+
+  string key = trash::image_key(id);
+ int r = cls_cxx_map_get_val(hctx, key, out);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("error reading image from trash '%s': '%s'", id.c_str(),
+ cpp_strerror(r).c_str());
+ }
+ return r;
+}
+
+/**
+ * Set state of an image in the rbd_trash object.
+ *
+ * Input:
+ * @param id the id of the image
+ * @param trash_state the state of the image to be set
+ * @param expect_state the expected state of the image
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int trash_state_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string id;
+ cls::rbd::TrashImageState trash_state;
+ cls::rbd::TrashImageState expect_state;
+ try {
+ bufferlist::const_iterator iter = in->begin();
+ decode(id, iter);
+ decode(trash_state, iter);
+ decode(expect_state, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "trash_state_set id=%s", id.c_str());
+
+ string key = trash::image_key(id);
+ cls::rbd::TrashImageSpec trash_spec;
+ int r = read_key(hctx, key, &trash_spec);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("Could not read trash image spec off disk: %s",
+ cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ if (trash_spec.state == expect_state) {
+ trash_spec.state = trash_state;
+ r = write_key(hctx, key, trash_spec);
+ if (r < 0) {
+ CLS_ERR("error setting trash image state: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+ } else if (trash_spec.state == trash_state) {
+ return 0;
+ } else {
+ CLS_ERR("Current trash state: %d do not match expected: %d or set: %d",
+ trash_spec.state, expect_state, trash_state);
+ return -ESTALE;
+ }
+}
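+
+// Illustrative sketch of the compare-and-set semantics (the object name,
+// state constants and variables are assumptions for the example): move an
+// image into the RESTORING state only if nobody else changed it first.
+//
+//   bufferlist in, out;
+//   encode(image_id, in);
+//   encode(cls::rbd::TRASH_IMAGE_STATE_RESTORING, in);  // desired state
+//   encode(cls::rbd::TRASH_IMAGE_STATE_NORMAL, in);     // expected current state
+//   int r = ioctx.exec("rbd_trash", "rbd", "trash_state_set", in, out);
+//   // r == -ESTALE if the entry is in neither the expected nor the desired state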
+
+namespace nspace {
+
+const std::string NAME_KEY_PREFIX("name_");
+
+std::string key_for_name(const std::string& name) {
+ return NAME_KEY_PREFIX + name;
+}
+
+std::string name_from_key(const std::string &key) {
+ return key.substr(NAME_KEY_PREFIX.size());
+}
+
+} // namespace nspace
+
+/**
+ * Add a namespace to the namespace directory.
+ *
+ * Input:
+ * @param name the name of the namespace
+ *
+ * Output:
+ * @returns -EEXIST if the namespace already exists
+ * @returns 0 on success, negative error code on failure
+ */
+int namespace_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ std::string name;
+ try {
+ auto iter = in->cbegin();
+ decode(name, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ std::string key(nspace::key_for_name(name));
+ bufferlist value;
+ int r = cls_cxx_map_get_val(hctx, key, &value);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ } else if (r == 0) {
+ return -EEXIST;
+ }
+
+ r = cls_cxx_map_set_val(hctx, key, &value);
+ if (r < 0) {
+ CLS_ERR("failed to set omap key: %s", key.c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Remove a namespace from the namespace directory.
+ *
+ * Input:
+ * @param name the name of the namespace
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int namespace_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ std::string name;
+ try {
+ auto iter = in->cbegin();
+ decode(name, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ std::string key(nspace::key_for_name(name));
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, key, &bl);
+ if (r < 0) {
+ return r;
+ }
+
+ r = cls_cxx_map_remove_key(hctx, key);
+ if (r < 0) {
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Returns the list of namespaces in the rbd_namespace object
+ *
+ * Input:
+ * @param start_after which name to begin listing after
+ * (use the empty string to start at the beginning)
+ * @param max_return the maximum number of names to list
+ *
+ * Output:
+ * @param data list of namespace names
+ * @returns 0 on success, negative error code on failure
+ */
+int namespace_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string start_after;
+ uint64_t max_return;
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ std::list<std::string> data;
+ std::string last_read = nspace::key_for_name(start_after);
+ bool more = true;
+
+ CLS_LOG(20, "namespace_list");
+ while (data.size() < max_return) {
+ std::map<std::string, bufferlist> raw_data;
+ int max_read = std::min<int32_t>(RBD_MAX_KEYS_READ,
+ max_return - data.size());
+ int r = cls_cxx_map_get_vals(hctx, last_read, nspace::NAME_KEY_PREFIX,
+ max_read, &raw_data, &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("failed to read the vals off of disk: %s",
+ cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ for (auto& it : raw_data) {
+ data.push_back(nspace::name_from_key(it.first));
+ }
+
+ if (raw_data.empty() || !more) {
+ break;
+ }
+
+ last_read = raw_data.rbegin()->first;
+ }
+
+ encode(data, *out);
+ return 0;
+}
+
+/**
+ * Reclaim space for zeroed extents
+ *
+ * Input:
+ * @param sparse_size minimum length of a zeroed run that will be deallocated
+ * @param remove_empty boolean, true if the object should be removed if empty
+ *
+ * Output:
+ * @returns -ENOENT if the object does not exist or has been removed
+ * @returns 0 on success, negative error code on failure
+ */
+int sparsify(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ size_t sparse_size;
+ bool remove_empty;
+ try {
+ auto iter = in->cbegin();
+ decode(sparse_size, iter);
+ decode(remove_empty, iter);
+ } catch (const buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = check_exists(hctx);
+ if (r < 0) {
+ return r;
+ }
+
+ bufferlist bl;
+ r = cls_cxx_read(hctx, 0, 0, &bl);
+ if (r < 0) {
+ CLS_ERR("failed to read data off of disk: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ if (bl.is_zero()) {
+ if (remove_empty) {
+ CLS_LOG(20, "remove");
+ r = cls_cxx_remove(hctx);
+ if (r < 0) {
+ CLS_ERR("remove failed: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ } else if (bl.length() > 0) {
+ CLS_LOG(20, "truncate");
+ bufferlist write_bl;
+ r = cls_cxx_replace(hctx, 0, 0, &write_bl);
+ if (r < 0) {
+ CLS_ERR("truncate failed: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ } else {
+ CLS_LOG(20, "skip empty");
+ }
+ return 0;
+ }
+
+ bl.rebuild(buffer::ptr_node::create(bl.length()));
+ size_t write_offset = 0;
+ size_t write_length = 0;
+ size_t offset = 0;
+ size_t length = bl.length();
+ const auto& ptr = bl.front();
+ bool replace = true;
+ while (offset < length) {
+ if (calc_sparse_extent(ptr, sparse_size, length, &write_offset,
+ &write_length, &offset)) {
+ if (write_offset == 0 && write_length == length) {
+ CLS_LOG(20, "nothing to do");
+ return 0;
+ }
+ CLS_LOG(20, "write%s %" PRIu64 "~%" PRIu64, (replace ? "(replace)" : ""),
+ write_offset, write_length);
+ bufferlist write_bl;
+ write_bl.push_back(buffer::ptr_node::create(ptr, write_offset,
+ write_length));
+ if (replace) {
+ r = cls_cxx_replace(hctx, write_offset, write_length, &write_bl);
+ replace = false;
+ } else {
+ r = cls_cxx_write(hctx, write_offset, write_length, &write_bl);
+ }
+ if (r < 0) {
+ CLS_ERR("write failed: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+ write_offset = offset;
+ write_length = 0;
+ }
+ }
+
+ return 0;
+}
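+
+// Illustrative usage sketch (object name, ioctx handle and sizes are example
+// values only): punch out zeroed runs of at least 4 KiB from one data object
+// and delete the object entirely if it turns out to be all zeroes.
+//
+//   bufferlist in, out;
+//   encode(static_cast<size_t>(4096), in);  // sparse_size
+//   encode(true, in);                       // remove_empty
+//   int r = ioctx.exec(data_object_oid, "rbd", "sparsify", in, out);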
+
+CLS_INIT(rbd)
+{
+ CLS_LOG(20, "Loaded rbd class!");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_create;
+ cls_method_handle_t h_get_features;
+ cls_method_handle_t h_set_features;
+ cls_method_handle_t h_get_size;
+ cls_method_handle_t h_set_size;
+ cls_method_handle_t h_get_parent;
+ cls_method_handle_t h_set_parent;
+ cls_method_handle_t h_remove_parent;
+ cls_method_handle_t h_parent_get;
+ cls_method_handle_t h_parent_overlap_get;
+ cls_method_handle_t h_parent_attach;
+ cls_method_handle_t h_parent_detach;
+ cls_method_handle_t h_get_protection_status;
+ cls_method_handle_t h_set_protection_status;
+ cls_method_handle_t h_get_stripe_unit_count;
+ cls_method_handle_t h_set_stripe_unit_count;
+ cls_method_handle_t h_get_create_timestamp;
+ cls_method_handle_t h_get_access_timestamp;
+ cls_method_handle_t h_get_modify_timestamp;
+ cls_method_handle_t h_get_flags;
+ cls_method_handle_t h_set_flags;
+ cls_method_handle_t h_op_features_get;
+ cls_method_handle_t h_op_features_set;
+ cls_method_handle_t h_add_child;
+ cls_method_handle_t h_remove_child;
+ cls_method_handle_t h_get_children;
+ cls_method_handle_t h_get_snapcontext;
+ cls_method_handle_t h_get_object_prefix;
+ cls_method_handle_t h_get_data_pool;
+ cls_method_handle_t h_get_snapshot_name;
+ cls_method_handle_t h_get_snapshot_timestamp;
+ cls_method_handle_t h_snapshot_get;
+ cls_method_handle_t h_snapshot_add;
+ cls_method_handle_t h_snapshot_remove;
+ cls_method_handle_t h_snapshot_rename;
+ cls_method_handle_t h_snapshot_trash_add;
+ cls_method_handle_t h_get_all_features;
+ cls_method_handle_t h_get_id;
+ cls_method_handle_t h_set_id;
+ cls_method_handle_t h_set_modify_timestamp;
+ cls_method_handle_t h_set_access_timestamp;
+ cls_method_handle_t h_dir_get_id;
+ cls_method_handle_t h_dir_get_name;
+ cls_method_handle_t h_dir_list;
+ cls_method_handle_t h_dir_add_image;
+ cls_method_handle_t h_dir_remove_image;
+ cls_method_handle_t h_dir_rename_image;
+ cls_method_handle_t h_dir_state_assert;
+ cls_method_handle_t h_dir_state_set;
+ cls_method_handle_t h_object_map_load;
+ cls_method_handle_t h_object_map_save;
+ cls_method_handle_t h_object_map_resize;
+ cls_method_handle_t h_object_map_update;
+ cls_method_handle_t h_object_map_snap_add;
+ cls_method_handle_t h_object_map_snap_remove;
+ cls_method_handle_t h_metadata_set;
+ cls_method_handle_t h_metadata_remove;
+ cls_method_handle_t h_metadata_list;
+ cls_method_handle_t h_metadata_get;
+ cls_method_handle_t h_snapshot_get_limit;
+ cls_method_handle_t h_snapshot_set_limit;
+ cls_method_handle_t h_child_attach;
+ cls_method_handle_t h_child_detach;
+ cls_method_handle_t h_children_list;
+ cls_method_handle_t h_migration_set;
+ cls_method_handle_t h_migration_set_state;
+ cls_method_handle_t h_migration_get;
+ cls_method_handle_t h_migration_remove;
+ cls_method_handle_t h_old_snapshots_list;
+ cls_method_handle_t h_old_snapshot_add;
+ cls_method_handle_t h_old_snapshot_remove;
+ cls_method_handle_t h_old_snapshot_rename;
+ cls_method_handle_t h_mirror_uuid_get;
+ cls_method_handle_t h_mirror_uuid_set;
+ cls_method_handle_t h_mirror_mode_get;
+ cls_method_handle_t h_mirror_mode_set;
+ cls_method_handle_t h_mirror_peer_list;
+ cls_method_handle_t h_mirror_peer_add;
+ cls_method_handle_t h_mirror_peer_remove;
+ cls_method_handle_t h_mirror_peer_set_client;
+ cls_method_handle_t h_mirror_peer_set_cluster;
+ cls_method_handle_t h_mirror_image_list;
+ cls_method_handle_t h_mirror_image_get_image_id;
+ cls_method_handle_t h_mirror_image_get;
+ cls_method_handle_t h_mirror_image_set;
+ cls_method_handle_t h_mirror_image_remove;
+ cls_method_handle_t h_mirror_image_status_set;
+ cls_method_handle_t h_mirror_image_status_remove;
+ cls_method_handle_t h_mirror_image_status_get;
+ cls_method_handle_t h_mirror_image_status_list;
+ cls_method_handle_t h_mirror_image_status_get_summary;
+ cls_method_handle_t h_mirror_image_status_remove_down;
+ cls_method_handle_t h_mirror_image_instance_get;
+ cls_method_handle_t h_mirror_image_instance_list;
+ cls_method_handle_t h_mirror_instances_list;
+ cls_method_handle_t h_mirror_instances_add;
+ cls_method_handle_t h_mirror_instances_remove;
+ cls_method_handle_t h_mirror_image_map_list;
+ cls_method_handle_t h_mirror_image_map_update;
+ cls_method_handle_t h_mirror_image_map_remove;
+ cls_method_handle_t h_group_dir_list;
+ cls_method_handle_t h_group_dir_add;
+ cls_method_handle_t h_group_dir_remove;
+ cls_method_handle_t h_group_dir_rename;
+ cls_method_handle_t h_group_image_remove;
+ cls_method_handle_t h_group_image_list;
+ cls_method_handle_t h_group_image_set;
+ cls_method_handle_t h_image_group_add;
+ cls_method_handle_t h_image_group_remove;
+ cls_method_handle_t h_image_group_get;
+ cls_method_handle_t h_group_snap_set;
+ cls_method_handle_t h_group_snap_remove;
+ cls_method_handle_t h_group_snap_get_by_id;
+ cls_method_handle_t h_group_snap_list;
+ cls_method_handle_t h_trash_add;
+ cls_method_handle_t h_trash_remove;
+ cls_method_handle_t h_trash_list;
+ cls_method_handle_t h_trash_get;
+ cls_method_handle_t h_trash_state_set;
+ cls_method_handle_t h_namespace_add;
+ cls_method_handle_t h_namespace_remove;
+ cls_method_handle_t h_namespace_list;
+ cls_method_handle_t h_copyup;
+ cls_method_handle_t h_assert_snapc_seq;
+ cls_method_handle_t h_sparsify;
+
+ cls_register("rbd", &h_class);
+ cls_register_cxx_method(h_class, "create",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ create, &h_create);
+ cls_register_cxx_method(h_class, "get_features",
+ CLS_METHOD_RD,
+ get_features, &h_get_features);
+ cls_register_cxx_method(h_class, "set_features",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ set_features, &h_set_features);
+ cls_register_cxx_method(h_class, "get_size",
+ CLS_METHOD_RD,
+ get_size, &h_get_size);
+ cls_register_cxx_method(h_class, "set_size",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ set_size, &h_set_size);
+ cls_register_cxx_method(h_class, "get_snapcontext",
+ CLS_METHOD_RD,
+ get_snapcontext, &h_get_snapcontext);
+ cls_register_cxx_method(h_class, "get_object_prefix",
+ CLS_METHOD_RD,
+ get_object_prefix, &h_get_object_prefix);
+ cls_register_cxx_method(h_class, "get_data_pool", CLS_METHOD_RD,
+ get_data_pool, &h_get_data_pool);
+ cls_register_cxx_method(h_class, "get_snapshot_name",
+ CLS_METHOD_RD,
+ get_snapshot_name, &h_get_snapshot_name);
+ cls_register_cxx_method(h_class, "get_snapshot_timestamp",
+ CLS_METHOD_RD,
+ get_snapshot_timestamp, &h_get_snapshot_timestamp);
+ cls_register_cxx_method(h_class, "snapshot_get",
+ CLS_METHOD_RD,
+ snapshot_get, &h_snapshot_get);
+ cls_register_cxx_method(h_class, "snapshot_add",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ snapshot_add, &h_snapshot_add);
+ cls_register_cxx_method(h_class, "snapshot_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ snapshot_remove, &h_snapshot_remove);
+ cls_register_cxx_method(h_class, "snapshot_rename",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ snapshot_rename, &h_snapshot_rename);
+ cls_register_cxx_method(h_class, "snapshot_trash_add",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ snapshot_trash_add, &h_snapshot_trash_add);
+ cls_register_cxx_method(h_class, "get_all_features",
+ CLS_METHOD_RD,
+ get_all_features, &h_get_all_features);
+
+ // NOTE: deprecate v1 parent APIs after mimic EOLed
+ cls_register_cxx_method(h_class, "get_parent",
+ CLS_METHOD_RD,
+ get_parent, &h_get_parent);
+ cls_register_cxx_method(h_class, "set_parent",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ set_parent, &h_set_parent);
+ cls_register_cxx_method(h_class, "remove_parent",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ remove_parent, &h_remove_parent);
+
+ cls_register_cxx_method(h_class, "parent_get",
+ CLS_METHOD_RD, parent_get, &h_parent_get);
+ cls_register_cxx_method(h_class, "parent_overlap_get",
+ CLS_METHOD_RD, parent_overlap_get,
+ &h_parent_overlap_get);
+ cls_register_cxx_method(h_class, "parent_attach",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ parent_attach, &h_parent_attach);
+ cls_register_cxx_method(h_class, "parent_detach",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ parent_detach, &h_parent_detach);
+
+ cls_register_cxx_method(h_class, "set_protection_status",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ set_protection_status, &h_set_protection_status);
+ cls_register_cxx_method(h_class, "get_protection_status",
+ CLS_METHOD_RD,
+ get_protection_status, &h_get_protection_status);
+ cls_register_cxx_method(h_class, "get_stripe_unit_count",
+ CLS_METHOD_RD,
+ get_stripe_unit_count, &h_get_stripe_unit_count);
+ cls_register_cxx_method(h_class, "set_stripe_unit_count",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ set_stripe_unit_count, &h_set_stripe_unit_count);
+ cls_register_cxx_method(h_class, "get_create_timestamp",
+ CLS_METHOD_RD,
+ get_create_timestamp, &h_get_create_timestamp);
+ cls_register_cxx_method(h_class, "get_access_timestamp",
+ CLS_METHOD_RD,
+ get_access_timestamp, &h_get_access_timestamp);
+ cls_register_cxx_method(h_class, "get_modify_timestamp",
+ CLS_METHOD_RD,
+ get_modify_timestamp, &h_get_modify_timestamp);
+ cls_register_cxx_method(h_class, "get_flags",
+ CLS_METHOD_RD,
+ get_flags, &h_get_flags);
+ cls_register_cxx_method(h_class, "set_flags",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ set_flags, &h_set_flags);
+ cls_register_cxx_method(h_class, "op_features_get", CLS_METHOD_RD,
+ op_features_get, &h_op_features_get);
+ cls_register_cxx_method(h_class, "op_features_set",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ op_features_set, &h_op_features_set);
+ cls_register_cxx_method(h_class, "metadata_list",
+ CLS_METHOD_RD,
+ metadata_list, &h_metadata_list);
+ cls_register_cxx_method(h_class, "metadata_set",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ metadata_set, &h_metadata_set);
+ cls_register_cxx_method(h_class, "metadata_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ metadata_remove, &h_metadata_remove);
+ cls_register_cxx_method(h_class, "metadata_get",
+ CLS_METHOD_RD,
+ metadata_get, &h_metadata_get);
+ cls_register_cxx_method(h_class, "snapshot_get_limit",
+ CLS_METHOD_RD,
+ snapshot_get_limit, &h_snapshot_get_limit);
+ cls_register_cxx_method(h_class, "snapshot_set_limit",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ snapshot_set_limit, &h_snapshot_set_limit);
+ cls_register_cxx_method(h_class, "child_attach",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ child_attach, &h_child_attach);
+ cls_register_cxx_method(h_class, "child_detach",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ child_detach, &h_child_detach);
+ cls_register_cxx_method(h_class, "children_list",
+ CLS_METHOD_RD,
+ children_list, &h_children_list);
+ cls_register_cxx_method(h_class, "migration_set",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ migration_set, &h_migration_set);
+ cls_register_cxx_method(h_class, "migration_set_state",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ migration_set_state, &h_migration_set_state);
+ cls_register_cxx_method(h_class, "migration_get",
+ CLS_METHOD_RD,
+ migration_get, &h_migration_get);
+ cls_register_cxx_method(h_class, "migration_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ migration_remove, &h_migration_remove);
+
+ cls_register_cxx_method(h_class, "set_modify_timestamp",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ set_modify_timestamp, &h_set_modify_timestamp);
+
+ cls_register_cxx_method(h_class, "set_access_timestamp",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ set_access_timestamp, &h_set_access_timestamp);
+
+ /* methods for the rbd_children object */
+ cls_register_cxx_method(h_class, "add_child",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ add_child, &h_add_child);
+ cls_register_cxx_method(h_class, "remove_child",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ remove_child, &h_remove_child);
+ cls_register_cxx_method(h_class, "get_children",
+ CLS_METHOD_RD,
+ get_children, &h_get_children);
+
+ /* methods for the rbd_id.$image_name objects */
+ cls_register_cxx_method(h_class, "get_id",
+ CLS_METHOD_RD,
+ get_id, &h_get_id);
+ cls_register_cxx_method(h_class, "set_id",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ set_id, &h_set_id);
+
+ /* methods for the rbd_directory object */
+ cls_register_cxx_method(h_class, "dir_get_id",
+ CLS_METHOD_RD,
+ dir_get_id, &h_dir_get_id);
+ cls_register_cxx_method(h_class, "dir_get_name",
+ CLS_METHOD_RD,
+ dir_get_name, &h_dir_get_name);
+ cls_register_cxx_method(h_class, "dir_list",
+ CLS_METHOD_RD,
+ dir_list, &h_dir_list);
+ cls_register_cxx_method(h_class, "dir_add_image",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ dir_add_image, &h_dir_add_image);
+ cls_register_cxx_method(h_class, "dir_remove_image",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ dir_remove_image, &h_dir_remove_image);
+ cls_register_cxx_method(h_class, "dir_rename_image",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ dir_rename_image, &h_dir_rename_image);
+ cls_register_cxx_method(h_class, "dir_state_assert", CLS_METHOD_RD,
+ dir_state_assert, &h_dir_state_assert);
+ cls_register_cxx_method(h_class, "dir_state_set",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ dir_state_set, &h_dir_state_set);
+
+ /* methods for the rbd_object_map.$image_id object */
+ cls_register_cxx_method(h_class, "object_map_load",
+ CLS_METHOD_RD,
+ object_map_load, &h_object_map_load);
+ cls_register_cxx_method(h_class, "object_map_save",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ object_map_save, &h_object_map_save);
+ cls_register_cxx_method(h_class, "object_map_resize",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ object_map_resize, &h_object_map_resize);
+ cls_register_cxx_method(h_class, "object_map_update",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ object_map_update, &h_object_map_update);
+ cls_register_cxx_method(h_class, "object_map_snap_add",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ object_map_snap_add, &h_object_map_snap_add);
+ cls_register_cxx_method(h_class, "object_map_snap_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ object_map_snap_remove, &h_object_map_snap_remove);
+
+ /* methods for the old format */
+ cls_register_cxx_method(h_class, "snap_list",
+ CLS_METHOD_RD,
+ old_snapshots_list, &h_old_snapshots_list);
+ cls_register_cxx_method(h_class, "snap_add",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ old_snapshot_add, &h_old_snapshot_add);
+ cls_register_cxx_method(h_class, "snap_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ old_snapshot_remove, &h_old_snapshot_remove);
+ cls_register_cxx_method(h_class, "snap_rename",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ old_snapshot_rename, &h_old_snapshot_rename);
+
+ /* methods for the rbd_mirroring object */
+ cls_register_cxx_method(h_class, "mirror_uuid_get", CLS_METHOD_RD,
+ mirror_uuid_get, &h_mirror_uuid_get);
+ cls_register_cxx_method(h_class, "mirror_uuid_set",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ mirror_uuid_set, &h_mirror_uuid_set);
+ cls_register_cxx_method(h_class, "mirror_mode_get", CLS_METHOD_RD,
+ mirror_mode_get, &h_mirror_mode_get);
+ cls_register_cxx_method(h_class, "mirror_mode_set",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ mirror_mode_set, &h_mirror_mode_set);
+ cls_register_cxx_method(h_class, "mirror_peer_list", CLS_METHOD_RD,
+ mirror_peer_list, &h_mirror_peer_list);
+ cls_register_cxx_method(h_class, "mirror_peer_add",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ mirror_peer_add, &h_mirror_peer_add);
+ cls_register_cxx_method(h_class, "mirror_peer_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ mirror_peer_remove, &h_mirror_peer_remove);
+ cls_register_cxx_method(h_class, "mirror_peer_set_client",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ mirror_peer_set_client, &h_mirror_peer_set_client);
+ cls_register_cxx_method(h_class, "mirror_peer_set_cluster",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ mirror_peer_set_cluster, &h_mirror_peer_set_cluster);
+ cls_register_cxx_method(h_class, "mirror_image_list", CLS_METHOD_RD,
+ mirror_image_list, &h_mirror_image_list);
+ cls_register_cxx_method(h_class, "mirror_image_get_image_id", CLS_METHOD_RD,
+ mirror_image_get_image_id,
+ &h_mirror_image_get_image_id);
+ cls_register_cxx_method(h_class, "mirror_image_get", CLS_METHOD_RD,
+ mirror_image_get, &h_mirror_image_get);
+ cls_register_cxx_method(h_class, "mirror_image_set",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ mirror_image_set, &h_mirror_image_set);
+ cls_register_cxx_method(h_class, "mirror_image_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ mirror_image_remove, &h_mirror_image_remove);
+ cls_register_cxx_method(h_class, "mirror_image_status_set",
+ CLS_METHOD_RD | CLS_METHOD_WR | CLS_METHOD_PROMOTE,
+ mirror_image_status_set, &h_mirror_image_status_set);
+ cls_register_cxx_method(h_class, "mirror_image_status_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ mirror_image_status_remove,
+ &h_mirror_image_status_remove);
+ cls_register_cxx_method(h_class, "mirror_image_status_get", CLS_METHOD_RD,
+ mirror_image_status_get, &h_mirror_image_status_get);
+ cls_register_cxx_method(h_class, "mirror_image_status_list", CLS_METHOD_RD,
+ mirror_image_status_list,
+ &h_mirror_image_status_list);
+ cls_register_cxx_method(h_class, "mirror_image_status_get_summary",
+ CLS_METHOD_RD, mirror_image_status_get_summary,
+ &h_mirror_image_status_get_summary);
+ cls_register_cxx_method(h_class, "mirror_image_status_remove_down",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ mirror_image_status_remove_down,
+ &h_mirror_image_status_remove_down);
+ cls_register_cxx_method(h_class, "mirror_image_instance_get", CLS_METHOD_RD,
+ mirror_image_instance_get,
+ &h_mirror_image_instance_get);
+ cls_register_cxx_method(h_class, "mirror_image_instance_list", CLS_METHOD_RD,
+ mirror_image_instance_list,
+ &h_mirror_image_instance_list);
+ cls_register_cxx_method(h_class, "mirror_instances_list", CLS_METHOD_RD,
+ mirror_instances_list, &h_mirror_instances_list);
+ cls_register_cxx_method(h_class, "mirror_instances_add",
+ CLS_METHOD_RD | CLS_METHOD_WR | CLS_METHOD_PROMOTE,
+ mirror_instances_add, &h_mirror_instances_add);
+ cls_register_cxx_method(h_class, "mirror_instances_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ mirror_instances_remove,
+ &h_mirror_instances_remove);
+ cls_register_cxx_method(h_class, "mirror_image_map_list",
+ CLS_METHOD_RD, mirror_image_map_list,
+ &h_mirror_image_map_list);
+ cls_register_cxx_method(h_class, "mirror_image_map_update",
+ CLS_METHOD_WR, mirror_image_map_update,
+ &h_mirror_image_map_update);
+ cls_register_cxx_method(h_class, "mirror_image_map_remove",
+ CLS_METHOD_WR, mirror_image_map_remove,
+ &h_mirror_image_map_remove);
+
+ /* methods for the groups feature */
+ cls_register_cxx_method(h_class, "group_dir_list",
+ CLS_METHOD_RD,
+ group_dir_list, &h_group_dir_list);
+ cls_register_cxx_method(h_class, "group_dir_add",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ group_dir_add, &h_group_dir_add);
+ cls_register_cxx_method(h_class, "group_dir_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ group_dir_remove, &h_group_dir_remove);
+ cls_register_cxx_method(h_class, "group_dir_rename",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ group_dir_rename, &h_group_dir_rename);
+ cls_register_cxx_method(h_class, "group_image_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ group_image_remove, &h_group_image_remove);
+ cls_register_cxx_method(h_class, "group_image_list",
+ CLS_METHOD_RD,
+ group_image_list, &h_group_image_list);
+ cls_register_cxx_method(h_class, "group_image_set",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ group_image_set, &h_group_image_set);
+ cls_register_cxx_method(h_class, "image_group_add",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ image_group_add, &h_image_group_add);
+ cls_register_cxx_method(h_class, "image_group_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ image_group_remove, &h_image_group_remove);
+ cls_register_cxx_method(h_class, "image_group_get",
+ CLS_METHOD_RD,
+ image_group_get, &h_image_group_get);
+ cls_register_cxx_method(h_class, "group_snap_set",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ group_snap_set, &h_group_snap_set);
+ cls_register_cxx_method(h_class, "group_snap_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ group_snap_remove, &h_group_snap_remove);
+ cls_register_cxx_method(h_class, "group_snap_get_by_id",
+ CLS_METHOD_RD,
+ group_snap_get_by_id, &h_group_snap_get_by_id);
+ cls_register_cxx_method(h_class, "group_snap_list",
+ CLS_METHOD_RD,
+ group_snap_list, &h_group_snap_list);
+
+ /* rbd_trash object methods */
+ cls_register_cxx_method(h_class, "trash_add",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ trash_add, &h_trash_add);
+ cls_register_cxx_method(h_class, "trash_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ trash_remove, &h_trash_remove);
+ cls_register_cxx_method(h_class, "trash_list",
+ CLS_METHOD_RD,
+ trash_list, &h_trash_list);
+ cls_register_cxx_method(h_class, "trash_get",
+ CLS_METHOD_RD,
+ trash_get, &h_trash_get);
+ cls_register_cxx_method(h_class, "trash_state_set",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ trash_state_set, &h_trash_state_set);
+
+ /* rbd_namespace object methods */
+ cls_register_cxx_method(h_class, "namespace_add",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ namespace_add, &h_namespace_add);
+ cls_register_cxx_method(h_class, "namespace_remove",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ namespace_remove, &h_namespace_remove);
+ cls_register_cxx_method(h_class, "namespace_list", CLS_METHOD_RD,
+ namespace_list, &h_namespace_list);
+
+ /* data object methods */
+ cls_register_cxx_method(h_class, "copyup",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ copyup, &h_copyup);
+ cls_register_cxx_method(h_class, "assert_snapc_seq",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ assert_snapc_seq,
+ &h_assert_snapc_seq);
+ cls_register_cxx_method(h_class, "sparsify",
+ CLS_METHOD_RD | CLS_METHOD_WR,
+ sparsify, &h_sparsify);
+}
diff --git a/src/cls/rbd/cls_rbd.h b/src/cls/rbd/cls_rbd.h
new file mode 100644
index 00000000..0dc0a9b9
--- /dev/null
+++ b/src/cls/rbd/cls_rbd.h
@@ -0,0 +1,243 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef __CEPH_CLS_RBD_H
+#define __CEPH_CLS_RBD_H
+
+#include "include/types.h"
+#include "include/buffer_fwd.h"
+#include "include/rbd_types.h"
+#include "common/Formatter.h"
+#include "cls/rbd/cls_rbd_types.h"
+
+/// information about our parent image, if any
+struct cls_rbd_parent {
+ int64_t pool_id = -1;
+ std::string pool_namespace;
+ std::string image_id;
+ snapid_t snap_id = CEPH_NOSNAP;
+ std::optional<uint64_t> head_overlap = std::nullopt;
+
+ cls_rbd_parent() {
+ }
+ cls_rbd_parent(const cls::rbd::ParentImageSpec& parent_image_spec,
+ const std::optional<uint64_t>& head_overlap)
+ : pool_id(parent_image_spec.pool_id),
+ pool_namespace(parent_image_spec.pool_namespace),
+ image_id(parent_image_spec.image_id), snap_id(parent_image_spec.snap_id),
+ head_overlap(head_overlap) {
+ }
+
+ inline bool exists() const {
+ return (pool_id >= 0 && !image_id.empty() && snap_id != CEPH_NOSNAP);
+ }
+
+ inline bool operator==(const cls_rbd_parent& rhs) const {
+ return (pool_id == rhs.pool_id &&
+ pool_namespace == rhs.pool_namespace &&
+ image_id == rhs.image_id &&
+ snap_id == rhs.snap_id);
+ }
+ inline bool operator!=(const cls_rbd_parent& rhs) const {
+ return !(*this == rhs);
+ }
+
+ void encode(bufferlist& bl, uint64_t features) const {
+ // NOTE: remove support for version 1 after Nautilus EOLed
+ uint8_t version = 1;
+ if ((features & CEPH_FEATURE_SERVER_NAUTILUS) != 0ULL) {
+    // break backwards compatibility when using nautilus or later OSDs
+ version = 2;
+ }
+
+ ENCODE_START(version, version, bl);
+ encode(pool_id, bl);
+ if (version >= 2) {
+ encode(pool_namespace, bl);
+ }
+ encode(image_id, bl);
+ encode(snap_id, bl);
+ if (version == 1) {
+ encode(head_overlap.value_or(0ULL), bl);
+ } else {
+ encode(head_overlap, bl);
+ }
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(pool_id, bl);
+ if (struct_v >= 2) {
+ decode(pool_namespace, bl);
+ }
+ decode(image_id, bl);
+ decode(snap_id, bl);
+ if (struct_v == 1) {
+ uint64_t overlap;
+ decode(overlap, bl);
+ head_overlap = overlap;
+ } else {
+ decode(head_overlap, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const {
+ f->dump_int("pool_id", pool_id);
+ f->dump_string("pool_namespace", pool_namespace);
+ f->dump_string("image_id", image_id);
+ f->dump_unsigned("snap_id", snap_id);
+ if (head_overlap) {
+ f->dump_unsigned("head_overlap", *head_overlap);
+ }
+ }
+
+ static void generate_test_instances(list<cls_rbd_parent*>& o) {
+ o.push_back(new cls_rbd_parent{});
+ o.push_back(new cls_rbd_parent{{1, "", "image id", 234}, {}});
+ o.push_back(new cls_rbd_parent{{1, "", "image id", 234}, {123}});
+ o.push_back(new cls_rbd_parent{{1, "ns", "image id", 234}, {123}});
+ }
+};
+WRITE_CLASS_ENCODER_FEATURES(cls_rbd_parent)
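+
+// Illustrative round-trip sketch (feature flag and values are examples only):
+// with CEPH_FEATURE_SERVER_NAUTILUS present, the v2 format, including
+// pool_namespace and the optional head_overlap, is written out.
+//
+//   cls_rbd_parent parent{{1, "ns", "image_id", 4}, {1ULL << 22}};
+//   bufferlist bl;
+//   encode(parent, bl, CEPH_FEATURES_ALL);   // generated by WRITE_CLASS_ENCODER_FEATURES
+//   cls_rbd_parent decoded;
+//   auto it = bl.cbegin();
+//   decode(decoded, it);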
+
+struct cls_rbd_snap {
+ snapid_t id = CEPH_NOSNAP;
+ std::string name;
+ uint64_t image_size = 0;
+ uint8_t protection_status = RBD_PROTECTION_STATUS_UNPROTECTED;
+ cls_rbd_parent parent;
+ uint64_t flags = 0;
+ utime_t timestamp;
+ cls::rbd::SnapshotNamespace snapshot_namespace = {
+ cls::rbd::UserSnapshotNamespace{}};
+ uint32_t child_count = 0;
+ std::optional<uint64_t> parent_overlap = std::nullopt;
+
+ cls_rbd_snap() {
+ }
+ cls_rbd_snap(snapid_t id, const std::string& name, uint64_t image_size,
+ uint8_t protection_status, const cls_rbd_parent& parent,
+ uint64_t flags, utime_t timestamp,
+ const cls::rbd::SnapshotNamespace& snapshot_namespace,
+ uint32_t child_count,
+ const std::optional<uint64_t>& parent_overlap)
+ : id(id), name(name), image_size(image_size),
+ protection_status(protection_status), parent(parent), flags(flags),
+ timestamp(timestamp), snapshot_namespace(snapshot_namespace),
+ child_count(child_count), parent_overlap(parent_overlap) {
+ }
+
+ bool migrate_parent_format(uint64_t features) const {
+ return (((features & CEPH_FEATURE_SERVER_NAUTILUS) != 0) &&
+ (parent.exists()));
+ }
+
+ void encode(bufferlist& bl, uint64_t features) const {
+ // NOTE: remove support for versions < 8 after Nautilus EOLed
+ uint8_t min_version = 1;
+ if ((features & CEPH_FEATURE_SERVER_NAUTILUS) != 0ULL) {
+    // break backwards compatibility when using nautilus or later OSDs
+ min_version = 8;
+ }
+
+ ENCODE_START(8, min_version, bl);
+ encode(id, bl);
+ encode(name, bl);
+ encode(image_size, bl);
+ if (min_version < 8) {
+ uint64_t image_features = 0;
+ encode(image_features, bl); // unused -- preserve ABI
+ encode(parent, bl, features);
+ }
+ encode(protection_status, bl);
+ encode(flags, bl);
+ encode(snapshot_namespace, bl);
+ encode(timestamp, bl);
+ encode(child_count, bl);
+ encode(parent_overlap, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& p) {
+ DECODE_START(8, p);
+ decode(id, p);
+ decode(name, p);
+ decode(image_size, p);
+ if (struct_compat < 8) {
+ uint64_t features;
+ decode(features, p); // unused -- preserve ABI
+ }
+ if (struct_v >= 2 && struct_compat < 8) {
+ decode(parent, p);
+ }
+ if (struct_v >= 3) {
+ decode(protection_status, p);
+ }
+ if (struct_v >= 4) {
+ decode(flags, p);
+ }
+ if (struct_v >= 5) {
+ decode(snapshot_namespace, p);
+ }
+ if (struct_v >= 6) {
+ decode(timestamp, p);
+ }
+ if (struct_v >= 7) {
+ decode(child_count, p);
+ }
+ if (struct_v >= 8) {
+ decode(parent_overlap, p);
+ }
+ DECODE_FINISH(p);
+ }
+
+ void dump(Formatter *f) const {
+ f->dump_unsigned("id", id);
+ f->dump_string("name", name);
+ f->dump_unsigned("image_size", image_size);
+ if (parent.exists()) {
+ f->open_object_section("parent");
+ parent.dump(f);
+ f->close_section();
+ }
+ switch (protection_status) {
+ case RBD_PROTECTION_STATUS_UNPROTECTED:
+ f->dump_string("protection_status", "unprotected");
+ break;
+ case RBD_PROTECTION_STATUS_UNPROTECTING:
+ f->dump_string("protection_status", "unprotecting");
+ break;
+ case RBD_PROTECTION_STATUS_PROTECTED:
+ f->dump_string("protection_status", "protected");
+ break;
+ default:
+ ceph_abort();
+ }
+ f->dump_unsigned("child_count", child_count);
+ if (parent_overlap) {
+ f->dump_unsigned("parent_overlap", *parent_overlap);
+ }
+ }
+
+ static void generate_test_instances(list<cls_rbd_snap*>& o) {
+ o.push_back(new cls_rbd_snap{});
+ o.push_back(new cls_rbd_snap{1, "snap", 123456,
+ RBD_PROTECTION_STATUS_PROTECTED,
+ {{1, "", "image", 123}, 234}, 31, {},
+ cls::rbd::UserSnapshotNamespace{}, 543, {}});
+ o.push_back(new cls_rbd_snap{1, "snap", 123456,
+ RBD_PROTECTION_STATUS_PROTECTED,
+ {{1, "", "image", 123}, 234}, 31, {},
+ cls::rbd::UserSnapshotNamespace{}, 543, {0}});
+ o.push_back(new cls_rbd_snap{1, "snap", 123456,
+ RBD_PROTECTION_STATUS_PROTECTED,
+ {{1, "ns", "image", 123}, 234}, 31, {},
+ cls::rbd::UserSnapshotNamespace{}, 543,
+ {123}});
+ }
+};
+WRITE_CLASS_ENCODER_FEATURES(cls_rbd_snap)
+
+#endif // __CEPH_CLS_RBD_H
diff --git a/src/cls/rbd/cls_rbd_client.cc b/src/cls/rbd/cls_rbd_client.cc
new file mode 100644
index 00000000..9b868c55
--- /dev/null
+++ b/src/cls/rbd/cls_rbd_client.cc
@@ -0,0 +1,2807 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "cls/rbd/cls_rbd_client.h"
+#include "cls/lock/cls_lock_client.h"
+#include "include/buffer.h"
+#include "include/encoding.h"
+#include "include/rbd_types.h"
+#include "include/rados/librados.hpp"
+#include "common/bit_vector.hpp"
+
+#include <errno.h>
+
+namespace librbd {
+namespace cls_client {
+
+void create_image(librados::ObjectWriteOperation *op, uint64_t size,
+ uint8_t order, uint64_t features,
+ const std::string &object_prefix, int64_t data_pool_id)
+{
+ bufferlist bl;
+ encode(size, bl);
+ encode(order, bl);
+ encode(features, bl);
+ encode(object_prefix, bl);
+ encode(data_pool_id, bl);
+
+ op->exec("rbd", "create", bl);
+}
+
+int create_image(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t size, uint8_t order, uint64_t features,
+ const std::string &object_prefix, int64_t data_pool_id)
+{
+ librados::ObjectWriteOperation op;
+ create_image(&op, size, order, features, object_prefix, data_pool_id);
+
+ return ioctx->operate(oid, &op);
+}
+
+void get_features_start(librados::ObjectReadOperation *op, bool read_only)
+{
+ bufferlist bl;
+ encode(static_cast<uint64_t>(CEPH_NOSNAP), bl);
+ encode(read_only, bl);
+ op->exec("rbd", "get_features", bl);
+}
+
+int get_features_finish(bufferlist::const_iterator *it, uint64_t *features,
+ uint64_t *incompatible_features)
+{
+ try {
+ decode(*features, *it);
+ decode(*incompatible_features, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+int get_features(librados::IoCtx *ioctx, const std::string &oid,
+ bool read_only, uint64_t *features,
+ uint64_t *incompatible_features)
+{
+ librados::ObjectReadOperation op;
+ get_features_start(&op, read_only);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_features_finish(&it, features, incompatible_features);
+}
+
+void set_features(librados::ObjectWriteOperation *op, uint64_t features,
+ uint64_t mask)
+{
+ bufferlist bl;
+ encode(features, bl);
+ encode(mask, bl);
+
+ op->exec("rbd", "set_features", bl);
+}
+
+int set_features(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t features, uint64_t mask)
+{
+ librados::ObjectWriteOperation op;
+ set_features(&op, features, mask);
+
+ return ioctx->operate(oid, &op);
+}
+
+void get_object_prefix_start(librados::ObjectReadOperation *op)
+{
+ bufferlist bl;
+ op->exec("rbd", "get_object_prefix", bl);
+}
+
+int get_object_prefix_finish(bufferlist::const_iterator *it,
+ std::string *object_prefix)
+{
+ try {
+ decode(*object_prefix, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_object_prefix(librados::IoCtx *ioctx, const std::string &oid,
+ std::string *object_prefix)
+{
+ librados::ObjectReadOperation op;
+ get_object_prefix_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_object_prefix_finish(&it, object_prefix);
+}
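+
+// Illustrative sketch (an assumption about how callers typically use the
+// *_start/*_finish pairs, not a requirement of this API): several reads can
+// be batched into one ObjectReadOperation and their results decoded, in the
+// same order, from the single output bufferlist.
+//
+//   librados::ObjectReadOperation op;
+//   get_features_start(&op, true);
+//   get_object_prefix_start(&op);
+//   bufferlist out_bl;
+//   if (ioctx->operate(oid, &op, &out_bl) == 0) {
+//     auto it = out_bl.cbegin();
+//     uint64_t features, incompatible;
+//     int r = get_features_finish(&it, &features, &incompatible);
+//     std::string object_prefix;
+//     if (r == 0) {
+//       r = get_object_prefix_finish(&it, &object_prefix);
+//     }
+//   }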
+
+void get_data_pool_start(librados::ObjectReadOperation *op) {
+ bufferlist bl;
+ op->exec("rbd", "get_data_pool", bl);
+}
+
+int get_data_pool_finish(bufferlist::const_iterator *it, int64_t *data_pool_id) {
+ try {
+ decode(*data_pool_id, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_data_pool(librados::IoCtx *ioctx, const std::string &oid,
+ int64_t *data_pool_id) {
+ librados::ObjectReadOperation op;
+ get_data_pool_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_data_pool_finish(&it, data_pool_id);
+}
+
+void get_size_start(librados::ObjectReadOperation *op, snapid_t snap_id)
+{
+ bufferlist bl;
+ encode(snap_id, bl);
+ op->exec("rbd", "get_size", bl);
+}
+
+int get_size_finish(bufferlist::const_iterator *it, uint64_t *size,
+ uint8_t *order)
+{
+ try {
+ decode(*order, *it);
+ decode(*size, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_size(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, uint64_t *size, uint8_t *order)
+{
+ librados::ObjectReadOperation op;
+ get_size_start(&op, snap_id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_size_finish(&it, size, order);
+}
+
+int set_size(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t size)
+{
+ librados::ObjectWriteOperation op;
+ set_size(&op, size);
+ return ioctx->operate(oid, &op);
+}
+
+void set_size(librados::ObjectWriteOperation *op, uint64_t size)
+{
+ bufferlist bl;
+ encode(size, bl);
+ op->exec("rbd", "set_size", bl);
+}
+
+void get_flags_start(librados::ObjectReadOperation *op, snapid_t snap_id) {
+ bufferlist in_bl;
+ encode(static_cast<snapid_t>(snap_id), in_bl);
+ op->exec("rbd", "get_flags", in_bl);
+}
+
+int get_flags_finish(bufferlist::const_iterator *it, uint64_t *flags) {
+ try {
+ decode(*flags, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_flags(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, uint64_t *flags)
+{
+ librados::ObjectReadOperation op;
+ get_flags_start(&op, snap_id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_flags_finish(&it, flags);
+}
+
+void set_flags(librados::ObjectWriteOperation *op, snapid_t snap_id,
+ uint64_t flags, uint64_t mask)
+{
+ bufferlist inbl;
+ encode(flags, inbl);
+ encode(mask, inbl);
+ encode(snap_id, inbl);
+ op->exec("rbd", "set_flags", inbl);
+}
+
+void op_features_get_start(librados::ObjectReadOperation *op)
+{
+ bufferlist in_bl;
+ op->exec("rbd", "op_features_get", in_bl);
+}
+
+int op_features_get_finish(bufferlist::const_iterator *it, uint64_t *op_features)
+{
+ try {
+ decode(*op_features, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int op_features_get(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t *op_features)
+{
+ librados::ObjectReadOperation op;
+ op_features_get_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return op_features_get_finish(&it, op_features);
+}
+
+void op_features_set(librados::ObjectWriteOperation *op,
+ uint64_t op_features, uint64_t mask)
+{
+ bufferlist inbl;
+ encode(op_features, inbl);
+ encode(mask, inbl);
+ op->exec("rbd", "op_features_set", inbl);
+}
+
+int op_features_set(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t op_features, uint64_t mask)
+{
+ librados::ObjectWriteOperation op;
+ op_features_set(&op, op_features, mask);
+
+ return ioctx->operate(oid, &op);
+}
+
+void get_parent_start(librados::ObjectReadOperation *op, snapid_t snap_id)
+{
+ bufferlist bl;
+ encode(snap_id, bl);
+ op->exec("rbd", "get_parent", bl);
+}
+
+int get_parent_finish(bufferlist::const_iterator *it,
+ cls::rbd::ParentImageSpec *pspec,
+ uint64_t *parent_overlap)
+{
+ *pspec = {};
+ try {
+ decode(pspec->pool_id, *it);
+ decode(pspec->image_id, *it);
+ decode(pspec->snap_id, *it);
+ decode(*parent_overlap, *it);
+ } catch (const buffer::error &) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_parent(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, cls::rbd::ParentImageSpec *pspec,
+ uint64_t *parent_overlap)
+{
+ librados::ObjectReadOperation op;
+ get_parent_start(&op, snap_id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_parent_finish(&it, pspec, parent_overlap);
+}
+
+int set_parent(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::ParentImageSpec &pspec, uint64_t parent_overlap)
+{
+ librados::ObjectWriteOperation op;
+ set_parent(&op, pspec, parent_overlap);
+ return ioctx->operate(oid, &op);
+}
+
+void set_parent(librados::ObjectWriteOperation *op,
+ const cls::rbd::ParentImageSpec &pspec,
+ uint64_t parent_overlap) {
+ ceph_assert(pspec.pool_namespace.empty());
+
+ bufferlist in_bl;
+ encode(pspec.pool_id, in_bl);
+ encode(pspec.image_id, in_bl);
+ encode(pspec.snap_id, in_bl);
+ encode(parent_overlap, in_bl);
+
+ op->exec("rbd", "set_parent", in_bl);
+}
+
+int remove_parent(librados::IoCtx *ioctx, const std::string &oid)
+{
+ librados::ObjectWriteOperation op;
+ remove_parent(&op);
+ return ioctx->operate(oid, &op);
+}
+
+void remove_parent(librados::ObjectWriteOperation *op)
+{
+ bufferlist inbl;
+ op->exec("rbd", "remove_parent", inbl);
+}
+
+void parent_get_start(librados::ObjectReadOperation* op) {
+ bufferlist in_bl;
+ op->exec("rbd", "parent_get", in_bl);
+}
+
+int parent_get_finish(bufferlist::const_iterator* it,
+ cls::rbd::ParentImageSpec* parent_image_spec) {
+ try {
+ decode(*parent_image_spec, *it);
+ } catch (const buffer::error &) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int parent_get(librados::IoCtx* ioctx, const std::string &oid,
+ cls::rbd::ParentImageSpec* parent_image_spec) {
+ librados::ObjectReadOperation op;
+ parent_get_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ r = parent_get_finish(&it, parent_image_spec);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void parent_overlap_get_start(librados::ObjectReadOperation* op,
+ snapid_t snap_id) {
+ bufferlist in_bl;
+ encode(snap_id, in_bl);
+ op->exec("rbd", "parent_overlap_get", in_bl);
+}
+
+int parent_overlap_get_finish(bufferlist::const_iterator* it,
+ std::optional<uint64_t>* parent_overlap) {
+ try {
+ decode(*parent_overlap, *it);
+ } catch (const buffer::error &) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int parent_overlap_get(librados::IoCtx* ioctx, const std::string &oid,
+ snapid_t snap_id,
+ std::optional<uint64_t>* parent_overlap) {
+ librados::ObjectReadOperation op;
+ parent_overlap_get_start(&op, snap_id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ r = parent_overlap_get_finish(&it, parent_overlap);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void parent_attach(librados::ObjectWriteOperation* op,
+ const cls::rbd::ParentImageSpec& parent_image_spec,
+ uint64_t parent_overlap, bool reattach) {
+ bufferlist in_bl;
+ encode(parent_image_spec, in_bl);
+ encode(parent_overlap, in_bl);
+ encode(reattach, in_bl);
+ op->exec("rbd", "parent_attach", in_bl);
+}
+
+int parent_attach(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::ParentImageSpec& parent_image_spec,
+ uint64_t parent_overlap, bool reattach) {
+ librados::ObjectWriteOperation op;
+ parent_attach(&op, parent_image_spec, parent_overlap, reattach);
+ return ioctx->operate(oid, &op);
+}
+
+void parent_detach(librados::ObjectWriteOperation* op) {
+ bufferlist in_bl;
+ op->exec("rbd", "parent_detach", in_bl);
+}
+
+int parent_detach(librados::IoCtx *ioctx, const std::string &oid) {
+ librados::ObjectWriteOperation op;
+ parent_detach(&op);
+ return ioctx->operate(oid, &op);
+}
+
+int add_child(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::ParentImageSpec &pspec,
+ const std::string &c_imageid)
+{
+ librados::ObjectWriteOperation op;
+ add_child(&op, pspec, c_imageid);
+ return ioctx->operate(oid, &op);
+}
+
+void add_child(librados::ObjectWriteOperation *op,
+ const cls::rbd::ParentImageSpec& pspec,
+ const std::string &c_imageid)
+{
+ ceph_assert(pspec.pool_namespace.empty());
+
+ bufferlist in;
+ encode(pspec.pool_id, in);
+ encode(pspec.image_id, in);
+ encode(pspec.snap_id, in);
+ encode(c_imageid, in);
+
+ op->exec("rbd", "add_child", in);
+}
+
+void remove_child(librados::ObjectWriteOperation *op,
+ const cls::rbd::ParentImageSpec &pspec,
+ const std::string &c_imageid)
+{
+ ceph_assert(pspec.pool_namespace.empty());
+
+ bufferlist in;
+ encode(pspec.pool_id, in);
+ encode(pspec.image_id, in);
+ encode(pspec.snap_id, in);
+ encode(c_imageid, in);
+ op->exec("rbd", "remove_child", in);
+}
+
+int remove_child(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::ParentImageSpec &pspec,
+ const std::string &c_imageid)
+{
+ librados::ObjectWriteOperation op;
+ remove_child(&op, pspec, c_imageid);
+ return ioctx->operate(oid, &op);
+}
+
+void get_children_start(librados::ObjectReadOperation *op,
+ const cls::rbd::ParentImageSpec &pspec) {
+ bufferlist in_bl;
+ encode(pspec.pool_id, in_bl);
+ encode(pspec.image_id, in_bl);
+ encode(pspec.snap_id, in_bl);
+ op->exec("rbd", "get_children", in_bl);
+}
+
+int get_children_finish(bufferlist::const_iterator *it,
+ std::set<std::string>* children) {
+ try {
+ decode(*children, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_children(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::ParentImageSpec &pspec, set<string>& children)
+{
+ librados::ObjectReadOperation op;
+ get_children_start(&op, pspec);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_children_finish(&it, &children);
+}
+
+void snapshot_get_start(librados::ObjectReadOperation *op, snapid_t snap_id)
+{
+ bufferlist bl;
+ encode(snap_id, bl);
+ op->exec("rbd", "snapshot_get", bl);
+}
+
+int snapshot_get_finish(bufferlist::const_iterator* it,
+ cls::rbd::SnapshotInfo* snap_info)
+{
+ try {
+ decode(*snap_info, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int snapshot_get(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, cls::rbd::SnapshotInfo* snap_info)
+{
+ librados::ObjectReadOperation op;
+ snapshot_get_start(&op, snap_id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return snapshot_get_finish(&it, snap_info);
+}
+
+void snapshot_add(librados::ObjectWriteOperation *op, snapid_t snap_id,
+ const std::string &snap_name,
+ const cls::rbd::SnapshotNamespace &snap_namespace)
+{
+ bufferlist bl;
+ encode(snap_name, bl);
+ encode(snap_id, bl);
+ encode(snap_namespace, bl);
+ op->exec("rbd", "snapshot_add", bl);
+}
+
+void snapshot_remove(librados::ObjectWriteOperation *op, snapid_t snap_id)
+{
+ bufferlist bl;
+ encode(snap_id, bl);
+ op->exec("rbd", "snapshot_remove", bl);
+}
+
+void snapshot_rename(librados::ObjectWriteOperation *op,
+ snapid_t src_snap_id,
+ const std::string &dst_name)
+{
+ bufferlist bl;
+ encode(src_snap_id, bl);
+ encode(dst_name, bl);
+ op->exec("rbd", "snapshot_rename", bl);
+}
+
+void snapshot_trash_add(librados::ObjectWriteOperation *op,
+ snapid_t snap_id)
+{
+ bufferlist bl;
+ encode(snap_id, bl);
+ op->exec("rbd", "snapshot_trash_add", bl);
+}
+
+void get_snapcontext_start(librados::ObjectReadOperation *op)
+{
+ bufferlist bl;
+ op->exec("rbd", "get_snapcontext", bl);
+}
+
+int get_snapcontext_finish(bufferlist::const_iterator *it,
+ ::SnapContext *snapc)
+{
+ try {
+ decode(*snapc, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ if (!snapc->is_valid()) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_snapcontext(librados::IoCtx *ioctx, const std::string &oid,
+ ::SnapContext *snapc)
+{
+ librados::ObjectReadOperation op;
+ get_snapcontext_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto bl_it = out_bl.cbegin();
+ return get_snapcontext_finish(&bl_it, snapc);
+}
+
+void get_snapshot_name_start(librados::ObjectReadOperation *op,
+ snapid_t snap_id)
+{
+ bufferlist bl;
+ encode(snap_id, bl);
+ op->exec("rbd", "get_snapshot_name", bl);
+}
+
+int get_snapshot_name_finish(bufferlist::const_iterator *it,
+ std::string *name)
+{
+ try {
+ decode(*name, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_snapshot_name(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, std::string *name)
+{
+ librados::ObjectReadOperation op;
+ get_snapshot_name_start(&op, snap_id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_snapshot_name_finish(&it, name);
+}
+
+void get_snapshot_timestamp_start(librados::ObjectReadOperation *op,
+ snapid_t snap_id)
+{
+ bufferlist bl;
+ encode(snap_id, bl);
+ op->exec("rbd", "get_snapshot_timestamp", bl);
+}
+
+int get_snapshot_timestamp_finish(bufferlist::const_iterator *it,
+ utime_t *timestamp)
+{
+ try {
+ decode(*timestamp, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_snapshot_timestamp(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, utime_t *timestamp)
+{
+ librados::ObjectReadOperation op;
+ get_snapshot_timestamp_start(&op, snap_id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_snapshot_timestamp_finish(&it, timestamp);
+}
+
+void old_snapshot_add(librados::ObjectWriteOperation *op,
+ snapid_t snap_id, const std::string &snap_name)
+{
+ bufferlist bl;
+ encode(snap_name, bl);
+ encode(snap_id, bl);
+ op->exec("rbd", "snap_add", bl);
+}
+
+void old_snapshot_remove(librados::ObjectWriteOperation *op,
+ const std::string &snap_name)
+{
+ bufferlist bl;
+ encode(snap_name, bl);
+ op->exec("rbd", "snap_remove", bl);
+}
+
+void old_snapshot_rename(librados::ObjectWriteOperation *op,
+ snapid_t src_snap_id, const std::string &dst_name)
+{
+ bufferlist bl;
+ encode(src_snap_id, bl);
+ encode(dst_name, bl);
+ op->exec("rbd", "snap_rename", bl);
+}
+
+void old_snapshot_list_start(librados::ObjectReadOperation *op) {
+ bufferlist in_bl;
+ op->exec("rbd", "snap_list", in_bl);
+}
+
+int old_snapshot_list_finish(bufferlist::const_iterator *it,
+ std::vector<string> *names,
+ std::vector<uint64_t> *sizes,
+ ::SnapContext *snapc) {
+ try {
+ uint32_t num_snaps;
+ decode(snapc->seq, *it);
+ decode(num_snaps, *it);
+
+ names->resize(num_snaps);
+ sizes->resize(num_snaps);
+ snapc->snaps.resize(num_snaps);
+ for (uint32_t i = 0; i < num_snaps; ++i) {
+ decode(snapc->snaps[i], *it);
+ decode((*sizes)[i], *it);
+ decode((*names)[i], *it);
+ }
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int old_snapshot_list(librados::IoCtx *ioctx, const std::string &oid,
+ std::vector<string> *names,
+ std::vector<uint64_t> *sizes,
+ ::SnapContext *snapc)
+{
+ librados::ObjectReadOperation op;
+ old_snapshot_list_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return old_snapshot_list_finish(&it, names, sizes, snapc);
+}
+
+void get_all_features_start(librados::ObjectReadOperation *op) {
+ bufferlist in;
+ op->exec("rbd", "get_all_features", in);
+}
+
+int get_all_features_finish(bufferlist::const_iterator *it,
+ uint64_t *all_features) {
+ try {
+ decode(*all_features, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_all_features(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t *all_features) {
+ librados::ObjectReadOperation op;
+ get_all_features_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_all_features_finish(&it, all_features);
+}
+
+int copyup(librados::IoCtx *ioctx, const std::string &oid,
+ bufferlist data) {
+ bufferlist out;
+ return ioctx->exec(oid, "rbd", "copyup", data, out);
+}
+
+void get_protection_status_start(librados::ObjectReadOperation *op,
+ snapid_t snap_id)
+{
+ bufferlist bl;
+ encode(snap_id, bl);
+ op->exec("rbd", "get_protection_status", bl);
+}
+
+int get_protection_status_finish(bufferlist::const_iterator *it,
+ uint8_t *protection_status)
+{
+ try {
+ decode(*protection_status, *it);
+ } catch (const buffer::error &) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_protection_status(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, uint8_t *protection_status)
+{
+ librados::ObjectReadOperation op;
+ get_protection_status_start(&op, snap_id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_protection_status_finish(&it, protection_status);
+}
+
+int set_protection_status(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, uint8_t protection_status)
+{
+ // TODO remove
+ librados::ObjectWriteOperation op;
+ set_protection_status(&op, snap_id, protection_status);
+ return ioctx->operate(oid, &op);
+}
+
+void set_protection_status(librados::ObjectWriteOperation *op,
+ snapid_t snap_id, uint8_t protection_status)
+{
+ bufferlist in;
+ encode(snap_id, in);
+ encode(protection_status, in);
+ op->exec("rbd", "set_protection_status", in);
+}
+
+int snapshot_get_limit(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t *limit)
+{
+ bufferlist in, out;
+ int r = ioctx->exec(oid, "rbd", "snapshot_get_limit", in, out);
+
+ if (r < 0) {
+ return r;
+ }
+
+ try {
+ auto iter = out.cbegin();
+ decode(*limit, iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+void snapshot_set_limit(librados::ObjectWriteOperation *op, uint64_t limit)
+{
+ bufferlist in;
+ encode(limit, in);
+ op->exec("rbd", "snapshot_set_limit", in);
+}
+
+void get_stripe_unit_count_start(librados::ObjectReadOperation *op) {
+ bufferlist empty_bl;
+ op->exec("rbd", "get_stripe_unit_count", empty_bl);
+}
+
+int get_stripe_unit_count_finish(bufferlist::const_iterator *it,
+ uint64_t *stripe_unit,
+ uint64_t *stripe_count) {
+ ceph_assert(stripe_unit);
+ ceph_assert(stripe_count);
+
+ try {
+ decode(*stripe_unit, *it);
+ decode(*stripe_count, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_stripe_unit_count(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t *stripe_unit, uint64_t *stripe_count)
+{
+ librados::ObjectReadOperation op;
+ get_stripe_unit_count_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_stripe_unit_count_finish(&it, stripe_unit, stripe_count);
+}
+
+void set_stripe_unit_count(librados::ObjectWriteOperation *op,
+ uint64_t stripe_unit, uint64_t stripe_count)
+{
+ bufferlist bl;
+ encode(stripe_unit, bl);
+ encode(stripe_count, bl);
+
+ op->exec("rbd", "set_stripe_unit_count", bl);
+}
+
+int set_stripe_unit_count(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t stripe_unit, uint64_t stripe_count)
+{
+ librados::ObjectWriteOperation op;
+ set_stripe_unit_count(&op, stripe_unit, stripe_count);
+
+ return ioctx->operate(oid, &op);
+}
+
+void get_create_timestamp_start(librados::ObjectReadOperation *op) {
+ bufferlist empty_bl;
+ op->exec("rbd", "get_create_timestamp", empty_bl);
+}
+
+int get_create_timestamp_finish(bufferlist::const_iterator *it,
+ utime_t *timestamp) {
+ ceph_assert(timestamp);
+
+ try {
+ decode(*timestamp, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_create_timestamp(librados::IoCtx *ioctx, const std::string &oid,
+ utime_t *timestamp)
+{
+ librados::ObjectReadOperation op;
+ get_create_timestamp_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_create_timestamp_finish(&it, timestamp);
+}
+
+void get_access_timestamp_start(librados::ObjectReadOperation *op) {
+ bufferlist empty_bl;
+ op->exec("rbd", "get_access_timestamp", empty_bl);
+}
+
+int get_access_timestamp_finish(bufferlist::const_iterator *it,
+ utime_t *timestamp) {
+ ceph_assert(timestamp);
+
+ try {
+ decode(*timestamp, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_access_timestamp(librados::IoCtx *ioctx, const std::string &oid,
+ utime_t *timestamp)
+{
+ librados::ObjectReadOperation op;
+ get_access_timestamp_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_access_timestamp_finish(&it, timestamp);
+}
+
+void set_access_timestamp(librados::ObjectWriteOperation *op)
+{
+ bufferlist empty_bl;
+ op->exec("rbd","set_access_timestamp",empty_bl);
+}
+
+int set_access_timestamp(librados::IoCtx *ioctx, const std::string &oid)
+{
+ librados::ObjectWriteOperation op;
+ set_access_timestamp(&op);
+ return ioctx->operate(oid, &op);
+}
+
+void get_modify_timestamp_start(librados::ObjectReadOperation *op) {
+ bufferlist empty_bl;
+ op->exec("rbd", "get_modify_timestamp", empty_bl);
+}
+
+int get_modify_timestamp_finish(bufferlist::const_iterator *it,
+ utime_t *timestamp) {
+ ceph_assert(timestamp);
+
+ try {
+ decode(*timestamp, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_modify_timestamp(librados::IoCtx *ioctx, const std::string &oid,
+ utime_t *timestamp)
+{
+ librados::ObjectReadOperation op;
+ get_modify_timestamp_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_modify_timestamp_finish(&it, timestamp);
+}
+
+void set_modify_timestamp(librados::ObjectWriteOperation *op)
+{
+ bufferlist empty_bl;
+ op->exec("rbd","set_modify_timestamp",empty_bl);
+}
+
+int set_modify_timestamp(librados::IoCtx *ioctx, const std::string &oid)
+{
+ librados::ObjectWriteOperation op;
+ set_modify_timestamp(&op);
+ return ioctx->operate(oid, &op);
+}
+
+
+/************************ rbd_id object methods ************************/
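+// get_id()/set_id() read and write the image id string stored in this object
+// via the "get_id" and "set_id" class methods.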
+
+void get_id_start(librados::ObjectReadOperation *op) {
+ bufferlist empty_bl;
+ op->exec("rbd", "get_id", empty_bl);
+}
+
+int get_id_finish(bufferlist::const_iterator *it, std::string *id) {
+ try {
+ decode(*id, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int get_id(librados::IoCtx *ioctx, const std::string &oid, std::string *id)
+{
+ librados::ObjectReadOperation op;
+ get_id_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return get_id_finish(&it, id);
+}
+
+void set_id(librados::ObjectWriteOperation *op, const std::string &id)
+{
+ bufferlist bl;
+ encode(id, bl);
+ op->exec("rbd", "set_id", bl);
+}
+
+int set_id(librados::IoCtx *ioctx, const std::string &oid, const std::string &id)
+{
+ librados::ObjectWriteOperation op;
+ set_id(&op, id);
+
+ return ioctx->operate(oid, &op);
+}
+
+/******************** rbd_directory object methods ********************/
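+// The rbd_directory object maps image names to ids and back: dir_get_id(),
+// dir_get_name() and dir_list() query it, while dir_add_image(),
+// dir_remove_image() and dir_rename_image() update it.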
+
+void dir_get_id_start(librados::ObjectReadOperation *op,
+ const std::string &image_name) {
+ bufferlist bl;
+ encode(image_name, bl);
+
+ op->exec("rbd", "dir_get_id", bl);
+}
+
+int dir_get_id_finish(bufferlist::const_iterator *iter, std::string *image_id) {
+ try {
+ decode(*image_id, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+int dir_get_id(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &name, std::string *id) {
+ librados::ObjectReadOperation op;
+ dir_get_id_start(&op, name);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ return dir_get_id_finish(&iter, id);
+}
+
+void dir_get_name_start(librados::ObjectReadOperation *op,
+ const std::string &id) {
+ bufferlist in_bl;
+ encode(id, in_bl);
+ op->exec("rbd", "dir_get_name", in_bl);
+}
+
+int dir_get_name_finish(bufferlist::const_iterator *it, std::string *name) {
+ try {
+ decode(*name, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int dir_get_name(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &id, std::string *name) {
+ librados::ObjectReadOperation op;
+ dir_get_name_start(&op, id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return dir_get_name_finish(&it, name);
+}
+
+void dir_list_start(librados::ObjectReadOperation *op,
+ const std::string &start, uint64_t max_return)
+{
+ bufferlist in_bl;
+ encode(start, in_bl);
+ encode(max_return, in_bl);
+
+ op->exec("rbd", "dir_list", in_bl);
+}
+
+int dir_list_finish(bufferlist::const_iterator *it, map<string, string> *images)
+{
+ try {
+ decode(*images, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int dir_list(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &start, uint64_t max_return,
+ map<string, string> *images)
+{
+ librados::ObjectReadOperation op;
+ dir_list_start(&op, start, max_return);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ return dir_list_finish(&iter, images);
+}
+
+void dir_add_image(librados::ObjectWriteOperation *op,
+ const std::string &name, const std::string &id)
+{
+ bufferlist bl;
+ encode(name, bl);
+ encode(id, bl);
+ op->exec("rbd", "dir_add_image", bl);
+}
+
+int dir_add_image(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &name, const std::string &id)
+{
+ librados::ObjectWriteOperation op;
+ dir_add_image(&op, name, id);
+
+ return ioctx->operate(oid, &op);
+}
+
+int dir_remove_image(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &name, const std::string &id)
+{
+ librados::ObjectWriteOperation op;
+ dir_remove_image(&op, name, id);
+
+ return ioctx->operate(oid, &op);
+}
+
+void dir_state_assert(librados::ObjectOperation *op,
+ cls::rbd::DirectoryState directory_state)
+{
+ bufferlist bl;
+ encode(directory_state, bl);
+ op->exec("rbd", "dir_state_assert", bl);
+}
+
+int dir_state_assert(librados::IoCtx *ioctx, const std::string &oid,
+ cls::rbd::DirectoryState directory_state)
+{
+ librados::ObjectWriteOperation op;
+ dir_state_assert(&op, directory_state);
+
+ return ioctx->operate(oid, &op);
+}
+
+void dir_state_set(librados::ObjectWriteOperation *op,
+ cls::rbd::DirectoryState directory_state)
+{
+ bufferlist bl;
+ encode(directory_state, bl);
+ op->exec("rbd", "dir_state_set", bl);
+}
+
+int dir_state_set(librados::IoCtx *ioctx, const std::string &oid,
+ cls::rbd::DirectoryState directory_state)
+{
+ librados::ObjectWriteOperation op;
+ dir_state_set(&op, directory_state);
+
+ return ioctx->operate(oid, &op);
+}
+
+void dir_remove_image(librados::ObjectWriteOperation *op,
+ const std::string &name, const std::string &id)
+{
+ bufferlist bl;
+ encode(name, bl);
+ encode(id, bl);
+
+ op->exec("rbd", "dir_remove_image", bl);
+}
+
+void dir_rename_image(librados::ObjectWriteOperation *op,
+ const std::string &src, const std::string &dest,
+ const std::string &id)
+{
+ bufferlist in;
+ encode(src, in);
+ encode(dest, in);
+ encode(id, in);
+ op->exec("rbd", "dir_rename_image", in);
+}
+
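+// Object map helpers: load and save the per-image object map, resize it, and
+// update the state of object ranges.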
+void object_map_load_start(librados::ObjectReadOperation *op) {
+ bufferlist in_bl;
+ op->exec("rbd", "object_map_load", in_bl);
+}
+
+int object_map_load_finish(bufferlist::const_iterator *it,
+ ceph::BitVector<2> *object_map) {
+ try {
+ decode(*object_map, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int object_map_load(librados::IoCtx *ioctx, const std::string &oid,
+ ceph::BitVector<2> *object_map)
+{
+ librados::ObjectReadOperation op;
+ object_map_load_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return object_map_load_finish(&it, object_map);
+}
+
+void object_map_save(librados::ObjectWriteOperation *rados_op,
+ const ceph::BitVector<2> &object_map)
+{
+ ceph::BitVector<2> object_map_copy(object_map);
+ object_map_copy.set_crc_enabled(false);
+
+ bufferlist in;
+ encode(object_map_copy, in);
+ rados_op->exec("rbd", "object_map_save", in);
+}
+
+void object_map_resize(librados::ObjectWriteOperation *rados_op,
+ uint64_t object_count, uint8_t default_state)
+{
+ bufferlist in;
+ encode(object_count, in);
+ encode(default_state, in);
+ rados_op->exec("rbd", "object_map_resize", in);
+}
+
+void object_map_update(librados::ObjectWriteOperation *rados_op,
+ uint64_t start_object_no, uint64_t end_object_no,
+ uint8_t new_object_state,
+ const boost::optional<uint8_t> &current_object_state)
+{
+ bufferlist in;
+ encode(start_object_no, in);
+ encode(end_object_no, in);
+ encode(new_object_state, in);
+ encode(current_object_state, in);
+ rados_op->exec("rbd", "object_map_update", in);
+}
+
+void object_map_snap_add(librados::ObjectWriteOperation *rados_op)
+{
+ bufferlist in;
+ rados_op->exec("rbd", "object_map_snap_add", in);
+}
+
+void object_map_snap_remove(librados::ObjectWriteOperation *rados_op,
+ const ceph::BitVector<2> &object_map)
+{
+ ceph::BitVector<2> object_map_copy(object_map);
+ object_map_copy.set_crc_enabled(false);
+
+ bufferlist in;
+ encode(object_map_copy, in);
+ rados_op->exec("rbd", "object_map_snap_remove", in);
+}
+
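+// Image metadata (key/value pair) helpers.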
+void metadata_set(librados::ObjectWriteOperation *op,
+ const map<string, bufferlist> &data)
+{
+ bufferlist bl;
+ encode(data, bl);
+
+ op->exec("rbd", "metadata_set", bl);
+}
+
+int metadata_set(librados::IoCtx *ioctx, const std::string &oid,
+ const map<string, bufferlist> &data)
+{
+ librados::ObjectWriteOperation op;
+ metadata_set(&op, data);
+
+ return ioctx->operate(oid, &op);
+}
+
+void metadata_remove(librados::ObjectWriteOperation *op,
+ const std::string &key)
+{
+ bufferlist bl;
+ encode(key, bl);
+
+ op->exec("rbd", "metadata_remove", bl);
+}
+
+int metadata_remove(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &key)
+{
+ librados::ObjectWriteOperation op;
+ metadata_remove(&op, key);
+
+ return ioctx->operate(oid, &op);
+}
+
+int metadata_list(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &start, uint64_t max_return,
+ map<string, bufferlist> *pairs)
+{
+ librados::ObjectReadOperation op;
+ metadata_list_start(&op, start, max_return);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return metadata_list_finish(&it, pairs);
+}
+
+void metadata_list_start(librados::ObjectReadOperation *op,
+ const std::string &start, uint64_t max_return)
+{
+ bufferlist in_bl;
+ encode(start, in_bl);
+ encode(max_return, in_bl);
+ op->exec("rbd", "metadata_list", in_bl);
+}
+
+int metadata_list_finish(bufferlist::const_iterator *it,
+ std::map<std::string, bufferlist> *pairs)
+{
+ ceph_assert(pairs);
+ try {
+ decode(*pairs, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int metadata_get(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &key, string *s)
+{
+ ceph_assert(s);
+ bufferlist in, out;
+ encode(key, in);
+ int r = ioctx->exec(oid, "rbd", "metadata_get", in, out);
+ if (r < 0)
+ return r;
+
+ auto iter = out.cbegin();
+ try {
+ decode(*s, iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+void child_attach(librados::ObjectWriteOperation *op, snapid_t snap_id,
+ const cls::rbd::ChildImageSpec& child_image)
+{
+ bufferlist bl;
+ encode(snap_id, bl);
+ encode(child_image, bl);
+ op->exec("rbd", "child_attach", bl);
+}
+
+int child_attach(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id,
+ const cls::rbd::ChildImageSpec& child_image)
+{
+ librados::ObjectWriteOperation op;
+ child_attach(&op, snap_id, child_image);
+
+ int r = ioctx->operate(oid, &op);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void child_detach(librados::ObjectWriteOperation *op, snapid_t snap_id,
+ const cls::rbd::ChildImageSpec& child_image)
+{
+ bufferlist bl;
+ encode(snap_id, bl);
+ encode(child_image, bl);
+ op->exec("rbd", "child_detach", bl);
+}
+
+int child_detach(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id,
+ const cls::rbd::ChildImageSpec& child_image)
+{
+ librados::ObjectWriteOperation op;
+ child_detach(&op, snap_id, child_image);
+
+ int r = ioctx->operate(oid, &op);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void children_list_start(librados::ObjectReadOperation *op,
+ snapid_t snap_id)
+{
+ bufferlist bl;
+ encode(snap_id, bl);
+ op->exec("rbd", "children_list", bl);
+}
+
+int children_list_finish(bufferlist::const_iterator *it,
+ cls::rbd::ChildImageSpecs *child_images)
+{
+ child_images->clear();
+ try {
+ decode(*child_images, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int children_list(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id,
+ cls::rbd::ChildImageSpecs *child_images)
+{
+ librados::ObjectReadOperation op;
+ children_list_start(&op, snap_id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ r = children_list_finish(&it, child_images);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+int migration_set(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::MigrationSpec &migration_spec) {
+ librados::ObjectWriteOperation op;
+ migration_set(&op, migration_spec);
+ return ioctx->operate(oid, &op);
+}
+
+void migration_set(librados::ObjectWriteOperation *op,
+ const cls::rbd::MigrationSpec &migration_spec) {
+ bufferlist bl;
+ encode(migration_spec, bl);
+ op->exec("rbd", "migration_set", bl);
+}
+
+int migration_set_state(librados::IoCtx *ioctx, const std::string &oid,
+ cls::rbd::MigrationState state,
+ const std::string &description) {
+ librados::ObjectWriteOperation op;
+ migration_set_state(&op, state, description);
+ return ioctx->operate(oid, &op);
+}
+
+void migration_set_state(librados::ObjectWriteOperation *op,
+ cls::rbd::MigrationState state,
+ const std::string &description) {
+ bufferlist bl;
+ encode(state, bl);
+ encode(description, bl);
+ op->exec("rbd", "migration_set_state", bl);
+}
+
+void migration_get_start(librados::ObjectReadOperation *op) {
+ bufferlist bl;
+ op->exec("rbd", "migration_get", bl);
+}
+
+int migration_get_finish(bufferlist::const_iterator *it,
+ cls::rbd::MigrationSpec *migration_spec) {
+ try {
+ decode(*migration_spec, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int migration_get(librados::IoCtx *ioctx, const std::string &oid,
+ cls::rbd::MigrationSpec *migration_spec) {
+ librados::ObjectReadOperation op;
+ migration_get_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ r = migration_get_finish(&iter, migration_spec);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+int migration_remove(librados::IoCtx *ioctx, const std::string &oid) {
+ librados::ObjectWriteOperation op;
+ migration_remove(&op);
+ return ioctx->operate(oid, &op);
+}
+
+void migration_remove(librados::ObjectWriteOperation *op) {
+ bufferlist bl;
+ op->exec("rbd", "migration_remove", bl);
+}
+
+int assert_snapc_seq(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t snapc_seq,
+ cls::rbd::AssertSnapcSeqState state) {
+ librados::ObjectWriteOperation op;
+ assert_snapc_seq(&op, snapc_seq, state);
+ return ioctx->operate(oid, &op);
+}
+
+void assert_snapc_seq(librados::ObjectWriteOperation *op,
+ uint64_t snapc_seq,
+ cls::rbd::AssertSnapcSeqState state) {
+ bufferlist bl;
+ encode(snapc_seq, bl);
+ encode(state, bl);
+ op->exec("rbd", "assert_snapc_seq", bl);
+}
+
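+// Mirroring helpers: the following functions operate on the pool-level
+// RBD_MIRRORING object (and RBD_MIRROR_LEADER for the instance registry).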
+void mirror_uuid_get_start(librados::ObjectReadOperation *op) {
+ bufferlist bl;
+ op->exec("rbd", "mirror_uuid_get", bl);
+}
+
+int mirror_uuid_get_finish(bufferlist::const_iterator *it,
+ std::string *uuid) {
+ try {
+ decode(*uuid, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int mirror_uuid_get(librados::IoCtx *ioctx, std::string *uuid) {
+ librados::ObjectReadOperation op;
+ mirror_uuid_get_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ r = mirror_uuid_get_finish(&it, uuid);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+int mirror_uuid_set(librados::IoCtx *ioctx, const std::string &uuid) {
+ bufferlist in_bl;
+ encode(uuid, in_bl);
+
+ bufferlist out_bl;
+ int r = ioctx->exec(RBD_MIRRORING, "rbd", "mirror_uuid_set", in_bl,
+ out_bl);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void mirror_mode_get_start(librados::ObjectReadOperation *op) {
+ bufferlist bl;
+ op->exec("rbd", "mirror_mode_get", bl);
+}
+
+int mirror_mode_get_finish(bufferlist::const_iterator *it,
+ cls::rbd::MirrorMode *mirror_mode) {
+ try {
+ uint32_t mirror_mode_decode;
+ decode(mirror_mode_decode, *it);
+ *mirror_mode = static_cast<cls::rbd::MirrorMode>(mirror_mode_decode);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+int mirror_mode_get(librados::IoCtx *ioctx,
+ cls::rbd::MirrorMode *mirror_mode) {
+ librados::ObjectReadOperation op;
+ mirror_mode_get_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl);
+ if (r == -ENOENT) {
+ *mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
+ return 0;
+ } else if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ r = mirror_mode_get_finish(&it, mirror_mode);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+int mirror_mode_set(librados::IoCtx *ioctx,
+ cls::rbd::MirrorMode mirror_mode) {
+ bufferlist in_bl;
+ encode(static_cast<uint32_t>(mirror_mode), in_bl);
+
+ bufferlist out_bl;
+ int r = ioctx->exec(RBD_MIRRORING, "rbd", "mirror_mode_set", in_bl,
+ out_bl);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+int mirror_peer_list(librados::IoCtx *ioctx,
+ std::vector<cls::rbd::MirrorPeer> *peers) {
+ bufferlist in_bl;
+ bufferlist out_bl;
+ int r = ioctx->exec(RBD_MIRRORING, "rbd", "mirror_peer_list", in_bl,
+ out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ peers->clear();
+ try {
+ auto bl_it = out_bl.cbegin();
+ decode(*peers, bl_it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int mirror_peer_add(librados::IoCtx *ioctx, const std::string &uuid,
+ const std::string &cluster_name,
+ const std::string &client_name) {
+ cls::rbd::MirrorPeer peer(uuid, cluster_name, client_name, -1);
+ bufferlist in_bl;
+ encode(peer, in_bl);
+
+ bufferlist out_bl;
+ int r = ioctx->exec(RBD_MIRRORING, "rbd", "mirror_peer_add", in_bl,
+ out_bl);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+int mirror_peer_remove(librados::IoCtx *ioctx,
+ const std::string &uuid) {
+ bufferlist in_bl;
+ encode(uuid, in_bl);
+
+ bufferlist out_bl;
+ int r = ioctx->exec(RBD_MIRRORING, "rbd", "mirror_peer_remove", in_bl,
+ out_bl);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+int mirror_peer_set_client(librados::IoCtx *ioctx,
+ const std::string &uuid,
+ const std::string &client_name) {
+ bufferlist in_bl;
+ encode(uuid, in_bl);
+ encode(client_name, in_bl);
+
+ bufferlist out_bl;
+ int r = ioctx->exec(RBD_MIRRORING, "rbd", "mirror_peer_set_client",
+ in_bl, out_bl);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+int mirror_peer_set_cluster(librados::IoCtx *ioctx,
+ const std::string &uuid,
+ const std::string &cluster_name) {
+ bufferlist in_bl;
+ encode(uuid, in_bl);
+ encode(cluster_name, in_bl);
+
+ bufferlist out_bl;
+ int r = ioctx->exec(RBD_MIRRORING, "rbd", "mirror_peer_set_cluster",
+ in_bl, out_bl);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void mirror_image_list_start(librados::ObjectReadOperation *op,
+ const std::string &start, uint64_t max_return)
+{
+ bufferlist in_bl;
+ encode(start, in_bl);
+ encode(max_return, in_bl);
+ op->exec("rbd", "mirror_image_list", in_bl);
+}
+
+int mirror_image_list_finish(bufferlist::const_iterator *it,
+ std::map<string, string> *mirror_image_ids)
+{
+ try {
+ decode(*mirror_image_ids, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int mirror_image_list(librados::IoCtx *ioctx,
+ const std::string &start, uint64_t max_return,
+ std::map<std::string, std::string> *mirror_image_ids) {
+ librados::ObjectReadOperation op;
+ mirror_image_list_start(&op, start, max_return);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto bl_it = out_bl.cbegin();
+ return mirror_image_list_finish(&bl_it, mirror_image_ids);
+}
+
+void mirror_image_get_image_id_start(librados::ObjectReadOperation *op,
+ const std::string &global_image_id) {
+ bufferlist in_bl;
+ encode(global_image_id, in_bl);
+ op->exec( "rbd", "mirror_image_get_image_id", in_bl);
+}
+
+int mirror_image_get_image_id_finish(bufferlist::const_iterator *it,
+ std::string *image_id) {
+ try {
+ decode(*image_id, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int mirror_image_get_image_id(librados::IoCtx *ioctx,
+ const std::string &global_image_id,
+ std::string *image_id) {
+ librados::ObjectReadOperation op;
+ mirror_image_get_image_id_start(&op, global_image_id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return mirror_image_get_image_id_finish(&it, image_id);
+}
+
+int mirror_image_get(librados::IoCtx *ioctx, const std::string &image_id,
+ cls::rbd::MirrorImage *mirror_image) {
+ librados::ObjectReadOperation op;
+ mirror_image_get_start(&op, image_id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ r = mirror_image_get_finish(&iter, mirror_image);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void mirror_image_get_start(librados::ObjectReadOperation *op,
+ const std::string &image_id) {
+ bufferlist in_bl;
+ encode(image_id, in_bl);
+
+ op->exec("rbd", "mirror_image_get", in_bl);
+}
+
+int mirror_image_get_finish(bufferlist::const_iterator *iter,
+ cls::rbd::MirrorImage *mirror_image) {
+ try {
+ decode(*mirror_image, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+void mirror_image_set(librados::ObjectWriteOperation *op,
+ const std::string &image_id,
+ const cls::rbd::MirrorImage &mirror_image) {
+ bufferlist bl;
+ encode(image_id, bl);
+ encode(mirror_image, bl);
+
+ op->exec("rbd", "mirror_image_set", bl);
+}
+
+int mirror_image_set(librados::IoCtx *ioctx, const std::string &image_id,
+ const cls::rbd::MirrorImage &mirror_image) {
+ librados::ObjectWriteOperation op;
+ mirror_image_set(&op, image_id, mirror_image);
+
+ int r = ioctx->operate(RBD_MIRRORING, &op);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void mirror_image_remove(librados::ObjectWriteOperation *op,
+ const std::string &image_id) {
+ bufferlist bl;
+ encode(image_id, bl);
+
+ op->exec("rbd", "mirror_image_remove", bl);
+}
+
+int mirror_image_remove(librados::IoCtx *ioctx, const std::string &image_id) {
+ librados::ObjectWriteOperation op;
+ mirror_image_remove(&op, image_id);
+
+ int r = ioctx->operate(RBD_MIRRORING, &op);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+int mirror_image_status_set(librados::IoCtx *ioctx,
+ const std::string &global_image_id,
+ const cls::rbd::MirrorImageStatus &status) {
+ librados::ObjectWriteOperation op;
+ mirror_image_status_set(&op, global_image_id, status);
+ return ioctx->operate(RBD_MIRRORING, &op);
+}
+
+void mirror_image_status_set(librados::ObjectWriteOperation *op,
+ const std::string &global_image_id,
+ const cls::rbd::MirrorImageStatus &status) {
+ bufferlist bl;
+ encode(global_image_id, bl);
+ encode(status, bl);
+ op->exec("rbd", "mirror_image_status_set", bl);
+}
+
+int mirror_image_status_remove(librados::IoCtx *ioctx,
+ const std::string &global_image_id) {
+ librados::ObjectWriteOperation op;
+ mirror_image_status_remove(&op, global_image_id);
+ return ioctx->operate(RBD_MIRRORING, &op);
+}
+
+void mirror_image_status_remove(librados::ObjectWriteOperation *op,
+ const std::string &global_image_id) {
+ bufferlist bl;
+ encode(global_image_id, bl);
+ op->exec("rbd", "mirror_image_status_remove", bl);
+}
+
+int mirror_image_status_get(librados::IoCtx *ioctx,
+ const std::string &global_image_id,
+ cls::rbd::MirrorImageStatus *status) {
+ librados::ObjectReadOperation op;
+ mirror_image_status_get_start(&op, global_image_id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ r = mirror_image_status_get_finish(&iter, status);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void mirror_image_status_get_start(librados::ObjectReadOperation *op,
+ const std::string &global_image_id) {
+ bufferlist bl;
+ encode(global_image_id, bl);
+ op->exec("rbd", "mirror_image_status_get", bl);
+}
+
+int mirror_image_status_get_finish(bufferlist::const_iterator *iter,
+ cls::rbd::MirrorImageStatus *status) {
+ try {
+ decode(*status, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int mirror_image_status_list(librados::IoCtx *ioctx,
+ const std::string &start, uint64_t max_return,
+ std::map<std::string, cls::rbd::MirrorImage> *images,
+ std::map<std::string, cls::rbd::MirrorImageStatus> *statuses) {
+ librados::ObjectReadOperation op;
+ mirror_image_status_list_start(&op, start, max_return);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ r = mirror_image_status_list_finish(&iter, images, statuses);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void mirror_image_status_list_start(librados::ObjectReadOperation *op,
+ const std::string &start,
+ uint64_t max_return) {
+ bufferlist bl;
+ encode(start, bl);
+ encode(max_return, bl);
+ op->exec("rbd", "mirror_image_status_list", bl);
+}
+
+int mirror_image_status_list_finish(bufferlist::const_iterator *iter,
+ std::map<std::string, cls::rbd::MirrorImage> *images,
+ std::map<std::string, cls::rbd::MirrorImageStatus> *statuses) {
+ images->clear();
+ statuses->clear();
+ try {
+ decode(*images, *iter);
+ decode(*statuses, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int mirror_image_status_get_summary(librados::IoCtx *ioctx,
+ std::map<cls::rbd::MirrorImageStatusState, int> *states) {
+ librados::ObjectReadOperation op;
+ mirror_image_status_get_summary_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ r = mirror_image_status_get_summary_finish(&iter, states);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void mirror_image_status_get_summary_start(librados::ObjectReadOperation *op) {
+ bufferlist bl;
+ op->exec("rbd", "mirror_image_status_get_summary", bl);
+}
+
+int mirror_image_status_get_summary_finish(bufferlist::const_iterator *iter,
+ std::map<cls::rbd::MirrorImageStatusState, int> *states) {
+ try {
+ decode(*states, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int mirror_image_status_remove_down(librados::IoCtx *ioctx) {
+ librados::ObjectWriteOperation op;
+ mirror_image_status_remove_down(&op);
+ return ioctx->operate(RBD_MIRRORING, &op);
+}
+
+void mirror_image_status_remove_down(librados::ObjectWriteOperation *op) {
+ bufferlist bl;
+ op->exec("rbd", "mirror_image_status_remove_down", bl);
+}
+
+int mirror_image_instance_get(librados::IoCtx *ioctx,
+ const std::string &global_image_id,
+ entity_inst_t *instance) {
+ librados::ObjectReadOperation op;
+ mirror_image_instance_get_start(&op, global_image_id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ r = mirror_image_instance_get_finish(&iter, instance);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void mirror_image_instance_get_start(librados::ObjectReadOperation *op,
+ const std::string &global_image_id) {
+ bufferlist bl;
+ encode(global_image_id, bl);
+ op->exec("rbd", "mirror_image_instance_get", bl);
+}
+
+int mirror_image_instance_get_finish(bufferlist::const_iterator *iter,
+ entity_inst_t *instance) {
+ try {
+ decode(*instance, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int mirror_image_instance_list(
+ librados::IoCtx *ioctx, const std::string &start, uint64_t max_return,
+ std::map<std::string, entity_inst_t> *instances) {
+ librados::ObjectReadOperation op;
+ mirror_image_instance_list_start(&op, start, max_return);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ r = mirror_image_instance_list_finish(&iter, instances);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void mirror_image_instance_list_start(librados::ObjectReadOperation *op,
+ const std::string &start,
+ uint64_t max_return) {
+ bufferlist bl;
+ encode(start, bl);
+ encode(max_return, bl);
+ op->exec("rbd", "mirror_image_instance_list", bl);
+}
+
+int mirror_image_instance_list_finish(
+ bufferlist::const_iterator *iter,
+ std::map<std::string, entity_inst_t> *instances) {
+ instances->clear();
+ try {
+ decode(*instances, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+void mirror_instances_list_start(librados::ObjectReadOperation *op) {
+ bufferlist bl;
+ op->exec("rbd", "mirror_instances_list", bl);
+}
+
+int mirror_instances_list_finish(bufferlist::const_iterator *iter,
+ std::vector<std::string> *instance_ids) {
+ instance_ids->clear();
+ try {
+ decode(*instance_ids, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int mirror_instances_list(librados::IoCtx *ioctx,
+ std::vector<std::string> *instance_ids) {
+ librados::ObjectReadOperation op;
+ mirror_instances_list_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_MIRROR_LEADER, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ r = mirror_instances_list_finish(&iter, instance_ids);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+void mirror_instances_add(librados::ObjectWriteOperation *op,
+ const std::string &instance_id) {
+ bufferlist bl;
+ encode(instance_id, bl);
+ op->exec("rbd", "mirror_instances_add", bl);
+}
+
+int mirror_instances_add(librados::IoCtx *ioctx,
+ const std::string &instance_id) {
+ librados::ObjectWriteOperation op;
+ mirror_instances_add(&op, instance_id);
+ return ioctx->operate(RBD_MIRROR_LEADER, &op);
+}
+
+void mirror_instances_remove(librados::ObjectWriteOperation *op,
+ const std::string &instance_id) {
+ bufferlist bl;
+ encode(instance_id, bl);
+ op->exec("rbd", "mirror_instances_remove", bl);
+}
+
+int mirror_instances_remove(librados::IoCtx *ioctx,
+ const std::string &instance_id) {
+ librados::ObjectWriteOperation op;
+ mirror_instances_remove(&op, instance_id);
+ return ioctx->operate(RBD_MIRROR_LEADER, &op);
+}
+
+void mirror_image_map_list_start(librados::ObjectReadOperation *op,
+ const std::string &start_after,
+ uint64_t max_read) {
+ bufferlist bl;
+ encode(start_after, bl);
+ encode(max_read, bl);
+
+ op->exec("rbd", "mirror_image_map_list", bl);
+}
+
+int mirror_image_map_list_finish(bufferlist::const_iterator *iter,
+ std::map<std::string, cls::rbd::MirrorImageMap> *image_mapping) {
+ try {
+ decode(*image_mapping, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int mirror_image_map_list(
+ librados::IoCtx *ioctx, const std::string &start_after,
+ uint64_t max_read,
+ std::map<std::string, cls::rbd::MirrorImageMap> *image_mapping) {
+ librados::ObjectReadOperation op;
+ mirror_image_map_list_start(&op, start_after, max_read);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ return mirror_image_map_list_finish(&iter, image_mapping);
+}
+
+void mirror_image_map_update(librados::ObjectWriteOperation *op,
+ const std::string &global_image_id,
+ const cls::rbd::MirrorImageMap &image_map) {
+ bufferlist bl;
+ encode(global_image_id, bl);
+ encode(image_map, bl);
+
+ op->exec("rbd", "mirror_image_map_update", bl);
+}
+
+void mirror_image_map_remove(librados::ObjectWriteOperation *op,
+ const std::string &global_image_id) {
+ bufferlist bl;
+ encode(global_image_id, bl);
+
+ op->exec("rbd", "mirror_image_map_remove", bl);
+}
+
+// Groups functions
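+// Most of these call the group_dir_*, group_image_* and group_snap_* class
+// methods synchronously through IoCtx::exec(); image_group_get() uses the
+// usual start/finish pattern instead.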
+int group_dir_list(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &start, uint64_t max_return,
+ map<string, string> *cgs)
+{
+ bufferlist in, out;
+ encode(start, in);
+ encode(max_return, in);
+ int r = ioctx->exec(oid, "rbd", "group_dir_list", in, out);
+ if (r < 0)
+ return r;
+
+ auto iter = out.cbegin();
+ try {
+ decode(*cgs, iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+int group_dir_add(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &name, const std::string &id)
+{
+ bufferlist in, out;
+ encode(name, in);
+ encode(id, in);
+ return ioctx->exec(oid, "rbd", "group_dir_add", in, out);
+}
+
+int group_dir_rename(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &src, const std::string &dest,
+ const std::string &id)
+{
+ bufferlist in, out;
+ encode(src, in);
+ encode(dest, in);
+ encode(id, in);
+ return ioctx->exec(oid, "rbd", "group_dir_rename", in, out);
+}
+
+int group_dir_remove(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &name, const std::string &id)
+{
+ bufferlist in, out;
+ encode(name, in);
+ encode(id, in);
+ return ioctx->exec(oid, "rbd", "group_dir_remove", in, out);
+}
+
+int group_image_remove(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::GroupImageSpec &spec)
+{
+ bufferlist bl, bl2;
+ encode(spec, bl);
+
+ return ioctx->exec(oid, "rbd", "group_image_remove", bl, bl2);
+}
+
+int group_image_list(librados::IoCtx *ioctx,
+ const std::string &oid,
+ const cls::rbd::GroupImageSpec &start,
+ uint64_t max_return,
+ std::vector<cls::rbd::GroupImageStatus> *images)
+{
+ bufferlist bl, bl2;
+ encode(start, bl);
+ encode(max_return, bl);
+
+ int r = ioctx->exec(oid, "rbd", "group_image_list", bl, bl2);
+ if (r < 0)
+ return r;
+
+ auto iter = bl2.cbegin();
+ try {
+ decode(*images, iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+int group_image_set(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::GroupImageStatus &st)
+{
+ bufferlist bl, bl2;
+ encode(st, bl);
+
+ return ioctx->exec(oid, "rbd", "group_image_set", bl, bl2);
+}
+
+int image_group_add(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::GroupSpec &group_spec)
+{
+ bufferlist bl, bl2;
+ encode(group_spec, bl);
+
+ return ioctx->exec(oid, "rbd", "image_group_add", bl, bl2);
+}
+
+int image_group_remove(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::GroupSpec &group_spec)
+{
+ bufferlist bl, bl2;
+ encode(group_spec, bl);
+
+ return ioctx->exec(oid, "rbd", "image_group_remove", bl, bl2);
+}
+
+void image_group_get_start(librados::ObjectReadOperation *op)
+{
+ bufferlist in_bl;
+ op->exec("rbd", "image_group_get", in_bl);
+}
+
+int image_group_get_finish(bufferlist::const_iterator *iter,
+ cls::rbd::GroupSpec *group_spec)
+{
+ try {
+ decode(*group_spec, *iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+int image_group_get(librados::IoCtx *ioctx, const std::string &oid,
+ cls::rbd::GroupSpec *group_spec)
+{
+ librados::ObjectReadOperation op;
+ image_group_get_start(&op);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(oid, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ return image_group_get_finish(&iter, group_spec);
+}
+
+int group_snap_set(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::GroupSnapshot &snapshot)
+{
+ using ceph::encode;
+ bufferlist inbl, outbl;
+ encode(snapshot, inbl);
+ int r = ioctx->exec(oid, "rbd", "group_snap_set", inbl, outbl);
+ return r;
+}
+
+int group_snap_remove(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &snap_id)
+{
+ using ceph::encode;
+ bufferlist inbl, outbl;
+ encode(snap_id, inbl);
+ return ioctx->exec(oid, "rbd", "group_snap_remove", inbl, outbl);
+}
+
+int group_snap_get_by_id(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &snap_id,
+ cls::rbd::GroupSnapshot *snapshot)
+{
+ using ceph::encode;
+ using ceph::decode;
+ bufferlist inbl, outbl;
+
+ encode(snap_id, inbl);
+ int r = ioctx->exec(oid, "rbd", "group_snap_get_by_id", inbl, outbl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = outbl.cbegin();
+ try {
+ decode(*snapshot, iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+int group_snap_list(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::GroupSnapshot &start,
+ uint64_t max_return,
+ std::vector<cls::rbd::GroupSnapshot> *snapshots)
+{
+ using ceph::encode;
+ using ceph::decode;
+ bufferlist inbl, outbl;
+ encode(start, inbl);
+ encode(max_return, inbl);
+
+ int r = ioctx->exec(oid, "rbd", "group_snap_list", inbl, outbl);
+ if (r < 0) {
+ return r;
+ }
+ auto iter = outbl.cbegin();
+ try {
+ decode(*snapshots, iter);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+// rbd_trash functions
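+// Trash helpers operate on the pool-level RBD_TRASH object: trash_add() and
+// trash_remove() update it, trash_list()/trash_get() read it back, and
+// trash_state_set() transitions an entry between states.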
+void trash_add(librados::ObjectWriteOperation *op,
+ const std::string &id,
+ const cls::rbd::TrashImageSpec &trash_spec)
+{
+ bufferlist bl;
+ encode(id, bl);
+ encode(trash_spec, bl);
+ op->exec("rbd", "trash_add", bl);
+}
+
+int trash_add(librados::IoCtx *ioctx, const std::string &id,
+ const cls::rbd::TrashImageSpec &trash_spec)
+{
+ librados::ObjectWriteOperation op;
+ trash_add(&op, id, trash_spec);
+
+ return ioctx->operate(RBD_TRASH, &op);
+}
+
+void trash_remove(librados::ObjectWriteOperation *op,
+ const std::string &id)
+{
+ bufferlist bl;
+ encode(id, bl);
+ op->exec("rbd", "trash_remove", bl);
+}
+
+int trash_remove(librados::IoCtx *ioctx, const std::string &id)
+{
+ librados::ObjectWriteOperation op;
+ trash_remove(&op, id);
+
+ return ioctx->operate(RBD_TRASH, &op);
+}
+
+void trash_list_start(librados::ObjectReadOperation *op,
+ const std::string &start, uint64_t max_return)
+{
+ bufferlist bl;
+ encode(start, bl);
+ encode(max_return, bl);
+ op->exec("rbd", "trash_list", bl);
+}
+
+int trash_list_finish(bufferlist::const_iterator *it,
+ map<string, cls::rbd::TrashImageSpec> *entries)
+{
+ ceph_assert(entries);
+
+ try {
+ decode(*entries, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
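+// Usage sketch (illustrative; assumes an open IoCtx for the pool):
+//   std::map<std::string, cls::rbd::TrashImageSpec> entries;
+//   int r = trash_list(&ioctx, "", 128, &entries);  // first page, up to 128 entries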
+int trash_list(librados::IoCtx *ioctx,
+ const std::string &start, uint64_t max_return,
+ map<string, cls::rbd::TrashImageSpec> *entries)
+{
+ librados::ObjectReadOperation op;
+ trash_list_start(&op, start, max_return);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_TRASH, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ return trash_list_finish(&iter, entries);
+}
+
+void trash_get_start(librados::ObjectReadOperation *op,
+ const std::string &id)
+{
+ bufferlist bl;
+ encode(id, bl);
+ op->exec("rbd", "trash_get", bl);
+}
+
+int trash_get_finish(bufferlist::const_iterator *it,
+ cls::rbd::TrashImageSpec *trash_spec) {
+ ceph_assert(trash_spec);
+ try {
+ decode(*trash_spec, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+int trash_get(librados::IoCtx *ioctx, const std::string &id,
+ cls::rbd::TrashImageSpec *trash_spec)
+{
+ librados::ObjectReadOperation op;
+ trash_get_start(&op, id);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_TRASH, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto it = out_bl.cbegin();
+ return trash_get_finish(&it, trash_spec);
+}
+
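+// Requests a state transition for the trash entry 'id' to 'trash_state';
+// 'expect_state' is the state the entry is expected to currently be in,
+// which lets the server-side class method reject conflicting transitions.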
+void trash_state_set(librados::ObjectWriteOperation *op,
+ const std::string &id,
+ const cls::rbd::TrashImageState &trash_state,
+ const cls::rbd::TrashImageState &expect_state)
+{
+ bufferlist bl;
+ encode(id, bl);
+ encode(trash_state, bl);
+ encode(expect_state, bl);
+ op->exec("rbd", "trash_state_set", bl);
+}
+
+int trash_state_set(librados::IoCtx *ioctx, const std::string &id,
+ const cls::rbd::TrashImageState &trash_state,
+ const cls::rbd::TrashImageState &expect_state)
+{
+ librados::ObjectWriteOperation op;
+ trash_state_set(&op, id, trash_state, expect_state);
+
+ return ioctx->operate(RBD_TRASH, &op);
+}
+
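+// rbd_namespace functions: the synchronous wrappers all operate on the
+// pool-level RBD_NAMESPACE directory object.
+//
+// Usage sketch (illustrative):
+//   std::list<std::string> names;
+//   int r = namespace_list(&ioctx, "", 64, &names);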
+void namespace_add(librados::ObjectWriteOperation *op,
+ const std::string &name)
+{
+ bufferlist bl;
+ encode(name, bl);
+ op->exec("rbd", "namespace_add", bl);
+}
+
+int namespace_add(librados::IoCtx *ioctx, const std::string &name)
+{
+ librados::ObjectWriteOperation op;
+ namespace_add(&op, name);
+
+ return ioctx->operate(RBD_NAMESPACE, &op);
+}
+
+void namespace_remove(librados::ObjectWriteOperation *op,
+ const std::string &name)
+{
+ bufferlist bl;
+ encode(name, bl);
+ op->exec("rbd", "namespace_remove", bl);
+}
+
+int namespace_remove(librados::IoCtx *ioctx, const std::string &name)
+{
+ librados::ObjectWriteOperation op;
+ namespace_remove(&op, name);
+
+ return ioctx->operate(RBD_NAMESPACE, &op);
+}
+
+void namespace_list_start(librados::ObjectReadOperation *op,
+ const std::string &start, uint64_t max_return)
+{
+ bufferlist bl;
+ encode(start, bl);
+ encode(max_return, bl);
+ op->exec("rbd", "namespace_list", bl);
+}
+
+int namespace_list_finish(bufferlist::const_iterator *it,
+ std::list<std::string> *entries)
+{
+ ceph_assert(entries);
+
+ try {
+ decode(*entries, *it);
+ } catch (const buffer::error &err) {
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+int namespace_list(librados::IoCtx *ioctx,
+ const std::string &start, uint64_t max_return,
+ std::list<std::string> *entries)
+{
+ librados::ObjectReadOperation op;
+ namespace_list_start(&op, start, max_return);
+
+ bufferlist out_bl;
+ int r = ioctx->operate(RBD_NAMESPACE, &op, &out_bl);
+ if (r < 0) {
+ return r;
+ }
+
+ auto iter = out_bl.cbegin();
+ return namespace_list_finish(&iter, entries);
+}
+
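+// sparsify requests server-side sparsification of a data object:
+// 'sparse_size' is the granularity used when scanning for zeroed regions,
+// and 'remove_empty' asks for the object to be removed outright if it is
+// found to contain only zeros.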
+void sparsify(librados::ObjectWriteOperation *op, size_t sparse_size,
+ bool remove_empty)
+{
+ bufferlist bl;
+ encode(sparse_size, bl);
+ encode(remove_empty, bl);
+ op->exec("rbd", "sparsify", bl);
+}
+
+int sparsify(librados::IoCtx *ioctx, const std::string &oid, size_t sparse_size,
+ bool remove_empty)
+{
+ librados::ObjectWriteOperation op;
+ sparsify(&op, sparse_size, remove_empty);
+
+ return ioctx->operate(oid, &op);
+}
+
+} // namespace cls_client
+} // namespace librbd
diff --git a/src/cls/rbd/cls_rbd_client.h b/src/cls/rbd/cls_rbd_client.h
new file mode 100644
index 00000000..27a64cc2
--- /dev/null
+++ b/src/cls/rbd/cls_rbd_client.h
@@ -0,0 +1,610 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_CLS_RBD_CLIENT_H
+#define CEPH_LIBRBD_CLS_RBD_CLIENT_H
+
+#include "cls/lock/cls_lock_types.h"
+#include "cls/rbd/cls_rbd_types.h"
+#include "common/snap_types.h"
+#include "include/types.h"
+#include "include/rados/librados_fwd.hpp"
+
+class Context;
+namespace ceph { template <uint8_t> class BitVector; }
+
+namespace librbd {
+namespace cls_client {
+
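+// Convention used throughout this header: most read operations come as an
+// asynchronous pair (<name>_start() appends the class call to an
+// ObjectReadOperation and <name>_finish() decodes its result) plus a
+// synchronous wrapper that combines the two. Write operations come as an
+// ObjectWriteOperation variant plus a synchronous wrapper.
+//
+// Illustrative use of the asynchronous pair (assumes an open IoCtx
+// 'io_ctx' and an image header object id 'oid'):
+//   librados::ObjectReadOperation op;
+//   get_size_start(&op, CEPH_NOSNAP);
+//   bufferlist out_bl;
+//   int r = io_ctx.operate(oid, &op, &out_bl);
+//   auto it = out_bl.cbegin();
+//   uint64_t size; uint8_t order;
+//   if (r >= 0) r = get_size_finish(&it, &size, &order);
+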
+// low-level interface (mainly for testing)
+void create_image(librados::ObjectWriteOperation *op, uint64_t size,
+ uint8_t order, uint64_t features,
+ const std::string &object_prefix, int64_t data_pool_id);
+int create_image(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t size, uint8_t order, uint64_t features,
+ const std::string &object_prefix, int64_t data_pool_id);
+
+void get_features_start(librados::ObjectReadOperation *op, bool read_only);
+int get_features_finish(bufferlist::const_iterator *it, uint64_t *features,
+ uint64_t *incompatible_features);
+int get_features(librados::IoCtx *ioctx, const std::string &oid,
+ bool read_only, uint64_t *features,
+ uint64_t *incompatible_features);
+void set_features(librados::ObjectWriteOperation *op, uint64_t features,
+ uint64_t mask);
+int set_features(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t features, uint64_t mask);
+
+void get_object_prefix_start(librados::ObjectReadOperation *op);
+int get_object_prefix_finish(bufferlist::const_iterator *it,
+ std::string *object_prefix);
+int get_object_prefix(librados::IoCtx *ioctx, const std::string &oid,
+ std::string *object_prefix);
+
+void get_data_pool_start(librados::ObjectReadOperation *op);
+int get_data_pool_finish(bufferlist::const_iterator *it, int64_t *data_pool_id);
+int get_data_pool(librados::IoCtx *ioctx, const std::string &oid,
+ int64_t *data_pool_id);
+
+void get_size_start(librados::ObjectReadOperation *op, snapid_t snap_id);
+int get_size_finish(bufferlist::const_iterator *it, uint64_t *size,
+ uint8_t *order);
+int get_size(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, uint64_t *size, uint8_t *order);
+int set_size(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t size);
+void set_size(librados::ObjectWriteOperation *op, uint64_t size);
+
+void get_flags_start(librados::ObjectReadOperation *op, snapid_t snap_id);
+int get_flags_finish(bufferlist::const_iterator *it, uint64_t *flags);
+int get_flags(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, uint64_t *flags);
+
+void set_flags(librados::ObjectWriteOperation *op, snapid_t snap_id,
+ uint64_t flags, uint64_t mask);
+
+void op_features_get_start(librados::ObjectReadOperation *op);
+int op_features_get_finish(bufferlist::const_iterator *it,
+ uint64_t *op_features);
+int op_features_get(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t *op_features);
+void op_features_set(librados::ObjectWriteOperation *op,
+ uint64_t op_features, uint64_t mask);
+int op_features_set(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t op_features, uint64_t mask);
+
+// NOTE: deprecate the v1 parent APIs once Mimic is EOL
+void get_parent_start(librados::ObjectReadOperation *op, snapid_t snap_id);
+int get_parent_finish(bufferlist::const_iterator *it,
+ cls::rbd::ParentImageSpec *pspec,
+ uint64_t *parent_overlap);
+int get_parent(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, cls::rbd::ParentImageSpec *pspec,
+ uint64_t *parent_overlap);
+int set_parent(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::ParentImageSpec &pspec, uint64_t parent_overlap);
+void set_parent(librados::ObjectWriteOperation *op,
+ const cls::rbd::ParentImageSpec &pspec,
+ uint64_t parent_overlap);
+int remove_parent(librados::IoCtx *ioctx, const std::string &oid);
+void remove_parent(librados::ObjectWriteOperation *op);
+
+// v2 parent APIs
+void parent_get_start(librados::ObjectReadOperation* op);
+int parent_get_finish(bufferlist::const_iterator* it,
+ cls::rbd::ParentImageSpec* parent_image_spec);
+int parent_get(librados::IoCtx* ioctx, const std::string &oid,
+ cls::rbd::ParentImageSpec* parent_image_spec);
+
+void parent_overlap_get_start(librados::ObjectReadOperation* op,
+ snapid_t snap_id);
+int parent_overlap_get_finish(bufferlist::const_iterator* it,
+ std::optional<uint64_t>* parent_overlap);
+int parent_overlap_get(librados::IoCtx* ioctx, const std::string &oid,
+ snapid_t snap_id,
+ std::optional<uint64_t>* parent_overlap);
+
+void parent_attach(librados::ObjectWriteOperation* op,
+ const cls::rbd::ParentImageSpec& parent_image_spec,
+ uint64_t parent_overlap, bool reattach);
+int parent_attach(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::ParentImageSpec& parent_image_spec,
+ uint64_t parent_overlap, bool reattach);
+
+void parent_detach(librados::ObjectWriteOperation* op);
+int parent_detach(librados::IoCtx *ioctx, const std::string &oid);
+
+int add_child(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::ParentImageSpec &pspec,
+ const std::string &c_imageid);
+void add_child(librados::ObjectWriteOperation *op,
+ const cls::rbd::ParentImageSpec& pspec,
+ const std::string &c_imageid);
+void remove_child(librados::ObjectWriteOperation *op,
+ const cls::rbd::ParentImageSpec &pspec,
+ const std::string &c_imageid);
+int remove_child(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::ParentImageSpec &pspec,
+ const std::string &c_imageid);
+void get_children_start(librados::ObjectReadOperation *op,
+ const cls::rbd::ParentImageSpec &pspec);
+int get_children_finish(bufferlist::const_iterator *it,
+ std::set<string> *children);
+int get_children(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::ParentImageSpec& pspec, set<string>& children);
+
+void snapshot_get_start(librados::ObjectReadOperation* op,
+ snapid_t snap_id);
+int snapshot_get_finish(bufferlist::const_iterator* it,
+ cls::rbd::SnapshotInfo* snap_info);
+int snapshot_get(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, cls::rbd::SnapshotInfo* snap_info);
+
+void snapshot_add(librados::ObjectWriteOperation *op, snapid_t snap_id,
+ const std::string &snap_name,
+ const cls::rbd::SnapshotNamespace &snap_namespace);
+void snapshot_remove(librados::ObjectWriteOperation *op, snapid_t snap_id);
+void snapshot_rename(librados::ObjectWriteOperation *op,
+ snapid_t src_snap_id,
+ const std::string &dst_name);
+void snapshot_trash_add(librados::ObjectWriteOperation *op,
+ snapid_t snap_id);
+
+void get_snapcontext_start(librados::ObjectReadOperation *op);
+int get_snapcontext_finish(bufferlist::const_iterator *it,
+ ::SnapContext *snapc);
+int get_snapcontext(librados::IoCtx *ioctx, const std::string &oid,
+ ::SnapContext *snapc);
+
+/// NOTE: remove after Luminous is retired
+void get_snapshot_name_start(librados::ObjectReadOperation *op,
+ snapid_t snap_id);
+int get_snapshot_name_finish(bufferlist::const_iterator *it,
+ std::string *name);
+int get_snapshot_name(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, std::string *name);
+
+/// NOTE: remove after Luminous is retired
+void get_snapshot_timestamp_start(librados::ObjectReadOperation *op,
+ snapid_t snap_id);
+int get_snapshot_timestamp_finish(bufferlist::const_iterator *it,
+ utime_t *timestamp);
+int get_snapshot_timestamp(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, utime_t *timestamp);
+
+void get_all_features_start(librados::ObjectReadOperation *op);
+int get_all_features_finish(bufferlist::const_iterator *it,
+ uint64_t *all_features);
+int get_all_features(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t *all_features);
+
+/// NOTE: remove protection after clone v1 is retired
+void get_protection_status_start(librados::ObjectReadOperation *op,
+ snapid_t snap_id);
+int get_protection_status_finish(bufferlist::const_iterator *it,
+ uint8_t *protection_status);
+int get_protection_status(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, uint8_t *protection_status);
+
+int set_protection_status(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id, uint8_t protection_status);
+void set_protection_status(librados::ObjectWriteOperation *op,
+ snapid_t snap_id, uint8_t protection_status);
+
+int snapshot_get_limit(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t *limit);
+void snapshot_set_limit(librados::ObjectWriteOperation *op,
+ uint64_t limit);
+
+void get_stripe_unit_count_start(librados::ObjectReadOperation *op);
+int get_stripe_unit_count_finish(bufferlist::const_iterator *it,
+ uint64_t *stripe_unit,
+ uint64_t *stripe_count);
+int get_stripe_unit_count(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t *stripe_unit, uint64_t *stripe_count);
+
+void set_stripe_unit_count(librados::ObjectWriteOperation *op,
+ uint64_t stripe_unit, uint64_t stripe_count);
+int set_stripe_unit_count(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t stripe_unit, uint64_t stripe_count);
+
+void get_create_timestamp_start(librados::ObjectReadOperation *op);
+int get_create_timestamp_finish(bufferlist::const_iterator *it,
+ utime_t *timestamp);
+int get_create_timestamp(librados::IoCtx *ioctx, const std::string &oid,
+ utime_t *timestamp);
+
+void get_access_timestamp_start(librados::ObjectReadOperation *op);
+int get_access_timestamp_finish(bufferlist::const_iterator *it,
+ utime_t *timestamp);
+int get_access_timestamp(librados::IoCtx *ioctx, const std::string &oid,
+ utime_t *timestamp);
+
+void set_access_timestamp(librados::ObjectWriteOperation *op);
+int set_access_timestamp(librados::IoCtx *ioctx, const std::string &oid);
+
+void get_modify_timestamp_start(librados::ObjectReadOperation *op);
+int get_modify_timestamp_finish(bufferlist::const_iterator *it,
+ utime_t *timestamp);
+int get_modify_timestamp(librados::IoCtx *ioctx, const std::string &oid,
+ utime_t *timestamp);
+
+void set_modify_timestamp(librados::ObjectWriteOperation *op);
+int set_modify_timestamp(librados::IoCtx *ioctx, const std::string &oid);
+
+int metadata_list(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &start, uint64_t max_return,
+ map<string, bufferlist> *pairs);
+void metadata_list_start(librados::ObjectReadOperation *op,
+ const std::string &start, uint64_t max_return);
+int metadata_list_finish(bufferlist::const_iterator *it,
+ std::map<std::string, bufferlist> *pairs);
+void metadata_set(librados::ObjectWriteOperation *op,
+ const map<std::string, bufferlist> &data);
+int metadata_set(librados::IoCtx *ioctx, const std::string &oid,
+ const map<std::string, bufferlist> &data);
+void metadata_remove(librados::ObjectWriteOperation *op,
+ const std::string &key);
+int metadata_remove(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &key);
+int metadata_get(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &key, string *v);
+
+void child_attach(librados::ObjectWriteOperation *op, snapid_t snap_id,
+ const cls::rbd::ChildImageSpec& child_image);
+int child_attach(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id,
+ const cls::rbd::ChildImageSpec& child_image);
+void child_detach(librados::ObjectWriteOperation *op, snapid_t snap_id,
+ const cls::rbd::ChildImageSpec& child_image);
+int child_detach(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id,
+ const cls::rbd::ChildImageSpec& child_image);
+void children_list_start(librados::ObjectReadOperation *op,
+ snapid_t snap_id);
+int children_list_finish(bufferlist::const_iterator *it,
+ cls::rbd::ChildImageSpecs *child_images);
+int children_list(librados::IoCtx *ioctx, const std::string &oid,
+ snapid_t snap_id,
+ cls::rbd::ChildImageSpecs *child_images);
+int migration_set(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::MigrationSpec &migration_spec);
+void migration_set(librados::ObjectWriteOperation *op,
+ const cls::rbd::MigrationSpec &migration_spec);
+int migration_set_state(librados::IoCtx *ioctx, const std::string &oid,
+ cls::rbd::MigrationState state,
+ const std::string &description);
+void migration_set_state(librados::ObjectWriteOperation *op,
+ cls::rbd::MigrationState state,
+ const std::string &description);
+void migration_get_start(librados::ObjectReadOperation *op);
+int migration_get_finish(bufferlist::const_iterator *it,
+ cls::rbd::MigrationSpec *migration_spec);
+int migration_get(librados::IoCtx *ioctx, const std::string &oid,
+ cls::rbd::MigrationSpec *migration_spec);
+int migration_remove(librados::IoCtx *ioctx, const std::string &oid);
+void migration_remove(librados::ObjectWriteOperation *op);
+
+// operations on rbd_id objects
+void get_id_start(librados::ObjectReadOperation *op);
+int get_id_finish(bufferlist::const_iterator *it, std::string *id);
+int get_id(librados::IoCtx *ioctx, const std::string &oid, std::string *id);
+
+void set_id(librados::ObjectWriteOperation *op, const std::string &id);
+int set_id(librados::IoCtx *ioctx, const std::string &oid, const std::string &id);
+
+// operations on rbd_directory objects
+int dir_get_id(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &name, std::string *id);
+void dir_get_id_start(librados::ObjectReadOperation *op,
+ const std::string &image_name);
+int dir_get_id_finish(bufferlist::const_iterator *iter, std::string *image_id);
+void dir_get_name_start(librados::ObjectReadOperation *op,
+ const std::string &id);
+int dir_get_name_finish(bufferlist::const_iterator *it, std::string *name);
+int dir_get_name(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &id, std::string *name);
+void dir_list_start(librados::ObjectReadOperation *op,
+ const std::string &start, uint64_t max_return);
+int dir_list_finish(bufferlist::const_iterator *it, map<string, string> *images);
+int dir_list(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &start, uint64_t max_return,
+ map<string, string> *images);
+void dir_add_image(librados::ObjectWriteOperation *op,
+ const std::string &name, const std::string &id);
+int dir_add_image(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &name, const std::string &id);
+int dir_remove_image(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &name, const std::string &id);
+void dir_remove_image(librados::ObjectWriteOperation *op,
+ const std::string &name, const std::string &id);
+// atomic remove and add
+void dir_rename_image(librados::ObjectWriteOperation *op,
+ const std::string &src, const std::string &dest,
+ const std::string &id);
+void dir_state_assert(librados::ObjectOperation *op,
+ cls::rbd::DirectoryState directory_state);
+int dir_state_assert(librados::IoCtx *ioctx, const std::string &oid,
+ cls::rbd::DirectoryState directory_state);
+void dir_state_set(librados::ObjectWriteOperation *op,
+ cls::rbd::DirectoryState directory_state);
+int dir_state_set(librados::IoCtx *ioctx, const std::string &oid,
+ cls::rbd::DirectoryState directory_state);
+
+// operations on the rbd_object_map.$image_id object
+void object_map_load_start(librados::ObjectReadOperation *op);
+int object_map_load_finish(bufferlist::const_iterator *it,
+ ceph::BitVector<2> *object_map);
+int object_map_load(librados::IoCtx *ioctx, const std::string &oid,
+ ceph::BitVector<2> *object_map);
+void object_map_save(librados::ObjectWriteOperation *rados_op,
+ const ceph::BitVector<2> &object_map);
+void object_map_resize(librados::ObjectWriteOperation *rados_op,
+ uint64_t object_count, uint8_t default_state);
+void object_map_update(librados::ObjectWriteOperation *rados_op,
+ uint64_t start_object_no, uint64_t end_object_no,
+ uint8_t new_object_state,
+ const boost::optional<uint8_t> &current_object_state);
+void object_map_snap_add(librados::ObjectWriteOperation *rados_op);
+void object_map_snap_remove(librados::ObjectWriteOperation *rados_op,
+ const ceph::BitVector<2> &object_map);
+
+// class operations on the old format, kept for
+// backwards compatibility
+void old_snapshot_add(librados::ObjectWriteOperation *rados_op,
+ snapid_t snap_id, const std::string &snap_name);
+void old_snapshot_remove(librados::ObjectWriteOperation *rados_op,
+ const std::string &snap_name);
+void old_snapshot_rename(librados::ObjectWriteOperation *rados_op,
+ snapid_t src_snap_id, const std::string &dst_name);
+
+void old_snapshot_list_start(librados::ObjectReadOperation *op);
+int old_snapshot_list_finish(bufferlist::const_iterator *it,
+ std::vector<string> *names,
+ std::vector<uint64_t> *sizes,
+ ::SnapContext *snapc);
+int old_snapshot_list(librados::IoCtx *ioctx, const std::string &oid,
+ std::vector<string> *names,
+ std::vector<uint64_t> *sizes,
+ ::SnapContext *snapc);
+
+// operations on the rbd_mirroring object
+void mirror_uuid_get_start(librados::ObjectReadOperation *op);
+int mirror_uuid_get_finish(bufferlist::const_iterator *it,
+ std::string *uuid);
+int mirror_uuid_get(librados::IoCtx *ioctx, std::string *uuid);
+int mirror_uuid_set(librados::IoCtx *ioctx, const std::string &uuid);
+void mirror_mode_get_start(librados::ObjectReadOperation *op);
+int mirror_mode_get_finish(bufferlist::const_iterator *it,
+ cls::rbd::MirrorMode *mirror_mode);
+int mirror_mode_get(librados::IoCtx *ioctx,
+ cls::rbd::MirrorMode *mirror_mode);
+int mirror_mode_set(librados::IoCtx *ioctx,
+ cls::rbd::MirrorMode mirror_mode);
+int mirror_peer_list(librados::IoCtx *ioctx,
+ std::vector<cls::rbd::MirrorPeer> *peers);
+int mirror_peer_add(librados::IoCtx *ioctx, const std::string &uuid,
+ const std::string &cluster_name,
+ const std::string &client_name);
+int mirror_peer_remove(librados::IoCtx *ioctx,
+ const std::string &uuid);
+int mirror_peer_set_client(librados::IoCtx *ioctx,
+ const std::string &uuid,
+ const std::string &client_name);
+int mirror_peer_set_cluster(librados::IoCtx *ioctx,
+ const std::string &uuid,
+ const std::string &cluster_name);
+void mirror_image_list_start(librados::ObjectReadOperation *op,
+ const std::string &start, uint64_t max_return);
+int mirror_image_list_finish(bufferlist::const_iterator *it,
+ std::map<string, string> *mirror_image_ids);
+int mirror_image_list(librados::IoCtx *ioctx,
+ const std::string &start, uint64_t max_return,
+ std::map<std::string, std::string> *mirror_image_ids);
+void mirror_image_get_image_id_start(librados::ObjectReadOperation *op,
+ const std::string &global_image_id);
+int mirror_image_get_image_id_finish(bufferlist::const_iterator *it,
+ std::string *image_id);
+int mirror_image_get_image_id(librados::IoCtx *ioctx,
+ const std::string &global_image_id,
+ std::string *image_id);
+int mirror_image_get(librados::IoCtx *ioctx, const std::string &image_id,
+ cls::rbd::MirrorImage *mirror_image);
+void mirror_image_get_start(librados::ObjectReadOperation *op,
+ const std::string &image_id);
+int mirror_image_get_finish(bufferlist::const_iterator *iter,
+ cls::rbd::MirrorImage *mirror_image);
+void mirror_image_set(librados::ObjectWriteOperation *op,
+ const std::string &image_id,
+ const cls::rbd::MirrorImage &mirror_image);
+int mirror_image_set(librados::IoCtx *ioctx, const std::string &image_id,
+ const cls::rbd::MirrorImage &mirror_image);
+void mirror_image_remove(librados::ObjectWriteOperation *op,
+ const std::string &image_id);
+int mirror_image_remove(librados::IoCtx *ioctx,
+ const std::string &image_id);
+int mirror_image_status_set(librados::IoCtx *ioctx,
+ const std::string &global_image_id,
+ const cls::rbd::MirrorImageStatus &status);
+void mirror_image_status_set(librados::ObjectWriteOperation *op,
+ const std::string &global_image_id,
+ const cls::rbd::MirrorImageStatus &status);
+int mirror_image_status_remove(librados::IoCtx *ioctx,
+ const std::string &global_image_id);
+void mirror_image_status_remove(librados::ObjectWriteOperation *op,
+ const std::string &global_image_id);
+int mirror_image_status_get(librados::IoCtx *ioctx,
+ const std::string &global_image_id,
+ cls::rbd::MirrorImageStatus *status);
+void mirror_image_status_get_start(librados::ObjectReadOperation *op,
+ const std::string &global_image_id);
+int mirror_image_status_get_finish(bufferlist::const_iterator *iter,
+ cls::rbd::MirrorImageStatus *status);
+int mirror_image_status_list(librados::IoCtx *ioctx,
+ const std::string &start, uint64_t max_return,
+ std::map<std::string, cls::rbd::MirrorImage> *images,
+ std::map<std::string, cls::rbd::MirrorImageStatus> *statuses);
+void mirror_image_status_list_start(librados::ObjectReadOperation *op,
+ const std::string &start,
+ uint64_t max_return);
+int mirror_image_status_list_finish(bufferlist::const_iterator *iter,
+ std::map<std::string, cls::rbd::MirrorImage> *images,
+ std::map<std::string, cls::rbd::MirrorImageStatus> *statuses);
+int mirror_image_status_get_summary(librados::IoCtx *ioctx,
+ std::map<cls::rbd::MirrorImageStatusState, int> *states);
+void mirror_image_status_get_summary_start(librados::ObjectReadOperation *op);
+int mirror_image_status_get_summary_finish(bufferlist::const_iterator *iter,
+ std::map<cls::rbd::MirrorImageStatusState, int> *states);
+int mirror_image_status_remove_down(librados::IoCtx *ioctx);
+void mirror_image_status_remove_down(librados::ObjectWriteOperation *op);
+
+int mirror_image_instance_get(librados::IoCtx *ioctx,
+ const std::string &global_image_id,
+ entity_inst_t *instance);
+void mirror_image_instance_get_start(librados::ObjectReadOperation *op,
+ const std::string &global_image_id);
+int mirror_image_instance_get_finish(bufferlist::const_iterator *iter,
+ entity_inst_t *instance);
+int mirror_image_instance_list(librados::IoCtx *ioctx,
+ const std::string &start, uint64_t max_return,
+ std::map<std::string, entity_inst_t> *instances);
+void mirror_image_instance_list_start(librados::ObjectReadOperation *op,
+ const std::string &start,
+ uint64_t max_return);
+int mirror_image_instance_list_finish(bufferlist::const_iterator *iter,
+ std::map<std::string, entity_inst_t> *instances);
+
+void mirror_instances_list_start(librados::ObjectReadOperation *op);
+int mirror_instances_list_finish(bufferlist::const_iterator *iter,
+ std::vector<std::string> *instance_ids);
+int mirror_instances_list(librados::IoCtx *ioctx,
+ std::vector<std::string> *instance_ids);
+void mirror_instances_add(librados::ObjectWriteOperation *op,
+ const std::string &instance_id);
+int mirror_instances_add(librados::IoCtx *ioctx,
+ const std::string &instance_id);
+void mirror_instances_remove(librados::ObjectWriteOperation *op,
+ const std::string &instance_id);
+int mirror_instances_remove(librados::IoCtx *ioctx,
+ const std::string &instance_id);
+
+// image mapping related routines
+void mirror_image_map_list_start(librados::ObjectReadOperation *op,
+ const std::string &start_after,
+ uint64_t max_read);
+int mirror_image_map_list_finish(bufferlist::const_iterator *iter,
+ std::map<std::string, cls::rbd::MirrorImageMap> *image_mapping);
+int mirror_image_map_list(librados::IoCtx *ioctx,
+ const std::string &start_after, uint64_t max_read,
+ std::map<std::string, cls::rbd::MirrorImageMap> *image_mapping);
+void mirror_image_map_update(librados::ObjectWriteOperation *op,
+ const std::string &global_image_id,
+ const cls::rbd::MirrorImageMap &image_map);
+void mirror_image_map_remove(librados::ObjectWriteOperation *op,
+ const std::string &global_image_id);
+
+// group directory, group membership and group snapshot functions
+int group_dir_list(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &start, uint64_t max_return,
+ map<string, string> *groups);
+int group_dir_add(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &name, const std::string &id);
+int group_dir_rename(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &src, const std::string &dest,
+ const std::string &id);
+int group_dir_remove(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &name, const std::string &id);
+int group_image_remove(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::GroupImageSpec &spec);
+int group_image_list(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::GroupImageSpec &start,
+ uint64_t max_return,
+ std::vector<cls::rbd::GroupImageStatus> *images);
+int group_image_set(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::GroupImageStatus &st);
+int image_group_add(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::GroupSpec &group_spec);
+int image_group_remove(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::GroupSpec &group_spec);
+void image_group_get_start(librados::ObjectReadOperation *op);
+int image_group_get_finish(bufferlist::const_iterator *iter,
+ cls::rbd::GroupSpec *group_spec);
+int image_group_get(librados::IoCtx *ioctx, const std::string &oid,
+ cls::rbd::GroupSpec *group_spec);
+int group_snap_set(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::GroupSnapshot &snapshot);
+int group_snap_remove(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &snap_id);
+int group_snap_get_by_id(librados::IoCtx *ioctx, const std::string &oid,
+ const std::string &snap_id,
+ cls::rbd::GroupSnapshot *snapshot);
+int group_snap_list(librados::IoCtx *ioctx, const std::string &oid,
+ const cls::rbd::GroupSnapshot &start,
+ uint64_t max_return,
+ std::vector<cls::rbd::GroupSnapshot> *snapshots);
+
+// operations on rbd_trash object
+void trash_add(librados::ObjectWriteOperation *op,
+ const std::string &id,
+ const cls::rbd::TrashImageSpec &trash_spec);
+int trash_add(librados::IoCtx *ioctx, const std::string &id,
+ const cls::rbd::TrashImageSpec &trash_spec);
+void trash_remove(librados::ObjectWriteOperation *op,
+ const std::string &id);
+int trash_remove(librados::IoCtx *ioctx, const std::string &id);
+void trash_list_start(librados::ObjectReadOperation *op,
+ const std::string &start, uint64_t max_return);
+int trash_list_finish(bufferlist::const_iterator *it,
+ map<string, cls::rbd::TrashImageSpec> *entries);
+int trash_list(librados::IoCtx *ioctx,
+ const std::string &start, uint64_t max_return,
+ map<string, cls::rbd::TrashImageSpec> *entries);
+void trash_get_start(librados::ObjectReadOperation *op,
+ const std::string &id);
+int trash_get_finish(bufferlist::const_iterator *it,
+ cls::rbd::TrashImageSpec *trash_spec);
+int trash_get(librados::IoCtx *ioctx, const std::string &id,
+ cls::rbd::TrashImageSpec *trash_spec);
+void trash_state_set(librados::ObjectWriteOperation *op,
+ const std::string &id,
+ const cls::rbd::TrashImageState &trash_state,
+ const cls::rbd::TrashImageState &expect_state);
+int trash_state_set(librados::IoCtx *ioctx, const std::string &id,
+ const cls::rbd::TrashImageState &trash_state,
+ const cls::rbd::TrashImageState &expect_state);
+
+// operations on rbd_namespace object
+void namespace_add(librados::ObjectWriteOperation *op,
+ const std::string &name);
+int namespace_add(librados::IoCtx *ioctx, const std::string &name);
+void namespace_remove(librados::ObjectWriteOperation *op,
+ const std::string &name);
+int namespace_remove(librados::IoCtx *ioctx, const std::string &name);
+void namespace_list_start(librados::ObjectReadOperation *op,
+ const std::string &start, uint64_t max_return);
+int namespace_list_finish(bufferlist::const_iterator *it,
+ std::list<std::string> *entries);
+int namespace_list(librados::IoCtx *ioctx,
+ const std::string &start, uint64_t max_return,
+ std::list<std::string> *entries);
+
+// operations on data objects
+int assert_snapc_seq(librados::IoCtx *ioctx, const std::string &oid,
+ uint64_t snapc_seq,
+ cls::rbd::AssertSnapcSeqState state);
+void assert_snapc_seq(librados::ObjectWriteOperation *op,
+ uint64_t snapc_seq,
+ cls::rbd::AssertSnapcSeqState state);
+
+int copyup(librados::IoCtx *ioctx, const std::string &oid,
+ bufferlist data);
+
+void sparsify(librados::ObjectWriteOperation *op, size_t sparse_size,
+ bool remove_empty);
+int sparsify(librados::IoCtx *ioctx, const std::string &oid, size_t sparse_size,
+ bool remove_empty);
+
+} // namespace cls_client
+} // namespace librbd
+
+#endif // CEPH_LIBRBD_CLS_RBD_CLIENT_H
diff --git a/src/cls/rbd/cls_rbd_types.cc b/src/cls/rbd/cls_rbd_types.cc
new file mode 100644
index 00000000..0d2c9c75
--- /dev/null
+++ b/src/cls/rbd/cls_rbd_types.cc
@@ -0,0 +1,909 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <boost/variant.hpp>
+#include "cls/rbd/cls_rbd_types.h"
+#include "common/Formatter.h"
+
+namespace cls {
+namespace rbd {
+
+void MirrorPeer::encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(uuid, bl);
+ encode(cluster_name, bl);
+ encode(client_name, bl);
+ encode(pool_id, bl);
+ ENCODE_FINISH(bl);
+}
+
+void MirrorPeer::decode(bufferlist::const_iterator &it) {
+ DECODE_START(1, it);
+ decode(uuid, it);
+ decode(cluster_name, it);
+ decode(client_name, it);
+ decode(pool_id, it);
+ DECODE_FINISH(it);
+}
+
+void MirrorPeer::dump(Formatter *f) const {
+ f->dump_string("uuid", uuid);
+ f->dump_string("cluster_name", cluster_name);
+ f->dump_string("client_name", client_name);
+ f->dump_int("pool_id", pool_id);
+}
+
+void MirrorPeer::generate_test_instances(std::list<MirrorPeer*> &o) {
+ o.push_back(new MirrorPeer());
+ o.push_back(new MirrorPeer("uuid-123", "cluster name", "client name", 123));
+}
+
+bool MirrorPeer::operator==(const MirrorPeer &rhs) const {
+ return (uuid == rhs.uuid &&
+ cluster_name == rhs.cluster_name &&
+ client_name == rhs.client_name &&
+ pool_id == rhs.pool_id);
+}
+
+std::ostream& operator<<(std::ostream& os, const MirrorMode& mirror_mode) {
+ switch (mirror_mode) {
+ case MIRROR_MODE_DISABLED:
+ os << "disabled";
+ break;
+ case MIRROR_MODE_IMAGE:
+ os << "image";
+ break;
+ case MIRROR_MODE_POOL:
+ os << "pool";
+ break;
+ default:
+ os << "unknown (" << static_cast<uint32_t>(mirror_mode) << ")";
+ break;
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const MirrorPeer& peer) {
+ os << "["
+ << "uuid=" << peer.uuid << ", "
+ << "cluster_name=" << peer.cluster_name << ", "
+ << "client_name=" << peer.client_name;
+ if (peer.pool_id != -1) {
+ os << ", pool_id=" << peer.pool_id;
+ }
+ os << "]";
+ return os;
+}
+
+void MirrorImage::encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(global_image_id, bl);
+ encode(static_cast<uint8_t>(state), bl);
+ ENCODE_FINISH(bl);
+}
+
+void MirrorImage::decode(bufferlist::const_iterator &it) {
+ uint8_t int_state;
+ DECODE_START(1, it);
+ decode(global_image_id, it);
+ decode(int_state, it);
+ state = static_cast<MirrorImageState>(int_state);
+ DECODE_FINISH(it);
+}
+
+void MirrorImage::dump(Formatter *f) const {
+ f->dump_string("global_image_id", global_image_id);
+ f->dump_int("state", state);
+}
+
+void MirrorImage::generate_test_instances(std::list<MirrorImage*> &o) {
+ o.push_back(new MirrorImage());
+ o.push_back(new MirrorImage("uuid-123", MIRROR_IMAGE_STATE_ENABLED));
+ o.push_back(new MirrorImage("uuid-abc", MIRROR_IMAGE_STATE_DISABLING));
+}
+
+bool MirrorImage::operator==(const MirrorImage &rhs) const {
+ return global_image_id == rhs.global_image_id && state == rhs.state;
+}
+
+bool MirrorImage::operator<(const MirrorImage &rhs) const {
+ return global_image_id < rhs.global_image_id ||
+ (global_image_id == rhs.global_image_id && state < rhs.state);
+}
+
+std::ostream& operator<<(std::ostream& os, const MirrorImageState& mirror_state) {
+ switch (mirror_state) {
+ case MIRROR_IMAGE_STATE_DISABLING:
+ os << "disabling";
+ break;
+ case MIRROR_IMAGE_STATE_ENABLED:
+ os << "enabled";
+ break;
+ default:
+ os << "unknown (" << static_cast<uint32_t>(mirror_state) << ")";
+ break;
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const MirrorImage& mirror_image) {
+ os << "["
+ << "global_image_id=" << mirror_image.global_image_id << ", "
+ << "state=" << mirror_image.state << "]";
+ return os;
+}
+
+void MirrorImageStatus::encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(state, bl);
+ encode(description, bl);
+ encode(last_update, bl);
+ encode(up, bl);
+ ENCODE_FINISH(bl);
+}
+
+void MirrorImageStatus::decode(bufferlist::const_iterator &it) {
+ DECODE_START(1, it);
+ decode(state, it);
+ decode(description, it);
+ decode(last_update, it);
+ decode(up, it);
+ DECODE_FINISH(it);
+}
+
+void MirrorImageStatus::dump(Formatter *f) const {
+ f->dump_string("state", state_to_string());
+ f->dump_string("description", description);
+ f->dump_stream("last_update") << last_update;
+}
+
+std::string MirrorImageStatus::state_to_string() const {
+ std::stringstream ss;
+ ss << (up ? "up+" : "down+") << state;
+ return ss.str();
+}
+
+void MirrorImageStatus::generate_test_instances(
+ std::list<MirrorImageStatus*> &o) {
+ o.push_back(new MirrorImageStatus());
+ o.push_back(new MirrorImageStatus(MIRROR_IMAGE_STATUS_STATE_REPLAYING));
+ o.push_back(new MirrorImageStatus(MIRROR_IMAGE_STATUS_STATE_ERROR, "error"));
+}
+
+bool MirrorImageStatus::operator==(const MirrorImageStatus &rhs) const {
+ return state == rhs.state && description == rhs.description && up == rhs.up;
+}
+
+std::ostream& operator<<(std::ostream& os, const MirrorImageStatusState& state) {
+ switch (state) {
+ case MIRROR_IMAGE_STATUS_STATE_UNKNOWN:
+ os << "unknown";
+ break;
+ case MIRROR_IMAGE_STATUS_STATE_ERROR:
+ os << "error";
+ break;
+ case MIRROR_IMAGE_STATUS_STATE_SYNCING:
+ os << "syncing";
+ break;
+ case MIRROR_IMAGE_STATUS_STATE_STARTING_REPLAY:
+ os << "starting_replay";
+ break;
+ case MIRROR_IMAGE_STATUS_STATE_REPLAYING:
+ os << "replaying";
+ break;
+ case MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY:
+ os << "stopping_replay";
+ break;
+ case MIRROR_IMAGE_STATUS_STATE_STOPPED:
+ os << "stopped";
+ break;
+ default:
+ os << "unknown (" << static_cast<uint32_t>(state) << ")";
+ break;
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const MirrorImageStatus& status) {
+ os << "["
+ << "state=" << status.state_to_string() << ", "
+ << "description=" << status.description << ", "
+ << "last_update=" << status.last_update << "]";
+ return os;
+}
+
+void ParentImageSpec::encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(pool_id, bl);
+ encode(pool_namespace, bl);
+ encode(image_id, bl);
+ encode(snap_id, bl);
+ ENCODE_FINISH(bl);
+}
+
+void ParentImageSpec::decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(pool_id, bl);
+ decode(pool_namespace, bl);
+ decode(image_id, bl);
+ decode(snap_id, bl);
+ DECODE_FINISH(bl);
+}
+
+void ParentImageSpec::dump(Formatter *f) const {
+ f->dump_int("pool_id", pool_id);
+ f->dump_string("pool_namespace", pool_namespace);
+ f->dump_string("image_id", image_id);
+ f->dump_unsigned("snap_id", snap_id);
+}
+
+void ParentImageSpec::generate_test_instances(std::list<ParentImageSpec*>& o) {
+ o.push_back(new ParentImageSpec{});
+ o.push_back(new ParentImageSpec{1, "", "foo", 3});
+ o.push_back(new ParentImageSpec{1, "ns", "foo", 3});
+}
+
+void ChildImageSpec::encode(bufferlist &bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(pool_id, bl);
+ encode(image_id, bl);
+ encode(pool_namespace, bl);
+ ENCODE_FINISH(bl);
+}
+
+void ChildImageSpec::decode(bufferlist::const_iterator &it) {
+ DECODE_START(2, it);
+ decode(pool_id, it);
+ decode(image_id, it);
+ if (struct_v >= 2) {
+ decode(pool_namespace, it);
+ }
+ DECODE_FINISH(it);
+}
+
+void ChildImageSpec::dump(Formatter *f) const {
+ f->dump_int("pool_id", pool_id);
+ f->dump_string("pool_namespace", pool_namespace);
+ f->dump_string("image_id", image_id);
+}
+
+void ChildImageSpec::generate_test_instances(std::list<ChildImageSpec*> &o) {
+ o.push_back(new ChildImageSpec());
+ o.push_back(new ChildImageSpec(123, "", "abc"));
+ o.push_back(new ChildImageSpec(123, "ns", "abc"));
+}
+
+void GroupImageSpec::encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(image_id, bl);
+ encode(pool_id, bl);
+ ENCODE_FINISH(bl);
+}
+
+void GroupImageSpec::decode(bufferlist::const_iterator &it) {
+ DECODE_START(1, it);
+ decode(image_id, it);
+ decode(pool_id, it);
+ DECODE_FINISH(it);
+}
+
+void GroupImageSpec::dump(Formatter *f) const {
+ f->dump_string("image_id", image_id);
+ f->dump_int("pool_id", pool_id);
+}
+
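+// Parses a key of the form
+// "image_<zero-padded 16-digit hex pool id>_<image id>" (the inverse of
+// image_key()) back into a GroupImageSpec.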
+int GroupImageSpec::from_key(const std::string &image_key,
+ GroupImageSpec *spec) {
+ if (nullptr == spec) return -EINVAL;
+ int prefix_len = cls::rbd::RBD_GROUP_IMAGE_KEY_PREFIX.size();
+ std::string data_string = image_key.substr(prefix_len,
+ image_key.size() - prefix_len);
+ size_t p = data_string.find("_");
+ if (std::string::npos == p) {
+ return -EIO;
+ }
+ data_string[p] = ' ';
+
+ istringstream iss(data_string);
+ uint64_t pool_id;
+ string image_id;
+ iss >> std::hex >> pool_id >> image_id;
+
+ spec->image_id = image_id;
+ spec->pool_id = pool_id;
+ return 0;
+}
+
+std::string GroupImageSpec::image_key() {
+ if (-1 == pool_id)
+ return "";
+ else {
+ ostringstream oss;
+ oss << RBD_GROUP_IMAGE_KEY_PREFIX << std::setw(16)
+ << std::setfill('0') << std::hex << pool_id << "_" << image_id;
+ return oss.str();
+ }
+}
+
+void GroupImageSpec::generate_test_instances(std::list<GroupImageSpec*> &o) {
+ o.push_back(new GroupImageSpec("10152ae8944a", 0));
+ o.push_back(new GroupImageSpec("1018643c9869", 3));
+}
+
+void GroupImageStatus::encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(spec, bl);
+ encode(state, bl);
+ ENCODE_FINISH(bl);
+}
+
+void GroupImageStatus::decode(bufferlist::const_iterator &it) {
+ DECODE_START(1, it);
+ decode(spec, it);
+ decode(state, it);
+ DECODE_FINISH(it);
+}
+
+std::string GroupImageStatus::state_to_string() const {
+ std::stringstream ss;
+ if (state == GROUP_IMAGE_LINK_STATE_INCOMPLETE) {
+ ss << "incomplete";
+ }
+ if (state == GROUP_IMAGE_LINK_STATE_ATTACHED) {
+ ss << "attached";
+ }
+ return ss.str();
+}
+
+void GroupImageStatus::dump(Formatter *f) const {
+ spec.dump(f);
+ f->dump_string("state", state_to_string());
+}
+
+void GroupImageStatus::generate_test_instances(std::list<GroupImageStatus*> &o) {
+ o.push_back(new GroupImageStatus(GroupImageSpec("10152ae8944a", 0), GROUP_IMAGE_LINK_STATE_ATTACHED));
+ o.push_back(new GroupImageStatus(GroupImageSpec("1018643c9869", 3), GROUP_IMAGE_LINK_STATE_ATTACHED));
+ o.push_back(new GroupImageStatus(GroupImageSpec("10152ae8944a", 0), GROUP_IMAGE_LINK_STATE_INCOMPLETE));
+ o.push_back(new GroupImageStatus(GroupImageSpec("1018643c9869", 3), GROUP_IMAGE_LINK_STATE_INCOMPLETE));
+}
+
+
+void GroupSpec::encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(pool_id, bl);
+ encode(group_id, bl);
+ ENCODE_FINISH(bl);
+}
+
+void GroupSpec::decode(bufferlist::const_iterator &it) {
+ DECODE_START(1, it);
+ decode(pool_id, it);
+ decode(group_id, it);
+ DECODE_FINISH(it);
+}
+
+void GroupSpec::dump(Formatter *f) const {
+ f->dump_string("group_id", group_id);
+ f->dump_int("pool_id", pool_id);
+}
+
+bool GroupSpec::is_valid() const {
+ return (!group_id.empty()) && (pool_id != -1);
+}
+
+void GroupSpec::generate_test_instances(std::list<GroupSpec *> &o) {
+ o.push_back(new GroupSpec("10152ae8944a", 0));
+ o.push_back(new GroupSpec("1018643c9869", 3));
+}
+
+void GroupSnapshotNamespace::encode(bufferlist& bl) const {
+ using ceph::encode;
+ encode(group_pool, bl);
+ encode(group_id, bl);
+ encode(group_snapshot_id, bl);
+}
+
+void GroupSnapshotNamespace::decode(bufferlist::const_iterator& it) {
+ using ceph::decode;
+ decode(group_pool, it);
+ decode(group_id, it);
+ decode(group_snapshot_id, it);
+}
+
+void GroupSnapshotNamespace::dump(Formatter *f) const {
+ f->dump_int("group_pool", group_pool);
+ f->dump_string("group_id", group_id);
+ f->dump_string("group_snapshot_id", group_snapshot_id);
+}
+
+void TrashSnapshotNamespace::encode(bufferlist& bl) const {
+ using ceph::encode;
+ encode(original_name, bl);
+ encode(static_cast<uint32_t>(original_snapshot_namespace_type), bl);
+}
+
+void TrashSnapshotNamespace::decode(bufferlist::const_iterator& it) {
+ using ceph::decode;
+ decode(original_name, it);
+ uint32_t snap_type;
+ decode(snap_type, it);
+ original_snapshot_namespace_type = static_cast<SnapshotNamespaceType>(
+ snap_type);
+}
+
+void TrashSnapshotNamespace::dump(Formatter *f) const {
+ f->dump_string("original_name", original_name);
+ f->dump_stream("original_snapshot_namespace")
+ << original_snapshot_namespace_type;
+}
+
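+// SnapshotNamespace is a boost::variant over the concrete namespace
+// types; the visitors below dispatch encode/decode/dump and type queries
+// to whichever alternative is currently held.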
+class EncodeSnapshotNamespaceVisitor : public boost::static_visitor<void> {
+public:
+ explicit EncodeSnapshotNamespaceVisitor(bufferlist &bl) : m_bl(bl) {
+ }
+
+ template <typename T>
+ inline void operator()(const T& t) const {
+ using ceph::encode;
+ encode(static_cast<uint32_t>(T::SNAPSHOT_NAMESPACE_TYPE), m_bl);
+ t.encode(m_bl);
+ }
+
+private:
+ bufferlist &m_bl;
+};
+
+class DecodeSnapshotNamespaceVisitor : public boost::static_visitor<void> {
+public:
+ DecodeSnapshotNamespaceVisitor(bufferlist::const_iterator &iter)
+ : m_iter(iter) {
+ }
+
+ template <typename T>
+ inline void operator()(T& t) const {
+ t.decode(m_iter);
+ }
+private:
+ bufferlist::const_iterator &m_iter;
+};
+
+class DumpSnapshotNamespaceVisitor : public boost::static_visitor<void> {
+public:
+ explicit DumpSnapshotNamespaceVisitor(Formatter *formatter, const std::string &key)
+ : m_formatter(formatter), m_key(key) {}
+
+ template <typename T>
+ inline void operator()(const T& t) const {
+ auto type = T::SNAPSHOT_NAMESPACE_TYPE;
+ m_formatter->dump_string(m_key.c_str(), stringify(type));
+ t.dump(m_formatter);
+ }
+private:
+ ceph::Formatter *m_formatter;
+ std::string m_key;
+};
+
+class GetTypeVisitor : public boost::static_visitor<SnapshotNamespaceType> {
+public:
+ template <typename T>
+ inline SnapshotNamespaceType operator()(const T&) const {
+ return static_cast<SnapshotNamespaceType>(T::SNAPSHOT_NAMESPACE_TYPE);
+ }
+};
+
+SnapshotNamespaceType get_snap_namespace_type(
+ const SnapshotNamespace& snapshot_namespace) {
+ return static_cast<SnapshotNamespaceType>(boost::apply_visitor(
+ GetTypeVisitor(), snapshot_namespace));
+}
+
+void SnapshotInfo::encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(id, bl);
+ encode(snapshot_namespace, bl);
+ encode(name, bl);
+ encode(image_size, bl);
+ encode(timestamp, bl);
+ encode(child_count, bl);
+ ENCODE_FINISH(bl);
+}
+
+void SnapshotInfo::decode(bufferlist::const_iterator& it) {
+ DECODE_START(1, it);
+ decode(id, it);
+ decode(snapshot_namespace, it);
+ decode(name, it);
+ decode(image_size, it);
+ decode(timestamp, it);
+ decode(child_count, it);
+ DECODE_FINISH(it);
+}
+
+void SnapshotInfo::dump(Formatter *f) const {
+ f->dump_unsigned("id", id);
+ f->open_object_section("namespace");
+ boost::apply_visitor(DumpSnapshotNamespaceVisitor(f, "type"),
+ snapshot_namespace);
+ f->close_section();
+ f->dump_string("name", name);
+ f->dump_unsigned("image_size", image_size);
+ f->dump_stream("timestamp") << timestamp;
+}
+
+void SnapshotInfo::generate_test_instances(std::list<SnapshotInfo*> &o) {
+ o.push_back(new SnapshotInfo(1ULL, UserSnapshotNamespace{}, "snap1", 123,
+ {123456, 0}, 12));
+ o.push_back(new SnapshotInfo(2ULL,
+ GroupSnapshotNamespace{567, "group1", "snap1"},
+ "snap1", 123, {123456, 0}, 987));
+ o.push_back(new SnapshotInfo(3ULL,
+ TrashSnapshotNamespace{
+ SNAPSHOT_NAMESPACE_TYPE_USER, "snap1"},
+ "12345", 123, {123456, 0}, 429));
+}
+
+void SnapshotNamespace::encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ boost::apply_visitor(EncodeSnapshotNamespaceVisitor(bl), *this);
+ ENCODE_FINISH(bl);
+}
+
+void SnapshotNamespace::decode(bufferlist::const_iterator &p)
+{
+ DECODE_START(1, p);
+ uint32_t snap_type;
+ decode(snap_type, p);
+ switch (snap_type) {
+ case cls::rbd::SNAPSHOT_NAMESPACE_TYPE_USER:
+ *this = UserSnapshotNamespace();
+ break;
+ case cls::rbd::SNAPSHOT_NAMESPACE_TYPE_GROUP:
+ *this = GroupSnapshotNamespace();
+ break;
+ case cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH:
+ *this = TrashSnapshotNamespace();
+ break;
+ default:
+ *this = UnknownSnapshotNamespace();
+ break;
+ }
+ boost::apply_visitor(DecodeSnapshotNamespaceVisitor(p), *this);
+ DECODE_FINISH(p);
+}
+
+void SnapshotNamespace::dump(Formatter *f) const {
+ boost::apply_visitor(
+ DumpSnapshotNamespaceVisitor(f, "snapshot_namespace_type"), *this);
+}
+
+void SnapshotNamespace::generate_test_instances(std::list<SnapshotNamespace*> &o) {
+ o.push_back(new SnapshotNamespace(UserSnapshotNamespace()));
+ o.push_back(new SnapshotNamespace(GroupSnapshotNamespace(0, "10152ae8944a",
+ "2118643c9732")));
+ o.push_back(new SnapshotNamespace(GroupSnapshotNamespace(5, "1018643c9869",
+ "33352be8933c")));
+ o.push_back(new SnapshotNamespace(TrashSnapshotNamespace()));
+}
+
+std::ostream& operator<<(std::ostream& os, const SnapshotNamespaceType& type) {
+ switch (type) {
+ case SNAPSHOT_NAMESPACE_TYPE_USER:
+ os << "user";
+ break;
+ case SNAPSHOT_NAMESPACE_TYPE_GROUP:
+ os << "group";
+ break;
+ case SNAPSHOT_NAMESPACE_TYPE_TRASH:
+ os << "trash";
+ break;
+ default:
+ os << "unknown";
+ break;
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const UserSnapshotNamespace& ns) {
+ os << "[" << SNAPSHOT_NAMESPACE_TYPE_USER << "]";
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const GroupSnapshotNamespace& ns) {
+ os << "[" << SNAPSHOT_NAMESPACE_TYPE_GROUP << " "
+ << "group_pool=" << ns.group_pool << ", "
+ << "group_id=" << ns.group_id << ", "
+ << "group_snapshot_id=" << ns.group_snapshot_id << "]";
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const TrashSnapshotNamespace& ns) {
+ os << "[" << SNAPSHOT_NAMESPACE_TYPE_TRASH << " "
+ << "original_name=" << ns.original_name << ", "
+ << "original_snapshot_namespace=" << ns.original_snapshot_namespace_type
+ << "]";
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const UnknownSnapshotNamespace& ns) {
+ os << "[unknown]";
+ return os;
+}
+
+void ImageSnapshotSpec::encode(bufferlist& bl) const {
+ using ceph::encode;
+ ENCODE_START(1, 1, bl);
+ encode(pool, bl);
+ encode(image_id, bl);
+ encode(snap_id, bl);
+ ENCODE_FINISH(bl);
+}
+
+void ImageSnapshotSpec::decode(bufferlist::const_iterator& it) {
+ using ceph::decode;
+ DECODE_START(1, it);
+ decode(pool, it);
+ decode(image_id, it);
+ decode(snap_id, it);
+ DECODE_FINISH(it);
+}
+
+void ImageSnapshotSpec::dump(Formatter *f) const {
+ f->dump_int("pool", pool);
+ f->dump_string("image_id", image_id);
+ f->dump_int("snap_id", snap_id);
+}
+
+void ImageSnapshotSpec::generate_test_instances(std::list<ImageSnapshotSpec *> &o) {
+ o.push_back(new ImageSnapshotSpec(0, "myimage", 2));
+ o.push_back(new ImageSnapshotSpec(1, "testimage", 7));
+}
+
+void GroupSnapshot::encode(bufferlist& bl) const {
+ using ceph::encode;
+ ENCODE_START(1, 1, bl);
+ encode(id, bl);
+ encode(name, bl);
+ encode(state, bl);
+ encode(snaps, bl);
+ ENCODE_FINISH(bl);
+}
+
+void GroupSnapshot::decode(bufferlist::const_iterator& it) {
+ using ceph::decode;
+ DECODE_START(1, it);
+ decode(id, it);
+ decode(name, it);
+ decode(state, it);
+ decode(snaps, it);
+ DECODE_FINISH(it);
+}
+
+void GroupSnapshot::dump(Formatter *f) const {
+ f->dump_string("id", id);
+ f->dump_string("name", name);
+ f->dump_int("state", state);
+}
+
+void GroupSnapshot::generate_test_instances(std::list<GroupSnapshot *> &o) {
+ o.push_back(new GroupSnapshot("10152ae8944a", "groupsnapshot1", GROUP_SNAPSHOT_STATE_INCOMPLETE));
+ o.push_back(new GroupSnapshot("1018643c9869", "groupsnapshot2", GROUP_SNAPSHOT_STATE_COMPLETE));
+}
+
+void TrashImageSpec::encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(source, bl);
+ encode(name, bl);
+ encode(deletion_time, bl);
+ encode(deferment_end_time, bl);
+ encode(state, bl);
+ ENCODE_FINISH(bl);
+}
+
+void TrashImageSpec::decode(bufferlist::const_iterator &it) {
+ DECODE_START(2, it);
+ decode(source, it);
+ decode(name, it);
+ decode(deletion_time, it);
+ decode(deferment_end_time, it);
+ if (struct_v >= 2) {
+ decode(state, it);
+ }
+ DECODE_FINISH(it);
+}
+
+void TrashImageSpec::dump(Formatter *f) const {
+ f->dump_stream("source") << source;
+ f->dump_string("name", name);
+ f->dump_unsigned("deletion_time", deletion_time);
+ f->dump_unsigned("deferment_end_time", deferment_end_time);
+}
+
+void MirrorImageMap::encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(instance_id, bl);
+ encode(mapped_time, bl);
+ encode(data, bl);
+ ENCODE_FINISH(bl);
+}
+
+void MirrorImageMap::decode(bufferlist::const_iterator &it) {
+ DECODE_START(1, it);
+ decode(instance_id, it);
+ decode(mapped_time, it);
+ decode(data, it);
+ DECODE_FINISH(it);
+}
+
+void MirrorImageMap::dump(Formatter *f) const {
+ f->dump_string("instance_id", instance_id);
+ f->dump_stream("mapped_time") << mapped_time;
+
+ std::stringstream data_ss;
+ data.hexdump(data_ss);
+ f->dump_string("data", data_ss.str());
+}
+
+void MirrorImageMap::generate_test_instances(
+ std::list<MirrorImageMap*> &o) {
+ bufferlist data;
+ data.append(std::string(128, '1'));
+
+ o.push_back(new MirrorImageMap("uuid-123", utime_t(), data));
+ o.push_back(new MirrorImageMap("uuid-abc", utime_t(), data));
+}
+
+bool MirrorImageMap::operator==(const MirrorImageMap &rhs) const {
+ return instance_id == rhs.instance_id && mapped_time == rhs.mapped_time &&
+ data.contents_equal(rhs.data);
+}
+
+bool MirrorImageMap::operator<(const MirrorImageMap &rhs) const {
+ return instance_id < rhs.instance_id ||
+ (instance_id == rhs.instance_id && mapped_time < rhs.mapped_time);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const MirrorImageMap &image_map) {
+ return os << "[" << "instance_id=" << image_map.instance_id << ", mapped_time="
+ << image_map.mapped_time << "]";
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const MigrationHeaderType& type) {
+ switch (type) {
+ case MIGRATION_HEADER_TYPE_SRC:
+ os << "source";
+ break;
+ case MIGRATION_HEADER_TYPE_DST:
+ os << "destination";
+ break;
+ default:
+ os << "unknown (" << static_cast<uint32_t>(type) << ")";
+ break;
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const MigrationState& migration_state) {
+ switch (migration_state) {
+ case MIGRATION_STATE_ERROR:
+ os << "error";
+ break;
+ case MIGRATION_STATE_PREPARING:
+ os << "preparing";
+ break;
+ case MIGRATION_STATE_PREPARED:
+ os << "prepared";
+ break;
+ case MIGRATION_STATE_EXECUTING:
+ os << "executing";
+ break;
+ case MIGRATION_STATE_EXECUTED:
+ os << "executed";
+ break;
+ case MIGRATION_STATE_ABORTING:
+ os << "aborting";
+ break;
+ default:
+ os << "unknown (" << static_cast<uint32_t>(migration_state) << ")";
+ break;
+ }
+ return os;
+}
+
+void MigrationSpec::encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(header_type, bl);
+ encode(pool_id, bl);
+ encode(pool_namespace, bl);
+ encode(image_name, bl);
+ encode(image_id, bl);
+ encode(snap_seqs, bl);
+ encode(overlap, bl);
+ encode(flatten, bl);
+ encode(mirroring, bl);
+ encode(state, bl);
+ encode(state_description, bl);
+ ENCODE_FINISH(bl);
+}
+
+void MigrationSpec::decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(header_type, bl);
+ decode(pool_id, bl);
+ decode(pool_namespace, bl);
+ decode(image_name, bl);
+ decode(image_id, bl);
+ decode(snap_seqs, bl);
+ decode(overlap, bl);
+ decode(flatten, bl);
+ decode(mirroring, bl);
+ decode(state, bl);
+ decode(state_description, bl);
+ DECODE_FINISH(bl);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const std::map<uint64_t, uint64_t>& snap_seqs) {
+ os << "{";
+ size_t count = 0;
+ for (auto &it : snap_seqs) {
+ os << (count++ > 0 ? ", " : "") << "(" << it.first << ", " << it.second
+ << ")";
+ }
+ os << "}";
+ return os;
+}
+
+void MigrationSpec::dump(Formatter *f) const {
+ f->dump_stream("header_type") << header_type;
+ f->dump_int("pool_id", pool_id);
+ f->dump_string("pool_namespace", pool_namespace);
+ f->dump_string("image_name", image_name);
+ f->dump_string("image_id", image_id);
+ f->dump_stream("snap_seqs") << snap_seqs;
+ f->dump_unsigned("overlap", overlap);
+ f->dump_bool("mirroring", mirroring);
+}
+
+void MigrationSpec::generate_test_instances(std::list<MigrationSpec*> &o) {
+ o.push_back(new MigrationSpec());
+ o.push_back(new MigrationSpec(MIGRATION_HEADER_TYPE_SRC, 1, "ns",
+ "image_name", "image_id", {{1, 2}}, 123, true,
+ true, MIGRATION_STATE_PREPARED, "description"));
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const MigrationSpec& migration_spec) {
+ os << "["
+ << "header_type=" << migration_spec.header_type << ", "
+ << "pool_id=" << migration_spec.pool_id << ", "
+ << "pool_namespace=" << migration_spec.pool_namespace << ", "
+ << "image_name=" << migration_spec.image_name << ", "
+ << "image_id=" << migration_spec.image_id << ", "
+ << "snap_seqs=" << migration_spec.snap_seqs << ", "
+ << "overlap=" << migration_spec.overlap << ", "
+ << "flatten=" << migration_spec.flatten << ", "
+ << "mirroring=" << migration_spec.mirroring << ", "
+ << "state=" << migration_spec.state << ", "
+ << "state_description=" << migration_spec.state_description << "]";
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const AssertSnapcSeqState& state) {
+ switch (state) {
+ case ASSERT_SNAPC_SEQ_GT_SNAPSET_SEQ:
+ os << "gt";
+ break;
+ case ASSERT_SNAPC_SEQ_LE_SNAPSET_SEQ:
+ os << "le";
+ break;
+ default:
+ os << "unknown (" << static_cast<uint32_t>(state) << ")";
+ break;
+ }
+ return os;
+}
+
+} // namespace rbd
+} // namespace cls
diff --git a/src/cls/rbd/cls_rbd_types.h b/src/cls/rbd/cls_rbd_types.h
new file mode 100644
index 00000000..073006b3
--- /dev/null
+++ b/src/cls/rbd/cls_rbd_types.h
@@ -0,0 +1,791 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_RBD_TYPES_H
+#define CEPH_CLS_RBD_TYPES_H
+
+#include <boost/variant.hpp>
+#include "include/int_types.h"
+#include "include/buffer.h"
+#include "include/encoding.h"
+#include "include/stringify.h"
+#include "include/utime.h"
+#include <iosfwd>
+#include <string>
+#include <set>
+
+#define RBD_GROUP_REF "rbd_group_ref"
+
+namespace ceph { class Formatter; }
+
+namespace cls {
+namespace rbd {
+
+static const uint32_t MAX_OBJECT_MAP_OBJECT_COUNT = 256000000;
+static const string RBD_GROUP_IMAGE_KEY_PREFIX = "image_";
+
+enum DirectoryState {
+ DIRECTORY_STATE_READY = 0,
+ DIRECTORY_STATE_ADD_DISABLED = 1
+};
+
+inline void encode(DirectoryState state, bufferlist& bl,
+ uint64_t features=0)
+{
+ ceph::encode(static_cast<uint8_t>(state), bl);
+}
+
+inline void decode(DirectoryState &state, bufferlist::const_iterator& it)
+{
+ uint8_t int_state;
+ ceph::decode(int_state, it);
+ state = static_cast<DirectoryState>(int_state);
+}
+
+enum MirrorMode {
+ MIRROR_MODE_DISABLED = 0,
+ MIRROR_MODE_IMAGE = 1,
+ MIRROR_MODE_POOL = 2
+};
+
+enum GroupImageLinkState {
+ GROUP_IMAGE_LINK_STATE_ATTACHED,
+ GROUP_IMAGE_LINK_STATE_INCOMPLETE
+};
+
+inline void encode(const GroupImageLinkState &state, bufferlist& bl,
+ uint64_t features=0)
+{
+ using ceph::encode;
+ encode(static_cast<uint8_t>(state), bl);
+}
+
+inline void decode(GroupImageLinkState &state, bufferlist::const_iterator& it)
+{
+ uint8_t int_state;
+ using ceph::decode;
+ decode(int_state, it);
+ state = static_cast<GroupImageLinkState>(int_state);
+}
+
+struct MirrorPeer {
+ MirrorPeer() {
+ }
+ MirrorPeer(const std::string &uuid, const std::string &cluster_name,
+ const std::string &client_name, int64_t pool_id)
+ : uuid(uuid), cluster_name(cluster_name), client_name(client_name),
+ pool_id(pool_id) {
+ }
+
+ std::string uuid;
+ std::string cluster_name;
+ std::string client_name;
+ int64_t pool_id = -1;
+
+ inline bool is_valid() const {
+ return (!uuid.empty() && !cluster_name.empty() && !client_name.empty());
+ }
+
+ void encode(bufferlist &bl) const;
+ void decode(bufferlist::const_iterator &it);
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<MirrorPeer*> &o);
+
+ bool operator==(const MirrorPeer &rhs) const;
+};
+
+std::ostream& operator<<(std::ostream& os, const MirrorMode& mirror_mode);
+std::ostream& operator<<(std::ostream& os, const MirrorPeer& peer);
+
+WRITE_CLASS_ENCODER(MirrorPeer);
+
+enum MirrorImageState {
+ MIRROR_IMAGE_STATE_DISABLING = 0,
+ MIRROR_IMAGE_STATE_ENABLED = 1,
+ MIRROR_IMAGE_STATE_DISABLED = 2,
+};
+
+struct MirrorImage {
+ MirrorImage() {}
+ MirrorImage(const std::string &global_image_id, MirrorImageState state)
+ : global_image_id(global_image_id), state(state) {}
+
+ std::string global_image_id;
+ MirrorImageState state = MIRROR_IMAGE_STATE_DISABLING;
+
+ void encode(bufferlist &bl) const;
+ void decode(bufferlist::const_iterator &it);
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<MirrorImage*> &o);
+
+ bool operator==(const MirrorImage &rhs) const;
+ bool operator<(const MirrorImage &rhs) const;
+};
+
+std::ostream& operator<<(std::ostream& os, const MirrorImageState& mirror_state);
+std::ostream& operator<<(std::ostream& os, const MirrorImage& mirror_image);
+
+WRITE_CLASS_ENCODER(MirrorImage);
+
+enum MirrorImageStatusState {
+ MIRROR_IMAGE_STATUS_STATE_UNKNOWN = 0,
+ MIRROR_IMAGE_STATUS_STATE_ERROR = 1,
+ MIRROR_IMAGE_STATUS_STATE_SYNCING = 2,
+ MIRROR_IMAGE_STATUS_STATE_STARTING_REPLAY = 3,
+ MIRROR_IMAGE_STATUS_STATE_REPLAYING = 4,
+ MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY = 5,
+ MIRROR_IMAGE_STATUS_STATE_STOPPED = 6,
+};
+
+inline void encode(const MirrorImageStatusState &state, bufferlist& bl,
+ uint64_t features=0)
+{
+ using ceph::encode;
+ encode(static_cast<uint8_t>(state), bl);
+}
+
+inline void decode(MirrorImageStatusState &state, bufferlist::const_iterator& it)
+{
+ uint8_t int_state;
+ using ceph::decode;
+ decode(int_state, it);
+ state = static_cast<MirrorImageStatusState>(int_state);
+}
+
+struct MirrorImageStatus {
+ MirrorImageStatus() {}
+ MirrorImageStatus(MirrorImageStatusState state,
+ const std::string &description = "")
+ : state(state), description(description) {}
+
+ MirrorImageStatusState state = MIRROR_IMAGE_STATUS_STATE_UNKNOWN;
+ std::string description;
+ utime_t last_update;
+ bool up = false;
+
+ void encode(bufferlist &bl) const;
+ void decode(bufferlist::const_iterator &it);
+ void dump(Formatter *f) const;
+
+ std::string state_to_string() const;
+
+ static void generate_test_instances(std::list<MirrorImageStatus*> &o);
+
+ bool operator==(const MirrorImageStatus &rhs) const;
+};
+
+std::ostream& operator<<(std::ostream& os, const MirrorImageStatus& status);
+std::ostream& operator<<(std::ostream& os, const MirrorImageStatusState& state);
+
+WRITE_CLASS_ENCODER(MirrorImageStatus);
+
+struct ParentImageSpec {
+ int64_t pool_id = -1;
+ std::string pool_namespace;
+ std::string image_id;
+ snapid_t snap_id = CEPH_NOSNAP;
+
+ ParentImageSpec() {
+ }
+ ParentImageSpec(int64_t pool_id, const std::string& pool_namespace,
+ const std::string& image_id, snapid_t snap_id)
+ : pool_id(pool_id), pool_namespace(pool_namespace), image_id(image_id),
+ snap_id(snap_id) {
+ }
+
+ bool exists() const {
+ return (pool_id >= 0 && !image_id.empty() && snap_id != CEPH_NOSNAP);
+ }
+
+ bool operator==(const ParentImageSpec& rhs) const {
+ return ((pool_id == rhs.pool_id) &&
+ (pool_namespace == rhs.pool_namespace) &&
+ (image_id == rhs.image_id) &&
+ (snap_id == rhs.snap_id));
+ }
+
+ bool operator!=(const ParentImageSpec& rhs) const {
+ return !(*this == rhs);
+ }
+
+ void encode(bufferlist &bl) const;
+ void decode(bufferlist::const_iterator &it);
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<ParentImageSpec*> &o);
+};
+
+WRITE_CLASS_ENCODER(ParentImageSpec);
+
+struct ChildImageSpec {
+ int64_t pool_id = -1;
+ std::string pool_namespace;
+ std::string image_id;
+
+ ChildImageSpec() {}
+ ChildImageSpec(int64_t pool_id, const std::string& pool_namespace,
+ const std::string& image_id)
+ : pool_id(pool_id), pool_namespace(pool_namespace), image_id(image_id) {
+ }
+
+ void encode(bufferlist &bl) const;
+ void decode(bufferlist::const_iterator &it);
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<ChildImageSpec*> &o);
+
+ inline bool operator==(const ChildImageSpec& rhs) const {
+ return (pool_id == rhs.pool_id &&
+ pool_namespace == rhs.pool_namespace &&
+ image_id == rhs.image_id);
+ }
+ inline bool operator<(const ChildImageSpec& rhs) const {
+ if (pool_id != rhs.pool_id) {
+ return pool_id < rhs.pool_id;
+ }
+ if (pool_namespace != rhs.pool_namespace) {
+ return pool_namespace < rhs.pool_namespace;
+ }
+ return image_id < rhs.image_id;
+ }
+};
+WRITE_CLASS_ENCODER(ChildImageSpec);
+
+typedef std::set<ChildImageSpec> ChildImageSpecs;
+
+struct GroupImageSpec {
+ GroupImageSpec() {}
+
+ GroupImageSpec(const std::string &image_id, int64_t pool_id)
+ : image_id(image_id), pool_id(pool_id) {}
+
+ static int from_key(const std::string &image_key, GroupImageSpec *spec);
+
+ std::string image_id;
+ int64_t pool_id = -1;
+
+ void encode(bufferlist &bl) const;
+ void decode(bufferlist::const_iterator &it);
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<GroupImageSpec*> &o);
+
+ std::string image_key();
+
+};
+WRITE_CLASS_ENCODER(GroupImageSpec);
+
+struct GroupImageStatus {
+ GroupImageStatus() {}
+ GroupImageStatus(const std::string &image_id,
+ int64_t pool_id,
+ GroupImageLinkState state)
+ : spec(image_id, pool_id), state(state) {}
+
+ GroupImageStatus(GroupImageSpec spec,
+ GroupImageLinkState state)
+ : spec(spec), state(state) {}
+
+ GroupImageSpec spec;
+ GroupImageLinkState state = GROUP_IMAGE_LINK_STATE_INCOMPLETE;
+
+ void encode(bufferlist &bl) const;
+ void decode(bufferlist::const_iterator &it);
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<GroupImageStatus*> &o);
+
+ std::string state_to_string() const;
+};
+
+WRITE_CLASS_ENCODER(GroupImageStatus);
+
+struct GroupSpec {
+ GroupSpec() {}
+ GroupSpec(const std::string &group_id, int64_t pool_id)
+ : group_id(group_id), pool_id(pool_id) {}
+
+ std::string group_id;
+ int64_t pool_id = -1;
+
+ void encode(bufferlist &bl) const;
+ void decode(bufferlist::const_iterator &it);
+ void dump(Formatter *f) const;
+ bool is_valid() const;
+
+ static void generate_test_instances(std::list<GroupSpec *> &o);
+};
+
+WRITE_CLASS_ENCODER(GroupSpec);
+
+enum SnapshotNamespaceType {
+ SNAPSHOT_NAMESPACE_TYPE_USER = 0,
+ SNAPSHOT_NAMESPACE_TYPE_GROUP = 1,
+ SNAPSHOT_NAMESPACE_TYPE_TRASH = 2
+};
+
+struct UserSnapshotNamespace {
+ static const SnapshotNamespaceType SNAPSHOT_NAMESPACE_TYPE =
+ SNAPSHOT_NAMESPACE_TYPE_USER;
+
+ UserSnapshotNamespace() {}
+
+ void encode(bufferlist& bl) const {}
+ void decode(bufferlist::const_iterator& it) {}
+
+ void dump(Formatter *f) const {}
+
+ inline bool operator==(const UserSnapshotNamespace& usn) const {
+ return true;
+ }
+
+ inline bool operator<(const UserSnapshotNamespace& usn) const {
+ return false;
+ }
+};
+
+struct GroupSnapshotNamespace {
+ static const SnapshotNamespaceType SNAPSHOT_NAMESPACE_TYPE =
+ SNAPSHOT_NAMESPACE_TYPE_GROUP;
+
+ GroupSnapshotNamespace() {}
+
+ GroupSnapshotNamespace(int64_t _group_pool,
+ const string &_group_id,
+ const string &_group_snapshot_id)
+ : group_id(_group_id), group_pool(_group_pool),
+ group_snapshot_id(_group_snapshot_id) {}
+
+ string group_id;
+ int64_t group_pool = 0;
+ string group_snapshot_id;
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& it);
+
+ void dump(Formatter *f) const;
+
+ inline bool operator==(const GroupSnapshotNamespace& gsn) const {
+ return group_pool == gsn.group_pool &&
+ group_id == gsn.group_id &&
+ group_snapshot_id == gsn.group_snapshot_id;
+ }
+
+ inline bool operator<(const GroupSnapshotNamespace& gsn) const {
+ if (group_pool != gsn.group_pool) {
+ return group_pool < gsn.group_pool;
+ }
+ if (group_id != gsn.group_id) {
+ return group_id < gsn.group_id;
+ }
+ return group_snapshot_id < gsn.group_snapshot_id;
+ }
+};
+
+struct TrashSnapshotNamespace {
+ static const SnapshotNamespaceType SNAPSHOT_NAMESPACE_TYPE =
+ SNAPSHOT_NAMESPACE_TYPE_TRASH;
+
+ std::string original_name;
+ SnapshotNamespaceType original_snapshot_namespace_type =
+ SNAPSHOT_NAMESPACE_TYPE_USER;
+
+ TrashSnapshotNamespace() {}
+ TrashSnapshotNamespace(SnapshotNamespaceType original_snapshot_namespace_type,
+ const std::string& original_name)
+ : original_name(original_name),
+ original_snapshot_namespace_type(original_snapshot_namespace_type) {}
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& it);
+ void dump(Formatter *f) const;
+
+ inline bool operator==(const TrashSnapshotNamespace& usn) const {
+ return true;
+ }
+ inline bool operator<(const TrashSnapshotNamespace& usn) const {
+ return false;
+ }
+};
+
+struct UnknownSnapshotNamespace {
+ static const SnapshotNamespaceType SNAPSHOT_NAMESPACE_TYPE =
+ static_cast<SnapshotNamespaceType>(-1);
+
+ UnknownSnapshotNamespace() {}
+
+ void encode(bufferlist& bl) const {}
+ void decode(bufferlist::const_iterator& it) {}
+ void dump(Formatter *f) const {}
+
+ inline bool operator==(const UnknownSnapshotNamespace& gsn) const {
+ return true;
+ }
+
+ inline bool operator<(const UnknownSnapshotNamespace& gsn) const {
+ return false;
+ }
+};
+
+std::ostream& operator<<(std::ostream& os, const SnapshotNamespaceType& type);
+std::ostream& operator<<(std::ostream& os, const UserSnapshotNamespace& ns);
+std::ostream& operator<<(std::ostream& os, const GroupSnapshotNamespace& ns);
+std::ostream& operator<<(std::ostream& os, const TrashSnapshotNamespace& ns);
+std::ostream& operator<<(std::ostream& os, const UnknownSnapshotNamespace& ns);
+
+typedef boost::variant<UserSnapshotNamespace,
+ GroupSnapshotNamespace,
+ TrashSnapshotNamespace,
+ UnknownSnapshotNamespace> SnapshotNamespaceVariant;
+
+struct SnapshotNamespace : public SnapshotNamespaceVariant {
+ SnapshotNamespace() {
+ }
+
+ template <typename T>
+ SnapshotNamespace(T&& t) : SnapshotNamespaceVariant(std::forward<T>(t)) {
+ }
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& it);
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<SnapshotNamespace*> &o);
+
+ inline bool operator==(const SnapshotNamespaceVariant& sn) const {
+ return static_cast<const SnapshotNamespaceVariant&>(*this) == sn;
+ }
+ inline bool operator<(const SnapshotNamespaceVariant& sn) const {
+ return static_cast<const SnapshotNamespaceVariant&>(*this) < sn;
+ }
+};
+WRITE_CLASS_ENCODER(SnapshotNamespace);
+
+SnapshotNamespaceType get_snap_namespace_type(
+ const SnapshotNamespace& snapshot_namespace);
+
+struct SnapshotInfo {
+ snapid_t id = CEPH_NOSNAP;
+ cls::rbd::SnapshotNamespace snapshot_namespace = {UserSnapshotNamespace{}};
+ std::string name;
+ uint64_t image_size = 0;
+ utime_t timestamp;
+ uint32_t child_count = 0;
+
+ SnapshotInfo() {
+ }
+ SnapshotInfo(snapid_t id,
+ const cls::rbd::SnapshotNamespace& snapshot_namespace,
+ const std::string& name, uint64_t image_size,
+ const utime_t& timestamp, uint32_t child_count)
+ : id(id), snapshot_namespace(snapshot_namespace),
+ name(name), image_size(image_size), timestamp(timestamp),
+ child_count(child_count) {
+ }
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& it);
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<SnapshotInfo*> &o);
+};
+WRITE_CLASS_ENCODER(SnapshotInfo);
+
+enum GroupSnapshotState {
+ GROUP_SNAPSHOT_STATE_INCOMPLETE = 0,
+ GROUP_SNAPSHOT_STATE_COMPLETE = 1,
+};
+
+inline void encode(const GroupSnapshotState &state, bufferlist& bl, uint64_t features=0)
+{
+ using ceph::encode;
+ encode(static_cast<uint8_t>(state), bl);
+}
+
+inline void decode(GroupSnapshotState &state, bufferlist::const_iterator& it)
+{
+ using ceph::decode;
+ uint8_t int_state;
+ decode(int_state, it);
+ state = static_cast<GroupSnapshotState>(int_state);
+}
+
+struct ImageSnapshotSpec {
+ int64_t pool;
+ string image_id;
+ snapid_t snap_id;
+
+ ImageSnapshotSpec() {}
+ ImageSnapshotSpec(int64_t _pool,
+ string _image_id,
+ snapid_t _snap_id) : pool(_pool),
+ image_id(_image_id),
+ snap_id(_snap_id) {}
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& it);
+
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<ImageSnapshotSpec *> &o);
+};
+WRITE_CLASS_ENCODER(ImageSnapshotSpec);
+
+struct GroupSnapshot {
+ std::string id;
+ std::string name;
+ GroupSnapshotState state = GROUP_SNAPSHOT_STATE_INCOMPLETE;
+
+ GroupSnapshot() {}
+ GroupSnapshot(std::string _id,
+ std::string _name,
+ GroupSnapshotState _state) : id(_id),
+ name(_name),
+ state(_state) {}
+
+ vector<ImageSnapshotSpec> snaps;
+
+ void encode(bufferlist& bl) const;
+ void decode(bufferlist::const_iterator& it);
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<GroupSnapshot *> &o);
+};
+WRITE_CLASS_ENCODER(GroupSnapshot);
+enum TrashImageSource {
+ TRASH_IMAGE_SOURCE_USER = 0,
+ TRASH_IMAGE_SOURCE_MIRRORING = 1,
+ TRASH_IMAGE_SOURCE_MIGRATION = 2,
+ TRASH_IMAGE_SOURCE_REMOVING = 3,
+};
+
+inline std::ostream& operator<<(std::ostream& os,
+ const TrashImageSource& source) {
+ switch (source) {
+ case TRASH_IMAGE_SOURCE_USER:
+ os << "user";
+ break;
+ case TRASH_IMAGE_SOURCE_MIRRORING:
+ os << "mirroring";
+ break;
+ case TRASH_IMAGE_SOURCE_MIGRATION:
+ os << "migration";
+ break;
+ case TRASH_IMAGE_SOURCE_REMOVING:
+ os << "removing";
+ break;
+ default:
+ os << "unknown (" << static_cast<uint32_t>(source) << ")";
+ break;
+ }
+ return os;
+}
+
+inline void encode(const TrashImageSource &source, bufferlist& bl,
+ uint64_t features=0)
+{
+ using ceph::encode;
+ encode(static_cast<uint8_t>(source), bl);
+}
+
+inline void decode(TrashImageSource &source, bufferlist::const_iterator& it)
+{
+ uint8_t int_source;
+ using ceph::decode;
+ decode(int_source, it);
+ source = static_cast<TrashImageSource>(int_source);
+}
+
+enum TrashImageState {
+ TRASH_IMAGE_STATE_NORMAL = 0,
+ TRASH_IMAGE_STATE_MOVING = 1,
+ TRASH_IMAGE_STATE_REMOVING = 2,
+ TRASH_IMAGE_STATE_RESTORING = 3
+};
+
+inline void encode(const TrashImageState &state, bufferlist &bl)
+{
+ using ceph::encode;
+ encode(static_cast<uint8_t>(state), bl);
+}
+
+inline void decode(TrashImageState &state, bufferlist::const_iterator &it)
+{
+ uint8_t int_state;
+ using ceph::decode;
+ decode(int_state, it);
+ state = static_cast<TrashImageState>(int_state);
+}
+
+struct TrashImageSpec {
+ TrashImageSource source = TRASH_IMAGE_SOURCE_USER;
+ std::string name;
+ utime_t deletion_time; // time of deletion
+ utime_t deferment_end_time;
+ TrashImageState state = TRASH_IMAGE_STATE_NORMAL;
+
+ TrashImageSpec() {}
+ TrashImageSpec(TrashImageSource source, const std::string &name,
+ const utime_t& deletion_time,
+ const utime_t& deferment_end_time)
+ : source(source), name(name), deletion_time(deletion_time),
+ deferment_end_time(deferment_end_time) {
+ }
+
+ void encode(bufferlist &bl) const;
+ void decode(bufferlist::const_iterator& it);
+ void dump(Formatter *f) const;
+
+ inline bool operator==(const TrashImageSpec& rhs) const {
+ return (source == rhs.source &&
+ name == rhs.name &&
+ deletion_time == rhs.deletion_time &&
+ deferment_end_time == rhs.deferment_end_time);
+ }
+};
+WRITE_CLASS_ENCODER(TrashImageSpec);
+
+struct MirrorImageMap {
+ MirrorImageMap() {
+ }
+
+ MirrorImageMap(const std::string &instance_id, utime_t mapped_time,
+ const bufferlist &data)
+ : instance_id(instance_id),
+ mapped_time(mapped_time),
+ data(data) {
+ }
+
+ std::string instance_id;
+ utime_t mapped_time;
+ bufferlist data;
+
+ void encode(bufferlist &bl) const;
+ void decode(bufferlist::const_iterator &it);
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<MirrorImageMap*> &o);
+
+ bool operator==(const MirrorImageMap &rhs) const;
+ bool operator<(const MirrorImageMap &rhs) const;
+};
+
+std::ostream& operator<<(std::ostream& os, const MirrorImageMap &image_map);
+
+WRITE_CLASS_ENCODER(MirrorImageMap);
+
+enum MigrationHeaderType {
+ MIGRATION_HEADER_TYPE_SRC = 1,
+ MIGRATION_HEADER_TYPE_DST = 2,
+};
+
+inline void encode(const MigrationHeaderType &type, bufferlist& bl) {
+ using ceph::encode;
+ encode(static_cast<uint8_t>(type), bl);
+}
+
+inline void decode(MigrationHeaderType &type, bufferlist::const_iterator& it) {
+ uint8_t int_type;
+ using ceph::decode;
+ decode(int_type, it);
+ type = static_cast<MigrationHeaderType>(int_type);
+}
+
+enum MigrationState {
+ MIGRATION_STATE_ERROR = 0,
+ MIGRATION_STATE_PREPARING = 1,
+ MIGRATION_STATE_PREPARED = 2,
+ MIGRATION_STATE_EXECUTING = 3,
+ MIGRATION_STATE_EXECUTED = 4,
+ MIGRATION_STATE_ABORTING = 5,
+};
+
+inline void encode(const MigrationState &state, bufferlist& bl) {
+ using ceph::encode;
+ encode(static_cast<uint8_t>(state), bl);
+}
+
+inline void decode(MigrationState &state, bufferlist::const_iterator& it) {
+ uint8_t int_state;
+ using ceph::decode;
+ decode(int_state, it);
+ state = static_cast<MigrationState>(int_state);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const MigrationState& migration_state);
+
+struct MigrationSpec {
+ MigrationHeaderType header_type = MIGRATION_HEADER_TYPE_SRC;
+ int64_t pool_id = -1;
+ std::string pool_namespace;
+ std::string image_name;
+ std::string image_id;
+ std::map<uint64_t, uint64_t> snap_seqs;
+ uint64_t overlap = 0;
+ bool flatten = false;
+ bool mirroring = false;
+ MigrationState state = MIGRATION_STATE_ERROR;
+ std::string state_description;
+
+ MigrationSpec() {
+ }
+ MigrationSpec(MigrationHeaderType header_type, int64_t pool_id,
+ const std::string& pool_namespace,
+ const std::string &image_name, const std::string &image_id,
+ const std::map<uint64_t, uint64_t> &snap_seqs, uint64_t overlap,
+ bool mirroring, bool flatten, MigrationState state,
+ const std::string &state_description)
+ : header_type(header_type), pool_id(pool_id),
+ pool_namespace(pool_namespace), image_name(image_name),
+ image_id(image_id), snap_seqs(snap_seqs), overlap(overlap),
+ flatten(flatten), mirroring(mirroring), state(state),
+ state_description(state_description) {
+ }
+
+ void encode(bufferlist &bl) const;
+ void decode(bufferlist::const_iterator& it);
+ void dump(Formatter *f) const;
+
+ static void generate_test_instances(std::list<MigrationSpec*> &o);
+
+ inline bool operator==(const MigrationSpec& ms) const {
+ return header_type == ms.header_type && pool_id == ms.pool_id &&
+ pool_namespace == ms.pool_namespace && image_name == ms.image_name &&
+ image_id == ms.image_id && snap_seqs == ms.snap_seqs &&
+ overlap == ms.overlap && flatten == ms.flatten &&
+ mirroring == ms.mirroring && state == ms.state &&
+ state_description == ms.state_description;
+ }
+};
+
+std::ostream& operator<<(std::ostream& os, const MigrationSpec& migration_spec);
+
+WRITE_CLASS_ENCODER(MigrationSpec);
+
+enum AssertSnapcSeqState {
+ ASSERT_SNAPC_SEQ_GT_SNAPSET_SEQ = 0,
+ ASSERT_SNAPC_SEQ_LE_SNAPSET_SEQ = 1,
+};
+
+inline void encode(const AssertSnapcSeqState &state, bufferlist& bl) {
+ using ceph::encode;
+ encode(static_cast<uint8_t>(state), bl);
+}
+
+inline void decode(AssertSnapcSeqState &state, bufferlist::const_iterator& it) {
+ uint8_t int_state;
+ using ceph::decode;
+ decode(int_state, it);
+ state = static_cast<AssertSnapcSeqState>(int_state);
+}
+
+std::ostream& operator<<(std::ostream& os, const AssertSnapcSeqState& state);
+
+} // namespace rbd
+} // namespace cls
+
+#endif // CEPH_CLS_RBD_TYPES_H
diff --git a/src/cls/refcount/cls_refcount.cc b/src/cls/refcount/cls_refcount.cc
new file mode 100644
index 00000000..73cc3b85
--- /dev/null
+++ b/src/cls/refcount/cls_refcount.cc
@@ -0,0 +1,216 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <errno.h>
+
+#include "objclass/objclass.h"
+#include "cls/refcount/cls_refcount_ops.h"
+
+#include "include/compat.h"
+
+CLS_VER(1,0)
+CLS_NAME(refcount)
+
+
+#define REFCOUNT_ATTR "refcount"
+
+static string wildcard_tag;
+
+static int read_refcount(cls_method_context_t hctx, bool implicit_ref, obj_refcount *objr)
+{
+ bufferlist bl;
+ objr->refs.clear();
+ int ret = cls_cxx_getxattr(hctx, REFCOUNT_ATTR, &bl);
+ if (ret == -ENODATA) {
+ if (implicit_ref) {
+ objr->refs[wildcard_tag] = true;
+ }
+ return 0;
+ }
+ if (ret < 0)
+ return ret;
+
+ try {
+ auto iter = bl.cbegin();
+ decode(*objr, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: read_refcount(): failed to decode refcount entry\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int set_refcount(cls_method_context_t hctx, const struct obj_refcount& objr)
+{
+ bufferlist bl;
+
+ encode(objr, bl);
+
+ int ret = cls_cxx_setxattr(hctx, REFCOUNT_ATTR, &bl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cls_rc_refcount_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_refcount_get_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rc_refcount_get(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ obj_refcount objr;
+ int ret = read_refcount(hctx, op.implicit_ref, &objr);
+ if (ret < 0)
+ return ret;
+
+ CLS_LOG(10, "cls_rc_refcount_get() tag=%s\n", op.tag.c_str());
+
+ objr.refs[op.tag] = true;
+
+ ret = set_refcount(hctx, objr);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cls_rc_refcount_put(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_refcount_put_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rc_refcount_put(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ obj_refcount objr;
+ int ret = read_refcount(hctx, op.implicit_ref, &objr);
+ if (ret < 0)
+ return ret;
+
+ if (objr.refs.empty()) { // shouldn't happen!
+ CLS_LOG(0, "ERROR: cls_rc_refcount_put() was called without any references!\n");
+ return -EINVAL;
+ }
+
+ CLS_LOG(10, "cls_rc_refcount_put() tag=%s\n", op.tag.c_str());
+
+ bool found = false;
+ map<string, bool>::iterator iter = objr.refs.find(op.tag);
+ if (iter != objr.refs.end()) {
+ found = true;
+ } else if (op.implicit_ref) {
+ iter = objr.refs.find(wildcard_tag);
+ if (iter != objr.refs.end()) {
+ found = true;
+ }
+ }
+
+ if (!found ||
+ objr.retired_refs.find(op.tag) != objr.retired_refs.end())
+ return 0;
+
+ objr.retired_refs.insert(op.tag);
+ objr.refs.erase(iter);
+
+ if (objr.refs.empty()) {
+ return cls_cxx_remove(hctx);
+ }
+
+ ret = set_refcount(hctx, objr);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cls_rc_refcount_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_refcount_set_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_refcount_set(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ if (!op.refs.size()) {
+ return cls_cxx_remove(hctx);
+ }
+
+ obj_refcount objr;
+ list<string>::iterator iter;
+ for (iter = op.refs.begin(); iter != op.refs.end(); ++iter) {
+ objr.refs[*iter] = true;
+ }
+
+ int ret = set_refcount(hctx, objr);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cls_rc_refcount_read(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_refcount_read_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rc_refcount_read(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ obj_refcount objr;
+
+ cls_refcount_read_ret read_ret;
+ int ret = read_refcount(hctx, op.implicit_ref, &objr);
+ if (ret < 0)
+ return ret;
+
+ map<string, bool>::iterator iter;
+ for (iter = objr.refs.begin(); iter != objr.refs.end(); ++iter) {
+ read_ret.refs.push_back(iter->first);
+ }
+
+ encode(read_ret, *out);
+
+ return 0;
+}
+
+CLS_INIT(refcount)
+{
+ CLS_LOG(1, "Loaded refcount class!");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_refcount_get;
+ cls_method_handle_t h_refcount_put;
+ cls_method_handle_t h_refcount_set;
+ cls_method_handle_t h_refcount_read;
+
+ cls_register("refcount", &h_class);
+
+ /* refcount */
+ cls_register_cxx_method(h_class, "get", CLS_METHOD_RD | CLS_METHOD_WR, cls_rc_refcount_get, &h_refcount_get);
+ cls_register_cxx_method(h_class, "put", CLS_METHOD_RD | CLS_METHOD_WR, cls_rc_refcount_put, &h_refcount_put);
+ cls_register_cxx_method(h_class, "set", CLS_METHOD_RD | CLS_METHOD_WR, cls_rc_refcount_set, &h_refcount_set);
+ cls_register_cxx_method(h_class, "read", CLS_METHOD_RD, cls_rc_refcount_read, &h_refcount_read);
+
+ return;
+}
+
diff --git a/src/cls/refcount/cls_refcount_client.cc b/src/cls/refcount/cls_refcount_client.cc
new file mode 100644
index 00000000..9d5210c8
--- /dev/null
+++ b/src/cls/refcount/cls_refcount_client.cc
@@ -0,0 +1,61 @@
+#include <errno.h>
+
+#include "cls/refcount/cls_refcount_client.h"
+#include "cls/refcount/cls_refcount_ops.h"
+#include "include/rados/librados.hpp"
+
+using namespace librados;
+
+
+void cls_refcount_get(librados::ObjectWriteOperation& op, const string& tag, bool implicit_ref)
+{
+ bufferlist in;
+ cls_refcount_get_op call;
+ call.tag = tag;
+ call.implicit_ref = implicit_ref;
+ encode(call, in);
+ op.exec("refcount", "get", in);
+}
+
+void cls_refcount_put(librados::ObjectWriteOperation& op, const string& tag, bool implicit_ref)
+{
+ bufferlist in;
+ cls_refcount_put_op call;
+ call.tag = tag;
+ call.implicit_ref = implicit_ref;
+ encode(call, in);
+ op.exec("refcount", "put", in);
+}
+
+void cls_refcount_set(librados::ObjectWriteOperation& op, list<string>& refs)
+{
+ bufferlist in;
+ cls_refcount_set_op call;
+ call.refs = refs;
+ encode(call, in);
+ op.exec("refcount", "set", in);
+}
+
+int cls_refcount_read(librados::IoCtx& io_ctx, string& oid, list<string> *refs, bool implicit_ref)
+{
+ bufferlist in, out;
+ cls_refcount_read_op call;
+ call.implicit_ref = implicit_ref;
+ encode(call, in);
+ int r = io_ctx.exec(oid, "refcount", "read", in, out);
+ if (r < 0)
+ return r;
+
+ cls_refcount_read_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+
+ *refs = ret.refs;
+
+ return r;
+}
+
diff --git a/src/cls/refcount/cls_refcount_client.h b/src/cls/refcount/cls_refcount_client.h
new file mode 100644
index 00000000..c41a4f6c
--- /dev/null
+++ b/src/cls/refcount/cls_refcount_client.h
@@ -0,0 +1,34 @@
+#ifndef CEPH_CLS_REFCOUNT_CLIENT_H
+#define CEPH_CLS_REFCOUNT_CLIENT_H
+
+#include "include/rados/librados_fwd.hpp"
+#include "include/types.h"
+
+/*
+ * refcount objclass
+ *
+ * The refcount objclass implements a refcounting scheme that allows having multiple references
+ * to a single rados object. The canonical way to use it is to add a reference and to remove a
+ * reference using a specific tag. This way we ensure that refcounting operations are idempotent,
+ * that is, a single client can only increase/decrease the refcount once using a single tag, so
+ * any replay of operations (implicit or explicit) is possible.
+ *
+ * So, the regular usage would be to create an object and increase its refcount. Then, when
+ * another reference to it is wanted, increase the refcount using a different tag. When
+ * removing a reference it is required to drop the refcount (using the same tag that was used
+ * for that reference). When the refcount drops to zero, the object is removed automatically.
+ *
+ * In order to maintain backwards compatibility with objects that were created without having
+ * their refcount increased, the implicit_ref was added. Any object that was created without
+ * having its refcount increased (explicitly) has an implicit refcount of 1. Since
+ * we don't have a tag for this refcount, we treat it as a wildcard. So if the refcount
+ * is being decreased by an unknown tag and we still have one wildcard tag, we'll accept it
+ * as the relevant tag, and the refcount will be decreased.
+ */
+
+void cls_refcount_get(librados::ObjectWriteOperation& op, const string& tag, bool implicit_ref = false);
+void cls_refcount_put(librados::ObjectWriteOperation& op, const string& tag, bool implicit_ref = false);
+void cls_refcount_set(librados::ObjectWriteOperation& op, list<string>& refs);
+int cls_refcount_read(librados::IoCtx& io_ctx, string& oid, list<string> *refs, bool implicit_ref = false);
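+
+/*
+ * Illustrative usage sketch (not part of the objclass API; the pool, object
+ * and tag names below are hypothetical, and error handling is reduced to
+ * simple return-code checks):
+ *
+ *   librados::IoCtx ioctx;            // assumed to be opened on some pool
+ *   std::string oid = "chunk_0001";   // hypothetical shared rados object
+ *
+ *   // take a reference on behalf of "head_obj_a"
+ *   librados::ObjectWriteOperation get_op;
+ *   cls_refcount_get(get_op, "head_obj_a");
+ *   int r = ioctx.operate(oid, &get_op);
+ *   if (r < 0)
+ *     return r;
+ *
+ *   // inspect the current references
+ *   std::list<std::string> refs;
+ *   r = cls_refcount_read(ioctx, oid, &refs);
+ *
+ *   // drop the reference again; when the last tag is put, the objclass
+ *   // removes the object
+ *   librados::ObjectWriteOperation put_op;
+ *   cls_refcount_put(put_op, "head_obj_a");
+ *   r = ioctx.operate(oid, &put_op);
+ */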
+
+#endif
diff --git a/src/cls/refcount/cls_refcount_ops.cc b/src/cls/refcount/cls_refcount_ops.cc
new file mode 100644
index 00000000..ca7f93ed
--- /dev/null
+++ b/src/cls/refcount/cls_refcount_ops.cc
@@ -0,0 +1,104 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "cls_refcount_ops.h"
+#include "common/Formatter.h"
+#include "common/ceph_json.h"
+
+void cls_refcount_get_op::dump(ceph::Formatter *f) const
+{
+ f->dump_string("tag", tag);
+ f->dump_int("implicit_ref", (int)implicit_ref);
+}
+
+void cls_refcount_get_op::generate_test_instances(list<cls_refcount_get_op*>& ls)
+{
+ ls.push_back(new cls_refcount_get_op);
+ ls.push_back(new cls_refcount_get_op);
+ ls.back()->tag = "foo";
+ ls.back()->implicit_ref = true;
+}
+
+
+void cls_refcount_put_op::dump(ceph::Formatter *f) const
+{
+ f->dump_string("tag", tag);
+ f->dump_int("implicit_ref", (int)implicit_ref);
+}
+
+void cls_refcount_put_op::generate_test_instances(list<cls_refcount_put_op*>& ls)
+{
+ ls.push_back(new cls_refcount_put_op);
+ ls.push_back(new cls_refcount_put_op);
+ ls.back()->tag = "foo";
+ ls.back()->implicit_ref = true;
+}
+
+
+
+void cls_refcount_set_op::dump(ceph::Formatter *f) const
+{
+ encode_json("refs", refs, f);
+}
+
+void cls_refcount_set_op::generate_test_instances(list<cls_refcount_set_op*>& ls)
+{
+ ls.push_back(new cls_refcount_set_op);
+ ls.push_back(new cls_refcount_set_op);
+ ls.back()->refs.push_back("foo");
+ ls.back()->refs.push_back("bar");
+}
+
+
+void cls_refcount_read_op::dump(ceph::Formatter *f) const
+{
+ f->dump_int("implicit_ref", (int)implicit_ref);
+}
+
+void cls_refcount_read_op::generate_test_instances(list<cls_refcount_read_op*>& ls)
+{
+ ls.push_back(new cls_refcount_read_op);
+ ls.push_back(new cls_refcount_read_op);
+ ls.back()->implicit_ref = true;
+}
+
+
+void cls_refcount_read_ret::dump(ceph::Formatter *f) const
+{
+ f->open_array_section("refs");
+ for (list<string>::const_iterator p = refs.begin(); p != refs.end(); ++p)
+ f->dump_string("ref", *p);
+ f->close_section();
+}
+
+void cls_refcount_read_ret::generate_test_instances(list<cls_refcount_read_ret*>& ls)
+{
+ ls.push_back(new cls_refcount_read_ret);
+ ls.push_back(new cls_refcount_read_ret);
+ ls.back()->refs.push_back("foo");
+ ls.back()->refs.push_back("bar");
+}
+
+void obj_refcount::dump(ceph::Formatter *f) const
+{
+ f->open_array_section("refs");
+ for (const auto &kv: refs) {
+ f->open_object_section("ref");
+ f->dump_string("oid", kv.first.c_str());
+ f->dump_bool("active",kv.second);
+ f->close_section();
+ }
+ f->close_section();
+
+ f->open_array_section("retired_refs");
+ for (const auto& it: retired_refs)
+ f->dump_string("ref", it.c_str());
+ f->close_section();
+}
+
+void obj_refcount::generate_test_instances(list<obj_refcount*>& ls)
+{
+ ls.push_back(new obj_refcount);
+ ls.back()->refs.emplace("foo",true);
+ ls.back()->retired_refs.emplace("bar");
+}
diff --git a/src/cls/refcount/cls_refcount_ops.h b/src/cls/refcount/cls_refcount_ops.h
new file mode 100644
index 00000000..946c11aa
--- /dev/null
+++ b/src/cls/refcount/cls_refcount_ops.h
@@ -0,0 +1,154 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_REFCOUNT_OPS_H
+#define CEPH_CLS_REFCOUNT_OPS_H
+
+#include "include/types.h"
+#include "common/hobject.h"
+
+struct cls_refcount_get_op {
+ string tag;
+ bool implicit_ref;
+
+ cls_refcount_get_op() : implicit_ref(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(tag, bl);
+ encode(implicit_ref, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(tag, bl);
+ decode(implicit_ref, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(list<cls_refcount_get_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_refcount_get_op)
+
+struct cls_refcount_put_op {
+ string tag;
+ bool implicit_ref; // assume wildcard reference for
+ // objects without a set ref
+
+ cls_refcount_put_op() : implicit_ref(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(tag, bl);
+ encode(implicit_ref, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(tag, bl);
+ decode(implicit_ref, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(list<cls_refcount_put_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_refcount_put_op)
+
+struct cls_refcount_set_op {
+ list<string> refs;
+
+ cls_refcount_set_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(refs, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(refs, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(list<cls_refcount_set_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_refcount_set_op)
+
+struct cls_refcount_read_op {
+ bool implicit_ref; // assume wildcard reference for
+ // objects without a set ref
+
+ cls_refcount_read_op() : implicit_ref(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(implicit_ref, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(implicit_ref, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(list<cls_refcount_read_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_refcount_read_op)
+
+struct cls_refcount_read_ret {
+ list<string> refs;
+
+ cls_refcount_read_ret() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(refs, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(refs, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(list<cls_refcount_read_ret*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_refcount_read_ret)
+
+struct obj_refcount {
+ map<string, bool> refs;
+ set<string> retired_refs;
+
+ obj_refcount() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(refs, bl);
+ encode(retired_refs, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(refs, bl);
+ if (struct_v >= 2) {
+ decode(retired_refs, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(list<obj_refcount*>& ls);
+};
+WRITE_CLASS_ENCODER(obj_refcount)
+
+#endif
diff --git a/src/cls/rgw/cls_rgw.cc b/src/cls/rgw/cls_rgw.cc
new file mode 100644
index 00000000..741d8c57
--- /dev/null
+++ b/src/cls/rgw/cls_rgw.cc
@@ -0,0 +1,4031 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "include/types.h"
+
+#include <errno.h>
+
+#include "objclass/objclass.h"
+#include "cls/rgw/cls_rgw_ops.h"
+#include "cls/rgw/cls_rgw_const.h"
+#include "common/Clock.h"
+#include "common/strtol.h"
+#include "common/escape.h"
+
+#include "include/compat.h"
+
+CLS_VER(1,0)
+CLS_NAME(rgw)
+
+
+#define BI_PREFIX_CHAR 0x80
+
+#define BI_BUCKET_OBJS_INDEX 0
+#define BI_BUCKET_LOG_INDEX 1
+#define BI_BUCKET_OBJ_INSTANCE_INDEX 2
+#define BI_BUCKET_OLH_DATA_INDEX 3
+
+#define BI_BUCKET_LAST_INDEX 4
+
+static std::string bucket_index_prefixes[] = { "", /* special handling for the objs list index */
+ "0_", /* bucket log index */
+ "1000_", /* obj instance index */
+ "1001_", /* olh data index */
+
+ /* this must be the last index */
+ "9999_",};
+
+static bool bi_is_objs_index(const string& s) {
+ return ((unsigned char)s[0] != BI_PREFIX_CHAR);
+}
+
+int bi_entry_type(const string& s)
+{
+ if (bi_is_objs_index(s)) {
+ return BI_BUCKET_OBJS_INDEX;
+ }
+
+ for (size_t i = 1;
+ i < sizeof(bucket_index_prefixes) / sizeof(bucket_index_prefixes[0]);
+ ++i) {
+ const string& t = bucket_index_prefixes[i];
+
+ if (s.compare(1, t.size(), t) == 0) {
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
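+
+/*
+ * Illustrative examples only (hypothetical keys, using the prefixes above):
+ *
+ *   bi_entry_type("foo")             -> BI_BUCKET_OBJS_INDEX (plain object entry)
+ *   bi_entry_type("\x80" "0_00123")  -> BI_BUCKET_LOG_INDEX (bilog entry)
+ *   bi_entry_type("\x80" "1000_foo") -> BI_BUCKET_OBJ_INSTANCE_INDEX
+ *   bi_entry_type("\x80" "1001_foo") -> BI_BUCKET_OLH_DATA_INDEX
+ */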
+
+static bool bi_entry_gt(const string& first, const string& second)
+{
+ int fi = bi_entry_type(first);
+ int si = bi_entry_type(second);
+
+ if (fi > si) {
+ return true;
+ } else if (fi < si) {
+ return false;
+ }
+
+ return first > second;
+}
+
+static void get_time_key(real_time& ut, string *key)
+{
+ char buf[32];
+ ceph_timespec ts = ceph::real_clock::to_ceph_timespec(ut);
+ snprintf(buf, 32, "%011llu.%09u", (unsigned long long)ts.tv_sec, (unsigned int)ts.tv_nsec);
+ *key = buf;
+}
+
+static void get_index_ver_key(cls_method_context_t hctx, uint64_t index_ver, string *key)
+{
+ char buf[48];
+ snprintf(buf, sizeof(buf), "%011llu.%llu.%d", (unsigned long long)index_ver,
+ (unsigned long long)cls_current_version(hctx),
+ cls_current_subop_num(hctx));
+ *key = buf;
+}
+
+static void bi_log_prefix(string& key)
+{
+ key = BI_PREFIX_CHAR;
+ key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
+}
+
+static void bi_log_index_key(cls_method_context_t hctx, string& key, string& id, uint64_t index_ver)
+{
+ bi_log_prefix(key);
+ get_index_ver_key(hctx, index_ver, &id);
+ key.append(id);
+}
+
+static int log_index_operation(cls_method_context_t hctx, cls_rgw_obj_key& obj_key, RGWModifyOp op,
+ string& tag, real_time& timestamp,
+ rgw_bucket_entry_ver& ver, RGWPendingState state, uint64_t index_ver,
+ string& max_marker, uint16_t bilog_flags, string *owner, string *owner_display_name, rgw_zone_set *zones_trace)
+{
+ bufferlist bl;
+
+ rgw_bi_log_entry entry;
+
+ entry.object = obj_key.name;
+ entry.instance = obj_key.instance;
+ entry.timestamp = timestamp;
+ entry.op = op;
+ entry.ver = ver;
+ entry.state = state;
+ entry.index_ver = index_ver;
+ entry.tag = tag;
+ entry.bilog_flags = bilog_flags;
+ if (owner) {
+ entry.owner = *owner;
+ }
+ if (owner_display_name) {
+ entry.owner_display_name = *owner_display_name;
+ }
+ if (zones_trace) {
+ entry.zones_trace = std::move(*zones_trace);
+ }
+
+ string key;
+ bi_log_index_key(hctx, key, entry.id, index_ver);
+
+ encode(entry, bl);
+
+ if (entry.id > max_marker)
+ max_marker = entry.id;
+
+ return cls_cxx_map_set_val(hctx, key, &bl);
+}
+
+/*
+ * read list of objects, skips objects in the ugly namespace
+ */
+static int get_obj_vals(cls_method_context_t hctx, const string& start, const string& filter_prefix,
+ int num_entries, map<string, bufferlist> *pkeys, bool *pmore)
+{
+ int ret = cls_cxx_map_get_vals(hctx, start, filter_prefix, num_entries, pkeys, pmore);
+ if (ret < 0)
+ return ret;
+
+ if (pkeys->empty())
+ return 0;
+
+ auto last_element = pkeys->rbegin();
+ if ((unsigned char)last_element->first[0] < BI_PREFIX_CHAR) {
+ /* nothing to see here, move along */
+ return 0;
+ }
+
+ auto first_element = pkeys->begin();
+ if ((unsigned char)first_element->first[0] > BI_PREFIX_CHAR) {
+ return 0;
+ }
+
+ /* let's rebuild the list, only keep entries we're interested in */
+ auto comp = [](const pair<string, bufferlist>& l, const string &r) { return l.first < r; };
+ string new_start = {static_cast<char>(BI_PREFIX_CHAR + 1)};
+
+ auto lower = pkeys->lower_bound(string{static_cast<char>(BI_PREFIX_CHAR)});
+ auto upper = std::lower_bound(lower, pkeys->end(), new_start, comp);
+ pkeys->erase(lower, upper);
+
+ if (num_entries == (int)pkeys->size() || !(*pmore))
+ return 0;
+
+ if (pkeys->size() && new_start < pkeys->rbegin()->first) {
+ new_start = pkeys->rbegin()->first;
+ }
+
+ map<string, bufferlist> new_keys;
+
+ /* now get some more keys */
+ ret = cls_cxx_map_get_vals(hctx, new_start, filter_prefix, num_entries - pkeys->size(), &new_keys, pmore);
+ if (ret < 0)
+ return ret;
+
+ pkeys->insert(std::make_move_iterator(new_keys.begin()),
+ std::make_move_iterator(new_keys.end()));
+ return 0;
+}
+
+/*
+ * get a monotonically decreasing string representation.
+ * That is, for any two values x > y, str(x) < str(y).
+ * Another property is that the string size starts short and grows as num increases.
+ */
+static void decreasing_str(uint64_t num, string *str)
+{
+ char buf[32];
+ if (num < 0x10) { /* 16 */
+ snprintf(buf, sizeof(buf), "9%02lld", 15 - (long long)num);
+ } else if (num < 0x100) { /* 256 */
+ snprintf(buf, sizeof(buf), "8%03lld", 255 - (long long)num);
+ } else if (num < 0x1000) /* 4096 */ {
+ snprintf(buf, sizeof(buf), "7%04lld", 4095 - (long long)num);
+ } else if (num < 0x10000) /* 65536 */ {
+ snprintf(buf, sizeof(buf), "6%05lld", 65535 - (long long)num);
+ } else if (num < 0x100000000) /* 4G */ {
+ snprintf(buf, sizeof(buf), "5%010lld", 0xFFFFFFFF - (long long)num);
+ } else {
+ snprintf(buf, sizeof(buf), "4%020lld", (long long)-num);
+ }
+
+ *str = buf;
+}
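+
+/*
+ * Worked examples (illustrative only):
+ *   decreasing_str(0)  -> "915"     decreasing_str(5)    -> "910"
+ *   decreasing_str(20) -> "8235"    decreasing_str(4095) -> "70000"
+ * so a larger number always yields a lexicographically smaller string.
+ */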
+
+/*
+ * we now hold two different indexes for objects. The first one holds the list of objects in the
+ * order that we want them to be listed. The second one only holds the object instances (for
+ * versioned objects), and they're not arranged in any particular order.
+ * When listing objects we'll use the first index; when doing operations on the objects themselves
+ * we'll use the second index. Note that regular objects only map to the first index anyway.
+ */
+
+static void get_list_index_key(rgw_bucket_dir_entry& entry, string *index_key)
+{
+ *index_key = entry.key.name;
+
+ string ver_str;
+ decreasing_str(entry.versioned_epoch, &ver_str);
+ string instance_delim("\0i", 2);
+ string ver_delim("\0v", 2);
+
+ index_key->append(ver_delim);
+ index_key->append(ver_str);
+ index_key->append(instance_delim);
+ index_key->append(entry.key.instance);
+}
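+
+/*
+ * Example (hypothetical values): for name="foo", instance="ab12" and
+ * versioned_epoch=5, the list index key built above is
+ *
+ *   "foo" "\0v" "910" "\0i" "ab12"
+ *
+ * i.e. the plain object name followed by the decreasing epoch string and the
+ * instance id, each introduced by a NUL-prefixed marker.
+ */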
+
+static void encode_obj_versioned_data_key(const cls_rgw_obj_key& key, string *index_key, bool append_delete_marker_suffix = false)
+{
+ *index_key = BI_PREFIX_CHAR;
+ index_key->append(bucket_index_prefixes[BI_BUCKET_OBJ_INSTANCE_INDEX]);
+ index_key->append(key.name);
+ string delim("\0i", 2);
+ index_key->append(delim);
+ index_key->append(key.instance);
+ if (append_delete_marker_suffix) {
+ string dm("\0d", 2);
+ index_key->append(dm);
+ }
+}
+
+static void encode_obj_index_key(const cls_rgw_obj_key& key, string *index_key)
+{
+ if (key.instance.empty()) {
+ *index_key = key.name;
+ } else {
+ encode_obj_versioned_data_key(key, index_key);
+ }
+}
+
+static void encode_olh_data_key(const cls_rgw_obj_key& key, string *index_key)
+{
+ *index_key = BI_PREFIX_CHAR;
+ index_key->append(bucket_index_prefixes[BI_BUCKET_OLH_DATA_INDEX]);
+ index_key->append(key.name);
+}
+
+template <class T>
+static int read_index_entry(cls_method_context_t hctx, string& name, T *entry);
+
+static int encode_list_index_key(cls_method_context_t hctx, const cls_rgw_obj_key& key, string *index_key)
+{
+ if (key.instance.empty()) {
+ *index_key = key.name;
+ return 0;
+ }
+
+ string obj_index_key;
+ cls_rgw_obj_key tmp_key(key);
+ if (tmp_key.instance == "null") {
+ tmp_key.instance.clear();
+ }
+ encode_obj_versioned_data_key(tmp_key, &obj_index_key);
+
+ rgw_bucket_dir_entry entry;
+
+ int ret = read_index_entry(hctx, obj_index_key, &entry);
+ if (ret == -ENOENT) {
+ /* couldn't find the entry, set key value after the current object */
+ char buf[2] = { 0x1, 0 };
+ string s(buf);
+ *index_key = key.name + s;
+ return 0;
+ }
+ if (ret < 0) {
+ CLS_LOG(1, "ERROR: encode_list_index_key(): cls_cxx_map_get_val returned %d\n", ret);
+ return ret;
+ }
+
+ get_list_index_key(entry, index_key);
+
+ return 0;
+}
+
+static void split_key(const string& key, list<string>& vals)
+{
+ size_t pos = 0;
+ const char *p = key.c_str();
+ while (pos < key.size()) {
+ size_t len = strlen(p);
+ vals.push_back(p);
+ pos += len + 1;
+ p += len + 1;
+ }
+}
+
+static string escape_str(const string& s)
+{
+ int len = escape_json_attr_len(s.c_str(), s.size());
+ std::string escaped(len, 0);
+ escape_json_attr(s.c_str(), s.size(), escaped.data());
+ return escaped;
+}
+
+/*
+ * list index key structure:
+ *
+ * <obj name>\0[v<ver>\0i<instance id>]
+ */
+static int decode_list_index_key(const string& index_key, cls_rgw_obj_key *key, uint64_t *ver)
+{
+ size_t len = strlen(index_key.c_str());
+
+ key->instance.clear();
+ *ver = 0;
+
+ if (len == index_key.size()) {
+ key->name = index_key;
+ return 0;
+ }
+
+ list<string> vals;
+ split_key(index_key, vals);
+
+ if (vals.empty()) {
+ CLS_LOG(0, "ERROR: %s(): bad index_key (%s): split_key() returned empty vals", __func__, escape_str(index_key).c_str());
+ return -EIO;
+ }
+
+ list<string>::iterator iter = vals.begin();
+ key->name = *iter;
+ ++iter;
+
+ if (iter == vals.end()) {
+ CLS_LOG(0, "ERROR: %s(): bad index_key (%s): no vals", __func__, escape_str(index_key).c_str());
+ return -EIO;
+ }
+
+ for (; iter != vals.end(); ++iter) {
+ string& val = *iter;
+ if (val[0] == 'i') {
+ key->instance = val.substr(1);
+ } else if (val[0] == 'v') {
+ string err;
+ const char *s = val.c_str() + 1;
+ *ver = strict_strtoll(s, 10, &err);
+ if (!err.empty()) {
+ CLS_LOG(0, "ERROR: %s(): bad index_key (%s): could not parse val (v=%s)", __func__, escape_str(index_key).c_str(), s);
+ return -EIO;
+ }
+ }
+ }
+
+ return 0;
+}
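+
+/*
+ * Example (hypothetical key, matching the layout documented above):
+ * decoding "foo\0v910\0iab12" yields key->name = "foo",
+ * key->instance = "ab12" and *ver = 910 (the encoded decreasing-epoch
+ * string parsed back as a number, not the original epoch).
+ */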
+
+static int read_bucket_header(cls_method_context_t hctx,
+ rgw_bucket_dir_header *header)
+{
+ bufferlist bl;
+ int rc = cls_cxx_map_read_header(hctx, &bl);
+ if (rc < 0)
+ return rc;
+
+ if (bl.length() == 0) {
+ *header = rgw_bucket_dir_header();
+ return 0;
+ }
+ auto iter = bl.cbegin();
+ try {
+ decode(*header, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: read_bucket_header(): failed to decode header\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int rgw_bucket_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto iter = in->cbegin();
+
+ rgw_cls_list_op op;
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bucket_list(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ rgw_cls_list_ret ret;
+ rgw_bucket_dir& new_dir = ret.dir;
+ int rc = read_bucket_header(hctx, &new_dir.header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_list(): failed to read header\n");
+ return rc;
+ }
+
+ map<string, bufferlist> keys;
+ std::map<string, bufferlist>::iterator kiter;
+ string start_key;
+ encode_list_index_key(hctx, op.start_obj, &start_key);
+ bool done = false;
+ uint32_t left_to_read = op.num_entries;
+ bool more;
+
+ do {
+ rc = get_obj_vals(hctx, start_key, op.filter_prefix, left_to_read, &keys, &more);
+ if (rc < 0)
+ return rc;
+
+ std::map<string, rgw_bucket_dir_entry>& m = new_dir.m;
+
+ done = keys.empty();
+
+ for (kiter = keys.begin(); kiter != keys.end(); ++kiter) {
+ rgw_bucket_dir_entry entry;
+
+ if (!bi_is_objs_index(kiter->first)) {
+ done = true;
+ break;
+ }
+
+ bufferlist& entrybl = kiter->second;
+ auto eiter = entrybl.cbegin();
+ try {
+ decode(entry, eiter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bucket_list(): failed to decode entry, key=%s\n", kiter->first.c_str());
+ return -EINVAL;
+ }
+
+ cls_rgw_obj_key key;
+ uint64_t ver;
+
+ start_key = kiter->first;
+ CLS_LOG(20, "start_key=%s len=%zu", start_key.c_str(), start_key.size());
+
+ int ret = decode_list_index_key(kiter->first, &key, &ver);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: failed to decode list index key (%s)\n", escape_str(kiter->first).c_str());
+ continue;
+ }
+
+ if (!entry.is_valid()) {
+ CLS_LOG(20, "entry %s[%s] is not valid\n", key.name.c_str(), key.instance.c_str());
+ continue;
+ }
+
+ // filter out noncurrent versions, delete markers, and initial marker
+ if (!op.list_versions && (!entry.is_visible() || op.start_obj.name == key.name)) {
+ CLS_LOG(20, "entry %s[%s] is not visible\n", key.name.c_str(), key.instance.c_str());
+ continue;
+ }
+ if (m.size() < op.num_entries) {
+ m[kiter->first] = entry;
+ }
+ left_to_read--;
+
+ CLS_LOG(20, "got entry %s[%s] m.size()=%d\n", key.name.c_str(), key.instance.c_str(), (int)m.size());
+ }
+ } while (left_to_read > 0 && !done);
+
+ ret.is_truncated = more && !done;
+
+ encode(ret, *out);
+ return 0;
+}
+
+static int check_index(cls_method_context_t hctx,
+ rgw_bucket_dir_header *existing_header,
+ rgw_bucket_dir_header *calc_header)
+{
+ int rc = read_bucket_header(hctx, existing_header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: check_index(): failed to read header\n");
+ return rc;
+ }
+
+ calc_header->tag_timeout = existing_header->tag_timeout;
+ calc_header->ver = existing_header->ver;
+ calc_header->syncstopped = existing_header->syncstopped;
+
+ map<string, bufferlist> keys;
+ string start_obj;
+ string filter_prefix;
+
+#define CHECK_CHUNK_SIZE 1000
+ bool done = false;
+ bool more;
+
+ do {
+ rc = get_obj_vals(hctx, start_obj, filter_prefix, CHECK_CHUNK_SIZE, &keys, &more);
+ if (rc < 0)
+ return rc;
+
+ std::map<string, bufferlist>::iterator kiter = keys.begin();
+ for (; kiter != keys.end(); ++kiter) {
+ if (!bi_is_objs_index(kiter->first)) {
+ done = true;
+ break;
+ }
+
+ rgw_bucket_dir_entry entry;
+ auto eiter = kiter->second.cbegin();
+ try {
+ decode(entry, eiter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bucket_list(): failed to decode entry, key=%s\n", kiter->first.c_str());
+ return -EIO;
+ }
+ rgw_bucket_category_stats& stats = calc_header->stats[entry.meta.category];
+ stats.num_entries++;
+ stats.total_size += entry.meta.accounted_size;
+ stats.total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size);
+ stats.actual_size += entry.meta.size;
+
+ start_obj = kiter->first;
+ }
+ } while (keys.size() == CHECK_CHUNK_SIZE && !done);
+
+ return 0;
+}
+
+int rgw_bucket_check_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ rgw_cls_check_index_ret ret;
+
+ int rc = check_index(hctx, &ret.existing_header, &ret.calculated_header);
+ if (rc < 0)
+ return rc;
+
+ encode(ret, *out);
+
+ return 0;
+}
+
+static int write_bucket_header(cls_method_context_t hctx, rgw_bucket_dir_header *header)
+{
+ header->ver++;
+
+ bufferlist header_bl;
+ encode(*header, header_bl);
+ return cls_cxx_map_write_header(hctx, &header_bl);
+}
+
+
+int rgw_bucket_rebuild_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ rgw_bucket_dir_header existing_header;
+ rgw_bucket_dir_header calc_header;
+ int rc = check_index(hctx, &existing_header, &calc_header);
+ if (rc < 0)
+ return rc;
+
+ return write_bucket_header(hctx, &calc_header);
+}
+
+int rgw_bucket_update_stats(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_bucket_update_stats_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: %s(): failed to decode request\n", __func__);
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: %s(): failed to read header\n", __func__);
+ return rc;
+ }
+
+ for (auto& s : op.stats) {
+ auto& dest = header.stats[s.first];
+ if (op.absolute) {
+ dest = s.second;
+ } else {
+ dest.total_size += s.second.total_size;
+ dest.total_size_rounded += s.second.total_size_rounded;
+ dest.num_entries += s.second.num_entries;
+ dest.actual_size += s.second.actual_size;
+ }
+ }
+
+ return write_bucket_header(hctx, &header);
+}
+
+int rgw_bucket_init_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ bufferlist header_bl;
+ int rc = cls_cxx_map_read_header(hctx, &header_bl);
+ if (rc < 0) {
+ switch (rc) {
+ case -ENODATA:
+ case -ENOENT:
+ break;
+ default:
+ return rc;
+ }
+ }
+
+ if (header_bl.length() != 0) {
+ CLS_LOG(1, "ERROR: index already initialized\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir dir;
+
+ return write_bucket_header(hctx, &dir.header);
+}
+
+int rgw_bucket_set_tag_timeout(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_tag_timeout_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bucket_set_tag_timeout(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_set_tag_timeout(): failed to read header\n");
+ return rc;
+ }
+
+ header.tag_timeout = op.tag_timeout;
+
+ return write_bucket_header(hctx, &header);
+}
+
+static int read_key_entry(cls_method_context_t hctx, cls_rgw_obj_key& key,
+ string *idx, rgw_bucket_dir_entry *entry,
+ bool special_delete_marker_name = false);
+
+int rgw_bucket_prepare_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_obj_prepare_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bucket_prepare_op(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ if (op.tag.empty()) {
+ CLS_LOG(1, "ERROR: tag is empty\n");
+ return -EINVAL;
+ }
+
+ CLS_LOG(1, "rgw_bucket_prepare_op(): request: op=%d name=%s instance=%s tag=%s\n",
+ op.op, op.key.name.c_str(), op.key.instance.c_str(), op.tag.c_str());
+
+ // get on-disk state
+ string idx;
+
+ rgw_bucket_dir_entry entry;
+ int rc = read_key_entry(hctx, op.key, &idx, &entry);
+ if (rc < 0 && rc != -ENOENT)
+ return rc;
+
+ bool noent = (rc == -ENOENT);
+
+ rc = 0;
+
+ if (noent) { // no entry, initialize fields
+ entry.key = op.key;
+ entry.ver = rgw_bucket_entry_ver();
+ entry.exists = false;
+ entry.locator = op.locator;
+ }
+
+ // fill in proper state
+ rgw_bucket_pending_info info;
+ info.timestamp = real_clock::now();
+ info.state = CLS_RGW_STATE_PENDING_MODIFY;
+ info.op = op.op;
+ entry.pending_map.insert(pair<string, rgw_bucket_pending_info>(op.tag, info));
+
+ rgw_bucket_dir_header header;
+ rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_prepare_op(): failed to read header\n");
+ return rc;
+ }
+
+ if (op.log_op && !header.syncstopped) {
+ rc = log_index_operation(hctx, op.key, op.op, op.tag, entry.meta.mtime,
+ entry.ver, info.state, header.ver, header.max_marker, op.bilog_flags, NULL, NULL, &op.zones_trace);
+ if (rc < 0)
+ return rc;
+ }
+
+ // write out new key to disk
+ bufferlist info_bl;
+ encode(entry, info_bl);
+ rc = cls_cxx_map_set_val(hctx, idx, &info_bl);
+ if (rc < 0)
+ return rc;
+
+ if (op.log_op && !header.syncstopped)
+ return write_bucket_header(hctx, &header);
+ return 0;
+}
+
+static void unaccount_entry(rgw_bucket_dir_header& header,
+ rgw_bucket_dir_entry& entry)
+{
+ rgw_bucket_category_stats& stats = header.stats[entry.meta.category];
+ stats.num_entries--;
+ stats.total_size -= entry.meta.accounted_size;
+ stats.total_size_rounded -= cls_rgw_get_rounded_size(entry.meta.accounted_size);
+ stats.actual_size -= entry.meta.size;
+}
+
+static void log_entry(const char *func, const char *str, rgw_bucket_dir_entry *entry)
+{
+ CLS_LOG(1, "%s(): %s: ver=%ld:%llu name=%s instance=%s locator=%s\n", func, str,
+ (long)entry->ver.pool, (unsigned long long)entry->ver.epoch,
+ entry->key.name.c_str(), entry->key.instance.c_str(), entry->locator.c_str());
+}
+
+static void log_entry(const char *func, const char *str, rgw_bucket_olh_entry *entry)
+{
+ CLS_LOG(1, "%s(): %s: epoch=%llu name=%s instance=%s tag=%s\n", func, str,
+ (unsigned long long)entry->epoch, entry->key.name.c_str(), entry->key.instance.c_str(),
+ entry->tag.c_str());
+}
+
+template <class T>
+static int read_omap_entry(cls_method_context_t hctx, const std::string& name,
+ T* entry)
+{
+ bufferlist current_entry;
+ int rc = cls_cxx_map_get_val(hctx, name, &current_entry);
+ if (rc < 0) {
+ return rc;
+ }
+
+ auto cur_iter = current_entry.cbegin();
+ try {
+ decode(*entry, cur_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: %s(): failed to decode entry\n", __func__);
+ return -EIO;
+ }
+ return 0;
+}
+
+template <class T>
+static int read_index_entry(cls_method_context_t hctx, string& name, T* entry)
+{
+ int ret = read_omap_entry(hctx, name, entry);
+ if (ret < 0) {
+ return ret;
+ }
+
+ log_entry(__func__, "existing entry", entry);
+ return 0;
+}
+
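+/*
+ * read a bucket index entry by object key. For a null-instance key that turns
+ * out to be a versioned placeholder (VER_MARKER), fall through to the
+ * versioned-data key (and, if requested, the special delete-marker key) and
+ * return that entry instead
+ */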
+static int read_key_entry(cls_method_context_t hctx, cls_rgw_obj_key& key,
+ string *idx, rgw_bucket_dir_entry *entry,
+ bool special_delete_marker_name)
+{
+ encode_obj_index_key(key, idx);
+ int rc = read_index_entry(hctx, *idx, entry);
+ if (rc < 0) {
+ return rc;
+ }
+
+ if (key.instance.empty() &&
+ entry->flags & RGW_BUCKET_DIRENT_FLAG_VER_MARKER) {
+ /* we only do this when key.instance is empty: in that case the delete marker has a
+ * separate entry in the index, to avoid collisions with the actual object, as it's mutable
+ */
+ if (special_delete_marker_name) {
+ encode_obj_versioned_data_key(key, idx, true);
+ rc = read_index_entry(hctx, *idx, entry);
+ if (rc == 0) {
+ return 0;
+ }
+ }
+ encode_obj_versioned_data_key(key, idx);
+ rc = read_index_entry(hctx, *idx, entry);
+ if (rc < 0) {
+ *entry = rgw_bucket_dir_entry(); /* need to reset entry because we initialized it earlier */
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
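+/*
+ * second phase of a bucket index update: drop the pending entry recorded by
+ * prepare_op, apply the ADD/DEL (or cancel) to the dir entry and the header
+ * stats, remove any objects listed in op.remove_objs, and write the updated
+ * header
+ */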
+int rgw_bucket_complete_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_obj_complete_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to decode request\n");
+ return -EINVAL;
+ }
+ CLS_LOG(1, "rgw_bucket_complete_op(): request: op=%d name=%s instance=%s ver=%lu:%llu tag=%s\n",
+ op.op, op.key.name.c_str(), op.key.instance.c_str(),
+ (unsigned long)op.ver.pool, (unsigned long long)op.ver.epoch,
+ op.tag.c_str());
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_entry entry;
+ bool ondisk = true;
+
+ string idx;
+ rc = read_key_entry(hctx, op.key, &idx, &entry);
+ if (rc == -ENOENT) {
+ entry.key = op.key;
+ entry.ver = op.ver;
+ entry.meta = op.meta;
+ entry.locator = op.locator;
+ ondisk = false;
+ } else if (rc < 0) {
+ return rc;
+ }
+
+ entry.index_ver = header.ver;
+ /* resetting entry flags, entry might have been previously a delete
+ * marker */
+ entry.flags &= RGW_BUCKET_DIRENT_FLAG_VER;
+
+ if (op.tag.size()) {
+ map<string, rgw_bucket_pending_info>::iterator pinter = entry.pending_map.find(op.tag);
+ if (pinter == entry.pending_map.end()) {
+ CLS_LOG(1, "ERROR: couldn't find tag for pending operation\n");
+ return -EINVAL;
+ }
+ entry.pending_map.erase(pinter);
+ }
+
+ bool cancel = false;
+ bufferlist update_bl;
+
+ if (op.tag.size() && op.op == CLS_RGW_OP_CANCEL) {
+ CLS_LOG(1, "rgw_bucket_complete_op(): cancel requested\n");
+ cancel = true;
+ } else if (op.ver.pool == entry.ver.pool &&
+ op.ver.epoch && op.ver.epoch <= entry.ver.epoch) {
+ CLS_LOG(1, "rgw_bucket_complete_op(): skipping request, old epoch\n");
+ cancel = true;
+ }
+
+ bufferlist op_bl;
+ if (cancel) {
+ if (op.log_op && !header.syncstopped) {
+ rc = log_index_operation(hctx, op.key, op.op, op.tag, entry.meta.mtime, entry.ver,
+ CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, op.bilog_flags, NULL, NULL, &op.zones_trace);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (op.tag.size()) {
+ bufferlist new_key_bl;
+ encode(entry, new_key_bl);
+ rc = cls_cxx_map_set_val(hctx, idx, &new_key_bl);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (op.log_op && !header.syncstopped) {
+ return write_bucket_header(hctx, &header);
+ }
+ return 0;
+ }
+
+ if (entry.exists) {
+ unaccount_entry(header, entry);
+ }
+
+ entry.ver = op.ver;
+ switch ((int)op.op) {
+ case CLS_RGW_OP_DEL:
+ entry.meta = op.meta;
+ if (ondisk) {
+ if (!entry.pending_map.size()) {
+ int ret = cls_cxx_map_remove_key(hctx, idx);
+ if (ret < 0)
+ return ret;
+ } else {
+ entry.exists = false;
+ bufferlist new_key_bl;
+ encode(entry, new_key_bl);
+ int ret = cls_cxx_map_set_val(hctx, idx, &new_key_bl);
+ if (ret < 0)
+ return ret;
+ }
+ } else {
+ return -ENOENT;
+ }
+ break;
+ case CLS_RGW_OP_ADD:
+ {
+ rgw_bucket_dir_entry_meta& meta = op.meta;
+ rgw_bucket_category_stats& stats = header.stats[meta.category];
+ entry.meta = meta;
+ entry.key = op.key;
+ entry.exists = true;
+ entry.tag = op.tag;
+ stats.num_entries++;
+ stats.total_size += meta.accounted_size;
+ stats.total_size_rounded += cls_rgw_get_rounded_size(meta.accounted_size);
+ stats.actual_size += meta.size;
+ bufferlist new_key_bl;
+ encode(entry, new_key_bl);
+ int ret = cls_cxx_map_set_val(hctx, idx, &new_key_bl);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ }
+
+ if (op.log_op && !header.syncstopped) {
+ rc = log_index_operation(hctx, op.key, op.op, op.tag, entry.meta.mtime, entry.ver,
+ CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, op.bilog_flags, NULL, NULL, &op.zones_trace);
+ if (rc < 0)
+ return rc;
+ }
+
+ list<cls_rgw_obj_key>::iterator remove_iter;
+ CLS_LOG(20, "rgw_bucket_complete_op(): remove_objs.size()=%d\n", (int)op.remove_objs.size());
+ for (remove_iter = op.remove_objs.begin(); remove_iter != op.remove_objs.end(); ++remove_iter) {
+ cls_rgw_obj_key& remove_key = *remove_iter;
+ CLS_LOG(1, "rgw_bucket_complete_op(): removing entries, read_index_entry name=%s instance=%s\n",
+ remove_key.name.c_str(), remove_key.instance.c_str());
+ rgw_bucket_dir_entry remove_entry;
+ string k;
+ int ret = read_key_entry(hctx, remove_key, &k, &remove_entry);
+ if (ret < 0) {
+ CLS_LOG(1, "rgw_bucket_complete_op(): removing entries, read_index_entry name=%s instance=%s ret=%d\n",
+ remove_key.name.c_str(), remove_key.instance.c_str(), ret);
+ continue;
+ }
+ CLS_LOG(0,
+ "rgw_bucket_complete_op(): entry.name=%s entry.instance=%s entry.meta.category=%d\n",
+ remove_entry.key.name.c_str(),
+ remove_entry.key.instance.c_str(),
+ int(remove_entry.meta.category));
+ unaccount_entry(header, remove_entry);
+
+ if (op.log_op && !header.syncstopped) {
+ ++header.ver; // increment index version, or we'll overwrite keys previously written
+ rc = log_index_operation(hctx, remove_key, CLS_RGW_OP_DEL, op.tag, remove_entry.meta.mtime,
+ remove_entry.ver, CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, op.bilog_flags, NULL, NULL, &op.zones_trace);
+ if (rc < 0)
+ continue;
+ }
+
+ ret = cls_cxx_map_remove_key(hctx, k);
+ if (ret < 0) {
+ CLS_LOG(1, "rgw_bucket_complete_op(): cls_cxx_map_remove_key, failed to remove entry, name=%s instance=%s read_index_entry ret=%d\n", remove_key.name.c_str(), remove_key.instance.c_str(), rc);
+ continue;
+ }
+ }
+
+ return write_bucket_header(hctx, &header);
+}
+
+template <class T>
+static int write_entry(cls_method_context_t hctx, T& entry, const string& key)
+{
+ bufferlist bl;
+ encode(entry, bl);
+ return cls_cxx_map_set_val(hctx, key, &bl);
+}
+
+static int read_olh(cls_method_context_t hctx, cls_rgw_obj_key& obj_key, rgw_bucket_olh_entry *olh_data_entry, string *index_key, bool *found)
+{
+ cls_rgw_obj_key olh_key;
+ olh_key.name = obj_key.name;
+
+ encode_olh_data_key(olh_key, index_key);
+ int ret = read_index_entry(hctx, *index_key, olh_data_entry);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_key.name.c_str(), ret);
+ return ret;
+ }
+ if (found) {
+ *found = (ret != -ENOENT);
+ }
+ return 0;
+}
+
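+/* append an entry to the olh's pending log, keyed by the olh's current epoch */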
+static void update_olh_log(rgw_bucket_olh_entry& olh_data_entry, OLHLogOp op, const string& op_tag,
+ cls_rgw_obj_key& key, bool delete_marker, uint64_t epoch)
+{
+ vector<rgw_bucket_olh_log_entry>& log = olh_data_entry.pending_log[olh_data_entry.epoch];
+ rgw_bucket_olh_log_entry log_entry;
+ log_entry.epoch = epoch;
+ log_entry.op = op;
+ log_entry.op_tag = op_tag;
+ log_entry.key = key;
+ log_entry.delete_marker = delete_marker;
+ log.push_back(log_entry);
+}
+
+static int write_obj_instance_entry(cls_method_context_t hctx, rgw_bucket_dir_entry& instance_entry, const string& instance_idx)
+{
+ CLS_LOG(20, "write_entry() instance=%s idx=%s flags=%d", escape_str(instance_entry.key.instance).c_str(), instance_idx.c_str(), instance_entry.flags);
+ /* write the instance entry */
+ int ret = write_entry(hctx, instance_entry, instance_idx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_entry() instance_key=%s ret=%d", escape_str(instance_idx).c_str(), ret);
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * write object instance entry, and if needed also the list entry
+ */
+static int write_obj_entries(cls_method_context_t hctx, rgw_bucket_dir_entry& instance_entry, const string& instance_idx)
+{
+ int ret = write_obj_instance_entry(hctx, instance_entry, instance_idx);
+ if (ret < 0) {
+ return ret;
+ }
+ string instance_list_idx;
+ get_list_index_key(instance_entry, &instance_list_idx);
+
+ if (instance_idx != instance_list_idx) {
+ CLS_LOG(20, "write_entry() idx=%s flags=%d", escape_str(instance_list_idx).c_str(), instance_entry.flags);
+ /* write a new list entry for the object instance */
+ ret = write_entry(hctx, instance_entry, instance_list_idx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_entry() instance=%s instance_list_idx=%s ret=%d", instance_entry.key.instance.c_str(), instance_list_idx.c_str(), ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+
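+/*
+ * helper wrapping a single versioned-object instance entry in the bucket
+ * index: reads and writes the instance key and its matching list entry, and
+ * tracks the instance flags (current, delete marker, etc.)
+ */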
+class BIVerObjEntry {
+ cls_method_context_t hctx;
+ cls_rgw_obj_key key;
+ string instance_idx;
+
+ rgw_bucket_dir_entry instance_entry;
+
+ bool initialized;
+
+public:
+ BIVerObjEntry(cls_method_context_t& _hctx, const cls_rgw_obj_key& _key) : hctx(_hctx), key(_key), initialized(false) {
+ }
+
+ int init(bool check_delete_marker = true) {
+ int ret = read_key_entry(hctx, key, &instance_idx, &instance_entry,
+ check_delete_marker && key.instance.empty()); /* this is potentially a delete marker, for null objects we
+ keep separate instance entry for the delete markers */
+
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: read_key_entry() idx=%s ret=%d", instance_idx.c_str(), ret);
+ return ret;
+ }
+ initialized = true;
+ CLS_LOG(20, "read instance_entry key.name=%s key.instance=%s flags=%d", instance_entry.key.name.c_str(), instance_entry.key.instance.c_str(), instance_entry.flags);
+ return 0;
+ }
+
+ rgw_bucket_dir_entry& get_dir_entry() {
+ return instance_entry;
+ }
+
+ void init_as_delete_marker(rgw_bucket_dir_entry_meta& meta) {
+ /* a deletion marker, need to initialize it, there's no instance entry for it yet */
+ instance_entry.key = key;
+ instance_entry.flags = RGW_BUCKET_DIRENT_FLAG_DELETE_MARKER;
+ instance_entry.meta = meta;
+ instance_entry.tag = "delete-marker";
+
+ initialized = true;
+ }
+
+ void set_epoch(uint64_t epoch) {
+ instance_entry.versioned_epoch = epoch;
+ }
+
+ int unlink_list_entry() {
+ string list_idx;
+ /* this instance has a previous list entry, remove that entry */
+ get_list_index_key(instance_entry, &list_idx);
+ CLS_LOG(20, "unlink_list_entry() list_idx=%s", escape_str(list_idx).c_str());
+ int ret = cls_cxx_map_remove_key(hctx, list_idx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: cls_cxx_map_remove_key() list_idx=%s ret=%d", list_idx.c_str(), ret);
+ return ret;
+ }
+ return 0;
+ }
+
+ int unlink() {
+ /* remove the instance entry */
+ CLS_LOG(20, "unlink() idx=%s", escape_str(instance_idx).c_str());
+ int ret = cls_cxx_map_remove_key(hctx, instance_idx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: cls_cxx_map_remove_key() instance_idx=%s ret=%d", instance_idx.c_str(), ret);
+ return ret;
+ }
+ return 0;
+ }
+
+ int write_entries(uint64_t flags_set, uint64_t flags_reset) {
+ if (!initialized) {
+ int ret = init();
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ instance_entry.flags &= ~flags_reset;
+ instance_entry.flags |= flags_set;
+
+ /* write the instance and list entries */
+ bool special_delete_marker_key = (instance_entry.is_delete_marker() && instance_entry.key.instance.empty());
+ encode_obj_versioned_data_key(key, &instance_idx, special_delete_marker_key);
+ int ret = write_obj_entries(hctx, instance_entry, instance_idx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_obj_entries() instance_idx=%s ret=%d", instance_idx.c_str(), ret);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ int write(uint64_t epoch, bool current) {
+ if (instance_entry.versioned_epoch > 0) {
+ CLS_LOG(20, "%s(): instance_entry.versioned_epoch=%d epoch=%d", __func__, (int)instance_entry.versioned_epoch, (int)epoch);
+ /* this instance has a previous list entry, remove that entry */
+ int ret = unlink_list_entry();
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ uint64_t flags = RGW_BUCKET_DIRENT_FLAG_VER;
+ if (current) {
+ flags |= RGW_BUCKET_DIRENT_FLAG_CURRENT;
+ }
+
+ instance_entry.versioned_epoch = epoch;
+ return write_entries(flags, 0);
+ }
+
+ int demote_current() {
+ return write_entries(0, RGW_BUCKET_DIRENT_FLAG_CURRENT);
+ }
+
+ bool is_delete_marker() {
+ return instance_entry.is_delete_marker();
+ }
+
+ int find_next_key(cls_rgw_obj_key *next_key, bool *found) {
+ string list_idx;
+ /* compute this instance's list key so we can look up the entry that follows it */
+ get_list_index_key(instance_entry, &list_idx);
+ /* list the first key that sorts after this instance's list entry */
+ map<string, bufferlist> keys;
+ bool more;
+ string filter = key.name; /* list key starts with key name, filter it to avoid a case where we cross to
+ different namespace */
+ int ret = cls_cxx_map_get_vals(hctx, list_idx, filter, 1, &keys, &more);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (keys.size() < 1) {
+ *found = false;
+ return 0;
+ }
+
+ rgw_bucket_dir_entry next_entry;
+
+ map<string, bufferlist>::reverse_iterator last = keys.rbegin();
+ try {
+ auto iter = last->second.cbegin();
+ decode(next_entry, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR; failed to decode entry: %s", last->first.c_str());
+ return -EIO;
+ }
+
+ *found = (key.name == next_entry.key.name);
+ if (*found) {
+ *next_key = next_entry.key;
+ }
+
+ return 0;
+ }
+
+ real_time mtime() {
+ return instance_entry.meta.mtime;
+ }
+};
+
+
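+/*
+ * helper wrapping the olh (object logical head) data entry of an object:
+ * reads and writes the olh key and manages its epoch, pending log and
+ * pending-removal state
+ */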
+class BIOLHEntry {
+ cls_method_context_t hctx;
+ cls_rgw_obj_key key;
+
+ string olh_data_idx;
+ rgw_bucket_olh_entry olh_data_entry;
+
+ bool initialized;
+public:
+ BIOLHEntry(cls_method_context_t& _hctx, const cls_rgw_obj_key& _key) : hctx(_hctx), key(_key), initialized(false) { }
+
+ int init(bool *exists) {
+ /* read olh */
+ int ret = read_olh(hctx, key, &olh_data_entry, &olh_data_idx, exists);
+ if (ret < 0) {
+ return ret;
+ }
+
+ initialized = true;
+ return 0;
+ }
+
+ bool start_modify(uint64_t candidate_epoch) {
+ if (candidate_epoch) {
+ if (candidate_epoch < olh_data_entry.epoch) {
+ return false; /* olh cannot be modified, old epoch */
+ }
+ olh_data_entry.epoch = candidate_epoch;
+ } else {
+ if (olh_data_entry.epoch == 0) {
+ olh_data_entry.epoch = 2; /* versioned epoch should start with 2, 1 is reserved to converted plain entries */
+ } else {
+ olh_data_entry.epoch++;
+ }
+ }
+ return true;
+ }
+
+ uint64_t get_epoch() {
+ return olh_data_entry.epoch;
+ }
+
+ rgw_bucket_olh_entry& get_entry() {
+ return olh_data_entry;
+ }
+
+ void update(cls_rgw_obj_key& key, bool delete_marker) {
+ olh_data_entry.delete_marker = delete_marker;
+ olh_data_entry.key = key;
+ }
+
+ int write() {
+ /* write the olh data entry */
+ int ret = write_entry(hctx, olh_data_entry, olh_data_idx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_entry() olh_key=%s ret=%d", olh_data_idx.c_str(), ret);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ void update_log(OLHLogOp op, const string& op_tag, cls_rgw_obj_key& key, bool delete_marker, uint64_t epoch = 0) {
+ if (epoch == 0) {
+ epoch = olh_data_entry.epoch;
+ }
+ update_olh_log(olh_data_entry, op, op_tag, key, delete_marker, epoch);
+ }
+
+ bool exists() { return olh_data_entry.exists; }
+
+ void set_exists(bool exists) {
+ olh_data_entry.exists = exists;
+ }
+
+ bool pending_removal() { return olh_data_entry.pending_removal; }
+
+ void set_pending_removal(bool pending_removal) {
+ olh_data_entry.pending_removal = pending_removal;
+ }
+
+ const string& get_tag() { return olh_data_entry.tag; }
+ void set_tag(const string& tag) {
+ olh_data_entry.tag = tag;
+ }
+};
+
+static int write_version_marker(cls_method_context_t hctx, cls_rgw_obj_key& key)
+{
+ rgw_bucket_dir_entry entry;
+ entry.key = key;
+ entry.flags = RGW_BUCKET_DIRENT_FLAG_VER_MARKER;
+ int ret = write_entry(hctx, entry, key.name);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_entry returned ret=%d", ret);
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * plain entries are the ones that were created while the bucket was not versioned.
+ * If we overwrite such an object, we need to convert it to a versioned entry -- one that has
+ * both a data entry and a listing key. Its version, however, remains empty.
+ */
+static int convert_plain_entry_to_versioned(cls_method_context_t hctx, cls_rgw_obj_key& key, bool demote_current, bool instance_only)
+{
+ if (!key.instance.empty()) {
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_entry entry;
+
+ string orig_idx;
+ int ret = read_key_entry(hctx, key, &orig_idx, &entry);
+ if (ret != -ENOENT) {
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: read_key_entry() returned ret=%d", ret);
+ return ret;
+ }
+
+ entry.versioned_epoch = 1; /* converted entries are always 1 */
+ entry.flags |= RGW_BUCKET_DIRENT_FLAG_VER;
+
+ if (demote_current) {
+ entry.flags &= ~RGW_BUCKET_DIRENT_FLAG_CURRENT;
+ }
+
+ string new_idx;
+ encode_obj_versioned_data_key(key, &new_idx);
+
+ if (instance_only) {
+ ret = write_obj_instance_entry(hctx, entry, new_idx);
+ } else {
+ ret = write_obj_entries(hctx, entry, new_idx);
+ }
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_obj_entries new_idx=%s returned %d", new_idx.c_str(), ret);
+ return ret;
+ }
+ }
+
+ ret = write_version_marker(hctx, key);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * link an object version to an olh, update the relevant index entries. It will also handle the
+ * deletion marker case. We have a few entries that we need to take care of. For object 'foo',
+ * instance BAR, we'd update the following (not actual encoding):
+ * - olh data: [BI_BUCKET_OLH_DATA_INDEX]foo
+ * - object instance data: [BI_BUCKET_OBJ_INSTANCE_INDEX]foo,BAR
+ * - object instance list entry: foo,123,BAR
+ *
+ * The instance list entries need to be ordered from newer to older, so we generate an appropriate
+ * number string that follows the name.
+ * The top instance of each object is marked appropriately.
+ * We generate an instance entry for delete markers here, as one is not created beforehand.
+ */
+static int rgw_bucket_link_olh(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string olh_data_idx;
+ string instance_idx;
+
+ // decode request
+ rgw_cls_link_olh_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: rgw_bucket_link_olh_op(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ BIVerObjEntry obj(hctx, op.key);
+ BIOLHEntry olh(hctx, op.key);
+
+ /* read instance entry */
+ int ret = obj.init(op.delete_marker);
+ bool existed = (ret == 0);
+ if (ret == -ENOENT && op.delete_marker) {
+ ret = 0;
+ }
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (existed && !real_clock::is_zero(op.unmod_since)) {
+ timespec mtime = ceph::real_clock::to_timespec(obj.mtime());
+ timespec unmod = ceph::real_clock::to_timespec(op.unmod_since);
+ if (!op.high_precision_time) {
+ mtime.tv_nsec = 0;
+ unmod.tv_nsec = 0;
+ }
+ if (mtime >= unmod) {
+ return 0; /* no need to set error, we just return 0 and avoid writing to the bi log */
+ }
+ }
+
+ bool removing;
+
+ /*
+ * Special handling for null-instance objects / delete markers. For these objects we keep
+ * separate instance entries for the data object and the delete marker, to avoid collisions.
+ * Here we check whether we are overwriting a previous entry, and if so remove its list entry.
+ */
+ if (op.key.instance.empty()) {
+ BIVerObjEntry other_obj(hctx, op.key);
+ ret = other_obj.init(!op.delete_marker); /* try reading the other null versioned entry */
+ existed = (ret >= 0 && !other_obj.is_delete_marker());
+ if (ret >= 0 && other_obj.is_delete_marker() != op.delete_marker) {
+ ret = other_obj.unlink_list_entry();
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ removing = existed && op.delete_marker;
+ if (!removing) {
+ ret = other_obj.unlink();
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ } else {
+ removing = (existed && !obj.is_delete_marker() && op.delete_marker);
+ }
+
+ if (op.delete_marker) {
+ /* a deletion marker, need to initialize entry as such */
+ obj.init_as_delete_marker(op.meta);
+ }
+
+ /* read olh */
+ bool olh_found;
+ ret = olh.init(&olh_found);
+ if (ret < 0) {
+ return ret;
+ }
+ const uint64_t prev_epoch = olh.get_epoch();
+
+ if (!olh.start_modify(op.olh_epoch)) {
+ ret = obj.write(op.olh_epoch, false);
+ if (ret < 0) {
+ return ret;
+ }
+ if (removing) {
+ olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false, op.olh_epoch);
+ }
+ return 0;
+ }
+
+ // promote this version to current if it's a newer epoch, or if it matches the
+ // current epoch and sorts after the current instance
+ const bool promote = (olh.get_epoch() > prev_epoch) ||
+ (olh.get_epoch() == prev_epoch &&
+ olh.get_entry().key.instance >= op.key.instance);
+
+ if (olh_found) {
+ const string& olh_tag = olh.get_tag();
+ if (op.olh_tag != olh_tag) {
+ if (!olh.pending_removal()) {
+ CLS_LOG(5, "NOTICE: op.olh_tag (%s) != olh.tag (%s)", op.olh_tag.c_str(), olh_tag.c_str());
+ return -ECANCELED;
+ }
+ /* if pending removal, this is a new olh instance */
+ olh.set_tag(op.olh_tag);
+ }
+ if (promote && olh.exists()) {
+ rgw_bucket_olh_entry& olh_entry = olh.get_entry();
+ /* found olh, previous instance is no longer the latest, need to update */
+ if (!(olh_entry.key == op.key)) {
+ BIVerObjEntry old_obj(hctx, olh_entry.key);
+
+ ret = old_obj.demote_current();
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: could not demote current on previous key ret=%d", ret);
+ return ret;
+ }
+ }
+ }
+ olh.set_pending_removal(false);
+ } else {
+ bool instance_only = (op.key.instance.empty() && op.delete_marker);
+ cls_rgw_obj_key key(op.key.name);
+ ret = convert_plain_entry_to_versioned(hctx, key, promote, instance_only);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: convert_plain_entry_to_versioned ret=%d", ret);
+ return ret;
+ }
+ olh.set_tag(op.olh_tag);
+ }
+
+ /* update the olh log */
+ olh.update_log(CLS_RGW_OLH_OP_LINK_OLH, op.op_tag, op.key, op.delete_marker);
+ if (removing) {
+ olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false);
+ }
+
+ if (promote) {
+ olh.update(op.key, op.delete_marker);
+ }
+ olh.set_exists(true);
+
+ ret = olh.write();
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: failed to update olh ret=%d", ret);
+ return ret;
+ }
+
+ /* write the instance and list entries */
+ ret = obj.write(olh.get_epoch(), promote);
+ if (ret < 0) {
+ return ret;
+ }
+
+ rgw_bucket_dir_header header;
+ ret = read_bucket_header(hctx, &header);
+ if (ret < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_link_olh(): failed to read header\n");
+ return ret;
+ }
+
+ if (op.log_op && !header.syncstopped) {
+ rgw_bucket_dir_entry& entry = obj.get_dir_entry();
+
+ rgw_bucket_entry_ver ver;
+ ver.epoch = (op.olh_epoch ? op.olh_epoch : olh.get_epoch());
+
+ string *powner = NULL;
+ string *powner_display_name = NULL;
+
+ if (op.delete_marker) {
+ powner = &entry.meta.owner;
+ powner_display_name = &entry.meta.owner_display_name;
+ }
+
+ RGWModifyOp operation = (op.delete_marker ? CLS_RGW_OP_LINK_OLH_DM : CLS_RGW_OP_LINK_OLH);
+ ret = log_index_operation(hctx, op.key, operation, op.op_tag,
+ entry.meta.mtime, ver,
+ CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, op.bilog_flags | RGW_BILOG_FLAG_VERSIONED_OP,
+ powner, powner_display_name, &op.zones_trace);
+ if (ret < 0)
+ return ret;
+
+ return write_bucket_header(hctx, &header); /* updates header version */
+ }
+
+ return 0;
+}
+
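+/*
+ * unlink an object instance from its olh: remove the instance's list entry
+ * (and, for a delete marker, its instance entry), promote the next newest
+ * instance to current if the unlinked one was the head (or mark the olh for
+ * removal when nothing is left), and record the changes in the olh pending log
+ */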
+static int rgw_bucket_unlink_instance(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string olh_data_idx;
+ string instance_idx;
+
+ // decode request
+ rgw_cls_unlink_instance_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: rgw_bucket_rm_obj_instance_op(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ cls_rgw_obj_key dest_key = op.key;
+ if (dest_key.instance == "null") {
+ dest_key.instance.clear();
+ }
+
+ BIVerObjEntry obj(hctx, dest_key);
+ BIOLHEntry olh(hctx, dest_key);
+
+ int ret = obj.init();
+ if (ret == -ENOENT) {
+ return 0; /* already removed */
+ }
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: obj.init() returned ret=%d", ret);
+ return ret;
+ }
+
+ bool olh_found;
+ ret = olh.init(&olh_found);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: olh.init() returned ret=%d", ret);
+ return ret;
+ }
+
+ if (!olh_found) {
+ bool instance_only = false;
+ cls_rgw_obj_key key(dest_key.name);
+ ret = convert_plain_entry_to_versioned(hctx, key, true, instance_only);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: convert_plain_entry_to_versioned ret=%d", ret);
+ return ret;
+ }
+ olh.update(dest_key, false);
+ olh.set_tag(op.olh_tag);
+
+ obj.set_epoch(1);
+ }
+
+ if (!olh.start_modify(op.olh_epoch)) {
+ ret = obj.unlink_list_entry();
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (!obj.is_delete_marker()) {
+ olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false, op.olh_epoch);
+ }
+
+ return 0;
+ }
+
+ rgw_bucket_olh_entry& olh_entry = olh.get_entry();
+ cls_rgw_obj_key& olh_key = olh_entry.key;
+ CLS_LOG(20, "%s(): updating olh log: existing olh entry: %s[%s] (delete_marker=%d)", __func__,
+ olh_key.name.c_str(), olh_key.instance.c_str(), olh_entry.delete_marker);
+
+ if (olh_key == dest_key) {
+ /* this is the current head, need to update! */
+ cls_rgw_obj_key next_key;
+ bool found = false;
+ ret = obj.find_next_key(&next_key, &found);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: obj.find_next_key() returned ret=%d", ret);
+ return ret;
+ }
+
+ if (found) {
+ BIVerObjEntry next(hctx, next_key);
+ ret = next.write(olh.get_epoch(), true);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: next.write() returned ret=%d", ret);
+ return ret;
+ }
+
+ CLS_LOG(20, "%s(): updating olh log: link olh -> %s[%s] (is_delete=%d)", __func__,
+ next_key.name.c_str(), next_key.instance.c_str(), (int)next.is_delete_marker());
+
+ olh.update(next_key, next.is_delete_marker());
+ olh.update_log(CLS_RGW_OLH_OP_LINK_OLH, op.op_tag, next_key, next.is_delete_marker());
+ } else {
+ // next_key is empty, but we need to preserve its name in case this entry
+ // gets resharded, because this key is used for hash placement
+ next_key.name = dest_key.name;
+ olh.update(next_key, false);
+ olh.update_log(CLS_RGW_OLH_OP_UNLINK_OLH, op.op_tag, next_key, false);
+ olh.set_exists(false);
+ olh.set_pending_removal(true);
+ }
+ }
+
+ if (!obj.is_delete_marker()) {
+ olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false);
+ } else {
+ /* this is a delete marker, it's our responsibility to remove its instance entry */
+ ret = obj.unlink();
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ ret = obj.unlink_list_entry();
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = olh.write();
+ if (ret < 0) {
+ return ret;
+ }
+
+ rgw_bucket_dir_header header;
+ ret = read_bucket_header(hctx, &header);
+ if (ret < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_unlink_instance(): failed to read header\n");
+ return ret;
+ }
+
+ if (op.log_op && !header.syncstopped) {
+ rgw_bucket_entry_ver ver;
+ ver.epoch = (op.olh_epoch ? op.olh_epoch : olh.get_epoch());
+
+ real_time mtime = obj.mtime(); /* mtime has no real meaning in instance removal context */
+ ret = log_index_operation(hctx, op.key, CLS_RGW_OP_UNLINK_INSTANCE, op.op_tag,
+ mtime, ver,
+ CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker,
+ op.bilog_flags | RGW_BILOG_FLAG_VERSIONED_OP, NULL, NULL, &op.zones_trace);
+ if (ret < 0)
+ return ret;
+
+ return write_bucket_header(hctx, &header); /* updates header version */
+ }
+
+ return 0;
+}
+
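+/*
+ * return up to MAX_OLH_LOG_ENTRIES entries of an olh's pending log past
+ * op.ver_marker, after verifying that the caller holds the current olh tag
+ */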
+static int rgw_bucket_read_olh_log(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_read_olh_log_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: rgw_bucket_read_olh_log(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ if (!op.olh.instance.empty()) {
+ CLS_LOG(1, "bad key passed in (non empty instance)");
+ return -EINVAL;
+ }
+
+ rgw_bucket_olh_entry olh_data_entry;
+ string olh_data_key;
+ encode_olh_data_key(op.olh, &olh_data_key);
+ int ret = read_index_entry(hctx, olh_data_key, &olh_data_entry);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret);
+ return ret;
+ }
+
+ if (olh_data_entry.tag != op.olh_tag) {
+ CLS_LOG(1, "NOTICE: %s(): olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__, olh_data_entry.tag.c_str(), op.olh_tag.c_str());
+ return -ECANCELED;
+ }
+
+ rgw_cls_read_olh_log_ret op_ret;
+
+#define MAX_OLH_LOG_ENTRIES 1000
+ map<uint64_t, vector<rgw_bucket_olh_log_entry> >& log = olh_data_entry.pending_log;
+
+ /* guard against an empty log before dereferencing begin() */
+ if (!log.empty() && log.begin()->first > op.ver_marker && log.size() <= MAX_OLH_LOG_ENTRIES) {
+ op_ret.log = log;
+ op_ret.is_truncated = false;
+ } else {
+ map<uint64_t, vector<rgw_bucket_olh_log_entry> >::iterator iter = log.upper_bound(op.ver_marker);
+
+ for (int i = 0; i < MAX_OLH_LOG_ENTRIES && iter != log.end(); ++i, ++iter) {
+ op_ret.log[iter->first] = iter->second;
+ }
+ op_ret.is_truncated = (iter != log.end());
+ }
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
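+/*
+ * trim an olh's pending log, dropping all entries up to and including op.ver,
+ * after verifying the olh tag
+ */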
+static int rgw_bucket_trim_olh_log(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_trim_olh_log_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: rgw_bucket_trim_olh_log(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ if (!op.olh.instance.empty()) {
+ CLS_LOG(1, "bad key passed in (non empty instance)");
+ return -EINVAL;
+ }
+
+ /* read olh entry */
+ rgw_bucket_olh_entry olh_data_entry;
+ string olh_data_key;
+ encode_olh_data_key(op.olh, &olh_data_key);
+ int ret = read_index_entry(hctx, olh_data_key, &olh_data_entry);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret);
+ return ret;
+ }
+
+ if (olh_data_entry.tag != op.olh_tag) {
+ CLS_LOG(1, "NOTICE: %s(): olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__, olh_data_entry.tag.c_str(), op.olh_tag.c_str());
+ return -ECANCELED;
+ }
+
+ /* remove all versions up to and including ver from the pending map */
+ map<uint64_t, vector<rgw_bucket_olh_log_entry> >& log = olh_data_entry.pending_log;
+ map<uint64_t, vector<rgw_bucket_olh_log_entry> >::iterator liter = log.begin();
+ while (liter != log.end() && liter->first <= op.ver) {
+ map<uint64_t, vector<rgw_bucket_olh_log_entry> >::iterator rm_iter = liter;
+ ++liter;
+ log.erase(rm_iter);
+ }
+
+ /* write the olh data entry */
+ ret = write_entry(hctx, olh_data_entry, olh_data_key);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
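+/*
+ * remove an olh data entry (after verifying the olh tag) and, if the plain
+ * entry under the object name is merely a versioned placeholder, remove that
+ * as well
+ */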
+static int rgw_bucket_clear_olh(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_bucket_clear_olh_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: rgw_bucket_clear_olh(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ if (!op.key.instance.empty()) {
+ CLS_LOG(1, "bad key passed in (non empty instance)");
+ return -EINVAL;
+ }
+
+ /* read olh entry */
+ rgw_bucket_olh_entry olh_data_entry;
+ string olh_data_key;
+ encode_olh_data_key(op.key, &olh_data_key);
+ int ret = read_index_entry(hctx, olh_data_key, &olh_data_entry);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret);
+ return ret;
+ }
+
+ if (olh_data_entry.tag != op.olh_tag) {
+ CLS_LOG(1, "NOTICE: %s(): olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__, olh_data_entry.tag.c_str(), op.olh_tag.c_str());
+ return -ECANCELED;
+ }
+
+ ret = cls_cxx_map_remove_key(hctx, olh_data_key);
+ if (ret < 0) {
+ CLS_LOG(1, "NOTICE: %s(): can't remove key %s ret=%d", __func__, olh_data_key.c_str(), ret);
+ return ret;
+ }
+
+ rgw_bucket_dir_entry plain_entry;
+
+ /* read plain entry, make sure it's a versioned place holder */
+ ret = read_index_entry(hctx, op.key.name, &plain_entry);
+ if (ret == -ENOENT) {
+ /* we're done, no entry existing */
+ return 0;
+ }
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: read_index_entry key=%s ret=%d", op.key.name.c_str(), ret);
+ return ret;
+ }
+
+ if ((plain_entry.flags & RGW_BUCKET_DIRENT_FLAG_VER_MARKER) == 0) {
+ /* it's not a version marker, don't remove it */
+ return 0;
+ }
+
+ ret = cls_cxx_map_remove_key(hctx, op.key.name);
+ if (ret < 0) {
+ CLS_LOG(1, "NOTICE: %s(): can't remove key %s ret=%d", __func__, op.key.name.c_str(), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
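+/*
+ * apply a stream of suggested index changes (remove/update pairs): expire
+ * stale pending entries on the on-disk record and, for entries with no
+ * pending operations left, remove or rewrite them and adjust the header
+ * stats accordingly
+ */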
+int rgw_dir_suggest_changes(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(1, "rgw_dir_suggest_changes()");
+
+ bufferlist header_bl;
+ rgw_bucket_dir_header header;
+ bool header_changed = false;
+
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to read header\n");
+ return rc;
+ }
+
+ timespan tag_timeout(
+ std::chrono::seconds(
+ header.tag_timeout ? header.tag_timeout : CEPH_RGW_TAG_TIMEOUT));
+
+ auto in_iter = in->cbegin();
+
+ while (!in_iter.end()) {
+ __u8 op;
+ rgw_bucket_dir_entry cur_change;
+ rgw_bucket_dir_entry cur_disk;
+ try {
+ decode(op, in_iter);
+ decode(cur_change, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ bufferlist cur_disk_bl;
+ string cur_change_key;
+ encode_obj_index_key(cur_change.key, &cur_change_key);
+ int ret = cls_cxx_map_get_val(hctx, cur_change_key, &cur_disk_bl);
+ if (ret < 0 && ret != -ENOENT)
+ return -EINVAL;
+
+ if (ret == -ENOENT) {
+ continue;
+ }
+
+ if (cur_disk_bl.length()) {
+ auto cur_disk_iter = cur_disk_bl.cbegin();
+ try {
+ decode(cur_disk, cur_disk_iter);
+ } catch (buffer::error& error) {
+ CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to decode cur_disk\n");
+ return -EINVAL;
+ }
+
+ real_time cur_time = real_clock::now();
+ map<string, rgw_bucket_pending_info>::iterator iter =
+ cur_disk.pending_map.begin();
+ while (iter != cur_disk.pending_map.end()) {
+ map<string, rgw_bucket_pending_info>::iterator cur_iter = iter++;
+ if (cur_time > (cur_iter->second.timestamp + timespan(tag_timeout))) {
+ cur_disk.pending_map.erase(cur_iter);
+ }
+ }
+ }
+
+ CLS_LOG(20, "cur_disk.pending_map.empty()=%d op=%d cur_disk.exists=%d cur_change.pending_map.size()=%d cur_change.exists=%d\n",
+ cur_disk.pending_map.empty(), (int)op, cur_disk.exists,
+ (int)cur_change.pending_map.size(), cur_change.exists);
+
+ if (cur_disk.pending_map.empty()) {
+ if (cur_disk.exists) {
+ rgw_bucket_category_stats& old_stats = header.stats[cur_disk.meta.category];
+ CLS_LOG(10, "total_entries: %" PRId64 " -> %" PRId64 "\n", old_stats.num_entries, old_stats.num_entries - 1);
+ old_stats.num_entries--;
+ old_stats.total_size -= cur_disk.meta.accounted_size;
+ old_stats.total_size_rounded -= cls_rgw_get_rounded_size(cur_disk.meta.accounted_size);
+ old_stats.actual_size -= cur_disk.meta.size;
+ header_changed = true;
+ }
+ rgw_bucket_category_stats& stats = header.stats[cur_change.meta.category];
+ bool log_op = (op & CEPH_RGW_DIR_SUGGEST_LOG_OP) != 0;
+ op &= CEPH_RGW_DIR_SUGGEST_OP_MASK;
+ switch(op) {
+ case CEPH_RGW_REMOVE:
+ CLS_LOG(10, "CEPH_RGW_REMOVE name=%s instance=%s\n", cur_change.key.name.c_str(), cur_change.key.instance.c_str());
+ ret = cls_cxx_map_remove_key(hctx, cur_change_key);
+ if (ret < 0)
+ return ret;
+ if (log_op && cur_disk.exists && !header.syncstopped) {
+ ret = log_index_operation(hctx, cur_disk.key, CLS_RGW_OP_DEL, cur_disk.tag, cur_disk.meta.mtime,
+ cur_disk.ver, CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, 0, NULL, NULL, NULL);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s(): failed to log operation ret=%d", __func__, ret);
+ return ret;
+ }
+ }
+ break;
+ case CEPH_RGW_UPDATE:
+ CLS_LOG(10, "CEPH_RGW_UPDATE name=%s instance=%s total_entries: %" PRId64 " -> %" PRId64 "\n",
+ cur_change.key.name.c_str(), cur_change.key.instance.c_str(), stats.num_entries, stats.num_entries + 1);
+
+ stats.num_entries++;
+ stats.total_size += cur_change.meta.accounted_size;
+ stats.total_size_rounded += cls_rgw_get_rounded_size(cur_change.meta.accounted_size);
+ stats.actual_size += cur_change.meta.size;
+ header_changed = true;
+ cur_change.index_ver = header.ver;
+ bufferlist cur_state_bl;
+ encode(cur_change, cur_state_bl);
+ ret = cls_cxx_map_set_val(hctx, cur_change_key, &cur_state_bl);
+ if (ret < 0)
+ return ret;
+ if (log_op && !header.syncstopped) {
+ ret = log_index_operation(hctx, cur_change.key, CLS_RGW_OP_ADD, cur_change.tag, cur_change.meta.mtime,
+ cur_change.ver, CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, 0, NULL, NULL, NULL);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s(): failed to log operation ret=%d", __func__, ret);
+ return ret;
+ }
+ }
+ break;
+ } // switch(op)
+ } // if (cur_disk.pending_map.empty())
+ } // while (!in_iter.end())
+
+ if (header_changed) {
+ return write_bucket_header(hctx, &header);
+ }
+ return 0;
+}
+
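+/*
+ * remove an object, optionally preserving xattrs whose names match any of
+ * the requested prefixes by recreating an empty object that carries only
+ * the preserved attributes
+ */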
+static int rgw_obj_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_obj_remove_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ if (op.keep_attr_prefixes.empty()) {
+ return cls_cxx_remove(hctx);
+ }
+
+ map<string, bufferlist> attrset;
+ int ret = cls_cxx_getxattrs(hctx, &attrset);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: %s(): cls_cxx_getxattrs() returned %d", __func__, ret);
+ return ret;
+ }
+
+ map<string, bufferlist> new_attrs;
+ for (list<string>::iterator iter = op.keep_attr_prefixes.begin();
+ iter != op.keep_attr_prefixes.end(); ++iter) {
+ string& check_prefix = *iter;
+
+ for (map<string, bufferlist>::iterator aiter = attrset.lower_bound(check_prefix);
+ aiter != attrset.end(); ++aiter) {
+ const string& attr = aiter->first;
+
+ if (attr.substr(0, check_prefix.size()) > check_prefix) {
+ break;
+ }
+
+ new_attrs[attr] = aiter->second;
+ }
+ }
+
+ CLS_LOG(20, "%s(): removing object", __func__);
+ ret = cls_cxx_remove(hctx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s(): cls_cxx_remove returned %d", __func__, ret);
+ return ret;
+ }
+
+ if (new_attrs.empty()) {
+ /* no data to keep */
+ return 0;
+ }
+
+ ret = cls_cxx_create(hctx, false);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s(): cls_cxx_create returned %d", __func__, ret);
+ return ret;
+ }
+
+ for (map<string, bufferlist>::iterator aiter = new_attrs.begin();
+ aiter != new_attrs.end(); ++aiter) {
+ const string& attr = aiter->first;
+
+ ret = cls_cxx_setxattr(hctx, attr.c_str(), &aiter->second);
+ CLS_LOG(20, "%s(): setting attr: %s", __func__, attr.c_str());
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s(): cls_cxx_setxattr (attr=%s) returned %d", __func__, attr.c_str(), ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rgw_obj_store_pg_ver(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_obj_store_pg_ver_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ bufferlist bl;
+ uint64_t ver = cls_current_version(hctx);
+ encode(ver, bl);
+ int ret = cls_cxx_setxattr(hctx, op.attr.c_str(), &bl);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s(): cls_cxx_setxattr (attr=%s) returned %d", __func__, op.attr.c_str(), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rgw_obj_check_attrs_prefix(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_obj_check_attrs_prefix op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ if (op.check_prefix.empty()) {
+ return -EINVAL;
+ }
+
+ map<string, bufferlist> attrset;
+ int ret = cls_cxx_getxattrs(hctx, &attrset);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: %s(): cls_cxx_getxattrs() returned %d", __func__, ret);
+ return ret;
+ }
+
+ bool exist = false;
+
+ for (map<string, bufferlist>::iterator aiter = attrset.lower_bound(op.check_prefix);
+ aiter != attrset.end(); ++aiter) {
+ const string& attr = aiter->first;
+
+ if (attr.substr(0, op.check_prefix.size()) > op.check_prefix) {
+ break;
+ }
+
+ exist = true;
+ }
+
+ if (exist == op.fail_if_exist) {
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static int rgw_obj_check_mtime(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_obj_check_mtime op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ real_time obj_ut;
+ int ret = cls_cxx_stat2(hctx, NULL, &obj_ut);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: %s(): cls_cxx_stat() returned %d", __func__, ret);
+ return ret;
+ }
+ if (ret == -ENOENT) {
+ CLS_LOG(10, "object does not exist, skipping check");
+ }
+
+ ceph_timespec obj_ts = ceph::real_clock::to_ceph_timespec(obj_ut);
+ ceph_timespec op_ts = ceph::real_clock::to_ceph_timespec(op.mtime);
+
+ if (!op.high_precision_time) {
+ obj_ts.tv_nsec = 0;
+ op_ts.tv_nsec = 0;
+ }
+
+ CLS_LOG(10, "%s: obj_ut=%lld.%06lld op.mtime=%lld.%06lld", __func__,
+ (long long)obj_ts.tv_sec, (long long)obj_ts.tv_nsec,
+ (long long)op_ts.tv_sec, (long long)op_ts.tv_nsec);
+
+ bool check;
+
+ switch (op.type) {
+ case CLS_RGW_CHECK_TIME_MTIME_EQ:
+ check = (obj_ts == op_ts);
+ break;
+ case CLS_RGW_CHECK_TIME_MTIME_LT:
+ check = (obj_ts < op_ts);
+ break;
+ case CLS_RGW_CHECK_TIME_MTIME_LE:
+ check = (obj_ts <= op_ts);
+ break;
+ case CLS_RGW_CHECK_TIME_MTIME_GT:
+ check = (obj_ts > op_ts);
+ break;
+ case CLS_RGW_CHECK_TIME_MTIME_GE:
+ check = (obj_ts >= op_ts);
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (!check) {
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static int rgw_bi_get_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_bi_get_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ string idx;
+
+ switch (op.type) {
+ case BIIndexType::Plain:
+ idx = op.key.name;
+ break;
+ case BIIndexType::Instance:
+ encode_obj_index_key(op.key, &idx);
+ break;
+ case BIIndexType::OLH:
+ encode_olh_data_key(op.key, &idx);
+ break;
+ default:
+ CLS_LOG(10, "%s(): invalid key type encoding: %d",
+ __func__, int(op.type));
+ return -EINVAL;
+ }
+
+ rgw_cls_bi_get_ret op_ret;
+
+ rgw_cls_bi_entry& entry = op_ret.entry;
+
+ entry.type = op.type;
+ entry.idx = idx;
+
+ int r = cls_cxx_map_get_val(hctx, idx, &entry.data);
+ if (r < 0) {
+ CLS_LOG(10, "%s(): cls_cxx_map_get_val() returned %d", __func__, r);
+ return r;
+ }
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+static int rgw_bi_put_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_bi_put_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ rgw_cls_bi_entry& entry = op.entry;
+
+ int r = cls_cxx_map_set_val(hctx, entry.idx, &entry.data);
+ if (r < 0) {
+ CLS_LOG(0, "ERROR: %s(): cls_cxx_map_set_val() returned r=%d", __func__, r);
+ }
+
+ return 0;
+}
+
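+/*
+ * list plain (non-special) index entries starting at marker, stopping once
+ * keys reach the bucket index log prefix; returns the number of entries
+ * appended to *entries
+ */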
+static int list_plain_entries(cls_method_context_t hctx, const string& name, const string& marker, uint32_t max,
+ list<rgw_cls_bi_entry> *entries, bool *pmore)
+{
+ string filter = name;
+ string start_key = marker;
+
+ string end_key; // stop listing at bi_log_prefix
+ bi_log_prefix(end_key);
+
+ int count = 0;
+ map<string, bufferlist> keys;
+ int ret = cls_cxx_map_get_vals(hctx, start_key, filter, max, &keys, pmore);
+ if (ret < 0) {
+ return ret;
+ }
+
+ map<string, bufferlist>::iterator iter;
+ for (iter = keys.begin(); iter != keys.end(); ++iter) {
+ if (iter->first >= end_key) {
+ /* past the end of plain namespace */
+ if (pmore) {
+ *pmore = false;
+ }
+ return count;
+ }
+
+ rgw_cls_bi_entry entry;
+ entry.type = BIIndexType::Plain;
+ entry.idx = iter->first;
+ entry.data = iter->second;
+
+ auto biter = entry.data.cbegin();
+
+ rgw_bucket_dir_entry e;
+ try {
+ decode(e, biter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s(): failed to decode buffer", __func__);
+ return -EIO;
+ }
+
+ CLS_LOG(20, "%s(): entry.idx=%s e.key.name=%s", __func__, escape_str(entry.idx).c_str(), escape_str(e.key.name).c_str());
+
+ if (!name.empty() && e.key.name != name) {
+ /* we are skipping the rest of the entries */
+ if (pmore) {
+ *pmore = false;
+ }
+ return count;
+ }
+
+ entries->push_back(entry);
+ count++;
+ if (count >= (int)max) {
+ return count;
+ }
+ start_key = entry.idx;
+ }
+
+ return count;
+}
+
+static int list_instance_entries(cls_method_context_t hctx, const string& name, const string& marker, uint32_t max,
+ list<rgw_cls_bi_entry> *entries, bool *pmore)
+{
+ cls_rgw_obj_key key(name);
+ string first_instance_idx;
+ encode_obj_versioned_data_key(key, &first_instance_idx);
+ string start_key;
+
+ if (!name.empty()) {
+ start_key = first_instance_idx;
+ } else {
+ start_key = BI_PREFIX_CHAR;
+ start_key.append(bucket_index_prefixes[BI_BUCKET_OBJ_INSTANCE_INDEX]);
+ }
+ string filter = start_key;
+ if (bi_entry_gt(marker, start_key)) {
+ start_key = marker;
+ }
+ int count = 0;
+ map<string, bufferlist> keys;
+ bufferlist k;
+ int ret = cls_cxx_map_get_val(hctx, start_key, &k);
+ if (ret < 0 && ret != -ENOENT) {
+ return ret;
+ }
+ bool found_first = (ret == 0);
+ if (found_first) {
+ --max;
+ }
+ if (max > 0) {
+ ret = cls_cxx_map_get_vals(hctx, start_key, string(), max, &keys, pmore);
+ CLS_LOG(20, "%s(): start_key=%s first_instance_idx=%s keys.size()=%d", __func__, escape_str(start_key).c_str(), escape_str(first_instance_idx).c_str(), (int)keys.size());
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ if (found_first) {
+ keys[start_key].claim(k);
+ }
+
+ map<string, bufferlist>::iterator iter;
+ for (iter = keys.begin(); iter != keys.end(); ++iter) {
+ rgw_cls_bi_entry entry;
+ entry.type = BIIndexType::Instance;
+ entry.idx = iter->first;
+ entry.data = iter->second;
+
+ if (!filter.empty() && entry.idx.compare(0, filter.size(), filter) != 0) {
+ /* we are skipping the rest of the entries */
+ if (pmore) {
+ *pmore = false;
+ }
+ return count;
+ }
+
+ CLS_LOG(20, "%s(): entry.idx=%s", __func__, escape_str(entry.idx).c_str());
+
+ auto biter = entry.data.cbegin();
+
+ rgw_bucket_dir_entry e;
+ try {
+ decode(e, biter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s(): failed to decode buffer (size=%d)", __func__, entry.data.length());
+ return -EIO;
+ }
+
+ if (!name.empty() && e.key.name != name) {
+ /* we are skipping the rest of the entries */
+ if (pmore) {
+ *pmore = false;
+ }
+ return count;
+ }
+
+ entries->push_back(entry);
+ count++;
+ start_key = entry.idx;
+ }
+
+ return count;
+}
+
+static int list_olh_entries(cls_method_context_t hctx, const string& name, const string& marker, uint32_t max,
+ list<rgw_cls_bi_entry> *entries, bool *pmore)
+{
+ cls_rgw_obj_key key(name);
+ string first_instance_idx;
+ encode_olh_data_key(key, &first_instance_idx);
+ string start_key;
+
+ if (!name.empty()) {
+ start_key = first_instance_idx;
+ } else {
+ start_key = BI_PREFIX_CHAR;
+ start_key.append(bucket_index_prefixes[BI_BUCKET_OLH_DATA_INDEX]);
+ }
+ string filter = start_key;
+ if (bi_entry_gt(marker, start_key)) {
+ start_key = marker;
+ }
+ int count = 0;
+ map<string, bufferlist> keys;
+ int ret;
+ bufferlist k;
+ ret = cls_cxx_map_get_val(hctx, start_key, &k);
+ if (ret < 0 && ret != -ENOENT) {
+ return ret;
+ }
+ bool found_first = (ret == 0);
+ if (found_first) {
+ --max;
+ }
+ if (max > 0) {
+ ret = cls_cxx_map_get_vals(hctx, start_key, string(), max, &keys, pmore);
+ CLS_LOG(20, "%s(): start_key=%s first_instance_idx=%s keys.size()=%d", __func__, escape_str(start_key).c_str(), escape_str(first_instance_idx).c_str(), (int)keys.size());
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (found_first) {
+ keys[start_key].claim(k);
+ }
+
+ map<string, bufferlist>::iterator iter;
+ for (iter = keys.begin(); iter != keys.end(); ++iter) {
+ rgw_cls_bi_entry entry;
+ entry.type = BIIndexType::OLH;
+ entry.idx = iter->first;
+ entry.data = iter->second;
+
+ if (!filter.empty() && entry.idx.compare(0, filter.size(), filter) != 0) {
+ /* we are skipping the rest of the entries */
+ if (pmore) {
+ *pmore = false;
+ }
+ return count;
+ }
+
+ CLS_LOG(20, "%s(): entry.idx=%s", __func__, escape_str(entry.idx).c_str());
+
+ auto biter = entry.data.cbegin();
+
+ rgw_bucket_olh_entry e;
+ try {
+ decode(e, biter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s(): failed to decode buffer (size=%d)", __func__, entry.data.length());
+ return -EIO;
+ }
+
+ if (!name.empty() && e.key.name != name) {
+ /* we are skipping the rest of the entries */
+ if (pmore) {
+ *pmore = false;
+ }
+ return count;
+ }
+
+ entries->push_back(entry);
+ count++;
+ start_key = entry.idx;
+ }
+
+ return count;
+}
+
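+/*
+ * list raw bucket index entries, walking the plain, instance and olh
+ * namespaces in turn until op.max entries have been collected
+ */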
+static int rgw_bi_list_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // decode request
+ rgw_cls_bi_list_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s(): failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ rgw_cls_bi_list_ret op_ret;
+
+ string filter = op.name;
+#define MAX_BI_LIST_ENTRIES 1000
+ int32_t max = (op.max < MAX_BI_LIST_ENTRIES ? op.max : MAX_BI_LIST_ENTRIES);
+ string start_key = op.marker;
+ bool more;
+ int ret = list_plain_entries(hctx, op.name, op.marker, max, &op_ret.entries, &more);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s(): list_plain_entries returned ret=%d", __func__, ret);
+ return ret;
+ }
+ int count = ret;
+
+ CLS_LOG(20, "found %d plain entries", count);
+
+ if (!more) {
+ ret = list_instance_entries(hctx, op.name, op.marker, max - count, &op_ret.entries, &more);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s(): list_instance_entries returned ret=%d", __func__, ret);
+ return ret;
+ }
+
+ count += ret;
+ }
+
+ if (!more) {
+ ret = list_olh_entries(hctx, op.name, op.marker, max - count, &op_ret.entries, &more);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s(): list_olh_entries returned ret=%d", __func__, ret);
+ return ret;
+ }
+
+ count += ret;
+ }
+
+ op_ret.is_truncated = (count >= max) || more;
+ while (count > max) {
+ op_ret.entries.pop_back();
+ count--;
+ }
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+int bi_log_record_decode(bufferlist& bl, rgw_bi_log_entry& e)
+{
+ auto iter = bl.cbegin();
+ try {
+ decode(e, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: failed to decode rgw_bi_log_entry");
+ return -EIO;
+ }
+ return 0;
+}
+
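+/*
+ * iterate over bucket index log entries between marker and end_marker,
+ * invoking cb on each decoded entry and remembering the last key visited
+ * in key_iter
+ */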
+static int bi_log_iterate_entries(cls_method_context_t hctx, const string& marker, const string& end_marker,
+ string& key_iter, uint32_t max_entries, bool *truncated,
+ int (*cb)(cls_method_context_t, const string&, rgw_bi_log_entry&, void *),
+ void *param)
+{
+ CLS_LOG(10, "bi_log_iterate_range");
+
+ map<string, bufferlist> keys;
+ string filter_prefix, end_key;
+ uint32_t i = 0;
+ string key;
+
+ if (truncated)
+ *truncated = false;
+
+ string start_key;
+ if (key_iter.empty()) {
+ key = BI_PREFIX_CHAR;
+ key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
+ key.append(marker);
+
+ start_key = key;
+ } else {
+ start_key = key_iter;
+ }
+
+ if (end_marker.empty()) {
+ end_key = BI_PREFIX_CHAR;
+ end_key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX + 1]);
+ } else {
+ end_key = BI_PREFIX_CHAR;
+ end_key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
+ end_key.append(end_marker);
+ }
+
+ CLS_LOG(10, "bi_log_iterate_entries start_key=%s end_key=%s\n", start_key.c_str(), end_key.c_str());
+
+ string filter;
+
+ int ret = cls_cxx_map_get_vals(hctx, start_key, filter, max_entries, &keys, truncated);
+ if (ret < 0)
+ return ret;
+
+ map<string, bufferlist>::iterator iter = keys.begin();
+ if (iter == keys.end())
+ return 0;
+
+ uint32_t num_keys = keys.size();
+
+ for (; iter != keys.end(); ++iter,++i) {
+ const string& key = iter->first;
+ rgw_bi_log_entry e;
+
+ CLS_LOG(10, "bi_log_iterate_entries key=%s bl.length=%d\n", key.c_str(), (int)iter->second.length());
+
+ if (key.compare(end_key) > 0) {
+ key_iter = key;
+ if (truncated) {
+ *truncated = false;
+ }
+ return 0;
+ }
+
+ ret = bi_log_record_decode(iter->second, e);
+ if (ret < 0)
+ return ret;
+
+ ret = cb(hctx, key, e, param);
+ if (ret < 0)
+ return ret;
+
+ if (i == num_keys - 1) {
+ key_iter = key;
+ }
+ }
+
+ return 0;
+}
+
+static int bi_log_list_cb(cls_method_context_t hctx, const string& key, rgw_bi_log_entry& info, void *param)
+{
+ list<rgw_bi_log_entry> *l = (list<rgw_bi_log_entry> *)param;
+ l->push_back(info);
+ return 0;
+}
+
+static int bi_log_list_entries(cls_method_context_t hctx, const string& marker,
+ uint32_t max, list<rgw_bi_log_entry>& entries, bool *truncated)
+{
+ string key_iter;
+ string end_marker;
+ int ret = bi_log_iterate_entries(hctx, marker, end_marker,
+ key_iter, max, truncated,
+ bi_log_list_cb, &entries);
+ return ret;
+}
+
+static int rgw_bi_log_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_bi_log_list_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bi_log_list(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ cls_rgw_bi_log_list_ret op_ret;
+ int ret = bi_log_list_entries(hctx, op.marker, op.max, op_ret.entries, &op_ret.truncated);
+ if (ret < 0)
+ return ret;
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+static int bi_log_list_trim_cb(cls_method_context_t hctx, const string& key, rgw_bi_log_entry& info, void *param)
+{
+ list<rgw_bi_log_entry> *entries = (list<rgw_bi_log_entry> *)param;
+
+ entries->push_back(info);
+ return 0;
+}
+
+static int bi_log_remove_entry(cls_method_context_t hctx, rgw_bi_log_entry& entry)
+{
+ string key;
+ key = BI_PREFIX_CHAR;
+ key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
+ key.append(entry.id);
+ return cls_cxx_map_remove_key(hctx, key);
+}
+
+static int bi_log_list_trim_entries(cls_method_context_t hctx,
+ const string& start_marker, const string& end_marker,
+ list<rgw_bi_log_entry>& entries, bool *truncated)
+{
+ string key_iter;
+#define MAX_TRIM_ENTRIES 1000 /* max entries to trim in a single operation */
+ int ret = bi_log_iterate_entries(hctx, start_marker, end_marker,
+ key_iter, MAX_TRIM_ENTRIES, truncated,
+ bi_log_list_trim_cb, &entries);
+ return ret;
+}
+
+static int rgw_bi_log_trim(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_bi_log_trim_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bi_log_list(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ cls_rgw_bi_log_list_ret op_ret;
+ list<rgw_bi_log_entry> entries;
+#define MAX_TRIM_ENTRIES 1000 /* don't do more than that in a single operation */
+ bool truncated;
+ int ret = bi_log_list_trim_entries(hctx, op.start_marker, op.end_marker, entries, &truncated);
+ if (ret < 0)
+ return ret;
+
+ if (entries.empty())
+ return -ENODATA;
+
+ list<rgw_bi_log_entry>::iterator iter;
+ for (iter = entries.begin(); iter != entries.end(); ++iter) {
+ rgw_bi_log_entry& entry = *iter;
+
+ ret = bi_log_remove_entry(hctx, entry);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
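+/*
+ * Write a RESYNC marker entry into the bucket index log and clear the
+ * 'syncstopped' flag in the bucket header so that log-based sync resumes.
+ */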
+static int rgw_bi_log_resync(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
+ return rc;
+ }
+
+ bufferlist bl;
+
+ rgw_bi_log_entry entry;
+
+ entry.timestamp = real_clock::now();
+ entry.op = RGWModifyOp::CLS_RGW_OP_RESYNC;
+ entry.state = RGWPendingState::CLS_RGW_STATE_COMPLETE;
+
+ string key;
+ bi_log_index_key(hctx, key, entry.id, header.ver);
+
+ encode(entry, bl);
+
+ if (entry.id > header.max_marker)
+ header.max_marker = entry.id;
+
+ header.syncstopped = false;
+
+ rc = cls_cxx_map_set_val(hctx, key, &bl);
+ if (rc < 0)
+ return rc;
+
+ return write_bucket_header(hctx, &header);
+}
+
+static int rgw_bi_log_stop(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
+ return rc;
+ }
+
+ bufferlist bl;
+
+ rgw_bi_log_entry entry;
+
+ entry.timestamp = real_clock::now();
+ entry.op = RGWModifyOp::CLS_RGW_OP_SYNCSTOP;
+ entry.state = RGWPendingState::CLS_RGW_STATE_COMPLETE;
+
+ string key;
+ bi_log_index_key(hctx, key, entry.id, header.ver);
+
+ encode(entry, bl);
+
+ if (entry.id > header.max_marker)
+ header.max_marker = entry.id;
+ header.syncstopped = true;
+
+ rc = cls_cxx_map_set_val(hctx, key, &bl);
+ if (rc < 0)
+ return rc;
+
+ return write_bucket_header(hctx, &header);
+}
+
+
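+/*
+ * Usage log records are stored under two omap key layouts so they can be
+ * scanned either chronologically or per user:
+ *   by time: <epoch>_<user>_<bucket>   (epoch zero-padded to 11 digits)
+ *   by user: <user>_<epoch>_<bucket>
+ */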
+static void usage_record_prefix_by_time(uint64_t epoch, string& key)
+{
+ char buf[32];
+ snprintf(buf, sizeof(buf), "%011llu", (long long unsigned)epoch);
+ key = buf;
+}
+
+static void usage_record_prefix_by_user(const string& user, uint64_t epoch, string& key)
+{
+ char buf[user.size() + 32];
+ snprintf(buf, sizeof(buf), "%s_%011llu_", user.c_str(), (long long unsigned)epoch);
+ key = buf;
+}
+
+static void usage_record_name_by_time(uint64_t epoch, const string& user, const string& bucket, string& key)
+{
+ char buf[32 + user.size() + bucket.size()];
+ snprintf(buf, sizeof(buf), "%011llu_%s_%s", (long long unsigned)epoch, user.c_str(), bucket.c_str());
+ key = buf;
+}
+
+static void usage_record_name_by_user(const string& user, uint64_t epoch, const string& bucket, string& key)
+{
+ char buf[32 + user.size() + bucket.size()];
+ snprintf(buf, sizeof(buf), "%s_%011llu_%s", user.c_str(), (long long unsigned)epoch, bucket.c_str());
+ key = buf;
+}
+
+static int usage_record_decode(bufferlist& record_bl, rgw_usage_log_entry& e)
+{
+ auto kiter = record_bl.cbegin();
+ try {
+ decode(e, kiter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: usage_record_decode(): failed to decode record_bl\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
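+/*
+ * Add usage log records. Each incoming entry is aggregated with any existing
+ * record for the same (epoch, user, bucket) key and then written under both
+ * the by-time and the by-user key.
+ */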
+int rgw_user_usage_log_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "rgw_user_usage_log_add()");
+
+ auto in_iter = in->cbegin();
+ rgw_cls_usage_log_add_op op;
+
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_user_usage_log_add(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ rgw_usage_log_info& info = op.info;
+ vector<rgw_usage_log_entry>::iterator iter;
+
+ for (iter = info.entries.begin(); iter != info.entries.end(); ++iter) {
+ rgw_usage_log_entry& entry = *iter;
+ string key_by_time;
+
+ rgw_user *puser = (entry.payer.empty() ? &entry.owner : &entry.payer);
+
+ usage_record_name_by_time(entry.epoch, puser->to_str(), entry.bucket, key_by_time);
+
+ CLS_LOG(10, "rgw_user_usage_log_add user=%s bucket=%s\n", puser->to_str().c_str(), entry.bucket.c_str());
+
+ bufferlist record_bl;
+ int ret = cls_cxx_map_get_val(hctx, key_by_time, &record_bl);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(1, "ERROR: rgw_user_usage_log_add(): cls_cxx_map_read_key returned %d\n", ret);
+ return -EINVAL;
+ }
+ if (ret >= 0) {
+ rgw_usage_log_entry e;
+ ret = usage_record_decode(record_bl, e);
+ if (ret < 0)
+ return ret;
+ CLS_LOG(10, "rgw_user_usage_log_add aggregating existing bucket\n");
+ entry.aggregate(e);
+ }
+
+ bufferlist new_record_bl;
+ encode(entry, new_record_bl);
+ ret = cls_cxx_map_set_val(hctx, key_by_time, &new_record_bl);
+ if (ret < 0)
+ return ret;
+
+ string key_by_user;
+ usage_record_name_by_user(puser->to_str(), entry.epoch, entry.bucket, key_by_user);
+ ret = cls_cxx_map_set_val(hctx, key_by_user, &new_record_bl);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
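+/*
+ * Scan usage log records in the epoch range [start, end). When 'user' is set
+ * the per-user index is used and the scan stops once keys leave that user's
+ * prefix; otherwise the time index is used and the scan stops at the end
+ * epoch. Records for other buckets or outside the range are skipped, and
+ * matching records are passed to 'cb'. 'key_iter' holds the resume position
+ * and '*truncated' reports whether more records remain.
+ */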
+static int usage_iterate_range(cls_method_context_t hctx, uint64_t start, uint64_t end, const string& user,
+ const string& bucket, string& key_iter, uint32_t max_entries, bool *truncated,
+ int (*cb)(cls_method_context_t, const string&, rgw_usage_log_entry&, void *),
+ void *param)
+{
+ CLS_LOG(10, "usage_iterate_range");
+
+ map<string, bufferlist> keys;
+ string filter_prefix;
+ string start_key, end_key;
+ bool by_user = !user.empty();
+ string user_key;
+ bool truncated_status = false;
+
+ ceph_assert(truncated != nullptr);
+
+ if (!by_user) {
+ usage_record_prefix_by_time(end, end_key);
+ } else {
+ user_key = user;
+ user_key.append("_");
+ }
+
+ if (key_iter.empty()) {
+ if (by_user) {
+ usage_record_prefix_by_user(user, start, start_key);
+ } else {
+ usage_record_prefix_by_time(start, start_key);
+ }
+ } else {
+ start_key = key_iter;
+ }
+
+ CLS_LOG(20, "usage_iterate_range start_key=%s", start_key.c_str());
+ int ret = cls_cxx_map_get_vals(hctx, start_key, filter_prefix, max_entries, &keys, &truncated_status);
+ if (ret < 0)
+ return ret;
+
+ *truncated = truncated_status;
+
+ map<string, bufferlist>::iterator iter = keys.begin();
+ if (iter == keys.end())
+ return 0;
+
+ for (; iter != keys.end(); ++iter) {
+ const string& key = iter->first;
+ rgw_usage_log_entry e;
+
+ key_iter = key;
+ if (!by_user && key.compare(end_key) >= 0) {
+ CLS_LOG(20, "usage_iterate_range reached key=%s, done", key.c_str());
+ *truncated = false;
+ key_iter = key;
+ return 0;
+ }
+
+ if (by_user && key.compare(0, user_key.size(), user_key) != 0) {
+ CLS_LOG(20, "usage_iterate_range reached key=%s, done", key.c_str());
+ *truncated = false;
+ key_iter = key;
+ return 0;
+ }
+
+ ret = usage_record_decode(iter->second, e);
+ if (ret < 0)
+ return ret;
+
+ if (!bucket.empty() && bucket.compare(e.bucket))
+ continue;
+
+ if (e.epoch < start)
+ continue;
+
+ /* keys are sorted by epoch, so once we're past end we're done */
+ if (e.epoch >= end) {
+ *truncated = false;
+ return 0;
+ }
+
+ ret = cb(hctx, key, e, param);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int usage_log_read_cb(cls_method_context_t hctx, const string& key, rgw_usage_log_entry& entry, void *param)
+{
+ map<rgw_user_bucket, rgw_usage_log_entry> *usage = (map<rgw_user_bucket, rgw_usage_log_entry> *)param;
+ rgw_user *puser;
+ if (!entry.payer.empty()) {
+ puser = &entry.payer;
+ } else {
+ puser = &entry.owner;
+ }
+ rgw_user_bucket ub(puser->to_str(), entry.bucket);
+ rgw_usage_log_entry& le = (*usage)[ub];
+ le.aggregate(entry);
+
+ return 0;
+}
+
+int rgw_user_usage_log_read(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "rgw_user_usage_log_read()");
+
+ auto in_iter = in->cbegin();
+ rgw_cls_usage_log_read_op op;
+
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_user_usage_log_read(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ rgw_cls_usage_log_read_ret ret_info;
+ map<rgw_user_bucket, rgw_usage_log_entry> *usage = &ret_info.usage;
+ string iter = op.iter;
+#define MAX_ENTRIES 1000
+ uint32_t max_entries = (op.max_entries ? op.max_entries : MAX_ENTRIES);
+ int ret = usage_iterate_range(hctx, op.start_epoch, op.end_epoch, op.owner, op.bucket, iter, max_entries, &ret_info.truncated, usage_log_read_cb, (void *)usage);
+ if (ret < 0)
+ return ret;
+
+ if (ret_info.truncated)
+ ret_info.next_iter = iter;
+
+ encode(ret_info, *out);
+ return 0;
+}
+
+static int usage_log_trim_cb(cls_method_context_t hctx, const string& key, rgw_usage_log_entry& entry, void *param)
+{
+ bool *found = (bool *)param;
+ if (found) {
+ *found = true;
+ }
+ string key_by_time;
+ string key_by_user;
+
+ string o = entry.owner.to_str();
+ usage_record_name_by_time(entry.epoch, o, entry.bucket, key_by_time);
+ usage_record_name_by_user(o, entry.epoch, entry.bucket, key_by_user);
+
+ int ret = cls_cxx_map_remove_key(hctx, key_by_time);
+ if (ret < 0)
+ return ret;
+
+ return cls_cxx_map_remove_key(hctx, key_by_user);
+}
+
+int rgw_user_usage_log_trim(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "rgw_user_usage_log_trim()");
+
+ /* only continue if object exists! */
+ int ret = cls_cxx_stat(hctx, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ auto in_iter = in->cbegin();
+ rgw_cls_usage_log_trim_op op;
+
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_user_log_usage_log_trim(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ string iter;
+ bool more;
+ bool found = false;
+#define MAX_USAGE_TRIM_ENTRIES 128
+ ret = usage_iterate_range(hctx, op.start_epoch, op.end_epoch, op.user, op.bucket, iter, MAX_USAGE_TRIM_ENTRIES, &more, usage_log_trim_cb, (void *)&found);
+ if (ret < 0)
+ return ret;
+
+ if (!more && !found)
+ return -ENODATA;
+
+ return 0;
+}
+
+int rgw_usage_log_clear(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10,"%s", __func__);
+
+ int ret = cls_cxx_map_clear(hctx);
+ /* if object doesn't exist all the logs are cleared anyway */
+ if (ret == -ENOENT)
+ ret = 0;
+
+ return ret;
+}
+/*
+ * We hold the garbage collection chain data under two different indexes: the first 'name' index
+ * keeps them under a unique tag that represents the chains, and a second 'time' index keeps
+ * them by their expiration timestamp
+ */
+#define GC_OBJ_NAME_INDEX 0
+#define GC_OBJ_TIME_INDEX 1
+
+static string gc_index_prefixes[] = { "0_",
+ "1_" };
+
+static void prepend_index_prefix(const string& src, int index, string *dest)
+{
+ *dest = gc_index_prefixes[index];
+ dest->append(src);
+}
+
+static int gc_omap_get(cls_method_context_t hctx, int type, const string& key, cls_rgw_gc_obj_info *info)
+{
+ string index;
+ prepend_index_prefix(key, type, &index);
+
+ int ret = read_omap_entry(hctx, index, info);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int gc_omap_set(cls_method_context_t hctx, int type, const string& key, const cls_rgw_gc_obj_info *info)
+{
+ bufferlist bl;
+ encode(*info, bl);
+
+ string index = gc_index_prefixes[type];
+ index.append(key);
+
+ int ret = cls_cxx_map_set_val(hctx, index, &bl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int gc_omap_remove(cls_method_context_t hctx, int type, const string& key)
+{
+ string index = gc_index_prefixes[type];
+ index.append(key);
+
+ int ret = cls_cxx_map_remove_key(hctx, index);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static bool key_in_index(const string& key, int index_type)
+{
+ const string& prefix = gc_index_prefixes[index_type];
+ return (key.compare(0, prefix.size(), prefix) == 0);
+}
+
+
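+/*
+ * Insert or refresh a GC entry: drop any stale time-index key for the tag,
+ * set the expiration to now + expiration_secs, then store the entry under
+ * both the name index (by tag) and the time index (by expiration time). If
+ * the time-index write fails, the name-index entry is rolled back.
+ */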
+static int gc_update_entry(cls_method_context_t hctx, uint32_t expiration_secs,
+ cls_rgw_gc_obj_info& info)
+{
+ cls_rgw_gc_obj_info old_info;
+ int ret = gc_omap_get(hctx, GC_OBJ_NAME_INDEX, info.tag, &old_info);
+ if (ret == 0) {
+ string key;
+ get_time_key(old_info.time, &key);
+ ret = gc_omap_remove(hctx, GC_OBJ_TIME_INDEX, key);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: failed to remove key=%s\n", key.c_str());
+ return ret;
+ }
+ }
+
+ // calculate time and time key
+ info.time = ceph::real_clock::now();
+ info.time += make_timespan(expiration_secs);
+ string time_key;
+ get_time_key(info.time, &time_key);
+
+ if (info.chain.objs.empty()) {
+ CLS_LOG(0,
+ "WARNING: %s setting GC log entry with zero-length chain, "
+ "tag='%s', timekey='%s'",
+ __func__, info.tag.c_str(), time_key.c_str());
+ }
+
+ ret = gc_omap_set(hctx, GC_OBJ_NAME_INDEX, info.tag, &info);
+ if (ret < 0)
+ return ret;
+
+ ret = gc_omap_set(hctx, GC_OBJ_TIME_INDEX, time_key, &info);
+ if (ret < 0)
+ goto done_err;
+
+ return 0;
+
+done_err:
+
+ CLS_LOG(0, "ERROR: gc_set_entry error info.tag=%s, ret=%d\n",
+ info.tag.c_str(), ret);
+ gc_omap_remove(hctx, GC_OBJ_NAME_INDEX, info.tag);
+
+ return ret;
+}
+
+static int gc_defer_entry(cls_method_context_t hctx, const string& tag, uint32_t expiration_secs)
+{
+ cls_rgw_gc_obj_info info;
+ int ret = gc_omap_get(hctx, GC_OBJ_NAME_INDEX, tag, &info);
+ if (ret == -ENOENT)
+ return 0;
+ if (ret < 0)
+ return ret;
+ return gc_update_entry(hctx, expiration_secs, info);
+}
+
+int gc_record_decode(bufferlist& bl, cls_rgw_gc_obj_info& e)
+{
+ auto iter = bl.cbegin();
+ try {
+ decode(e, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: failed to decode cls_rgw_gc_obj_info");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int rgw_cls_gc_set_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_gc_set_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_gc_set_entry(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ return gc_update_entry(hctx, op.expiration_secs, op.info);
+}
+
+static int rgw_cls_gc_defer_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_gc_defer_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_gc_defer_entry(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ return gc_defer_entry(hctx, op.tag, op.expiration_secs);
+}
+
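+/*
+ * Iterate over GC time-index entries starting at 'marker' (or at the start
+ * of the time index when the marker is empty). With 'expired_only' the scan
+ * is bounded by the current time, so only entries whose expiration has
+ * already passed are visited. Each decoded entry is handed to 'cb', and
+ * 'out_marker' is set to the last processed key when the listing is
+ * truncated.
+ */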
+static int gc_iterate_entries(cls_method_context_t hctx,
+ const string& marker,
+ bool expired_only,
+ string& out_marker,
+ uint32_t max_entries,
+ bool *truncated,
+ int (*cb)(cls_method_context_t,
+ const string&,
+ cls_rgw_gc_obj_info&,
+ void *),
+ void *param)
+{
+ CLS_LOG(10, "gc_iterate_entries");
+
+ map<string, bufferlist> keys;
+ string filter_prefix, end_key;
+ string key;
+
+ if (truncated)
+ *truncated = false;
+
+ string start_key;
+ if (marker.empty()) {
+ prepend_index_prefix(marker, GC_OBJ_TIME_INDEX, &start_key);
+ } else {
+ start_key = marker;
+ }
+
+ if (expired_only) {
+ real_time now = ceph::real_clock::now();
+ string now_str;
+ get_time_key(now, &now_str);
+ prepend_index_prefix(now_str, GC_OBJ_TIME_INDEX, &end_key);
+
+ CLS_LOG(10, "gc_iterate_entries end_key=%s\n", end_key.c_str());
+ }
+
+ string filter;
+
+ int ret = cls_cxx_map_get_vals(hctx, start_key, filter, max_entries,
+ &keys, truncated);
+ if (ret < 0)
+ return ret;
+
+ map<string, bufferlist>::iterator iter = keys.begin();
+ if (iter == keys.end()) {
+ // if keys empty must not come back as truncated
+ ceph_assert(!truncated || !(*truncated));
+ return 0;
+ }
+
+ const string* last_key = nullptr; // last key processed, for end-marker
+ for (; iter != keys.end(); ++iter) {
+ const string& key = iter->first;
+ cls_rgw_gc_obj_info e;
+
+ CLS_LOG(10, "gc_iterate_entries key=%s\n", key.c_str());
+
+ if (!end_key.empty() && key.compare(end_key) >= 0) {
+ if (truncated)
+ *truncated = false;
+ return 0;
+ }
+
+ if (!key_in_index(key, GC_OBJ_TIME_INDEX)) {
+ if (truncated)
+ *truncated = false;
+ return 0;
+ }
+
+ ret = gc_record_decode(iter->second, e);
+ if (ret < 0)
+ return ret;
+
+ ret = cb(hctx, key, e, param);
+ if (ret < 0)
+ return ret;
+ last_key = &(iter->first); // update when callback successful
+ }
+
+ // set the out marker if either caller does not capture truncated or
+ // if they do capture and we are truncated
+ if (!truncated || *truncated) {
+ assert(last_key);
+ out_marker = *last_key;
+ }
+
+ return 0;
+}
+
+static int gc_list_cb(cls_method_context_t hctx, const string& key, cls_rgw_gc_obj_info& info, void *param)
+{
+ list<cls_rgw_gc_obj_info> *l = (list<cls_rgw_gc_obj_info> *)param;
+ l->push_back(info);
+ return 0;
+}
+
+static int gc_list_entries(cls_method_context_t hctx, const string& marker,
+ uint32_t max, bool expired_only,
+ list<cls_rgw_gc_obj_info>& entries, bool *truncated, string& next_marker)
+{
+ int ret = gc_iterate_entries(hctx, marker, expired_only,
+ next_marker, max, truncated,
+ gc_list_cb, &entries);
+ return ret;
+}
+
+static int rgw_cls_gc_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_gc_list_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_gc_list(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ cls_rgw_gc_list_ret op_ret;
+#define GC_LIST_ENTRIES_DEFAULT 128
+ int ret = gc_list_entries(hctx, op.marker, (op.max ? op.max : GC_LIST_ENTRIES_DEFAULT), op.expired_only,
+ op_ret.entries, &op_ret.truncated, op_ret.next_marker);
+ if (ret < 0)
+ return ret;
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+static int gc_remove(cls_method_context_t hctx, vector<string>& tags)
+{
+ for (auto iter = tags.begin(); iter != tags.end(); ++iter) {
+ string& tag = *iter;
+ cls_rgw_gc_obj_info info;
+ int ret = gc_omap_get(hctx, GC_OBJ_NAME_INDEX, tag, &info);
+ if (ret == -ENOENT) {
+ CLS_LOG(0, "couldn't find tag in name index tag=%s\n", tag.c_str());
+ continue;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ string time_key;
+ get_time_key(info.time, &time_key);
+ ret = gc_omap_remove(hctx, GC_OBJ_TIME_INDEX, time_key);
+ if (ret < 0 && ret != -ENOENT)
+ return ret;
+ if (ret == -ENOENT) {
+ CLS_LOG(0, "couldn't find key in time index key=%s\n", time_key.c_str());
+ }
+
+ ret = gc_omap_remove(hctx, GC_OBJ_NAME_INDEX, tag);
+ if (ret < 0 && ret != -ENOENT)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rgw_cls_gc_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_gc_remove_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_gc_remove(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ return gc_remove(hctx, op.tags);
+}
+
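+/*
+ * Lifecycle (LC) entries are stored as plain omap key/value pairs keyed by
+ * the entry's bucket marker, while the omap header holds the
+ * cls_rgw_lc_obj_head (current marker and start date).
+ */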
+static int rgw_cls_lc_get_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_lc_get_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_set_entry(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ rgw_lc_entry_t lc_entry;
+ int ret = read_omap_entry(hctx, op.marker, &lc_entry);
+ if (ret < 0)
+ return ret;
+
+ cls_rgw_lc_get_entry_ret op_ret(std::move(lc_entry));
+ encode(op_ret, *out);
+ return 0;
+}
+
+
+static int rgw_cls_lc_set_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_lc_set_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_set_entry(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ bufferlist bl;
+ encode(op.entry, bl);
+
+ int ret = cls_cxx_map_set_val(hctx, op.entry.first, &bl);
+ return ret;
+}
+
+static int rgw_cls_lc_rm_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_lc_rm_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_rm_entry(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ int ret = cls_cxx_map_remove_key(hctx, op.entry.first);
+ return ret;
+}
+
+static int rgw_cls_lc_get_next_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+ cls_rgw_lc_get_next_entry_ret op_ret;
+ cls_rgw_lc_get_next_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_get_next_entry: failed to decode op\n");
+ return -EINVAL;
+ }
+
+ map<string, bufferlist> vals;
+ string filter_prefix;
+ bool more;
+ int ret = cls_cxx_map_get_vals(hctx, op.marker, filter_prefix, 1, &vals, &more);
+ if (ret < 0)
+ return ret;
+ map<string, bufferlist>::iterator it;
+ pair<string, int> entry;
+ if (!vals.empty()) {
+ it=vals.begin();
+ in_iter = it->second.begin();
+ try {
+ decode(entry, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_get_next_entry(): failed to decode entry\n");
+ return -EIO;
+ }
+ }
+ op_ret.entry = entry;
+ encode(op_ret, *out);
+ return 0;
+}
+
+static int rgw_cls_lc_list_entries(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ cls_rgw_lc_list_entries_op op;
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_list_entries(): failed to decode op\n");
+ return -EINVAL;
+ }
+
+ cls_rgw_lc_list_entries_ret op_ret;
+ bufferlist::const_iterator iter;
+ map<string, bufferlist> vals;
+ string filter_prefix;
+ int ret = cls_cxx_map_get_vals(hctx, op.marker, filter_prefix, op.max_entries, &vals, &op_ret.is_truncated);
+ if (ret < 0)
+ return ret;
+ map<string, bufferlist>::iterator it;
+ pair<string, int> entry;
+ for (it = vals.begin(); it != vals.end(); ++it) {
+ iter = it->second.cbegin();
+ try {
+ decode(entry, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_list_entries(): failed to decode entry\n");
+ return -EIO;
+ }
+ op_ret.entries.insert(entry);
+ }
+ encode(op_ret, *out);
+ return 0;
+}
+
+static int rgw_cls_lc_put_head(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_lc_put_head_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_put_head(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ bufferlist bl;
+ encode(op.head, bl);
+ int ret = cls_cxx_map_write_header(hctx,&bl);
+ return ret;
+}
+
+static int rgw_cls_lc_get_head(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ bufferlist bl;
+ int ret = cls_cxx_map_read_header(hctx, &bl);
+ if (ret < 0)
+ return ret;
+ cls_rgw_lc_obj_head head;
+ if (bl.length() != 0) {
+ auto iter = bl.cbegin();
+ try {
+ decode(head, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: rgw_cls_lc_get_head(): failed to decode entry %s\n",err.what());
+ return -EINVAL;
+ }
+ } else {
+ head.start_date = 0;
+ head.marker.clear();
+ }
+ cls_rgw_lc_get_head_ret op_ret;
+ op_ret.head = head;
+ encode(op_ret, *out);
+ return 0;
+}
+
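+/*
+ * Reshard queue entries are stored as omap key/value pairs: the key is
+ * derived from the entry's tenant and bucket name and the value is the
+ * encoded cls_rgw_reshard_entry.
+ */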
+static int rgw_reshard_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_reshard_add_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_reshard_add: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+
+ string key;
+ op.entry.get_key(&key);
+
+ bufferlist bl;
+ encode(op.entry, bl);
+ int ret = cls_cxx_map_set_val(hctx, key, &bl);
+ if (ret < 0) {
+ CLS_ERR("error adding reshard job for bucket %s with key %s",op.entry.bucket_name.c_str(), key.c_str());
+ return ret;
+ }
+
+ return ret;
+}
+
+static int rgw_reshard_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ cls_rgw_reshard_list_op op;
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n");
+ return -EINVAL;
+ }
+ cls_rgw_reshard_list_ret op_ret;
+ bufferlist::const_iterator iter;
+ map<string, bufferlist> vals;
+ string filter_prefix;
+#define MAX_RESHARD_LIST_ENTRIES 1000
+ /* one extra entry for identifying truncation */
+ int32_t max = (op.max && (op.max < MAX_RESHARD_LIST_ENTRIES) ? op.max : MAX_RESHARD_LIST_ENTRIES);
+ int ret = cls_cxx_map_get_vals(hctx, op.marker, filter_prefix, max, &vals, &op_ret.is_truncated);
+ if (ret < 0)
+ return ret;
+ map<string, bufferlist>::iterator it;
+ cls_rgw_reshard_entry entry;
+ int i = 0;
+ for (it = vals.begin(); i < (int)op.max && it != vals.end(); ++it, ++i) {
+ iter = it->second.cbegin();
+ try {
+ decode(entry, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n");
+ return -EIO;
+ }
+ op_ret.entries.push_back(entry);
+ }
+ encode(op_ret, *out);
+ return 0;
+}
+
+static int rgw_reshard_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_reshard_get_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_reshard_get: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ string key;
+ cls_rgw_reshard_entry entry;
+ op.entry.get_key(&key);
+ int ret = read_omap_entry(hctx, key, &entry);
+ if (ret < 0) {
+ return ret;
+ }
+
+ cls_rgw_reshard_get_ret op_ret;
+ op_ret.entry = entry;
+ encode(op_ret, *out);
+ return 0;
+}
+
+static int rgw_reshard_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_reshard_remove_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_rehard_remove: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ string key;
+ cls_rgw_reshard_entry entry;
+ cls_rgw_reshard_entry::generate_key(op.tenant, op.bucket_name, &key);
+ int ret = read_omap_entry(hctx, key, &entry);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (!op.bucket_id.empty() &&
+ entry.bucket_id != op.bucket_id) {
+ return 0;
+ }
+
+ ret = cls_cxx_map_remove_key(hctx, key);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: failed to remove key: key=%s ret=%d", key.c_str(), ret);
+ return 0;
+ }
+ return ret;
+}
+
+static int rgw_set_bucket_resharding(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ cls_rgw_set_bucket_resharding_op op;
+
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rgw_set_bucket_resharding: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: %s(): failed to read header\n", __func__);
+ return rc;
+ }
+
+ header.new_instance.set_status(op.entry.new_bucket_instance_id, op.entry.num_shards, op.entry.reshard_status);
+
+ return write_bucket_header(hctx, &header);
+}
+
+static int rgw_clear_bucket_resharding(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ cls_rgw_clear_bucket_resharding_op op;
+
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rgw_clear_bucket_resharding: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: %s(): failed to read header\n", __func__);
+ return rc;
+ }
+ header.new_instance.clear();
+
+ return write_bucket_header(hctx, &header);
+}
+
+static int rgw_guard_bucket_resharding(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ cls_rgw_guard_bucket_resharding_op op;
+
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rgw_clear_bucket_resharding: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: %s(): failed to read header\n", __func__);
+ return rc;
+ }
+
+ if (header.resharding()) {
+ return op.ret_err;
+ }
+
+ return 0;
+}
+
+static int rgw_get_bucket_resharding(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ cls_rgw_get_bucket_resharding_op op;
+
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rgw_clear_bucket_resharding: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: %s(): failed to read header\n", __func__);
+ return rc;
+ }
+
+ cls_rgw_get_bucket_resharding_ret op_ret;
+ op_ret.new_instance = header.new_instance;
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+CLS_INIT(rgw)
+{
+ CLS_LOG(1, "Loaded rgw class!");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_rgw_bucket_init_index;
+ cls_method_handle_t h_rgw_bucket_set_tag_timeout;
+ cls_method_handle_t h_rgw_bucket_list;
+ cls_method_handle_t h_rgw_bucket_check_index;
+ cls_method_handle_t h_rgw_bucket_rebuild_index;
+ cls_method_handle_t h_rgw_bucket_update_stats;
+ cls_method_handle_t h_rgw_bucket_prepare_op;
+ cls_method_handle_t h_rgw_bucket_complete_op;
+ cls_method_handle_t h_rgw_bucket_link_olh;
+ cls_method_handle_t h_rgw_bucket_unlink_instance_op;
+ cls_method_handle_t h_rgw_bucket_read_olh_log;
+ cls_method_handle_t h_rgw_bucket_trim_olh_log;
+ cls_method_handle_t h_rgw_bucket_clear_olh;
+ cls_method_handle_t h_rgw_obj_remove;
+ cls_method_handle_t h_rgw_obj_store_pg_ver;
+ cls_method_handle_t h_rgw_obj_check_attrs_prefix;
+ cls_method_handle_t h_rgw_obj_check_mtime;
+ cls_method_handle_t h_rgw_bi_get_op;
+ cls_method_handle_t h_rgw_bi_put_op;
+ cls_method_handle_t h_rgw_bi_list_op;
+ cls_method_handle_t h_rgw_bi_log_list_op;
+ cls_method_handle_t h_rgw_bi_log_resync_op;
+ cls_method_handle_t h_rgw_bi_log_stop_op;
+ cls_method_handle_t h_rgw_dir_suggest_changes;
+ cls_method_handle_t h_rgw_user_usage_log_add;
+ cls_method_handle_t h_rgw_user_usage_log_read;
+ cls_method_handle_t h_rgw_user_usage_log_trim;
+ cls_method_handle_t h_rgw_usage_log_clear;
+ cls_method_handle_t h_rgw_gc_set_entry;
+ cls_method_handle_t h_rgw_gc_list;
+ cls_method_handle_t h_rgw_gc_remove;
+ cls_method_handle_t h_rgw_lc_get_entry;
+ cls_method_handle_t h_rgw_lc_set_entry;
+ cls_method_handle_t h_rgw_lc_rm_entry;
+ cls_method_handle_t h_rgw_lc_get_next_entry;
+ cls_method_handle_t h_rgw_lc_put_head;
+ cls_method_handle_t h_rgw_lc_get_head;
+ cls_method_handle_t h_rgw_lc_list_entries;
+ cls_method_handle_t h_rgw_reshard_add;
+ cls_method_handle_t h_rgw_reshard_list;
+ cls_method_handle_t h_rgw_reshard_get;
+ cls_method_handle_t h_rgw_reshard_remove;
+ cls_method_handle_t h_rgw_set_bucket_resharding;
+ cls_method_handle_t h_rgw_clear_bucket_resharding;
+ cls_method_handle_t h_rgw_guard_bucket_resharding;
+ cls_method_handle_t h_rgw_get_bucket_resharding;
+
+
+ cls_register(RGW_CLASS, &h_class);
+
+ /* bucket index */
+ cls_register_cxx_method(h_class, RGW_BUCKET_INIT_INDEX, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_init_index, &h_rgw_bucket_init_index);
+ cls_register_cxx_method(h_class, RGW_BUCKET_SET_TAG_TIMEOUT, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_set_tag_timeout, &h_rgw_bucket_set_tag_timeout);
+ cls_register_cxx_method(h_class, RGW_BUCKET_LIST, CLS_METHOD_RD, rgw_bucket_list, &h_rgw_bucket_list);
+ cls_register_cxx_method(h_class, RGW_BUCKET_CHECK_INDEX, CLS_METHOD_RD, rgw_bucket_check_index, &h_rgw_bucket_check_index);
+ cls_register_cxx_method(h_class, RGW_BUCKET_REBUILD_INDEX, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_rebuild_index, &h_rgw_bucket_rebuild_index);
+ cls_register_cxx_method(h_class, RGW_BUCKET_UPDATE_STATS, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_update_stats, &h_rgw_bucket_update_stats);
+ cls_register_cxx_method(h_class, RGW_BUCKET_PREPARE_OP, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_prepare_op, &h_rgw_bucket_prepare_op);
+ cls_register_cxx_method(h_class, RGW_BUCKET_COMPLETE_OP, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_complete_op, &h_rgw_bucket_complete_op);
+ cls_register_cxx_method(h_class, RGW_BUCKET_LINK_OLH, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_link_olh, &h_rgw_bucket_link_olh);
+ cls_register_cxx_method(h_class, RGW_BUCKET_UNLINK_INSTANCE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_unlink_instance, &h_rgw_bucket_unlink_instance_op);
+ cls_register_cxx_method(h_class, RGW_BUCKET_READ_OLH_LOG, CLS_METHOD_RD, rgw_bucket_read_olh_log, &h_rgw_bucket_read_olh_log);
+ cls_register_cxx_method(h_class, RGW_BUCKET_TRIM_OLH_LOG, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_trim_olh_log, &h_rgw_bucket_trim_olh_log);
+ cls_register_cxx_method(h_class, RGW_BUCKET_CLEAR_OLH, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_clear_olh, &h_rgw_bucket_clear_olh);
+
+ cls_register_cxx_method(h_class, RGW_OBJ_REMOVE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_obj_remove, &h_rgw_obj_remove);
+ cls_register_cxx_method(h_class, RGW_OBJ_STORE_PG_VER, CLS_METHOD_WR, rgw_obj_store_pg_ver, &h_rgw_obj_store_pg_ver);
+ cls_register_cxx_method(h_class, RGW_OBJ_CHECK_ATTRS_PREFIX, CLS_METHOD_RD, rgw_obj_check_attrs_prefix, &h_rgw_obj_check_attrs_prefix);
+ cls_register_cxx_method(h_class, RGW_OBJ_CHECK_MTIME, CLS_METHOD_RD, rgw_obj_check_mtime, &h_rgw_obj_check_mtime);
+
+ cls_register_cxx_method(h_class, RGW_BI_GET, CLS_METHOD_RD, rgw_bi_get_op, &h_rgw_bi_get_op);
+ cls_register_cxx_method(h_class, RGW_BI_PUT, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_put_op, &h_rgw_bi_put_op);
+ cls_register_cxx_method(h_class, RGW_BI_LIST, CLS_METHOD_RD, rgw_bi_list_op, &h_rgw_bi_list_op);
+
+ cls_register_cxx_method(h_class, RGW_BI_LOG_LIST, CLS_METHOD_RD, rgw_bi_log_list, &h_rgw_bi_log_list_op);
+ cls_register_cxx_method(h_class, RGW_BI_LOG_TRIM, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_log_trim, &h_rgw_bi_log_list_op);
+ cls_register_cxx_method(h_class, RGW_DIR_SUGGEST_CHANGES, CLS_METHOD_RD | CLS_METHOD_WR, rgw_dir_suggest_changes, &h_rgw_dir_suggest_changes);
+
+ cls_register_cxx_method(h_class, RGW_BI_LOG_RESYNC, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_log_resync, &h_rgw_bi_log_resync_op);
+ cls_register_cxx_method(h_class, RGW_BI_LOG_STOP, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_log_stop, &h_rgw_bi_log_stop_op);
+
+ /* usage logging */
+ cls_register_cxx_method(h_class, RGW_USER_USAGE_LOG_ADD, CLS_METHOD_RD | CLS_METHOD_WR, rgw_user_usage_log_add, &h_rgw_user_usage_log_add);
+ cls_register_cxx_method(h_class, RGW_USER_USAGE_LOG_READ, CLS_METHOD_RD, rgw_user_usage_log_read, &h_rgw_user_usage_log_read);
+ cls_register_cxx_method(h_class, RGW_USER_USAGE_LOG_TRIM, CLS_METHOD_RD | CLS_METHOD_WR, rgw_user_usage_log_trim, &h_rgw_user_usage_log_trim);
+ cls_register_cxx_method(h_class, RGW_USAGE_LOG_CLEAR, CLS_METHOD_WR, rgw_usage_log_clear, &h_rgw_usage_log_clear);
+
+ /* garbage collection */
+ cls_register_cxx_method(h_class, RGW_GC_SET_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_gc_set_entry, &h_rgw_gc_set_entry);
+ cls_register_cxx_method(h_class, RGW_GC_DEFER_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_gc_defer_entry, &h_rgw_gc_set_entry);
+ cls_register_cxx_method(h_class, RGW_GC_LIST, CLS_METHOD_RD, rgw_cls_gc_list, &h_rgw_gc_list);
+ cls_register_cxx_method(h_class, RGW_GC_REMOVE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_gc_remove, &h_rgw_gc_remove);
+
+ /* lifecycle bucket list */
+ cls_register_cxx_method(h_class, RGW_LC_GET_ENTRY, CLS_METHOD_RD, rgw_cls_lc_get_entry, &h_rgw_lc_get_entry);
+ cls_register_cxx_method(h_class, RGW_LC_SET_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_lc_set_entry, &h_rgw_lc_set_entry);
+ cls_register_cxx_method(h_class, RGW_LC_RM_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_lc_rm_entry, &h_rgw_lc_rm_entry);
+ cls_register_cxx_method(h_class, RGW_LC_GET_NEXT_ENTRY, CLS_METHOD_RD, rgw_cls_lc_get_next_entry, &h_rgw_lc_get_next_entry);
+  cls_register_cxx_method(h_class, RGW_LC_PUT_HEAD, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_lc_put_head, &h_rgw_lc_put_head);
+ cls_register_cxx_method(h_class, RGW_LC_GET_HEAD, CLS_METHOD_RD, rgw_cls_lc_get_head, &h_rgw_lc_get_head);
+ cls_register_cxx_method(h_class, RGW_LC_LIST_ENTRIES, CLS_METHOD_RD, rgw_cls_lc_list_entries, &h_rgw_lc_list_entries);
+
+ /* resharding */
+ cls_register_cxx_method(h_class, RGW_RESHARD_ADD, CLS_METHOD_RD | CLS_METHOD_WR, rgw_reshard_add, &h_rgw_reshard_add);
+ cls_register_cxx_method(h_class, RGW_RESHARD_LIST, CLS_METHOD_RD, rgw_reshard_list, &h_rgw_reshard_list);
+  cls_register_cxx_method(h_class, RGW_RESHARD_GET, CLS_METHOD_RD, rgw_reshard_get, &h_rgw_reshard_get);
+ cls_register_cxx_method(h_class, RGW_RESHARD_REMOVE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_reshard_remove, &h_rgw_reshard_remove);
+
+ /* resharding attribute */
+ cls_register_cxx_method(h_class, RGW_SET_BUCKET_RESHARDING, CLS_METHOD_RD | CLS_METHOD_WR,
+ rgw_set_bucket_resharding, &h_rgw_set_bucket_resharding);
+ cls_register_cxx_method(h_class, RGW_CLEAR_BUCKET_RESHARDING, CLS_METHOD_RD | CLS_METHOD_WR,
+ rgw_clear_bucket_resharding, &h_rgw_clear_bucket_resharding);
+  cls_register_cxx_method(h_class, RGW_GUARD_BUCKET_RESHARDING, CLS_METHOD_RD,
+ rgw_guard_bucket_resharding, &h_rgw_guard_bucket_resharding);
+  cls_register_cxx_method(h_class, RGW_GET_BUCKET_RESHARDING, CLS_METHOD_RD,
+ rgw_get_bucket_resharding, &h_rgw_get_bucket_resharding);
+
+ return;
+}
+
diff --git a/src/cls/rgw/cls_rgw_client.cc b/src/cls/rgw/cls_rgw_client.cc
new file mode 100644
index 00000000..97426ed7
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_client.cc
@@ -0,0 +1,1018 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <errno.h>
+
+#include "cls/rgw/cls_rgw_const.h"
+#include "cls/rgw/cls_rgw_client.h"
+
+#include "common/debug.h"
+
+using namespace librados;
+
+const string BucketIndexShardsManager::KEY_VALUE_SEPARATOR = "#";
+const string BucketIndexShardsManager::SHARDS_SEPARATOR = ",";
+
+/**
+ * This class represents the bucket index object operation callback context.
+ */
+template <typename T>
+class ClsBucketIndexOpCtx : public ObjectOperationCompletion {
+private:
+ T *data;
+ int *ret_code;
+public:
+ ClsBucketIndexOpCtx(T* _data, int *_ret_code) : data(_data), ret_code(_ret_code) { ceph_assert(data); }
+ ~ClsBucketIndexOpCtx() override {}
+ void handle_completion(int r, bufferlist& outbl) override {
+ if (r >= 0) {
+ try {
+ auto iter = outbl.cbegin();
+ decode((*data), iter);
+ } catch (buffer::error& err) {
+ r = -EIO;
+ }
+ }
+ if (ret_code) {
+ *ret_code = r;
+ }
+ }
+};
+
+void BucketIndexAioManager::do_completion(int id) {
+ Mutex::Locker l(lock);
+
+ map<int, librados::AioCompletion*>::iterator iter = pendings.find(id);
+ ceph_assert(iter != pendings.end());
+ completions[id] = iter->second;
+ pendings.erase(iter);
+
+ // If the caller needs a list of finished objects, store them
+ // for further processing
+ map<int, string>::iterator miter = pending_objs.find(id);
+ if (miter != pending_objs.end()) {
+ completion_objs[id] = miter->second;
+ pending_objs.erase(miter);
+ }
+
+ cond.Signal();
+}
+
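+/*
+ * Wait for at least one outstanding AIO to finish, then drain the completed
+ * set: successfully completed objects are recorded in 'objs' (if requested),
+ * any return code other than 'valid_ret_code' is reported through
+ * 'ret_code', and the call returns false once nothing is pending or
+ * completed.
+ */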
+bool BucketIndexAioManager::wait_for_completions(int valid_ret_code,
+ int *num_completions, int *ret_code, map<int, string> *objs) {
+ lock.Lock();
+ if (pendings.empty() && completions.empty()) {
+ lock.Unlock();
+ return false;
+ }
+
+ if (completions.empty()) {
+ // Wait for AIO completion
+ cond.Wait(lock);
+ }
+
+ // Clear the completed AIOs
+ map<int, librados::AioCompletion*>::iterator iter = completions.begin();
+ for (; iter != completions.end(); ++iter) {
+ int r = iter->second->get_return_value();
+ if (objs && r == 0) { /* update list of successfully completed objs */
+ map<int, string>::iterator liter = completion_objs.find(iter->first);
+ if (liter != completion_objs.end()) {
+ (*objs)[liter->first] = liter->second;
+ }
+ }
+ if (ret_code && (r < 0 && r != valid_ret_code))
+ (*ret_code) = r;
+ iter->second->release();
+ }
+ if (num_completions)
+ (*num_completions) = completions.size();
+ completions.clear();
+ lock.Unlock();
+
+ return true;
+}
+
+// note: currently only called by testing code
+void cls_rgw_bucket_init_index(ObjectWriteOperation& o)
+{
+ bufferlist in;
+ o.exec(RGW_CLASS, RGW_BUCKET_INIT_INDEX, in);
+}
+
+static bool issue_bucket_index_init_op(librados::IoCtx& io_ctx,
+ const string& oid,
+ BucketIndexAioManager *manager) {
+ bufferlist in;
+ librados::ObjectWriteOperation op;
+ op.create(true);
+ op.exec(RGW_CLASS, RGW_BUCKET_INIT_INDEX, in);
+ return manager->aio_operate(io_ctx, oid, &op);
+}
+
+static bool issue_bucket_index_clean_op(librados::IoCtx& io_ctx,
+ const string& oid,
+ BucketIndexAioManager *manager) {
+ bufferlist in;
+ librados::ObjectWriteOperation op;
+ op.remove();
+ return manager->aio_operate(io_ctx, oid, &op);
+}
+
+static bool issue_bucket_set_tag_timeout_op(librados::IoCtx& io_ctx,
+ const string& oid, uint64_t timeout, BucketIndexAioManager *manager) {
+ bufferlist in;
+ rgw_cls_tag_timeout_op call;
+ call.tag_timeout = timeout;
+ encode(call, in);
+ ObjectWriteOperation op;
+ op.exec(RGW_CLASS, RGW_BUCKET_SET_TAG_TIMEOUT, in);
+ return manager->aio_operate(io_ctx, oid, &op);
+}
+
+int CLSRGWIssueBucketIndexInit::issue_op(int shard_id, const string& oid)
+{
+ return issue_bucket_index_init_op(io_ctx, oid, &manager);
+}
+
+void CLSRGWIssueBucketIndexInit::cleanup()
+{
+ // Do best effort removal
+ for (auto citer = objs_container.begin(); citer != iter; ++citer) {
+ io_ctx.remove(citer->second);
+ }
+}
+
+int CLSRGWIssueBucketIndexClean::issue_op(int shard_id, const string& oid)
+{
+ return issue_bucket_index_clean_op(io_ctx, oid, &manager);
+}
+
+int CLSRGWIssueSetTagTimeout::issue_op(int shard_id, const string& oid)
+{
+ return issue_bucket_set_tag_timeout_op(io_ctx, oid, tag_timeout, &manager);
+}
+
+void cls_rgw_bucket_update_stats(librados::ObjectWriteOperation& o,
+ bool absolute,
+ const map<RGWObjCategory, rgw_bucket_category_stats>& stats)
+{
+ rgw_cls_bucket_update_stats_op call;
+ call.absolute = absolute;
+ call.stats = stats;
+ bufferlist in;
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_BUCKET_UPDATE_STATS, in);
+}
+
+void cls_rgw_bucket_prepare_op(ObjectWriteOperation& o, RGWModifyOp op, string& tag,
+ const cls_rgw_obj_key& key, const string& locator, bool log_op,
+ uint16_t bilog_flags, rgw_zone_set& zones_trace)
+{
+ rgw_cls_obj_prepare_op call;
+ call.op = op;
+ call.tag = tag;
+ call.key = key;
+ call.locator = locator;
+ call.log_op = log_op;
+ call.bilog_flags = bilog_flags;
+ call.zones_trace = zones_trace;
+ bufferlist in;
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_BUCKET_PREPARE_OP, in);
+}
+
+void cls_rgw_bucket_complete_op(ObjectWriteOperation& o, RGWModifyOp op, string& tag,
+ rgw_bucket_entry_ver& ver,
+ const cls_rgw_obj_key& key,
+ rgw_bucket_dir_entry_meta& dir_meta,
+ list<cls_rgw_obj_key> *remove_objs, bool log_op,
+ uint16_t bilog_flags,
+ rgw_zone_set *zones_trace)
+{
+
+ bufferlist in;
+ rgw_cls_obj_complete_op call;
+ call.op = op;
+ call.tag = tag;
+ call.key = key;
+ call.ver = ver;
+ call.meta = dir_meta;
+ call.log_op = log_op;
+ call.bilog_flags = bilog_flags;
+ if (remove_objs)
+ call.remove_objs = *remove_objs;
+ if (zones_trace) {
+ call.zones_trace = *zones_trace;
+ }
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_BUCKET_COMPLETE_OP, in);
+}
+
+void cls_rgw_bucket_list_op(librados::ObjectReadOperation& op,
+ const cls_rgw_obj_key& start_obj,
+ const std::string& filter_prefix,
+ uint32_t num_entries,
+ bool list_versions,
+ rgw_cls_list_ret* result)
+{
+ bufferlist in;
+ rgw_cls_list_op call;
+ call.start_obj = start_obj;
+ call.filter_prefix = filter_prefix;
+ call.num_entries = num_entries;
+ call.list_versions = list_versions;
+ encode(call, in);
+
+ op.exec(RGW_CLASS, RGW_BUCKET_LIST, in, new ClsBucketIndexOpCtx<rgw_cls_list_ret>(result, NULL));
+}
+
+static bool issue_bucket_list_op(librados::IoCtx& io_ctx, const string& oid,
+ const cls_rgw_obj_key& start_obj,
+ const string& filter_prefix,
+ uint32_t num_entries, bool list_versions,
+ BucketIndexAioManager *manager,
+ rgw_cls_list_ret *pdata) {
+ librados::ObjectReadOperation op;
+ cls_rgw_bucket_list_op(op, start_obj, filter_prefix,
+ num_entries, list_versions, pdata);
+ return manager->aio_operate(io_ctx, oid, &op);
+}
+
+int CLSRGWIssueBucketList::issue_op(int shard_id, const string& oid)
+{
+ return issue_bucket_list_op(io_ctx, oid, start_obj, filter_prefix, num_entries, list_versions, &manager, &result[shard_id]);
+}
+
+void cls_rgw_remove_obj(librados::ObjectWriteOperation& o, list<string>& keep_attr_prefixes)
+{
+ bufferlist in;
+ rgw_cls_obj_remove_op call;
+ call.keep_attr_prefixes = keep_attr_prefixes;
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_OBJ_REMOVE, in);
+}
+
+void cls_rgw_obj_store_pg_ver(librados::ObjectWriteOperation& o, const string& attr)
+{
+ bufferlist in;
+ rgw_cls_obj_store_pg_ver_op call;
+ call.attr = attr;
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_OBJ_STORE_PG_VER, in);
+}
+
+void cls_rgw_obj_check_attrs_prefix(librados::ObjectOperation& o, const string& prefix, bool fail_if_exist)
+{
+ bufferlist in;
+ rgw_cls_obj_check_attrs_prefix call;
+ call.check_prefix = prefix;
+ call.fail_if_exist = fail_if_exist;
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_OBJ_CHECK_ATTRS_PREFIX, in);
+}
+
+void cls_rgw_obj_check_mtime(librados::ObjectOperation& o, const real_time& mtime, bool high_precision_time, RGWCheckMTimeType type)
+{
+ bufferlist in;
+ rgw_cls_obj_check_mtime call;
+ call.mtime = mtime;
+ call.high_precision_time = high_precision_time;
+ call.type = type;
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_OBJ_CHECK_MTIME, in);
+}
+
+int cls_rgw_bi_get(librados::IoCtx& io_ctx, const string oid,
+ BIIndexType index_type, cls_rgw_obj_key& key,
+ rgw_cls_bi_entry *entry)
+{
+ bufferlist in, out;
+ rgw_cls_bi_get_op call;
+ call.key = key;
+ call.type = index_type;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_BI_GET, in, out);
+ if (r < 0)
+ return r;
+
+ rgw_cls_bi_get_ret op_ret;
+ auto iter = out.cbegin();
+ try {
+ decode(op_ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+
+ *entry = op_ret.entry;
+
+ return 0;
+}
+
+int cls_rgw_bi_put(librados::IoCtx& io_ctx, const string oid, rgw_cls_bi_entry& entry)
+{
+ bufferlist in, out;
+ rgw_cls_bi_put_op call;
+ call.entry = entry;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_BI_PUT, in, out);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+void cls_rgw_bi_put(ObjectWriteOperation& op, const string oid, rgw_cls_bi_entry& entry)
+{
+ bufferlist in, out;
+ rgw_cls_bi_put_op call;
+ call.entry = entry;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_BI_PUT, in);
+}
+
+int cls_rgw_bi_list(librados::IoCtx& io_ctx, const string oid,
+ const string& name, const string& marker, uint32_t max,
+ list<rgw_cls_bi_entry> *entries, bool *is_truncated)
+{
+ bufferlist in, out;
+ rgw_cls_bi_list_op call;
+ call.name = name;
+ call.marker = marker;
+ call.max = max;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_BI_LIST, in, out);
+ if (r < 0)
+ return r;
+
+ rgw_cls_bi_list_ret op_ret;
+ auto iter = out.cbegin();
+ try {
+ decode(op_ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+
+ entries->swap(op_ret.entries);
+ *is_truncated = op_ret.is_truncated;
+
+ return 0;
+}
+
+int cls_rgw_bucket_link_olh(librados::IoCtx& io_ctx, librados::ObjectWriteOperation& op,
+ const string& oid, const cls_rgw_obj_key& key, bufferlist& olh_tag,
+ bool delete_marker, const string& op_tag, rgw_bucket_dir_entry_meta *meta,
+ uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, rgw_zone_set& zones_trace)
+{
+ bufferlist in, out;
+ rgw_cls_link_olh_op call;
+ call.key = key;
+ call.olh_tag = string(olh_tag.c_str(), olh_tag.length());
+ call.op_tag = op_tag;
+ call.delete_marker = delete_marker;
+ if (meta) {
+ call.meta = *meta;
+ }
+ call.olh_epoch = olh_epoch;
+ call.log_op = log_op;
+ call.unmod_since = unmod_since;
+ call.high_precision_time = high_precision_time;
+ call.zones_trace = zones_trace;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_BUCKET_LINK_OLH, in);
+ int r = io_ctx.operate(oid, &op);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int cls_rgw_bucket_unlink_instance(librados::IoCtx& io_ctx, librados::ObjectWriteOperation& op,
+ const string& oid,
+ const cls_rgw_obj_key& key, const string& op_tag,
+ const string& olh_tag, uint64_t olh_epoch, bool log_op, rgw_zone_set& zones_trace)
+{
+ bufferlist in, out;
+ rgw_cls_unlink_instance_op call;
+ call.key = key;
+ call.op_tag = op_tag;
+ call.olh_epoch = olh_epoch;
+ call.olh_tag = olh_tag;
+ call.log_op = log_op;
+ call.zones_trace = zones_trace;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_BUCKET_UNLINK_INSTANCE, in);
+ int r = io_ctx.operate(oid, &op);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int cls_rgw_get_olh_log(IoCtx& io_ctx, string& oid, librados::ObjectReadOperation& op, const cls_rgw_obj_key& olh, uint64_t ver_marker,
+ const string& olh_tag,
+ map<uint64_t, vector<rgw_bucket_olh_log_entry> > *log, bool *is_truncated)
+{
+ bufferlist in, out;
+ rgw_cls_read_olh_log_op call;
+ call.olh = olh;
+ call.ver_marker = ver_marker;
+ call.olh_tag = olh_tag;
+ encode(call, in);
+ int op_ret;
+ op.exec(RGW_CLASS, RGW_BUCKET_READ_OLH_LOG, in, &out, &op_ret);
+ int r = io_ctx.operate(oid, &op, NULL);
+ if (r < 0) {
+ return r;
+ }
+ if (op_ret < 0) {
+ return op_ret;
+ }
+
+ rgw_cls_read_olh_log_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+
+ if (log) {
+ *log = ret.log;
+ }
+ if (is_truncated) {
+ *is_truncated = ret.is_truncated;
+ }
+
+ return r;
+}
+
+void cls_rgw_trim_olh_log(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, uint64_t ver, const string& olh_tag)
+{
+ bufferlist in;
+ rgw_cls_trim_olh_log_op call;
+ call.olh = olh;
+ call.ver = ver;
+ call.olh_tag = olh_tag;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_BUCKET_TRIM_OLH_LOG, in);
+}
+
+int cls_rgw_clear_olh(IoCtx& io_ctx, librados::ObjectWriteOperation& op, string& oid, const cls_rgw_obj_key& olh, const string& olh_tag)
+{
+ bufferlist in, out;
+ rgw_cls_bucket_clear_olh_op call;
+ call.key = olh;
+ call.olh_tag = olh_tag;
+ encode(call, in);
+ int op_ret;
+ op.exec(RGW_CLASS, RGW_BUCKET_CLEAR_OLH, in, &out, &op_ret);
+ int r = io_ctx.operate(oid, &op);
+ if (r < 0) {
+ return r;
+ }
+ return op_ret;
+}
+
+static bool issue_bi_log_list_op(librados::IoCtx& io_ctx, const string& oid, int shard_id,
+ BucketIndexShardsManager& marker_mgr, uint32_t max, BucketIndexAioManager *manager,
+ cls_rgw_bi_log_list_ret *pdata) {
+ bufferlist in;
+ cls_rgw_bi_log_list_op call;
+ call.marker = marker_mgr.get(shard_id, "");
+ call.max = max;
+ encode(call, in);
+
+ librados::ObjectReadOperation op;
+ op.exec(RGW_CLASS, RGW_BI_LOG_LIST, in, new ClsBucketIndexOpCtx<cls_rgw_bi_log_list_ret>(pdata, NULL));
+ return manager->aio_operate(io_ctx, oid, &op);
+}
+
+int CLSRGWIssueBILogList::issue_op(int shard_id, const string& oid)
+{
+ return issue_bi_log_list_op(io_ctx, oid, shard_id, marker_mgr, max, &manager, &result[shard_id]);
+}
+
+static bool issue_bi_log_trim(librados::IoCtx& io_ctx, const string& oid, int shard_id,
+ BucketIndexShardsManager& start_marker_mgr,
+ BucketIndexShardsManager& end_marker_mgr, BucketIndexAioManager *manager) {
+ bufferlist in;
+ cls_rgw_bi_log_trim_op call;
+ call.start_marker = start_marker_mgr.get(shard_id, "");
+ call.end_marker = end_marker_mgr.get(shard_id, "");
+ encode(call, in);
+ ObjectWriteOperation op;
+ op.exec(RGW_CLASS, RGW_BI_LOG_TRIM, in);
+ return manager->aio_operate(io_ctx, oid, &op);
+}
+
+int CLSRGWIssueBILogTrim::issue_op(int shard_id, const string& oid)
+{
+ return issue_bi_log_trim(io_ctx, oid, shard_id, start_marker_mgr, end_marker_mgr, &manager);
+}
+
+static bool issue_bucket_check_index_op(IoCtx& io_ctx, const string& oid, BucketIndexAioManager *manager,
+ rgw_cls_check_index_ret *pdata) {
+ bufferlist in;
+ librados::ObjectReadOperation op;
+ op.exec(RGW_CLASS, RGW_BUCKET_CHECK_INDEX, in, new ClsBucketIndexOpCtx<rgw_cls_check_index_ret>(
+ pdata, NULL));
+ return manager->aio_operate(io_ctx, oid, &op);
+}
+
+int CLSRGWIssueBucketCheck::issue_op(int shard_id, const string& oid)
+{
+ return issue_bucket_check_index_op(io_ctx, oid, &manager, &result[shard_id]);
+}
+
+static bool issue_bucket_rebuild_index_op(IoCtx& io_ctx, const string& oid,
+ BucketIndexAioManager *manager) {
+ bufferlist in;
+ librados::ObjectWriteOperation op;
+ op.exec(RGW_CLASS, RGW_BUCKET_REBUILD_INDEX, in);
+ return manager->aio_operate(io_ctx, oid, &op);
+}
+
+int CLSRGWIssueBucketRebuild::issue_op(int shard_id, const string& oid)
+{
+ return issue_bucket_rebuild_index_op(io_ctx, oid, &manager);
+}
+
+void cls_rgw_encode_suggestion(char op, rgw_bucket_dir_entry& dirent, bufferlist& updates)
+{
+ updates.append(op);
+ encode(dirent, updates);
+}
+
+void cls_rgw_suggest_changes(ObjectWriteOperation& o, bufferlist& updates)
+{
+ o.exec(RGW_CLASS, RGW_DIR_SUGGEST_CHANGES, updates);
+}
+
+int CLSRGWIssueGetDirHeader::issue_op(int shard_id, const string& oid)
+{
+ cls_rgw_obj_key nokey;
+ return issue_bucket_list_op(io_ctx, oid, nokey, "", 0, false, &manager, &result[shard_id]);
+}
+
+static bool issue_resync_bi_log(librados::IoCtx& io_ctx, const string& oid, BucketIndexAioManager *manager)
+{
+ bufferlist in;
+ librados::ObjectWriteOperation op;
+ op.exec("rgw", "bi_log_resync", in);
+ return manager->aio_operate(io_ctx, oid, &op);
+}
+
+int CLSRGWIssueResyncBucketBILog::issue_op(int shard_id, const string& oid)
+{
+ return issue_resync_bi_log(io_ctx, oid, &manager);
+}
+
+static bool issue_bi_log_stop(librados::IoCtx& io_ctx, const string& oid, BucketIndexAioManager *manager)
+{
+ bufferlist in;
+ librados::ObjectWriteOperation op;
+ op.exec("rgw", "bi_log_stop", in);
+ return manager->aio_operate(io_ctx, oid, &op);
+}
+
+int CLSRGWIssueBucketBILogStop::issue_op(int shard_id, const string& oid)
+{
+ return issue_bi_log_stop(io_ctx, oid, &manager);
+}
+
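+/*
+ * Completion context for cls_rgw_get_dir_header_async(): decodes the
+ * rgw_cls_list_ret payload and passes the directory header (or the decode
+ * error) to the caller-supplied RGWGetDirHeader_CB.
+ */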
+class GetDirHeaderCompletion : public ObjectOperationCompletion {
+ RGWGetDirHeader_CB *ret_ctx;
+public:
+ explicit GetDirHeaderCompletion(RGWGetDirHeader_CB *_ctx) : ret_ctx(_ctx) {}
+ ~GetDirHeaderCompletion() override {
+ ret_ctx->put();
+ }
+ void handle_completion(int r, bufferlist& outbl) override {
+ rgw_cls_list_ret ret;
+ try {
+ auto iter = outbl.cbegin();
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ r = -EIO;
+ }
+
+ ret_ctx->handle_response(r, ret.dir.header);
+ }
+};
+
+int cls_rgw_get_dir_header_async(IoCtx& io_ctx, string& oid, RGWGetDirHeader_CB *ctx)
+{
+ bufferlist in, out;
+ rgw_cls_list_op call;
+ call.num_entries = 0;
+ encode(call, in);
+ ObjectReadOperation op;
+ GetDirHeaderCompletion *cb = new GetDirHeaderCompletion(ctx);
+ op.exec(RGW_CLASS, RGW_BUCKET_LIST, in, cb);
+ AioCompletion *c = librados::Rados::aio_create_completion(NULL, NULL, NULL);
+ int r = io_ctx.aio_operate(oid, c, &op, NULL);
+ c->release();
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int cls_rgw_usage_log_read(IoCtx& io_ctx, const string& oid, const string& user, const string& bucket,
+ uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
+ string& read_iter, map<rgw_user_bucket, rgw_usage_log_entry>& usage,
+ bool *is_truncated)
+{
+ if (is_truncated)
+ *is_truncated = false;
+
+ bufferlist in, out;
+ rgw_cls_usage_log_read_op call;
+ call.start_epoch = start_epoch;
+ call.end_epoch = end_epoch;
+ call.owner = user;
+ call.max_entries = max_entries;
+ call.bucket = bucket;
+ call.iter = read_iter;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_USER_USAGE_LOG_READ, in, out);
+ if (r < 0)
+ return r;
+
+ try {
+ rgw_cls_usage_log_read_ret result;
+ auto iter = out.cbegin();
+ decode(result, iter);
+ read_iter = result.next_iter;
+ if (is_truncated)
+ *is_truncated = result.truncated;
+
+ usage = result.usage;
+ } catch (buffer::error& e) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
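+/*
+ * Trim usage records in [start_epoch, end_epoch) for the given user/bucket.
+ * The class method trims a bounded number of records per call, so the
+ * operation is reissued until it returns -ENODATA (nothing left to trim).
+ */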
+int cls_rgw_usage_log_trim(IoCtx& io_ctx, const string& oid, const string& user, const string& bucket,
+ uint64_t start_epoch, uint64_t end_epoch)
+{
+ bufferlist in;
+ rgw_cls_usage_log_trim_op call;
+ call.start_epoch = start_epoch;
+ call.end_epoch = end_epoch;
+ call.user = user;
+ call.bucket = bucket;
+ encode(call, in);
+
+ bool done = false;
+ do {
+ ObjectWriteOperation op;
+ op.exec(RGW_CLASS, RGW_USER_USAGE_LOG_TRIM, in);
+ int r = io_ctx.operate(oid, &op);
+ if (r == -ENODATA)
+ done = true;
+ else if (r < 0)
+ return r;
+ } while (!done);
+
+ return 0;
+}
+
+void cls_rgw_usage_log_clear(ObjectWriteOperation& op)
+{
+ bufferlist in;
+ op.exec(RGW_CLASS, RGW_USAGE_LOG_CLEAR, in);
+}
+
+void cls_rgw_usage_log_add(ObjectWriteOperation& op, rgw_usage_log_info& info)
+{
+ bufferlist in;
+ rgw_cls_usage_log_add_op call;
+ call.info = info;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_USER_USAGE_LOG_ADD, in);
+}
+
+/* garbage collection */
+
+void cls_rgw_gc_set_entry(ObjectWriteOperation& op, uint32_t expiration_secs, cls_rgw_gc_obj_info& info)
+{
+ bufferlist in;
+ cls_rgw_gc_set_entry_op call;
+ call.expiration_secs = expiration_secs;
+ call.info = info;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_GC_SET_ENTRY, in);
+}
+
+void cls_rgw_gc_defer_entry(ObjectWriteOperation& op, uint32_t expiration_secs, const string& tag)
+{
+ bufferlist in;
+ cls_rgw_gc_defer_entry_op call;
+ call.expiration_secs = expiration_secs;
+ call.tag = tag;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_GC_DEFER_ENTRY, in);
+}
+
+int cls_rgw_gc_list(IoCtx& io_ctx, string& oid, string& marker, uint32_t max, bool expired_only,
+ list<cls_rgw_gc_obj_info>& entries, bool *truncated, string& next_marker)
+{
+ bufferlist in, out;
+ cls_rgw_gc_list_op call;
+ call.marker = marker;
+ call.max = max;
+ call.expired_only = expired_only;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_GC_LIST, in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_gc_list_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+
+ entries.swap(ret.entries);
+
+ if (truncated)
+ *truncated = ret.truncated;
+ next_marker = std::move(ret.next_marker);
+ return r;
+}
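+
+/*
+ * Illustrative sketch (editorial): paginating the GC list with the returned
+ * next_marker until truncated comes back false. io_ctx and oid are assumed
+ * to exist in the caller; max == 100 and expired_only == true are arbitrary.
+ *
+ *   string marker, next_marker;
+ *   bool truncated = true;
+ *   while (truncated) {
+ *     list<cls_rgw_gc_obj_info> entries;
+ *     int r = cls_rgw_gc_list(io_ctx, oid, marker, 100, true,
+ *                             entries, &truncated, next_marker);
+ *     if (r < 0)
+ *       break;          // caller-specific error handling
+ *     // ... process entries ...
+ *     marker = next_marker;
+ *   }
+ */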
+
+void cls_rgw_gc_remove(librados::ObjectWriteOperation& op, const vector<string>& tags)
+{
+ bufferlist in;
+ cls_rgw_gc_remove_op call;
+ call.tags = tags;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_GC_REMOVE, in);
+}
+
+int cls_rgw_lc_get_head(IoCtx& io_ctx, const string& oid, cls_rgw_lc_obj_head& head)
+{
+ bufferlist in, out;
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_GET_HEAD, in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_lc_get_head_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+ head = ret.head;
+
+ return r;
+}
+
+int cls_rgw_lc_put_head(IoCtx& io_ctx, const string& oid, cls_rgw_lc_obj_head& head)
+{
+ bufferlist in, out;
+ cls_rgw_lc_put_head_op call;
+ call.head = head;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_PUT_HEAD, in, out);
+ return r;
+}
+
+int cls_rgw_lc_get_next_entry(IoCtx& io_ctx, const string& oid, string& marker, pair<string, int>& entry)
+{
+ bufferlist in, out;
+ cls_rgw_lc_get_next_entry_op call;
+ call.marker = marker;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_GET_NEXT_ENTRY, in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_lc_get_next_entry_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+ entry = ret.entry;
+
+ return r;
+}
+
+int cls_rgw_lc_rm_entry(IoCtx& io_ctx, const string& oid, const pair<string, int>& entry)
+{
+ bufferlist in, out;
+ cls_rgw_lc_rm_entry_op call;
+ call.entry = entry;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_RM_ENTRY, in, out);
+ return r;
+}
+
+int cls_rgw_lc_set_entry(IoCtx& io_ctx, const string& oid, const pair<string, int>& entry)
+{
+ bufferlist in, out;
+ cls_rgw_lc_set_entry_op call;
+ call.entry = entry;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_SET_ENTRY, in, out);
+ return r;
+}
+
+int cls_rgw_lc_get_entry(IoCtx& io_ctx, const string& oid, const std::string& marker, rgw_lc_entry_t& entry)
+{
+ bufferlist in, out;
+ cls_rgw_lc_get_entry_op call{marker};
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_GET_ENTRY, in, out);
+
+ if (r < 0) {
+ return r;
+ }
+
+ cls_rgw_lc_get_entry_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+
+ entry = std::move(ret.entry);
+ return r;
+}
+
+int cls_rgw_lc_list(IoCtx& io_ctx, const string& oid,
+ const string& marker,
+ uint32_t max_entries,
+ map<string, int>& entries)
+{
+ bufferlist in, out;
+ cls_rgw_lc_list_entries_op op;
+
+ entries.clear();
+
+ op.marker = marker;
+ op.max_entries = max_entries;
+
+ encode(op, in);
+
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_LIST_ENTRIES, in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_lc_list_entries_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+ entries.insert(ret.entries.begin(), ret.entries.end());
+
+ return r;
+}
+
+void cls_rgw_reshard_add(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry)
+{
+ bufferlist in;
+ cls_rgw_reshard_add_op call;
+ call.entry = entry;
+ encode(call, in);
+ op.exec("rgw", "reshard_add", in);
+}
+
+int cls_rgw_reshard_list(librados::IoCtx& io_ctx, const string& oid, string& marker, uint32_t max,
+ list<cls_rgw_reshard_entry>& entries, bool* is_truncated)
+{
+ bufferlist in, out;
+ cls_rgw_reshard_list_op call;
+ call.marker = marker;
+ call.max = max;
+ encode(call, in);
+ int r = io_ctx.exec(oid, "rgw", "reshard_list", in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_reshard_list_ret op_ret;
+ auto iter = out.cbegin();
+ try {
+ decode(op_ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+
+ entries.swap(op_ret.entries);
+ *is_truncated = op_ret.is_truncated;
+
+ return 0;
+}
+
+int cls_rgw_reshard_get(librados::IoCtx& io_ctx, const string& oid, cls_rgw_reshard_entry& entry)
+{
+ bufferlist in, out;
+ cls_rgw_reshard_get_op call;
+ call.entry = entry;
+ encode(call, in);
+ int r = io_ctx.exec(oid, "rgw", "reshard_get", in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_reshard_get_ret op_ret;
+ auto iter = out.cbegin();
+ try {
+ decode(op_ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+
+ entry = op_ret.entry;
+
+ return 0;
+}
+
+void cls_rgw_reshard_remove(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry)
+{
+ bufferlist in;
+ cls_rgw_reshard_remove_op call;
+ call.tenant = entry.tenant;
+ call.bucket_name = entry.bucket_name;
+ call.bucket_id = entry.bucket_id;
+ encode(call, in);
+ op.exec("rgw", "reshard_remove", in);
+}
+
+int cls_rgw_set_bucket_resharding(librados::IoCtx& io_ctx, const string& oid,
+ const cls_rgw_bucket_instance_entry& entry)
+{
+ bufferlist in, out;
+ cls_rgw_set_bucket_resharding_op call;
+ call.entry = entry;
+ encode(call, in);
+ return io_ctx.exec(oid, "rgw", "set_bucket_resharding", in, out);
+}
+
+int cls_rgw_clear_bucket_resharding(librados::IoCtx& io_ctx, const string& oid)
+{
+ bufferlist in, out;
+ cls_rgw_clear_bucket_resharding_op call;
+ encode(call, in);
+ return io_ctx.exec(oid, "rgw", "clear_bucket_resharding", in, out);
+}
+
+int cls_rgw_get_bucket_resharding(librados::IoCtx& io_ctx, const string& oid,
+ cls_rgw_bucket_instance_entry *entry)
+{
+ bufferlist in, out;
+ cls_rgw_get_bucket_resharding_op call;
+ encode(call, in);
+ int r = io_ctx.exec(oid, "rgw", "get_bucket_resharding", in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_get_bucket_resharding_ret op_ret;
+ auto iter = out.cbegin();
+ try {
+ decode(op_ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+
+ *entry = op_ret.new_instance;
+
+ return 0;
+}
+
+void cls_rgw_guard_bucket_resharding(librados::ObjectOperation& op, int ret_err)
+{
+ bufferlist in, out;
+ cls_rgw_guard_bucket_resharding_op call;
+ call.ret_err = ret_err;
+ encode(call, in);
+ op.exec("rgw", "guard_bucket_resharding", in);
+}
+
+static bool issue_set_bucket_resharding(librados::IoCtx& io_ctx, const string& oid,
+ const cls_rgw_bucket_instance_entry& entry,
+ BucketIndexAioManager *manager) {
+ bufferlist in;
+ cls_rgw_set_bucket_resharding_op call;
+ call.entry = entry;
+ encode(call, in);
+ librados::ObjectWriteOperation op;
+ op.exec("rgw", "set_bucket_resharding", in);
+ return manager->aio_operate(io_ctx, oid, &op);
+}
+
+int CLSRGWIssueSetBucketResharding::issue_op(int shard_id, const string& oid)
+{
+ return issue_set_bucket_resharding(io_ctx, oid, entry, &manager);
+}
diff --git a/src/cls/rgw/cls_rgw_client.h b/src/cls/rgw/cls_rgw_client.h
new file mode 100644
index 00000000..b090fbd7
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_client.h
@@ -0,0 +1,593 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_RGW_CLIENT_H
+#define CEPH_CLS_RGW_CLIENT_H
+
+#include "include/str_list.h"
+#include "include/rados/librados.hpp"
+#include "cls_rgw_ops.h"
+#include "cls_rgw_const.h"
+#include "common/RefCountedObj.h"
+#include "include/compat.h"
+#include "common/ceph_time.h"
+#include "common/Mutex.h"
+#include "common/Cond.h"
+
+// Forward declaration
+class BucketIndexAioManager;
+/*
+ * Bucket index AIO request argument; this is used to pass an argument
+ * to the callback.
+ */
+struct BucketIndexAioArg : public RefCountedObject {
+ BucketIndexAioArg(int _id, BucketIndexAioManager* _manager) :
+ id(_id), manager(_manager) {}
+ int id;
+ BucketIndexAioManager* manager;
+};
+
+/*
+ * This class manages AIO completions. It is not completely thread-safe:
+ * methods such as *get_next* are not thread-safe and are expected to be
+ * called from within a single thread.
+ */
+class BucketIndexAioManager {
+private:
+ map<int, librados::AioCompletion*> pendings;
+ map<int, librados::AioCompletion*> completions;
+ map<int, string> pending_objs;
+ map<int, string> completion_objs;
+ int next;
+ Mutex lock;
+ Cond cond;
+ /*
+ * Callback implementation for AIO request.
+ */
+ static void bucket_index_op_completion_cb(void* cb, void* arg) {
+ BucketIndexAioArg* cb_arg = (BucketIndexAioArg*) arg;
+ cb_arg->manager->do_completion(cb_arg->id);
+ cb_arg->put();
+ }
+
+ /*
+ * Get next request ID. This method is not thread-safe.
+ *
+ * Return next request ID.
+ */
+ int get_next() { return next++; }
+
+ /*
+ * Add a new pending AIO completion instance.
+ *
+ * @param id - the request ID.
+ * @param completion - the AIO completion instance.
+ * @param oid - the object id associated with this request; it is recorded so
+ * that completed objects can be reported back per request.
+ */
+ void add_pending(int id, librados::AioCompletion* completion, const string& oid) {
+ pendings[id] = completion;
+ pending_objs[id] = oid;
+ }
+public:
+ /*
+ * Create a new instance.
+ */
+ BucketIndexAioManager() : next(0), lock("BucketIndexAioManager::lock") {}
+
+
+ /*
+ * Do completion for the given AIO request.
+ */
+ void do_completion(int id);
+
+ /*
+ * Wait for AIO completions.
+ *
+ * valid_ret_code - valid AIO return code.
+ * num_completions - number of completions.
+ * ret_code - return code of failed AIO.
+ * objs - the objects whose AIO has finished.
+ *
+ * Return false if there is no pending AIO, true otherwise.
+ */
+ bool wait_for_completions(int valid_ret_code, int *num_completions, int *ret_code,
+ map<int, string> *objs);
+
+ /**
+ * Do aio read operation.
+ */
+ bool aio_operate(librados::IoCtx& io_ctx, const string& oid, librados::ObjectReadOperation *op) {
+ Mutex::Locker l(lock);
+ BucketIndexAioArg *arg = new BucketIndexAioArg(get_next(), this);
+ librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, NULL, bucket_index_op_completion_cb);
+ int r = io_ctx.aio_operate(oid, c, (librados::ObjectReadOperation*)op, NULL);
+ if (r >= 0) {
+ add_pending(arg->id, c, oid);
+ } else {
+ c->release();
+ }
+ return r;
+ }
+
+ /**
+ * Do aio write operation.
+ */
+ bool aio_operate(librados::IoCtx& io_ctx, const string& oid, librados::ObjectWriteOperation *op) {
+ Mutex::Locker l(lock);
+ BucketIndexAioArg *arg = new BucketIndexAioArg(get_next(), this);
+ librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, NULL, bucket_index_op_completion_cb);
+ int r = io_ctx.aio_operate(oid, c, (librados::ObjectWriteOperation*)op);
+ if (r >= 0) {
+ add_pending(arg->id, c, oid);
+ } else {
+ c->release();
+ }
+ return r;
+ }
+};
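+
+/*
+ * Illustrative sketch (editorial): the typical pattern for the manager is to
+ * issue one operation per shard object and then drain the completions. The
+ * shard_oids map and io_ctx below are assumptions standing in for caller
+ * state; -EEXIST is used as the acceptable return code, as the index-init
+ * path does.
+ *
+ *   BucketIndexAioManager mgr;
+ *   for (auto& kv : shard_oids) {
+ *     bufferlist in;
+ *     librados::ObjectWriteOperation op;
+ *     op.exec(RGW_CLASS, RGW_BUCKET_INIT_INDEX, in);
+ *     mgr.aio_operate(io_ctx, kv.second, &op);
+ *   }
+ *   int completions = 0, ret = 0;
+ *   while (mgr.wait_for_completions(-EEXIST, &completions, &ret, nullptr)) {
+ *     if (ret < 0) {
+ *       // caller-specific error handling
+ *     }
+ *   }
+ */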
+
+class RGWGetDirHeader_CB : public RefCountedObject {
+public:
+ ~RGWGetDirHeader_CB() override {}
+ virtual void handle_response(int r, rgw_bucket_dir_header& header) = 0;
+};
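+
+/*
+ * Illustrative sketch (editorial): a minimal RGWGetDirHeader_CB implementation
+ * for use with cls_rgw_get_dir_header_async(). The callback is reference
+ * counted; the completion path calls put() on it when it is done.
+ *
+ *   struct MyDirHeaderCB : public RGWGetDirHeader_CB {
+ *     void handle_response(int r, rgw_bucket_dir_header& header) override {
+ *       // inspect r and the returned header here
+ *     }
+ *   };
+ */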
+
+class BucketIndexShardsManager {
+private:
+ // Per-shard settings (e.g., markers).
+ map<int, string> value_by_shards;
+public:
+ const static string KEY_VALUE_SEPARATOR;
+ const static string SHARDS_SEPARATOR;
+
+ void add(int shard, const string& value) {
+ value_by_shards[shard] = value;
+ }
+
+ const string& get(int shard, const string& default_value) {
+ map<int, string>::iterator iter = value_by_shards.find(shard);
+ return (iter == value_by_shards.end() ? default_value : iter->second);
+ }
+
+ map<int, string>& get() {
+ return value_by_shards;
+ }
+
+ bool empty() {
+ return value_by_shards.empty();
+ }
+
+ void to_string(string *out) const {
+ if (!out) {
+ return;
+ }
+ out->clear();
+ map<int, string>::const_iterator iter = value_by_shards.begin();
+ for (; iter != value_by_shards.end(); ++iter) {
+ if (out->length()) {
+ // Not the first item, append a separator first
+ out->append(SHARDS_SEPARATOR);
+ }
+ char buf[16];
+ snprintf(buf, sizeof(buf), "%d", iter->first);
+ out->append(buf);
+ out->append(KEY_VALUE_SEPARATOR);
+ out->append(iter->second);
+ }
+ }
+
+ static bool is_shards_marker(const string& marker) {
+ return marker.find(KEY_VALUE_SEPARATOR) != string::npos;
+ }
+
+ /*
+ * Convert from a string. The string can take one of two forms:
+ *
+ * 1. Single shard, no shard id specified, e.g. 000001.23.1
+ *
+ * In this case, if the passed shard_id >= 0, use that shard id; otherwise
+ * assume it's a bucket with no shards.
+ *
+ * 2. One or more shards, shard id specified for each shard, e.g., 0#00002.12,1#00003.23.2
+ *
+ */
+ int from_string(const string& composed_marker, int shard_id) {
+ value_by_shards.clear();
+ vector<string> shards;
+ get_str_vec(composed_marker, SHARDS_SEPARATOR.c_str(), shards);
+ if (shards.size() > 1 && shard_id >= 0) {
+ return -EINVAL;
+ }
+ vector<string>::const_iterator iter = shards.begin();
+ for (; iter != shards.end(); ++iter) {
+ size_t pos = iter->find(KEY_VALUE_SEPARATOR);
+ if (pos == string::npos) {
+ if (!value_by_shards.empty()) {
+ return -EINVAL;
+ }
+ if (shard_id < 0) {
+ add(0, *iter);
+ } else {
+ add(shard_id, *iter);
+ }
+ return 0;
+ }
+ string shard_str = iter->substr(0, pos);
+ string err;
+ int shard = (int)strict_strtol(shard_str.c_str(), 10, &err);
+ if (!err.empty()) {
+ return -EINVAL;
+ }
+ add(shard, iter->substr(pos + 1));
+ }
+ return 0;
+ }
+
+ // trim the '<shard-id>#' prefix from a single shard marker if present
+ static std::string get_shard_marker(const std::string& marker) {
+ auto p = marker.find(KEY_VALUE_SEPARATOR);
+ if (p == marker.npos) {
+ return marker;
+ }
+ return marker.substr(p + 1);
+ }
+};
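+
+/*
+ * Illustrative sketch (editorial): round-tripping a composed marker that
+ * covers two shards, using the format documented above. The marker values
+ * are made up.
+ *
+ *   BucketIndexShardsManager m;
+ *   int r = m.from_string("0#00002.12,1#00003.23.2", -1);  // r == 0
+ *   string shard1 = m.get(1, string());                    // "00003.23.2"
+ *   string composed;
+ *   m.to_string(&composed);                                // same composed form
+ */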
+
+/* bucket index */
+void cls_rgw_bucket_init_index(librados::ObjectWriteOperation& o);
+
+class CLSRGWConcurrentIO {
+protected:
+ librados::IoCtx& io_ctx;
+ map<int, string>& objs_container;
+ map<int, string>::iterator iter;
+ uint32_t max_aio;
+ BucketIndexAioManager manager;
+
+ virtual int issue_op(int shard_id, const string& oid) = 0;
+
+ virtual void cleanup() {}
+ virtual int valid_ret_code() { return 0; }
+ // Return true if multiple rounds of OPs might be needed; this happens when an
+ // OP needs to be re-sent until a certain return code is received.
+ virtual bool need_multiple_rounds() { return false; }
+ // Add a new object to the end of the container.
+ virtual void add_object(int shard, const string& oid) {}
+ virtual void reset_container(map<int, string>& objs) {}
+
+public:
+
+ CLSRGWConcurrentIO(librados::IoCtx& ioc,
+ map<int, string>& _objs_container,
+ uint32_t _max_aio) :
+ io_ctx(ioc), objs_container(_objs_container), max_aio(_max_aio)
+ {}
+
+ virtual ~CLSRGWConcurrentIO()
+ {}
+
+ int operator()() {
+ int ret = 0;
+ iter = objs_container.begin();
+ for (; iter != objs_container.end() && max_aio-- > 0; ++iter) {
+ ret = issue_op(iter->first, iter->second);
+ if (ret < 0)
+ break;
+ }
+
+ int num_completions = 0, r = 0;
+ map<int, string> objs;
+ map<int, string> *pobjs = (need_multiple_rounds() ? &objs : NULL);
+ while (manager.wait_for_completions(valid_ret_code(), &num_completions, &r, pobjs)) {
+ if (r >= 0 && ret >= 0) {
+ for (; num_completions && iter != objs_container.end(); --num_completions, ++iter) {
+ int issue_ret = issue_op(iter->first, iter->second);
+ if (issue_ret < 0) {
+ ret = issue_ret;
+ break;
+ }
+ }
+ } else if (ret >= 0) {
+ ret = r;
+ }
+ if (need_multiple_rounds() && iter == objs_container.end() && !objs.empty()) {
+ // For those objects which need another round, use them to reset
+ // the container
+ reset_container(objs);
+ iter = objs_container.begin();
+ for (; num_completions && iter != objs_container.end(); --num_completions, ++iter) {
+ int issue_ret = issue_op(iter->first, iter->second);
+ if (issue_ret < 0) {
+ ret = issue_ret;
+ break;
+ }
+ }
+ }
+ }
+
+ if (ret < 0) {
+ cleanup();
+ }
+ return ret;
+ }
+};
+
+class CLSRGWIssueBucketIndexInit : public CLSRGWConcurrentIO {
+protected:
+ int issue_op(int shard_id, const string& oid) override;
+ int valid_ret_code() override { return -EEXIST; }
+ void cleanup() override;
+public:
+ CLSRGWIssueBucketIndexInit(librados::IoCtx& ioc, map<int, string>& _bucket_objs,
+ uint32_t _max_aio) :
+ CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio) {}
+};
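+
+/*
+ * Illustrative sketch (editorial): the concurrent-IO helpers are functors;
+ * construct one with the shard map and invoke it to issue the ops and wait
+ * for their completions. The object names below are placeholders.
+ *
+ *   map<int, string> bucket_objs = {
+ *     {0, ".dir.<bucket-id>.0"},   // placeholder shard object names
+ *     {1, ".dir.<bucket-id>.1"},
+ *   };
+ *   uint32_t max_aio = 8;
+ *   int r = CLSRGWIssueBucketIndexInit(io_ctx, bucket_objs, max_aio)();
+ */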
+
+
+class CLSRGWIssueBucketIndexClean : public CLSRGWConcurrentIO {
+protected:
+ int issue_op(int shard_id, const string& oid) override;
+ int valid_ret_code() override {
+ return -ENOENT;
+ }
+
+public:
+ CLSRGWIssueBucketIndexClean(librados::IoCtx& ioc,
+ map<int, string>& _bucket_objs,
+ uint32_t _max_aio) :
+ CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio)
+ {}
+};
+
+
+class CLSRGWIssueSetTagTimeout : public CLSRGWConcurrentIO {
+ uint64_t tag_timeout;
+protected:
+ int issue_op(int shard_id, const string& oid) override;
+public:
+ CLSRGWIssueSetTagTimeout(librados::IoCtx& ioc, map<int, string>& _bucket_objs,
+ uint32_t _max_aio, uint64_t _tag_timeout) :
+ CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), tag_timeout(_tag_timeout) {}
+};
+
+void cls_rgw_bucket_update_stats(librados::ObjectWriteOperation& o,
+ bool absolute,
+ const map<RGWObjCategory, rgw_bucket_category_stats>& stats);
+
+void cls_rgw_bucket_prepare_op(librados::ObjectWriteOperation& o, RGWModifyOp op, string& tag,
+ const cls_rgw_obj_key& key, const string& locator, bool log_op,
+ uint16_t bilog_op, rgw_zone_set& zones_trace);
+
+void cls_rgw_bucket_complete_op(librados::ObjectWriteOperation& o, RGWModifyOp op, string& tag,
+ rgw_bucket_entry_ver& ver,
+ const cls_rgw_obj_key& key,
+ rgw_bucket_dir_entry_meta& dir_meta,
+ list<cls_rgw_obj_key> *remove_objs, bool log_op,
+ uint16_t bilog_op, rgw_zone_set *zones_trace);
+
+void cls_rgw_remove_obj(librados::ObjectWriteOperation& o, list<string>& keep_attr_prefixes);
+void cls_rgw_obj_store_pg_ver(librados::ObjectWriteOperation& o, const string& attr);
+void cls_rgw_obj_check_attrs_prefix(librados::ObjectOperation& o, const string& prefix, bool fail_if_exist);
+void cls_rgw_obj_check_mtime(librados::ObjectOperation& o, const ceph::real_time& mtime, bool high_precision_time, RGWCheckMTimeType type);
+
+int cls_rgw_bi_get(librados::IoCtx& io_ctx, const string oid,
+ BIIndexType index_type, cls_rgw_obj_key& key,
+ rgw_cls_bi_entry *entry);
+int cls_rgw_bi_put(librados::IoCtx& io_ctx, const string oid, rgw_cls_bi_entry& entry);
+void cls_rgw_bi_put(librados::ObjectWriteOperation& op, const string oid, rgw_cls_bi_entry& entry);
+int cls_rgw_bi_list(librados::IoCtx& io_ctx, const string oid,
+ const string& name, const string& marker, uint32_t max,
+ list<rgw_cls_bi_entry> *entries, bool *is_truncated);
+
+
+int cls_rgw_bucket_link_olh(librados::IoCtx& io_ctx, librados::ObjectWriteOperation& op,
+ const string& oid, const cls_rgw_obj_key& key, bufferlist& olh_tag,
+ bool delete_marker, const string& op_tag, rgw_bucket_dir_entry_meta *meta,
+ uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, rgw_zone_set& zones_trace);
+int cls_rgw_bucket_unlink_instance(librados::IoCtx& io_ctx, librados::ObjectWriteOperation& op,
+ const string& oid, const cls_rgw_obj_key& key, const string& op_tag,
+ const string& olh_tag, uint64_t olh_epoch, bool log_op, rgw_zone_set& zones_trace);
+int cls_rgw_get_olh_log(librados::IoCtx& io_ctx, string& oid, librados::ObjectReadOperation& op, const cls_rgw_obj_key& olh, uint64_t ver_marker,
+ const string& olh_tag,
+ map<uint64_t, vector<rgw_bucket_olh_log_entry> > *log, bool *is_truncated);
+void cls_rgw_trim_olh_log(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, uint64_t ver, const string& olh_tag);
+int cls_rgw_clear_olh(librados::IoCtx& io_ctx, librados::ObjectWriteOperation& op, string& oid, const cls_rgw_obj_key& olh, const string& olh_tag);
+
+/**
+ * List the bucket with the starting object and filter prefix.
+ * NOTE: this class issues a listing request for each bucket index shard
+ * identified by the keys of the *list_results* map, which means the caller
+ * must populate the map with each bucket index object id.
+ *
+ * io_ctx - IO context for rados.
+ * start_obj - marker for the listing.
+ * filter_prefix - filter prefix.
+ * num_entries - number of entries to request from each object (note that the
+ * total number of entries returned depends on the number of shards).
+ * list_results - the list results, keyed by shard id.
+ * max_aio - the maximum number of AIO (for throttling).
+ *
+ * Return 0 on success, a failure code otherwise.
+*/
+
+class CLSRGWIssueBucketList : public CLSRGWConcurrentIO {
+ cls_rgw_obj_key start_obj;
+ string filter_prefix;
+ uint32_t num_entries;
+ bool list_versions;
+ map<int, rgw_cls_list_ret>& result;
+protected:
+ int issue_op(int shard_id, const string& oid) override;
+public:
+ CLSRGWIssueBucketList(librados::IoCtx& io_ctx, const cls_rgw_obj_key& _start_obj,
+ const string& _filter_prefix, uint32_t _num_entries,
+ bool _list_versions,
+ map<int, string>& oids,
+ map<int, rgw_cls_list_ret>& list_results,
+ uint32_t max_aio) :
+ CLSRGWConcurrentIO(io_ctx, oids, max_aio),
+ start_obj(_start_obj), filter_prefix(_filter_prefix), num_entries(_num_entries), list_versions(_list_versions), result(list_results) {}
+};
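+
+/*
+ * Illustrative sketch (editorial): listing up to 1000 entries per shard across
+ * all index shards. The oids map must already name every shard's index object;
+ * results come back keyed the same way.
+ *
+ *   map<int, rgw_cls_list_ret> results;
+ *   cls_rgw_obj_key start;          // empty key: list from the beginning
+ *   string filter_prefix;           // empty: no prefix filter
+ *   int r = CLSRGWIssueBucketList(io_ctx, start, filter_prefix, 1000,
+ *                                 false, oids, results, 8)();
+ */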
+
+void cls_rgw_bucket_list_op(librados::ObjectReadOperation& op,
+ const cls_rgw_obj_key& start_obj,
+ const std::string& filter_prefix,
+ uint32_t num_entries,
+ bool list_versions,
+ rgw_cls_list_ret* result);
+
+class CLSRGWIssueBILogList : public CLSRGWConcurrentIO {
+ map<int, cls_rgw_bi_log_list_ret>& result;
+ BucketIndexShardsManager& marker_mgr;
+ uint32_t max;
+protected:
+ int issue_op(int shard_id, const string& oid) override;
+public:
+ CLSRGWIssueBILogList(librados::IoCtx& io_ctx, BucketIndexShardsManager& _marker_mgr, uint32_t _max,
+ map<int, string>& oids,
+ map<int, cls_rgw_bi_log_list_ret>& bi_log_lists, uint32_t max_aio) :
+ CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(bi_log_lists),
+ marker_mgr(_marker_mgr), max(_max) {}
+};
+
+class CLSRGWIssueBILogTrim : public CLSRGWConcurrentIO {
+ BucketIndexShardsManager& start_marker_mgr;
+ BucketIndexShardsManager& end_marker_mgr;
+protected:
+ int issue_op(int shard_id, const string& oid) override;
+ // Trim until -ENODATA is returned.
+ int valid_ret_code() override { return -ENODATA; }
+ bool need_multiple_rounds() override { return true; }
+ void add_object(int shard, const string& oid) override { objs_container[shard] = oid; }
+ void reset_container(map<int, string>& objs) override {
+ objs_container.swap(objs);
+ iter = objs_container.begin();
+ objs.clear();
+ }
+public:
+ CLSRGWIssueBILogTrim(librados::IoCtx& io_ctx, BucketIndexShardsManager& _start_marker_mgr,
+ BucketIndexShardsManager& _end_marker_mgr, map<int, string>& _bucket_objs, uint32_t max_aio) :
+ CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio),
+ start_marker_mgr(_start_marker_mgr), end_marker_mgr(_end_marker_mgr) {}
+};
+
+/**
+ * Check the bucket index.
+ *
+ * io_ctx - IO context for rados.
+ * bucket_objs_ret - check result for all shards.
+ * max_aio - the maximum number of AIO (for throttling).
+ *
+ * Return 0 on success, a failure code otherwise.
+ */
+class CLSRGWIssueBucketCheck : public CLSRGWConcurrentIO /*<map<string, rgw_cls_check_index_ret> >*/ {
+ map<int, rgw_cls_check_index_ret>& result;
+protected:
+ int issue_op(int shard_id, const string& oid) override;
+public:
+ CLSRGWIssueBucketCheck(librados::IoCtx& ioc, map<int, string>& oids,
+ map<int, rgw_cls_check_index_ret>& bucket_objs_ret,
+ uint32_t _max_aio) :
+ CLSRGWConcurrentIO(ioc, oids, _max_aio), result(bucket_objs_ret) {}
+};
+
+class CLSRGWIssueBucketRebuild : public CLSRGWConcurrentIO {
+protected:
+ int issue_op(int shard_id, const string& oid) override;
+public:
+ CLSRGWIssueBucketRebuild(librados::IoCtx& io_ctx, map<int, string>& bucket_objs,
+ uint32_t max_aio) : CLSRGWConcurrentIO(io_ctx, bucket_objs, max_aio) {}
+};
+
+class CLSRGWIssueGetDirHeader : public CLSRGWConcurrentIO {
+ map<int, rgw_cls_list_ret>& result;
+protected:
+ int issue_op(int shard_id, const string& oid) override;
+public:
+ CLSRGWIssueGetDirHeader(librados::IoCtx& io_ctx, map<int, string>& oids, map<int, rgw_cls_list_ret>& dir_headers,
+ uint32_t max_aio) :
+ CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(dir_headers) {}
+};
+
+class CLSRGWIssueSetBucketResharding : public CLSRGWConcurrentIO {
+ cls_rgw_bucket_instance_entry entry;
+protected:
+ int issue_op(int shard_id, const string& oid) override;
+public:
+ CLSRGWIssueSetBucketResharding(librados::IoCtx& ioc, map<int, string>& _bucket_objs,
+ const cls_rgw_bucket_instance_entry& _entry,
+ uint32_t _max_aio) : CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), entry(_entry) {}
+};
+
+class CLSRGWIssueResyncBucketBILog : public CLSRGWConcurrentIO {
+protected:
+ int issue_op(int shard_id, const string& oid) override;
+public:
+ CLSRGWIssueResyncBucketBILog(librados::IoCtx& io_ctx, map<int, string>& _bucket_objs, uint32_t max_aio) :
+ CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
+};
+
+class CLSRGWIssueBucketBILogStop : public CLSRGWConcurrentIO {
+protected:
+ int issue_op(int shard_id, const string& oid) override;
+public:
+ CLSRGWIssueBucketBILogStop(librados::IoCtx& io_ctx, map<int, string>& _bucket_objs, uint32_t max_aio) :
+ CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
+};
+
+int cls_rgw_get_dir_header_async(librados::IoCtx& io_ctx, string& oid, RGWGetDirHeader_CB *ctx);
+
+void cls_rgw_encode_suggestion(char op, rgw_bucket_dir_entry& dirent, bufferlist& updates);
+
+void cls_rgw_suggest_changes(librados::ObjectWriteOperation& o, bufferlist& updates);
+
+/* usage logging */
+int cls_rgw_usage_log_read(librados::IoCtx& io_ctx, const string& oid, const string& user, const string& bucket,
+ uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, string& read_iter,
+ map<rgw_user_bucket, rgw_usage_log_entry>& usage, bool *is_truncated);
+
+int cls_rgw_usage_log_trim(librados::IoCtx& io_ctx, const string& oid, const string& user, const string& bucket,
+ uint64_t start_epoch, uint64_t end_epoch);
+
+void cls_rgw_usage_log_clear(librados::ObjectWriteOperation& op);
+void cls_rgw_usage_log_add(librados::ObjectWriteOperation& op, rgw_usage_log_info& info);
+
+/* garbage collection */
+void cls_rgw_gc_set_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, cls_rgw_gc_obj_info& info);
+void cls_rgw_gc_defer_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, const string& tag);
+
+int cls_rgw_gc_list(librados::IoCtx& io_ctx, string& oid, string& marker, uint32_t max, bool expired_only,
+ list<cls_rgw_gc_obj_info>& entries, bool *truncated, string& next_marker);
+
+void cls_rgw_gc_remove(librados::ObjectWriteOperation& op, const vector<string>& tags);
+
+/* lifecycle */
+int cls_rgw_lc_get_head(librados::IoCtx& io_ctx, const string& oid, cls_rgw_lc_obj_head& head);
+int cls_rgw_lc_put_head(librados::IoCtx& io_ctx, const string& oid, cls_rgw_lc_obj_head& head);
+int cls_rgw_lc_get_next_entry(librados::IoCtx& io_ctx, const string& oid, string& marker, pair<string, int>& entry);
+int cls_rgw_lc_rm_entry(librados::IoCtx& io_ctx, const string& oid, const pair<string, int>& entry);
+int cls_rgw_lc_set_entry(librados::IoCtx& io_ctx, const string& oid, const pair<string, int>& entry);
+int cls_rgw_lc_get_entry(librados::IoCtx& io_ctx, const string& oid, const std::string& marker, rgw_lc_entry_t& entry);
+int cls_rgw_lc_list(librados::IoCtx& io_ctx, const string& oid,
+ const string& marker,
+ uint32_t max_entries,
+ map<string, int>& entries);
+
+/* resharding */
+void cls_rgw_reshard_add(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry);
+int cls_rgw_reshard_list(librados::IoCtx& io_ctx, const string& oid, string& marker, uint32_t max,
+ list<cls_rgw_reshard_entry>& entries, bool* is_truncated);
+int cls_rgw_reshard_get(librados::IoCtx& io_ctx, const string& oid, cls_rgw_reshard_entry& entry);
+int cls_rgw_reshard_get_head(librados::IoCtx& io_ctx, const string& oid, cls_rgw_reshard_entry& entry);
+void cls_rgw_reshard_remove(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry);
+
+/* resharding attribute on bucket index shard headers */
+int cls_rgw_set_bucket_resharding(librados::IoCtx& io_ctx, const string& oid,
+ const cls_rgw_bucket_instance_entry& entry);
+int cls_rgw_clear_bucket_resharding(librados::IoCtx& io_ctx, const string& oid);
+void cls_rgw_guard_bucket_resharding(librados::ObjectOperation& op, int ret_err);
+int cls_rgw_get_bucket_resharding(librados::IoCtx& io_ctx, const string& oid,
+ cls_rgw_bucket_instance_entry *entry);
+
+#endif
diff --git a/src/cls/rgw/cls_rgw_const.h b/src/cls/rgw/cls_rgw_const.h
new file mode 100644
index 00000000..fc3537ea
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_const.h
@@ -0,0 +1,76 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_RGW_CONST_H
+#define CEPH_CLS_RGW_CONST_H
+
+#define RGW_CLASS "rgw"
+
+/* bucket index */
+#define RGW_BUCKET_INIT_INDEX "bucket_init_index"
+
+
+#define RGW_BUCKET_SET_TAG_TIMEOUT "bucket_set_tag_timeout"
+#define RGW_BUCKET_LIST "bucket_list"
+#define RGW_BUCKET_CHECK_INDEX "bucket_check_index"
+#define RGW_BUCKET_REBUILD_INDEX "bucket_rebuild_index"
+#define RGW_BUCKET_UPDATE_STATS "bucket_update_stats"
+#define RGW_BUCKET_PREPARE_OP "bucket_prepare_op"
+#define RGW_BUCKET_COMPLETE_OP "bucket_complete_op"
+#define RGW_BUCKET_LINK_OLH "bucket_link_olh"
+#define RGW_BUCKET_UNLINK_INSTANCE "bucket_unlink_instance"
+#define RGW_BUCKET_READ_OLH_LOG "bucket_read_olh_log"
+#define RGW_BUCKET_TRIM_OLH_LOG "bucket_trim_olh_log"
+#define RGW_BUCKET_CLEAR_OLH "bucket_clear_olh"
+
+#define RGW_OBJ_REMOVE "obj_remove"
+#define RGW_OBJ_STORE_PG_VER "obj_store_pg_ver"
+#define RGW_OBJ_CHECK_ATTRS_PREFIX "obj_check_attrs_prefix"
+#define RGW_OBJ_CHECK_MTIME "obj_check_mtime"
+
+#define RGW_BI_GET "bi_get"
+#define RGW_BI_PUT "bi_put"
+#define RGW_BI_LIST "bi_list"
+
+#define RGW_BI_LOG_LIST "bi_log_list"
+#define RGW_BI_LOG_TRIM "bi_log_trim"
+#define RGW_DIR_SUGGEST_CHANGES "dir_suggest_changes"
+
+#define RGW_BI_LOG_RESYNC "bi_log_resync"
+#define RGW_BI_LOG_STOP "bi_log_stop"
+
+/* usage logging */
+#define RGW_USER_USAGE_LOG_ADD "user_usage_log_add"
+#define RGW_USER_USAGE_LOG_READ "user_usage_log_read"
+#define RGW_USER_USAGE_LOG_TRIM "user_usage_log_trim"
+#define RGW_USAGE_LOG_CLEAR "usage_log_clear"
+
+/* garbage collection */
+#define RGW_GC_SET_ENTRY "gc_set_entry"
+#define RGW_GC_DEFER_ENTRY "gc_defer_entry"
+#define RGW_GC_LIST "gc_list"
+#define RGW_GC_REMOVE "gc_remove"
+
+/* lifecycle bucket list */
+#define RGW_LC_GET_ENTRY "lc_get_entry"
+#define RGW_LC_SET_ENTRY "lc_set_entry"
+#define RGW_LC_RM_ENTRY "lc_rm_entry"
+#define RGW_LC_GET_NEXT_ENTRY "lc_get_next_entry"
+#define RGW_LC_PUT_HEAD "lc_put_head"
+#define RGW_LC_GET_HEAD "lc_get_head"
+#define RGW_LC_LIST_ENTRIES "lc_list_entries"
+
+/* resharding */
+#define RGW_RESHARD_ADD "reshard_add"
+#define RGW_RESHARD_LIST "reshard_list"
+#define RGW_RESHARD_GET "reshard_get"
+#define RGW_RESHARD_REMOVE "reshard_remove"
+
+/* resharding attribute */
+#define RGW_SET_BUCKET_RESHARDING "set_bucket_resharding"
+#define RGW_CLEAR_BUCKET_RESHARDING "clear_bucket_resharding"
+#define RGW_GUARD_BUCKET_RESHARDING "guard_bucket_resharding"
+#define RGW_GET_BUCKET_RESHARDING "get_bucket_resharding"
+
+
+#endif
diff --git a/src/cls/rgw/cls_rgw_ops.cc b/src/cls/rgw/cls_rgw_ops.cc
new file mode 100644
index 00000000..713cee41
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_ops.cc
@@ -0,0 +1,548 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "cls/rgw/cls_rgw_ops.h"
+
+#include "common/Formatter.h"
+#include "common/ceph_json.h"
+#include "include/utime.h"
+
+void rgw_cls_tag_timeout_op::dump(Formatter *f) const
+{
+ f->dump_int("tag_timeout", tag_timeout);
+}
+
+void rgw_cls_tag_timeout_op::generate_test_instances(list<rgw_cls_tag_timeout_op*>& ls)
+{
+ ls.push_back(new rgw_cls_tag_timeout_op);
+ ls.push_back(new rgw_cls_tag_timeout_op);
+ ls.back()->tag_timeout = 23323;
+}
+
+void cls_rgw_gc_set_entry_op::dump(Formatter *f) const
+{
+ f->dump_unsigned("expiration_secs", expiration_secs);
+ f->open_object_section("obj_info");
+ info.dump(f);
+ f->close_section();
+}
+
+void cls_rgw_gc_set_entry_op::generate_test_instances(list<cls_rgw_gc_set_entry_op*>& ls)
+{
+ ls.push_back(new cls_rgw_gc_set_entry_op);
+ ls.push_back(new cls_rgw_gc_set_entry_op);
+ ls.back()->expiration_secs = 123;
+}
+
+void cls_rgw_gc_defer_entry_op::dump(Formatter *f) const
+{
+ f->dump_unsigned("expiration_secs", expiration_secs);
+ f->dump_string("tag", tag);
+}
+
+void cls_rgw_gc_defer_entry_op::generate_test_instances(list<cls_rgw_gc_defer_entry_op*>& ls)
+{
+ ls.push_back(new cls_rgw_gc_defer_entry_op);
+ ls.push_back(new cls_rgw_gc_defer_entry_op);
+ ls.back()->expiration_secs = 123;
+ ls.back()->tag = "footag";
+}
+
+void cls_rgw_gc_list_op::dump(Formatter *f) const
+{
+ f->dump_string("marker", marker);
+ f->dump_unsigned("max", max);
+ f->dump_bool("expired_only", expired_only);
+}
+
+void cls_rgw_gc_list_op::generate_test_instances(list<cls_rgw_gc_list_op*>& ls)
+{
+ ls.push_back(new cls_rgw_gc_list_op);
+ ls.push_back(new cls_rgw_gc_list_op);
+ ls.back()->marker = "mymarker";
+ ls.back()->max = 2312;
+}
+
+void cls_rgw_gc_list_ret::dump(Formatter *f) const
+{
+ encode_json("entries", entries, f);
+ f->dump_string("next_marker", next_marker);
+ f->dump_int("truncated", (int)truncated);
+}
+
+void cls_rgw_gc_list_ret::generate_test_instances(list<cls_rgw_gc_list_ret*>& ls)
+{
+ ls.push_back(new cls_rgw_gc_list_ret);
+ ls.push_back(new cls_rgw_gc_list_ret);
+ ls.back()->entries.push_back(cls_rgw_gc_obj_info());
+ ls.back()->truncated = true;
+}
+
+
+void cls_rgw_gc_remove_op::dump(Formatter *f) const
+{
+ encode_json("tags", tags, f);
+}
+
+void cls_rgw_gc_remove_op::generate_test_instances(list<cls_rgw_gc_remove_op*>& ls)
+{
+ ls.push_back(new cls_rgw_gc_remove_op);
+ ls.push_back(new cls_rgw_gc_remove_op);
+ ls.back()->tags.push_back("tag1");
+ ls.back()->tags.push_back("tag2");
+}
+
+void rgw_cls_obj_prepare_op::generate_test_instances(list<rgw_cls_obj_prepare_op*>& o)
+{
+ rgw_cls_obj_prepare_op *op = new rgw_cls_obj_prepare_op;
+ op->op = CLS_RGW_OP_ADD;
+ op->key.name = "name";
+ op->tag = "tag";
+ op->locator = "locator";
+ o.push_back(op);
+ o.push_back(new rgw_cls_obj_prepare_op);
+}
+
+void rgw_cls_obj_prepare_op::dump(Formatter *f) const
+{
+ f->dump_int("op", op);
+ f->dump_string("name", key.name);
+ f->dump_string("tag", tag);
+ f->dump_string("locator", locator);
+ f->dump_bool("log_op", log_op);
+ f->dump_int("bilog_flags", bilog_flags);
+ ::encode_json("zones_trace", zones_trace, f);
+}
+
+void rgw_cls_obj_complete_op::generate_test_instances(list<rgw_cls_obj_complete_op*>& o)
+{
+ rgw_cls_obj_complete_op *op = new rgw_cls_obj_complete_op;
+ op->op = CLS_RGW_OP_DEL;
+ op->key.name = "name";
+ op->locator = "locator";
+ op->ver.pool = 2;
+ op->ver.epoch = 100;
+ op->tag = "tag";
+
+ list<rgw_bucket_dir_entry_meta *> l;
+ rgw_bucket_dir_entry_meta::generate_test_instances(l);
+ list<rgw_bucket_dir_entry_meta *>::iterator iter = l.begin();
+ op->meta = *(*iter);
+
+ o.push_back(op);
+
+ o.push_back(new rgw_cls_obj_complete_op);
+}
+
+void rgw_cls_obj_complete_op::dump(Formatter *f) const
+{
+ f->dump_int("op", (int)op);
+ f->dump_string("name", key.name);
+ f->dump_string("instance", key.instance);
+ f->dump_string("locator", locator);
+ f->open_object_section("ver");
+ ver.dump(f);
+ f->close_section();
+ f->open_object_section("meta");
+ meta.dump(f);
+ f->close_section();
+ f->dump_string("tag", tag);
+ f->dump_bool("log_op", log_op);
+ f->dump_int("bilog_flags", bilog_flags);
+ ::encode_json("zones_trace", zones_trace, f);
+}
+
+void rgw_cls_link_olh_op::generate_test_instances(list<rgw_cls_link_olh_op*>& o)
+{
+ rgw_cls_link_olh_op *op = new rgw_cls_link_olh_op;
+ op->key.name = "name";
+ op->olh_tag = "olh_tag";
+ op->delete_marker = true;
+ op->op_tag = "op_tag";
+ op->olh_epoch = 123;
+ list<rgw_bucket_dir_entry_meta *> l;
+ rgw_bucket_dir_entry_meta::generate_test_instances(l);
+ list<rgw_bucket_dir_entry_meta *>::iterator iter = l.begin();
+ op->meta = *(*iter);
+ op->log_op = true;
+
+ o.push_back(op);
+
+ o.push_back(new rgw_cls_link_olh_op);
+}
+
+void rgw_cls_link_olh_op::dump(Formatter *f) const
+{
+ ::encode_json("key", key, f);
+ ::encode_json("olh_tag", olh_tag, f);
+ ::encode_json("delete_marker", delete_marker, f);
+ ::encode_json("op_tag", op_tag, f);
+ ::encode_json("meta", meta, f);
+ ::encode_json("olh_epoch", olh_epoch, f);
+ ::encode_json("log_op", log_op, f);
+ ::encode_json("bilog_flags", (uint32_t)bilog_flags, f);
+ utime_t ut(unmod_since);
+ ::encode_json("unmod_since", ut, f);
+ ::encode_json("high_precision_time", high_precision_time, f);
+ ::encode_json("zones_trace", zones_trace, f);
+}
+
+void rgw_cls_unlink_instance_op::generate_test_instances(list<rgw_cls_unlink_instance_op*>& o)
+{
+ rgw_cls_unlink_instance_op *op = new rgw_cls_unlink_instance_op;
+ op->key.name = "name";
+ op->op_tag = "op_tag";
+ op->olh_epoch = 124;
+ op->log_op = true;
+
+ o.push_back(op);
+
+ o.push_back(new rgw_cls_unlink_instance_op);
+}
+
+void rgw_cls_unlink_instance_op::dump(Formatter *f) const
+{
+ ::encode_json("key", key, f);
+ ::encode_json("op_tag", op_tag, f);
+ ::encode_json("olh_epoch", olh_epoch, f);
+ ::encode_json("log_op", log_op, f);
+ ::encode_json("bilog_flags", (uint32_t)bilog_flags, f);
+ ::encode_json("zones_trace", zones_trace, f);
+}
+
+void rgw_cls_read_olh_log_op::generate_test_instances(list<rgw_cls_read_olh_log_op*>& o)
+{
+ rgw_cls_read_olh_log_op *op = new rgw_cls_read_olh_log_op;
+ op->olh.name = "name";
+ op->ver_marker = 123;
+ op->olh_tag = "olh_tag";
+
+ o.push_back(op);
+
+ o.push_back(new rgw_cls_read_olh_log_op);
+}
+
+void rgw_cls_read_olh_log_op::dump(Formatter *f) const
+{
+ ::encode_json("olh", olh, f);
+ ::encode_json("ver_marker", ver_marker, f);
+ ::encode_json("olh_tag", olh_tag, f);
+}
+
+void rgw_cls_read_olh_log_ret::generate_test_instances(list<rgw_cls_read_olh_log_ret*>& o)
+{
+ rgw_cls_read_olh_log_ret *r = new rgw_cls_read_olh_log_ret;
+ r->is_truncated = true;
+ list<rgw_bucket_olh_log_entry *> l;
+ rgw_bucket_olh_log_entry::generate_test_instances(l);
+ list<rgw_bucket_olh_log_entry *>::iterator iter = l.begin();
+ r->log[1].push_back(*(*iter));
+
+ o.push_back(r);
+
+ o.push_back(new rgw_cls_read_olh_log_ret);
+}
+
+void rgw_cls_read_olh_log_ret::dump(Formatter *f) const
+{
+ ::encode_json("log", log, f);
+ ::encode_json("is_truncated", is_truncated, f);
+}
+
+void rgw_cls_trim_olh_log_op::generate_test_instances(list<rgw_cls_trim_olh_log_op*>& o)
+{
+ rgw_cls_trim_olh_log_op *op = new rgw_cls_trim_olh_log_op;
+ op->olh.name = "olh.name";
+ op->ver = 100;
+ op->olh_tag = "olh_tag";
+
+ o.push_back(op);
+
+ o.push_back(new rgw_cls_trim_olh_log_op);
+}
+
+void rgw_cls_trim_olh_log_op::dump(Formatter *f) const
+{
+ ::encode_json("olh", olh, f);
+ ::encode_json("ver", ver, f);
+ ::encode_json("olh_tag", olh_tag, f);
+}
+
+void rgw_cls_bucket_clear_olh_op::generate_test_instances(list<rgw_cls_bucket_clear_olh_op *>& o)
+{
+
+ rgw_cls_bucket_clear_olh_op *op = new rgw_cls_bucket_clear_olh_op;
+ op->key.name = "key.name";
+ op->olh_tag = "olh_tag";
+
+ o.push_back(op);
+ o.push_back(new rgw_cls_bucket_clear_olh_op);
+}
+
+void rgw_cls_bucket_clear_olh_op::dump(Formatter *f) const
+{
+ ::encode_json("key", key, f);
+ ::encode_json("olh_tag", olh_tag, f);
+}
+
+void rgw_cls_list_op::generate_test_instances(list<rgw_cls_list_op*>& o)
+{
+ rgw_cls_list_op *op = new rgw_cls_list_op;
+ op->start_obj.name = "start_obj";
+ op->num_entries = 100;
+ op->filter_prefix = "filter_prefix";
+ o.push_back(op);
+ o.push_back(new rgw_cls_list_op);
+}
+
+void rgw_cls_list_op::dump(Formatter *f) const
+{
+ f->dump_string("start_obj", start_obj.name);
+ f->dump_unsigned("num_entries", num_entries);
+}
+
+void rgw_cls_list_ret::generate_test_instances(list<rgw_cls_list_ret*>& o)
+{
+ list<rgw_bucket_dir *> l;
+ rgw_bucket_dir::generate_test_instances(l);
+ list<rgw_bucket_dir *>::iterator iter;
+ for (iter = l.begin(); iter != l.end(); ++iter) {
+ rgw_bucket_dir *d = *iter;
+
+ rgw_cls_list_ret *ret = new rgw_cls_list_ret;
+ ret->dir = *d;
+ ret->is_truncated = true;
+
+ o.push_back(ret);
+
+ delete d;
+ }
+
+ o.push_back(new rgw_cls_list_ret);
+}
+
+void rgw_cls_list_ret::dump(Formatter *f) const
+{
+ f->open_object_section("dir");
+ dir.dump(f);
+ f->close_section();
+ f->dump_int("is_truncated", (int)is_truncated);
+}
+
+void rgw_cls_check_index_ret::generate_test_instances(list<rgw_cls_check_index_ret*>& o)
+{
+ list<rgw_bucket_dir_header *> h;
+ rgw_bucket_dir_header::generate_test_instances(h);
+ rgw_cls_check_index_ret *r = new rgw_cls_check_index_ret;
+ r->existing_header = *(h.front());
+ r->calculated_header = *(h.front());
+ o.push_back(r);
+
+ for (list<rgw_bucket_dir_header *>::iterator iter = h.begin(); iter != h.end(); ++iter) {
+ delete *iter;
+ }
+ o.push_back(new rgw_cls_check_index_ret);
+}
+
+void rgw_cls_check_index_ret::dump(Formatter *f) const
+{
+ ::encode_json("existing_header", existing_header, f);
+ ::encode_json("calculated_header", calculated_header, f);
+}
+
+void rgw_cls_bucket_update_stats_op::generate_test_instances(list<rgw_cls_bucket_update_stats_op*>& o)
+{
+ rgw_cls_bucket_update_stats_op *r = new rgw_cls_bucket_update_stats_op;
+ r->absolute = true;
+ rgw_bucket_category_stats& s = r->stats[RGWObjCategory::None];
+ s.total_size = 1;
+ s.total_size_rounded = 4096;
+ s.num_entries = 1;
+ o.push_back(r);
+
+ o.push_back(new rgw_cls_bucket_update_stats_op);
+}
+
+void rgw_cls_bucket_update_stats_op::dump(Formatter *f) const
+{
+ ::encode_json("absolute", absolute, f);
+ map<int, rgw_bucket_category_stats> s;
+ for (auto& entry : stats) {
+ s[(int)entry.first] = entry.second;
+ }
+ ::encode_json("stats", s, f);
+}
+
+void cls_rgw_bi_log_list_op::dump(Formatter *f) const
+{
+ f->dump_string("marker", marker);
+ f->dump_unsigned("max", max);
+}
+
+void cls_rgw_bi_log_list_op::generate_test_instances(list<cls_rgw_bi_log_list_op*>& ls)
+{
+ ls.push_back(new cls_rgw_bi_log_list_op);
+ ls.push_back(new cls_rgw_bi_log_list_op);
+ ls.back()->marker = "mark";
+ ls.back()->max = 123;
+}
+
+void cls_rgw_bi_log_trim_op::dump(Formatter *f) const
+{
+ f->dump_string("start_marker", start_marker);
+ f->dump_string("end_marker", end_marker);
+}
+
+void cls_rgw_bi_log_trim_op::generate_test_instances(list<cls_rgw_bi_log_trim_op*>& ls)
+{
+ ls.push_back(new cls_rgw_bi_log_trim_op);
+ ls.push_back(new cls_rgw_bi_log_trim_op);
+ ls.back()->start_marker = "foo";
+ ls.back()->end_marker = "bar";
+}
+
+void cls_rgw_bi_log_list_ret::dump(Formatter *f) const
+{
+ encode_json("entries", entries, f);
+ f->dump_unsigned("truncated", (int)truncated);
+}
+
+void cls_rgw_bi_log_list_ret::generate_test_instances(list<cls_rgw_bi_log_list_ret*>& ls)
+{
+ ls.push_back(new cls_rgw_bi_log_list_ret);
+ ls.push_back(new cls_rgw_bi_log_list_ret);
+ ls.back()->entries.push_back(rgw_bi_log_entry());
+ ls.back()->truncated = true;
+}
+
+void cls_rgw_reshard_add_op::generate_test_instances(list<cls_rgw_reshard_add_op*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_add_op);
+ ls.push_back(new cls_rgw_reshard_add_op);
+ list<cls_rgw_reshard_entry *> l;
+ cls_rgw_reshard_entry::generate_test_instances(l);
+ list<cls_rgw_reshard_entry *>::iterator iter = l.begin();
+ ls.back()->entry = *(*iter);
+}
+
+void cls_rgw_reshard_add_op::dump(Formatter *f) const
+{
+ ::encode_json("entry", entry, f);
+}
+
+void cls_rgw_reshard_list_op::generate_test_instances(list<cls_rgw_reshard_list_op*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_list_op);
+ ls.push_back(new cls_rgw_reshard_list_op);
+ ls.back()->max = 1000;
+ ls.back()->marker = "foo";
+}
+
+void cls_rgw_reshard_list_op::dump(Formatter *f) const
+{
+ ::encode_json("max", max, f);
+ ::encode_json("marker", marker, f);
+}
+
+void cls_rgw_reshard_list_ret::generate_test_instances(list<cls_rgw_reshard_list_ret*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_list_ret);
+ ls.push_back(new cls_rgw_reshard_list_ret);
+ ls.back()->entries.push_back(cls_rgw_reshard_entry());
+ ls.back()->is_truncated = true;
+}
+
+void cls_rgw_reshard_list_ret::dump(Formatter *f) const
+{
+ ::encode_json("entries", entries, f);
+ ::encode_json("is_truncated", is_truncated, f);
+}
+
+void cls_rgw_reshard_get_op::generate_test_instances(list<cls_rgw_reshard_get_op*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_get_op);
+ ls.push_back(new cls_rgw_reshard_get_op);
+}
+
+void cls_rgw_reshard_get_op::dump(Formatter *f) const
+{
+ ::encode_json("entry", entry, f);
+}
+
+void cls_rgw_reshard_get_ret::generate_test_instances(list<cls_rgw_reshard_get_ret*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_get_ret);
+ ls.push_back(new cls_rgw_reshard_get_ret);
+}
+
+void cls_rgw_reshard_get_ret::dump(Formatter *f) const
+{
+ ::encode_json("entry", entry, f);
+}
+
+void cls_rgw_reshard_remove_op::generate_test_instances(list<cls_rgw_reshard_remove_op*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_remove_op);
+ ls.push_back(new cls_rgw_reshard_remove_op);
+ ls.back()->bucket_name = "foo";
+ ls.back()->bucket_id = "bucket_id";
+}
+
+void cls_rgw_reshard_remove_op::dump(Formatter *f) const
+{
+ ::encode_json("bucket_name", bucket_name, f);
+ ::encode_json("bucket_id", bucket_name, f);
+}
+
+
+void cls_rgw_set_bucket_resharding_op::generate_test_instances(
+ list<cls_rgw_set_bucket_resharding_op*>& ls)
+{
+ ls.push_back(new cls_rgw_set_bucket_resharding_op);
+ ls.push_back(new cls_rgw_set_bucket_resharding_op);
+}
+
+void cls_rgw_set_bucket_resharding_op::dump(Formatter *f) const
+{
+ ::encode_json("entry", entry, f);
+}
+
+void cls_rgw_clear_bucket_resharding_op::generate_test_instances(
+ list<cls_rgw_clear_bucket_resharding_op*>& ls)
+{
+ ls.push_back(new cls_rgw_clear_bucket_resharding_op);
+ ls.push_back(new cls_rgw_clear_bucket_resharding_op);
+}
+
+void cls_rgw_clear_bucket_resharding_op::dump(Formatter *f) const
+{
+}
+
+void cls_rgw_guard_bucket_resharding_op::generate_test_instances(
+ list<cls_rgw_guard_bucket_resharding_op*>& ls)
+{
+ ls.push_back(new cls_rgw_guard_bucket_resharding_op);
+ ls.push_back(new cls_rgw_guard_bucket_resharding_op);
+}
+
+void cls_rgw_guard_bucket_resharding_op::dump(Formatter *f) const
+{
+ ::encode_json("ret_err", ret_err, f);
+}
+
+
+void cls_rgw_get_bucket_resharding_op::generate_test_instances(
+ list<cls_rgw_get_bucket_resharding_op*>& ls)
+{
+ ls.push_back(new cls_rgw_get_bucket_resharding_op);
+ ls.push_back(new cls_rgw_get_bucket_resharding_op);
+}
+
+void cls_rgw_get_bucket_resharding_op::dump(Formatter *f) const
+{
+}
+
+
+
+
+
diff --git a/src/cls/rgw/cls_rgw_ops.h b/src/cls/rgw/cls_rgw_ops.h
new file mode 100644
index 00000000..86f08bcf
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_ops.h
@@ -0,0 +1,1429 @@
+#ifndef CEPH_CLS_RGW_OPS_H
+#define CEPH_CLS_RGW_OPS_H
+
+#include "cls/rgw/cls_rgw_types.h"
+
+struct rgw_cls_tag_timeout_op
+{
+ uint64_t tag_timeout;
+
+ rgw_cls_tag_timeout_op() : tag_timeout(0) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(tag_timeout, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(tag_timeout, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<rgw_cls_tag_timeout_op*>& ls);
+};
+WRITE_CLASS_ENCODER(rgw_cls_tag_timeout_op)
+
+struct rgw_cls_obj_prepare_op
+{
+ RGWModifyOp op;
+ cls_rgw_obj_key key;
+ string tag;
+ string locator;
+ bool log_op;
+ uint16_t bilog_flags;
+ rgw_zone_set zones_trace;
+
+ rgw_cls_obj_prepare_op() : op(CLS_RGW_OP_UNKNOWN), log_op(false), bilog_flags(0) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(7, 5, bl);
+ uint8_t c = (uint8_t)op;
+ encode(c, bl);
+ encode(tag, bl);
+ encode(locator, bl);
+ encode(log_op, bl);
+ encode(key, bl);
+ encode(bilog_flags, bl);
+ encode(zones_trace, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(7, 3, 3, bl);
+ uint8_t c;
+ decode(c, bl);
+ op = (RGWModifyOp)c;
+ if (struct_v < 5) {
+ decode(key.name, bl);
+ }
+ decode(tag, bl);
+ if (struct_v >= 2) {
+ decode(locator, bl);
+ }
+ if (struct_v >= 4) {
+ decode(log_op, bl);
+ }
+ if (struct_v >= 5) {
+ decode(key, bl);
+ }
+ if (struct_v >= 6) {
+ decode(bilog_flags, bl);
+ }
+ if (struct_v >= 7) {
+ decode(zones_trace, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<rgw_cls_obj_prepare_op*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_prepare_op)
+
+struct rgw_cls_obj_complete_op
+{
+ RGWModifyOp op;
+ cls_rgw_obj_key key;
+ string locator;
+ rgw_bucket_entry_ver ver;
+ rgw_bucket_dir_entry_meta meta;
+ string tag;
+ bool log_op;
+ uint16_t bilog_flags;
+
+ list<cls_rgw_obj_key> remove_objs;
+ rgw_zone_set zones_trace;
+
+ rgw_cls_obj_complete_op() : op(CLS_RGW_OP_ADD), log_op(false), bilog_flags(0) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(9, 7, bl);
+ uint8_t c = (uint8_t)op;
+ encode(c, bl);
+ encode(ver.epoch, bl);
+ encode(meta, bl);
+ encode(tag, bl);
+ encode(locator, bl);
+ encode(remove_objs, bl);
+ encode(ver, bl);
+ encode(log_op, bl);
+ encode(key, bl);
+ encode(bilog_flags, bl);
+ encode(zones_trace, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(9, 3, 3, bl);
+ uint8_t c;
+ decode(c, bl);
+ op = (RGWModifyOp)c;
+ if (struct_v < 7) {
+ decode(key.name, bl);
+ }
+ decode(ver.epoch, bl);
+ decode(meta, bl);
+ decode(tag, bl);
+ if (struct_v >= 2) {
+ decode(locator, bl);
+ }
+ if (struct_v >= 4 && struct_v < 7) {
+ list<string> old_remove_objs;
+ decode(old_remove_objs, bl);
+
+ for (list<string>::iterator iter = old_remove_objs.begin();
+ iter != old_remove_objs.end(); ++iter) {
+ cls_rgw_obj_key k;
+ k.name = *iter;
+ remove_objs.push_back(k);
+ }
+ } else {
+ decode(remove_objs, bl);
+ }
+ if (struct_v >= 5) {
+ decode(ver, bl);
+ } else {
+ ver.pool = -1;
+ }
+ if (struct_v >= 6) {
+ decode(log_op, bl);
+ }
+ if (struct_v >= 7) {
+ decode(key, bl);
+ }
+ if (struct_v >= 8) {
+ decode(bilog_flags, bl);
+ }
+ if (struct_v >= 9) {
+ decode(zones_trace, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<rgw_cls_obj_complete_op*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_complete_op)
+
+struct rgw_cls_link_olh_op {
+ cls_rgw_obj_key key;
+ string olh_tag;
+ bool delete_marker;
+ string op_tag;
+ rgw_bucket_dir_entry_meta meta;
+ uint64_t olh_epoch;
+ bool log_op;
+ uint16_t bilog_flags;
+ real_time unmod_since; /* only create delete marker if newer than this */
+ bool high_precision_time;
+ rgw_zone_set zones_trace;
+
+ rgw_cls_link_olh_op() : delete_marker(false), olh_epoch(0), log_op(false), bilog_flags(0), high_precision_time(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(5, 1, bl);
+ encode(key, bl);
+ encode(olh_tag, bl);
+ encode(delete_marker, bl);
+ encode(op_tag, bl);
+ encode(meta, bl);
+ encode(olh_epoch, bl);
+ encode(log_op, bl);
+ encode(bilog_flags, bl);
+ uint64_t t = ceph::real_clock::to_time_t(unmod_since);
+ encode(t, bl);
+ encode(unmod_since, bl);
+ encode(high_precision_time, bl);
+ encode(zones_trace, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(5, bl);
+ decode(key, bl);
+ decode(olh_tag, bl);
+ decode(delete_marker, bl);
+ decode(op_tag, bl);
+ decode(meta, bl);
+ decode(olh_epoch, bl);
+ decode(log_op, bl);
+ decode(bilog_flags, bl);
+ if (struct_v == 2) {
+ uint64_t t;
+ decode(t, bl);
+ unmod_since = ceph::real_clock::from_time_t(static_cast<time_t>(t));
+ }
+ if (struct_v >= 3) {
+ uint64_t t;
+ decode(t, bl);
+ decode(unmod_since, bl);
+ }
+ if (struct_v >= 4) {
+ decode(high_precision_time, bl);
+ }
+ if (struct_v >= 5) {
+ decode(zones_trace, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ static void generate_test_instances(list<rgw_cls_link_olh_op *>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(rgw_cls_link_olh_op)
+
+struct rgw_cls_unlink_instance_op {
+ cls_rgw_obj_key key;
+ string op_tag;
+ uint64_t olh_epoch;
+ bool log_op;
+ uint16_t bilog_flags;
+ string olh_tag;
+ rgw_zone_set zones_trace;
+
+ rgw_cls_unlink_instance_op() : olh_epoch(0), log_op(false), bilog_flags(0) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(3, 1, bl);
+ encode(key, bl);
+ encode(op_tag, bl);
+ encode(olh_epoch, bl);
+ encode(log_op, bl);
+ encode(bilog_flags, bl);
+ encode(olh_tag, bl);
+ encode(zones_trace, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(3, bl);
+ decode(key, bl);
+ decode(op_tag, bl);
+ decode(olh_epoch, bl);
+ decode(log_op, bl);
+ decode(bilog_flags, bl);
+ if (struct_v >= 2) {
+ decode(olh_tag, bl);
+ }
+ if (struct_v >= 3) {
+ decode(zones_trace, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ static void generate_test_instances(list<rgw_cls_unlink_instance_op *>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(rgw_cls_unlink_instance_op)
+
+struct rgw_cls_read_olh_log_op
+{
+ cls_rgw_obj_key olh;
+ uint64_t ver_marker;
+ string olh_tag;
+
+ rgw_cls_read_olh_log_op() : ver_marker(0) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(olh, bl);
+ encode(ver_marker, bl);
+ encode(olh_tag, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(olh, bl);
+ decode(ver_marker, bl);
+ decode(olh_tag, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(list<rgw_cls_read_olh_log_op *>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(rgw_cls_read_olh_log_op)
+
+
+struct rgw_cls_read_olh_log_ret
+{
+ map<uint64_t, vector<rgw_bucket_olh_log_entry> > log;
+ bool is_truncated;
+
+ rgw_cls_read_olh_log_ret() : is_truncated(false) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(log, bl);
+ encode(is_truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(log, bl);
+ decode(is_truncated, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(list<rgw_cls_read_olh_log_ret *>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(rgw_cls_read_olh_log_ret)
+
+struct rgw_cls_trim_olh_log_op
+{
+ cls_rgw_obj_key olh;
+ uint64_t ver;
+ string olh_tag;
+
+ rgw_cls_trim_olh_log_op() : ver(0) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(olh, bl);
+ encode(ver, bl);
+ encode(olh_tag, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(olh, bl);
+ decode(ver, bl);
+ decode(olh_tag, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(list<rgw_cls_trim_olh_log_op *>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(rgw_cls_trim_olh_log_op)
+
+struct rgw_cls_bucket_clear_olh_op {
+ cls_rgw_obj_key key;
+ string olh_tag;
+
+ rgw_cls_bucket_clear_olh_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(key, bl);
+ encode(olh_tag, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(key, bl);
+ decode(olh_tag, bl);
+ DECODE_FINISH(bl);
+ }
+
+ static void generate_test_instances(list<rgw_cls_bucket_clear_olh_op *>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(rgw_cls_bucket_clear_olh_op)
+
+struct rgw_cls_list_op
+{
+ cls_rgw_obj_key start_obj;
+ uint32_t num_entries;
+ string filter_prefix;
+ bool list_versions;
+
+ rgw_cls_list_op() : num_entries(0), list_versions(false) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(5, 4, bl);
+ encode(num_entries, bl);
+ encode(filter_prefix, bl);
+ encode(start_obj, bl);
+ encode(list_versions, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(5, 2, 2, bl);
+ if (struct_v < 4) {
+ decode(start_obj.name, bl);
+ }
+ decode(num_entries, bl);
+ if (struct_v >= 3)
+ decode(filter_prefix, bl);
+ if (struct_v >= 4)
+ decode(start_obj, bl);
+ if (struct_v >= 5)
+ decode(list_versions, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<rgw_cls_list_op*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_list_op)
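+
+// A caller typically fills this request and encodes it into the op payload.
+// For illustration (the values below are arbitrary):
+//   rgw_cls_list_op call;
+//   call.start_obj = cls_rgw_obj_key("dir/obj-0100"); // resume point from a previous listing
+//   call.num_entries = 1000;
+//   call.filter_prefix = "dir/";
+//   call.list_versions = false;
+//   bufferlist in;
+//   encode(call, in); // free function emitted by WRITE_CLASS_ENCODER
+// The reply decodes into rgw_cls_list_ret below; its is_truncated flag tells
+// the caller whether another request is needed, resuming after the last key
+// returned in dir.m.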
+
+struct rgw_cls_list_ret {
+ rgw_bucket_dir dir;
+ bool is_truncated;
+
+ rgw_cls_list_ret() : is_truncated(false) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(2, 2, bl);
+ encode(dir, bl);
+ encode(is_truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
+ decode(dir, bl);
+ decode(is_truncated, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<rgw_cls_list_ret*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_list_ret)
+
+struct rgw_cls_check_index_ret
+{
+ rgw_bucket_dir_header existing_header;
+ rgw_bucket_dir_header calculated_header;
+
+ rgw_cls_check_index_ret() {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(existing_header, bl);
+ encode(calculated_header, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(existing_header, bl);
+ decode(calculated_header, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<rgw_cls_check_index_ret *>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_check_index_ret)
+
+struct rgw_cls_bucket_update_stats_op
+{
+ bool absolute{false};
+ map<RGWObjCategory, rgw_bucket_category_stats> stats;
+
+ rgw_cls_bucket_update_stats_op() {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(absolute, bl);
+ encode(stats, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(absolute, bl);
+ decode(stats, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<rgw_cls_bucket_update_stats_op *>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_bucket_update_stats_op)
+
+struct rgw_cls_obj_remove_op {
+ list<string> keep_attr_prefixes;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(keep_attr_prefixes, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(keep_attr_prefixes, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_remove_op)
+
+struct rgw_cls_obj_store_pg_ver_op {
+ string attr;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(attr, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(attr, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_store_pg_ver_op)
+
+struct rgw_cls_obj_check_attrs_prefix {
+ string check_prefix;
+ bool fail_if_exist;
+
+ rgw_cls_obj_check_attrs_prefix() : fail_if_exist(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(check_prefix, bl);
+ encode(fail_if_exist, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(check_prefix, bl);
+ decode(fail_if_exist, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_check_attrs_prefix)
+
+struct rgw_cls_obj_check_mtime {
+ ceph::real_time mtime;
+ RGWCheckMTimeType type;
+ bool high_precision_time;
+
+ rgw_cls_obj_check_mtime() : type(CLS_RGW_CHECK_TIME_MTIME_EQ), high_precision_time(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(mtime, bl);
+ encode((uint8_t)type, bl);
+ encode(high_precision_time, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(mtime, bl);
+ uint8_t c;
+ decode(c, bl);
+ type = (RGWCheckMTimeType)c;
+ if (struct_v >= 2) {
+ decode(high_precision_time, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_check_mtime)
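+
+// Illustrative use of the mtime guard: to let a write proceed only when the
+// stored object is older than a given time, a caller might send
+//   rgw_cls_obj_check_mtime chk;
+//   chk.mtime = some_cutoff;                 // a ceph::real_time chosen by the caller
+//   chk.type = CLS_RGW_CHECK_TIME_MTIME_LT;  // stored mtime must compare < chk.mtime
+//   chk.high_precision_time = true;          // compare the full real_time rather than whole seconds
+// and the class method is expected to fail the op when the comparison does not hold.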
+
+struct rgw_cls_usage_log_add_op {
+ rgw_usage_log_info info;
+ rgw_user user;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(info, bl);
+ encode(user.to_str(), bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(info, bl);
+ if (struct_v >= 2) {
+ string s;
+ decode(s, bl);
+ user.from_str(s);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_usage_log_add_op)
+
+struct rgw_cls_bi_get_op {
+ cls_rgw_obj_key key;
+ BIIndexType type; /* namespace: plain, instance, olh */
+
+ rgw_cls_bi_get_op() : type(BIIndexType::Plain) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(key, bl);
+ encode((uint8_t)type, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(key, bl);
+ uint8_t c;
+ decode(c, bl);
+ type = (BIIndexType)c;
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_get_op)
+
+struct rgw_cls_bi_get_ret {
+ rgw_cls_bi_entry entry;
+
+ rgw_cls_bi_get_ret() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_get_ret)
+
+struct rgw_cls_bi_put_op {
+ rgw_cls_bi_entry entry;
+
+ rgw_cls_bi_put_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_put_op)
+
+struct rgw_cls_bi_list_op {
+ uint32_t max;
+ string name;
+ string marker;
+
+ rgw_cls_bi_list_op() : max(0) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(max, bl);
+ encode(name, bl);
+ encode(marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(max, bl);
+ decode(name, bl);
+ decode(marker, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_list_op)
+
+struct rgw_cls_bi_list_ret {
+ list<rgw_cls_bi_entry> entries;
+ bool is_truncated;
+
+ rgw_cls_bi_list_ret() : is_truncated(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ encode(is_truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ decode(is_truncated, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_list_ret)
+
+struct rgw_cls_usage_log_read_op {
+ uint64_t start_epoch;
+ uint64_t end_epoch;
+ string owner;
+ string bucket;
+
+ string iter; // should be empty for the first call, non-empty for subsequent calls
+ uint32_t max_entries;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(start_epoch, bl);
+ encode(end_epoch, bl);
+ encode(owner, bl);
+ encode(iter, bl);
+ encode(max_entries, bl);
+ encode(bucket, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(start_epoch, bl);
+ decode(end_epoch, bl);
+ decode(owner, bl);
+ decode(iter, bl);
+ decode(max_entries, bl);
+ if (struct_v >= 2) {
+ decode(bucket, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_usage_log_read_op)
+
+struct rgw_cls_usage_log_read_ret {
+ map<rgw_user_bucket, rgw_usage_log_entry> usage;
+ bool truncated;
+ string next_iter;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(usage, bl);
+ encode(truncated, bl);
+ encode(next_iter, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(usage, bl);
+ decode(truncated, bl);
+ decode(next_iter, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_usage_log_read_ret)
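+
+// Usage reads are cursor based: the first request leaves iter empty, each reply
+// returns next_iter plus truncated, and the caller copies next_iter into the
+// next request until truncated is false.  Sketch (owner and limits are
+// placeholder values):
+//   rgw_cls_usage_log_read_op req;
+//   req.start_epoch = 0;
+//   req.end_epoch = (uint64_t)-1;
+//   req.owner = "user1";
+//   req.bucket = "";
+//   req.max_entries = 1000;
+//   // loop: send req, merge ret.usage, set req.iter = ret.next_iter, stop when !ret.truncated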
+
+struct rgw_cls_usage_log_trim_op {
+ uint64_t start_epoch;
+ uint64_t end_epoch;
+ string user;
+ string bucket;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(3, 2, bl);
+ encode(start_epoch, bl);
+ encode(end_epoch, bl);
+ encode(user, bl);
+ encode(bucket, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(3, bl);
+ decode(start_epoch, bl);
+ decode(end_epoch, bl);
+ decode(user, bl);
+ if (struct_v >= 3) {
+ decode(bucket, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_usage_log_trim_op)
+
+struct cls_rgw_gc_set_entry_op {
+ uint32_t expiration_secs;
+ cls_rgw_gc_obj_info info;
+ cls_rgw_gc_set_entry_op() : expiration_secs(0) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(expiration_secs, bl);
+ encode(info, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(expiration_secs, bl);
+ decode(info, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_rgw_gc_set_entry_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_gc_set_entry_op)
+
+struct cls_rgw_gc_defer_entry_op {
+ uint32_t expiration_secs;
+ string tag;
+ cls_rgw_gc_defer_entry_op() : expiration_secs(0) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(expiration_secs, bl);
+ encode(tag, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(expiration_secs, bl);
+ decode(tag, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_rgw_gc_defer_entry_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_gc_defer_entry_op)
+
+struct cls_rgw_gc_list_op {
+ string marker;
+ uint32_t max;
+ bool expired_only;
+
+ cls_rgw_gc_list_op() : max(0), expired_only(true) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(marker, bl);
+ encode(max, bl);
+ encode(expired_only, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(marker, bl);
+ decode(max, bl);
+ if (struct_v >= 2) {
+ decode(expired_only, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_rgw_gc_list_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_gc_list_op)
+
+struct cls_rgw_gc_list_ret {
+ list<cls_rgw_gc_obj_info> entries;
+ string next_marker;
+ bool truncated;
+
+ cls_rgw_gc_list_ret() : truncated(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(entries, bl);
+ encode(next_marker, bl);
+ encode(truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(entries, bl);
+ if (struct_v >= 2)
+ decode(next_marker, bl);
+ decode(truncated, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_rgw_gc_list_ret*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_gc_list_ret)
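+
+// GC listing follows the usual marker pagination: start with an empty marker
+// and, while the reply reports truncated, resend with marker = next_marker.
+// With expired_only left at its default of true, only entries whose deferral
+// time (expiration_secs from cls_rgw_gc_set_entry_op / cls_rgw_gc_defer_entry_op)
+// has already passed are returned, i.e. the ones ready to be processed.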
+
+struct cls_rgw_gc_remove_op {
+ vector<string> tags;
+
+ cls_rgw_gc_remove_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(tags, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(tags, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_rgw_gc_remove_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_gc_remove_op)
+
+struct cls_rgw_bi_log_list_op {
+ string marker;
+ uint32_t max;
+
+ cls_rgw_bi_log_list_op() : max(0) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(marker, bl);
+ encode(max, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(marker, bl);
+ decode(max, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_rgw_bi_log_list_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_bi_log_list_op)
+
+struct cls_rgw_bi_log_trim_op {
+ string start_marker;
+ string end_marker;
+
+ cls_rgw_bi_log_trim_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(start_marker, bl);
+ encode(end_marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(start_marker, bl);
+ decode(end_marker, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_rgw_bi_log_trim_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_bi_log_trim_op)
+
+struct cls_rgw_bi_log_list_ret {
+ list<rgw_bi_log_entry> entries;
+ bool truncated;
+
+ cls_rgw_bi_log_list_ret() : truncated(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ encode(truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ decode(truncated, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_rgw_bi_log_list_ret*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_bi_log_list_ret)
+
+struct cls_rgw_lc_get_next_entry_op {
+ string marker;
+ cls_rgw_lc_get_next_entry_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(marker, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_get_next_entry_op)
+
+using rgw_lc_entry_t = std::pair<std::string, int>;
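+// The lifecycle entry is a (name, status) pair: the string identifies the
+// bucket entry stored in the lc shard object and the int carries its
+// processing state in the RGW lifecycle state machine (e.g. uninitial,
+// processing, complete).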
+
+struct cls_rgw_lc_get_next_entry_ret {
+ rgw_lc_entry_t entry;
+ cls_rgw_lc_get_next_entry_ret() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_get_next_entry_ret)
+
+struct cls_rgw_lc_get_entry_op {
+ string marker;
+ cls_rgw_lc_get_entry_op() {}
+ cls_rgw_lc_get_entry_op(const std::string& _marker) : marker(_marker) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(marker, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_get_entry_op)
+
+struct cls_rgw_lc_get_entry_ret {
+ rgw_lc_entry_t entry;
+ cls_rgw_lc_get_entry_ret() {}
+ cls_rgw_lc_get_entry_ret(rgw_lc_entry_t&& _entry) : entry(std::move(_entry)) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_get_entry_ret)
+
+
+struct cls_rgw_lc_rm_entry_op {
+ rgw_lc_entry_t entry;
+ cls_rgw_lc_rm_entry_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_rm_entry_op)
+
+struct cls_rgw_lc_set_entry_op {
+ rgw_lc_entry_t entry;
+ cls_rgw_lc_set_entry_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_set_entry_op)
+
+struct cls_rgw_lc_put_head_op {
+ cls_rgw_lc_obj_head head;
+
+
+ cls_rgw_lc_put_head_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(head, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(head, bl);
+ DECODE_FINISH(bl);
+ }
+
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_put_head_op)
+
+struct cls_rgw_lc_get_head_ret {
+ cls_rgw_lc_obj_head head;
+
+ cls_rgw_lc_get_head_ret() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(head, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(head, bl);
+ DECODE_FINISH(bl);
+ }
+
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_get_head_ret)
+
+struct cls_rgw_lc_list_entries_op {
+ string marker;
+ uint32_t max_entries = 0;
+
+ cls_rgw_lc_list_entries_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(marker, bl);
+ encode(max_entries, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(marker, bl);
+ decode(max_entries, bl);
+ DECODE_FINISH(bl);
+ }
+
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_list_entries_op)
+
+struct cls_rgw_lc_list_entries_ret {
+ map<string, int> entries;
+ bool is_truncated{false};
+
+ cls_rgw_lc_list_entries_ret() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(entries, bl);
+ encode(is_truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(entries, bl);
+ if (struct_v >= 2) {
+ decode(is_truncated, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_list_entries_ret)
+
+struct cls_rgw_reshard_add_op {
+ cls_rgw_reshard_entry entry;
+
+ cls_rgw_reshard_add_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(list<cls_rgw_reshard_add_op*>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_add_op)
+
+struct cls_rgw_reshard_list_op {
+ uint32_t max{0};
+ string marker;
+
+ cls_rgw_reshard_list_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(max, bl);
+ encode(marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(max, bl);
+ decode(marker, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(list<cls_rgw_reshard_list_op*>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_list_op)
+
+
+struct cls_rgw_reshard_list_ret {
+ list<cls_rgw_reshard_entry> entries;
+ bool is_truncated{false};
+
+ cls_rgw_reshard_list_ret() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ encode(is_truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ decode(is_truncated, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(list<cls_rgw_reshard_list_ret*>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_list_ret)
+
+struct cls_rgw_reshard_get_op {
+ cls_rgw_reshard_entry entry;
+
+ cls_rgw_reshard_get_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(list<cls_rgw_reshard_get_op*>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_get_op)
+
+struct cls_rgw_reshard_get_ret {
+ cls_rgw_reshard_entry entry;
+
+ cls_rgw_reshard_get_ret() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(list<cls_rgw_reshard_get_ret*>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_get_ret)
+
+struct cls_rgw_reshard_remove_op {
+ string tenant;
+ string bucket_name;
+ string bucket_id;
+
+ cls_rgw_reshard_remove_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(tenant, bl);
+ encode(bucket_name, bl);
+ encode(bucket_id, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(tenant, bl);
+ decode(bucket_name, bl);
+ decode(bucket_id, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(list<cls_rgw_reshard_remove_op*>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_remove_op)
+
+struct cls_rgw_set_bucket_resharding_op {
+ cls_rgw_bucket_instance_entry entry;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(list<cls_rgw_set_bucket_resharding_op*>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_set_bucket_resharding_op)
+
+struct cls_rgw_clear_bucket_resharding_op {
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(list<cls_rgw_clear_bucket_resharding_op*>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_clear_bucket_resharding_op)
+
+struct cls_rgw_guard_bucket_resharding_op {
+ int ret_err{0};
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(ret_err, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(ret_err, bl);
+ DECODE_FINISH(bl);
+ }
+
+ static void generate_test_instances(list<cls_rgw_guard_bucket_resharding_op*>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_guard_bucket_resharding_op)
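+
+// Resharding coordination, in outline: cls_rgw_set_bucket_resharding_op records
+// the in-progress state (and the new bucket instance id) in the index shard,
+// cls_rgw_guard_bucket_resharding_op is attached to subsequent index operations
+// so they can fail with the supplied ret_err while that state is set, and
+// cls_rgw_clear_bucket_resharding_op removes the marker once resharding finishes.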
+
+struct cls_rgw_get_bucket_resharding_op {
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ DECODE_FINISH(bl);
+ }
+
+ static void generate_test_instances(list<cls_rgw_get_bucket_resharding_op*>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_get_bucket_resharding_op)
+
+struct cls_rgw_get_bucket_resharding_ret {
+ cls_rgw_bucket_instance_entry new_instance;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(new_instance, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(new_instance, bl);
+ DECODE_FINISH(bl);
+ }
+
+ static void generate_test_instances(list<cls_rgw_get_bucket_resharding_ret*>& o);
+ void dump(Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_get_bucket_resharding_ret)
+
+#endif /* CEPH_CLS_RGW_OPS_H */
diff --git a/src/cls/rgw/cls_rgw_types.cc b/src/cls/rgw/cls_rgw_types.cc
new file mode 100644
index 00000000..b533ec05
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_types.cc
@@ -0,0 +1,700 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "cls/rgw/cls_rgw_types.h"
+#include "common/ceph_json.h"
+#include "include/utime.h"
+
+
+void rgw_bucket_pending_info::generate_test_instances(list<rgw_bucket_pending_info*>& o)
+{
+ rgw_bucket_pending_info *i = new rgw_bucket_pending_info;
+ i->state = CLS_RGW_STATE_COMPLETE;
+ i->op = CLS_RGW_OP_DEL;
+ o.push_back(i);
+ o.push_back(new rgw_bucket_pending_info);
+}
+
+void rgw_bucket_pending_info::dump(Formatter *f) const
+{
+ encode_json("state", (int)state, f);
+ utime_t ut(timestamp);
+ encode_json("timestamp", ut, f);
+ encode_json("op", (int)op, f);
+}
+
+void rgw_bucket_pending_info::decode_json(JSONObj *obj) {
+ int val;
+ JSONDecoder::decode_json("state", val, obj);
+ state = (RGWPendingState)val;
+ utime_t ut(timestamp);
+ JSONDecoder::decode_json("timestamp", ut, obj);
+ timestamp = ut.to_real_time();
+ JSONDecoder::decode_json("op", val, obj);
+ op = (uint8_t)val;
+}
+
+void cls_rgw_obj_key::decode_json(JSONObj *obj) {
+ JSONDecoder::decode_json("name", name, obj);
+ JSONDecoder::decode_json("instance", instance, obj);
+}
+
+void rgw_bucket_dir_entry_meta::generate_test_instances(list<rgw_bucket_dir_entry_meta*>& o)
+{
+ rgw_bucket_dir_entry_meta *m = new rgw_bucket_dir_entry_meta;
+ m->category = RGWObjCategory::Main;
+ m->size = 100;
+ m->etag = "etag";
+ m->owner = "owner";
+ m->owner_display_name = "display name";
+ m->content_type = "content/type";
+ o.push_back(m);
+ o.push_back(new rgw_bucket_dir_entry_meta);
+}
+
+void rgw_bucket_dir_entry_meta::dump(Formatter *f) const
+{
+ encode_json("category", (int)category, f);
+ encode_json("size", size, f);
+ utime_t ut(mtime);
+ encode_json("mtime", ut, f);
+ encode_json("etag", etag, f);
+ encode_json("storage_class", storage_class, f);
+ encode_json("owner", owner, f);
+ encode_json("owner_display_name", owner_display_name, f);
+ encode_json("content_type", content_type, f);
+ encode_json("accounted_size", accounted_size, f);
+ encode_json("user_data", user_data, f);
+ encode_json("appendable", appendable, f);
+}
+
+void rgw_bucket_dir_entry_meta::decode_json(JSONObj *obj) {
+ int val;
+ JSONDecoder::decode_json("category", val, obj);
+ category = static_cast<RGWObjCategory>(val);
+ JSONDecoder::decode_json("size", size, obj);
+ utime_t ut;
+ JSONDecoder::decode_json("mtime", ut, obj);
+ mtime = ut.to_real_time();
+ JSONDecoder::decode_json("etag", etag, obj);
+ JSONDecoder::decode_json("storage_class", storage_class, obj);
+ JSONDecoder::decode_json("owner", owner, obj);
+ JSONDecoder::decode_json("owner_display_name", owner_display_name, obj);
+ JSONDecoder::decode_json("content_type", content_type, obj);
+ JSONDecoder::decode_json("accounted_size", accounted_size, obj);
+ JSONDecoder::decode_json("user_data", user_data, obj);
+ JSONDecoder::decode_json("appendable", appendable, obj);
+}
+
+void rgw_bucket_dir_entry::generate_test_instances(list<rgw_bucket_dir_entry*>& o)
+{
+ list<rgw_bucket_dir_entry_meta *> l;
+ rgw_bucket_dir_entry_meta::generate_test_instances(l);
+
+ list<rgw_bucket_dir_entry_meta *>::iterator iter;
+ for (iter = l.begin(); iter != l.end(); ++iter) {
+ rgw_bucket_dir_entry_meta *m = *iter;
+ rgw_bucket_dir_entry *e = new rgw_bucket_dir_entry;
+ e->key.name = "name";
+ e->ver.pool = 1;
+ e->ver.epoch = 1234;
+ e->locator = "locator";
+ e->exists = true;
+ e->meta = *m;
+ e->tag = "tag";
+
+ o.push_back(e);
+
+ delete m;
+ }
+ o.push_back(new rgw_bucket_dir_entry);
+}
+
+void rgw_bucket_entry_ver::dump(Formatter *f) const
+{
+ encode_json("pool", pool, f);
+ encode_json("epoch", epoch, f);
+}
+
+void rgw_bucket_entry_ver::decode_json(JSONObj *obj) {
+ JSONDecoder::decode_json("pool", pool, obj);
+ JSONDecoder::decode_json("epoch", epoch, obj);
+}
+
+void rgw_bucket_entry_ver::generate_test_instances(list<rgw_bucket_entry_ver*>& ls)
+{
+ ls.push_back(new rgw_bucket_entry_ver);
+ ls.push_back(new rgw_bucket_entry_ver);
+ ls.back()->pool = 123;
+ ls.back()->epoch = 12322;
+}
+
+
+void rgw_bucket_dir_entry::dump(Formatter *f) const
+{
+ encode_json("name", key.name, f);
+ encode_json("instance", key.instance , f);
+ encode_json("ver", ver , f);
+ encode_json("locator", locator , f);
+ encode_json("exists", exists , f);
+ encode_json("meta", meta , f);
+ encode_json("tag", tag , f);
+ encode_json("flags", (int)flags , f);
+ encode_json("pending_map", pending_map, f);
+ encode_json("versioned_epoch", versioned_epoch , f);
+}
+
+void rgw_bucket_dir_entry::decode_json(JSONObj *obj) {
+ JSONDecoder::decode_json("name", key.name, obj);
+ JSONDecoder::decode_json("instance", key.instance , obj);
+ JSONDecoder::decode_json("ver", ver , obj);
+ JSONDecoder::decode_json("locator", locator , obj);
+ JSONDecoder::decode_json("exists", exists , obj);
+ JSONDecoder::decode_json("meta", meta , obj);
+ JSONDecoder::decode_json("tag", tag , obj);
+ int val;
+ JSONDecoder::decode_json("flags", val , obj);
+ flags = (uint16_t)val;
+ JSONDecoder::decode_json("pending_map", pending_map, obj);
+ JSONDecoder::decode_json("versioned_epoch", versioned_epoch, obj);
+}
+
+static void dump_bi_entry(bufferlist bl, BIIndexType index_type, Formatter *formatter)
+{
+ auto iter = bl.cbegin();
+ switch (index_type) {
+ case BIIndexType::Plain:
+ case BIIndexType::Instance:
+ {
+ rgw_bucket_dir_entry entry;
+ decode(entry, iter);
+ encode_json("entry", entry, formatter);
+ }
+ break;
+ case BIIndexType::OLH:
+ {
+ rgw_bucket_olh_entry entry;
+ decode(entry, iter);
+ encode_json("entry", entry, formatter);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void rgw_cls_bi_entry::decode_json(JSONObj *obj, cls_rgw_obj_key *effective_key) {
+ JSONDecoder::decode_json("idx", idx, obj);
+ string s;
+ JSONDecoder::decode_json("type", s, obj);
+ if (s == "plain") {
+ type = BIIndexType::Plain;
+ } else if (s == "instance") {
+ type = BIIndexType::Instance;
+ } else if (s == "olh") {
+ type = BIIndexType::OLH;
+ } else {
+ type = BIIndexType::Invalid;
+ }
+ using ceph::encode;
+ switch (type) {
+ case BIIndexType::Plain:
+ case BIIndexType::Instance:
+ {
+ rgw_bucket_dir_entry entry;
+ JSONDecoder::decode_json("entry", entry, obj);
+ encode(entry, data);
+
+ if (effective_key) {
+ *effective_key = entry.key;
+ }
+ }
+ break;
+ case BIIndexType::OLH:
+ {
+ rgw_bucket_olh_entry entry;
+ JSONDecoder::decode_json("entry", entry, obj);
+ encode(entry, data);
+
+ if (effective_key) {
+ *effective_key = entry.key;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void rgw_cls_bi_entry::dump(Formatter *f) const
+{
+ string type_str;
+ switch (type) {
+ case BIIndexType::Plain:
+ type_str = "plain";
+ break;
+ case BIIndexType::Instance:
+ type_str = "instance";
+ break;
+ case BIIndexType::OLH:
+ type_str = "olh";
+ break;
+ default:
+ type_str = "invalid";
+ }
+ encode_json("type", type_str, f);
+ encode_json("idx", idx, f);
+ dump_bi_entry(data, type, f);
+}
+
+bool rgw_cls_bi_entry::get_info(cls_rgw_obj_key *key,
+ RGWObjCategory *category,
+ rgw_bucket_category_stats *accounted_stats)
+{
+ bool account = false;
+ auto iter = data.cbegin();
+ using ceph::decode;
+ switch (type) {
+ case BIIndexType::Plain:
+ account = true;
+ // NO BREAK; falls through to BIIndexType::Instance
+ case BIIndexType::Instance:
+ {
+ rgw_bucket_dir_entry entry;
+ decode(entry, iter);
+ account = (account && entry.exists);
+ *key = entry.key;
+ *category = entry.meta.category;
+ accounted_stats->num_entries++;
+ accounted_stats->total_size += entry.meta.accounted_size;
+ accounted_stats->total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size);
+ accounted_stats->actual_size += entry.meta.size;
+ }
+ break;
+ case BIIndexType::OLH:
+ {
+ rgw_bucket_olh_entry entry;
+ decode(entry, iter);
+ *key = entry.key;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return account;
+}
+
+void rgw_bucket_olh_entry::dump(Formatter *f) const
+{
+ encode_json("key", key, f);
+ encode_json("delete_marker", delete_marker, f);
+ encode_json("epoch", epoch, f);
+ encode_json("pending_log", pending_log, f);
+ encode_json("tag", tag, f);
+ encode_json("exists", exists, f);
+ encode_json("pending_removal", pending_removal, f);
+}
+
+void rgw_bucket_olh_entry::decode_json(JSONObj *obj)
+{
+ JSONDecoder::decode_json("key", key, obj);
+ JSONDecoder::decode_json("delete_marker", delete_marker, obj);
+ JSONDecoder::decode_json("epoch", epoch, obj);
+ JSONDecoder::decode_json("pending_log", pending_log, obj);
+ JSONDecoder::decode_json("tag", tag, obj);
+ JSONDecoder::decode_json("exists", exists, obj);
+ JSONDecoder::decode_json("pending_removal", pending_removal, obj);
+}
+
+void rgw_bucket_olh_log_entry::generate_test_instances(list<rgw_bucket_olh_log_entry*>& o)
+{
+ rgw_bucket_olh_log_entry *entry = new rgw_bucket_olh_log_entry;
+ entry->epoch = 1234;
+ entry->op = CLS_RGW_OLH_OP_LINK_OLH;
+ entry->op_tag = "op_tag";
+ entry->key.name = "key.name";
+ entry->key.instance = "key.instance";
+ entry->delete_marker = true;
+ o.push_back(entry);
+ o.push_back(new rgw_bucket_olh_log_entry);
+}
+
+void rgw_bucket_olh_log_entry::dump(Formatter *f) const
+{
+ encode_json("epoch", epoch, f);
+ const char *op_str;
+ switch (op) {
+ case CLS_RGW_OLH_OP_LINK_OLH:
+ op_str = "link_olh";
+ break;
+ case CLS_RGW_OLH_OP_UNLINK_OLH:
+ op_str = "unlink_olh";
+ break;
+ case CLS_RGW_OLH_OP_REMOVE_INSTANCE:
+ op_str = "remove_instance";
+ break;
+ default:
+ op_str = "unknown";
+ }
+ encode_json("op", op_str, f);
+ encode_json("op_tag", op_tag, f);
+ encode_json("key", key, f);
+ encode_json("delete_marker", delete_marker, f);
+}
+
+void rgw_bucket_olh_log_entry::decode_json(JSONObj *obj)
+{
+ JSONDecoder::decode_json("epoch", epoch, obj);
+ string op_str;
+ JSONDecoder::decode_json("op", op_str, obj);
+ if (op_str == "link_olh") {
+ op = CLS_RGW_OLH_OP_LINK_OLH;
+ } else if (op_str == "unlink_olh") {
+ op = CLS_RGW_OLH_OP_UNLINK_OLH;
+ } else if (op_str == "remove_instance") {
+ op = CLS_RGW_OLH_OP_REMOVE_INSTANCE;
+ } else {
+ op = CLS_RGW_OLH_OP_UNKNOWN;
+ }
+ JSONDecoder::decode_json("op_tag", op_tag, obj);
+ JSONDecoder::decode_json("key", key, obj);
+ JSONDecoder::decode_json("delete_marker", delete_marker, obj);
+}
+void rgw_bi_log_entry::decode_json(JSONObj *obj)
+{
+ JSONDecoder::decode_json("op_id", id, obj);
+ JSONDecoder::decode_json("op_tag", tag, obj);
+ string op_str;
+ JSONDecoder::decode_json("op", op_str, obj);
+ if (op_str == "write") {
+ op = CLS_RGW_OP_ADD;
+ } else if (op_str == "del") {
+ op = CLS_RGW_OP_DEL;
+ } else if (op_str == "cancel") {
+ op = CLS_RGW_OP_CANCEL;
+ } else if (op_str == "unknown") {
+ op = CLS_RGW_OP_UNKNOWN;
+ } else if (op_str == "link_olh") {
+ op = CLS_RGW_OP_LINK_OLH;
+ } else if (op_str == "link_olh_del") {
+ op = CLS_RGW_OP_LINK_OLH_DM;
+ } else if (op_str == "unlink_instance") {
+ op = CLS_RGW_OP_UNLINK_INSTANCE;
+ } else if (op_str == "syncstop") {
+ op = CLS_RGW_OP_SYNCSTOP;
+ } else if (op_str == "resync") {
+ op = CLS_RGW_OP_RESYNC;
+ } else {
+ op = CLS_RGW_OP_UNKNOWN;
+ }
+ JSONDecoder::decode_json("object", object, obj);
+ JSONDecoder::decode_json("instance", instance, obj);
+ string state_str;
+ JSONDecoder::decode_json("state", state_str, obj);
+ if (state_str == "pending") {
+ state = CLS_RGW_STATE_PENDING_MODIFY;
+ } else if (state_str == "complete") {
+ state = CLS_RGW_STATE_COMPLETE;
+ } else {
+ state = CLS_RGW_STATE_UNKNOWN;
+ }
+ JSONDecoder::decode_json("index_ver", index_ver, obj);
+ utime_t ut;
+ JSONDecoder::decode_json("timestamp", ut, obj);
+ timestamp = ut.to_real_time();
+ JSONDecoder::decode_json("ver", ver, obj);
+ uint32_t f;
+ JSONDecoder::decode_json("bilog_flags", f, obj);
+ bilog_flags = (uint16_t)f;
+ JSONDecoder::decode_json("owner", owner, obj);
+ JSONDecoder::decode_json("owner_display_name", owner_display_name, obj);
+ JSONDecoder::decode_json("zones_trace", zones_trace, obj);
+}
+
+void rgw_bi_log_entry::dump(Formatter *f) const
+{
+ f->dump_string("op_id", id);
+ f->dump_string("op_tag", tag);
+ switch (op) {
+ case CLS_RGW_OP_ADD:
+ f->dump_string("op", "write");
+ break;
+ case CLS_RGW_OP_DEL:
+ f->dump_string("op", "del");
+ break;
+ case CLS_RGW_OP_CANCEL:
+ f->dump_string("op", "cancel");
+ break;
+ case CLS_RGW_OP_UNKNOWN:
+ f->dump_string("op", "unknown");
+ break;
+ case CLS_RGW_OP_LINK_OLH:
+ f->dump_string("op", "link_olh");
+ break;
+ case CLS_RGW_OP_LINK_OLH_DM:
+ f->dump_string("op", "link_olh_del");
+ break;
+ case CLS_RGW_OP_UNLINK_INSTANCE:
+ f->dump_string("op", "unlink_instance");
+ break;
+ case CLS_RGW_OP_SYNCSTOP:
+ f->dump_string("op", "syncstop");
+ break;
+ case CLS_RGW_OP_RESYNC:
+ f->dump_string("op", "resync");
+ break;
+ default:
+ f->dump_string("op", "invalid");
+ break;
+ }
+
+ f->dump_string("object", object);
+ f->dump_string("instance", instance);
+
+ switch (state) {
+ case CLS_RGW_STATE_PENDING_MODIFY:
+ f->dump_string("state", "pending");
+ break;
+ case CLS_RGW_STATE_COMPLETE:
+ f->dump_string("state", "complete");
+ break;
+ default:
+ f->dump_string("state", "invalid");
+ break;
+ }
+
+ f->dump_int("index_ver", index_ver);
+ utime_t ut(timestamp);
+ ut.gmtime_nsec(f->dump_stream("timestamp"));
+ f->open_object_section("ver");
+ ver.dump(f);
+ f->close_section();
+ f->dump_int("bilog_flags", bilog_flags);
+ f->dump_bool("versioned", (bilog_flags & RGW_BILOG_FLAG_VERSIONED_OP) != 0);
+ f->dump_string("owner", owner);
+ f->dump_string("owner_display_name", owner_display_name);
+ encode_json("zones_trace", zones_trace, f);
+}
+
+void rgw_bi_log_entry::generate_test_instances(list<rgw_bi_log_entry*>& ls)
+{
+ ls.push_back(new rgw_bi_log_entry);
+ ls.push_back(new rgw_bi_log_entry);
+ ls.back()->id = "midf";
+ ls.back()->object = "obj";
+ ls.back()->timestamp = ceph::real_clock::from_ceph_timespec({init_le32(2), init_le32(3)});
+ ls.back()->index_ver = 4323;
+ ls.back()->tag = "tagasdfds";
+ ls.back()->op = CLS_RGW_OP_DEL;
+ ls.back()->state = CLS_RGW_STATE_PENDING_MODIFY;
+}
+
+void rgw_bucket_category_stats::generate_test_instances(list<rgw_bucket_category_stats*>& o)
+{
+ rgw_bucket_category_stats *s = new rgw_bucket_category_stats;
+ s->total_size = 1024;
+ s->total_size_rounded = 4096;
+ s->num_entries = 2;
+ s->actual_size = 1024;
+ o.push_back(s);
+ o.push_back(new rgw_bucket_category_stats);
+}
+
+void rgw_bucket_category_stats::dump(Formatter *f) const
+{
+ f->dump_unsigned("total_size", total_size);
+ f->dump_unsigned("total_size_rounded", total_size_rounded);
+ f->dump_unsigned("num_entries", num_entries);
+ f->dump_unsigned("actual_size", actual_size);
+}
+
+void rgw_bucket_dir_header::generate_test_instances(list<rgw_bucket_dir_header*>& o)
+{
+ list<rgw_bucket_category_stats *> l;
+ list<rgw_bucket_category_stats *>::iterator iter;
+ rgw_bucket_category_stats::generate_test_instances(l);
+
+ uint8_t i;
+ for (i = 0, iter = l.begin(); iter != l.end(); ++iter, ++i) {
+ RGWObjCategory c = static_cast<RGWObjCategory>(i);
+ rgw_bucket_dir_header *h = new rgw_bucket_dir_header;
+ rgw_bucket_category_stats *s = *iter;
+ h->stats[c] = *s;
+
+ o.push_back(h);
+
+ delete s;
+ }
+
+ o.push_back(new rgw_bucket_dir_header);
+}
+
+void rgw_bucket_dir_header::dump(Formatter *f) const
+{
+ f->dump_int("ver", ver);
+ f->dump_int("master_ver", master_ver);
+ f->open_array_section("stats");
+ for (auto iter = stats.begin(); iter != stats.end(); ++iter) {
+ f->dump_int("category", int(iter->first));
+ f->open_object_section("category_stats");
+ iter->second.dump(f);
+ f->close_section();
+ }
+ f->close_section();
+ ::encode_json("new_instance", new_instance, f);
+}
+
+void rgw_bucket_dir::generate_test_instances(list<rgw_bucket_dir*>& o)
+{
+ list<rgw_bucket_dir_header *> l;
+ list<rgw_bucket_dir_header *>::iterator iter;
+ rgw_bucket_dir_header::generate_test_instances(l);
+
+ uint8_t i;
+ for (i = 0, iter = l.begin(); iter != l.end(); ++iter, ++i) {
+ rgw_bucket_dir *d = new rgw_bucket_dir;
+ rgw_bucket_dir_header *h = *iter;
+ d->header = *h;
+
+ list<rgw_bucket_dir_entry *> el;
+ list<rgw_bucket_dir_entry *>::iterator eiter;
+ for (eiter = el.begin(); eiter != el.end(); ++eiter) {
+ rgw_bucket_dir_entry *e = *eiter;
+ d->m[e->key.name] = *e;
+
+ delete e;
+ }
+
+ o.push_back(d);
+
+ delete h;
+ }
+
+ o.push_back(new rgw_bucket_dir);
+}
+
+void rgw_bucket_dir::dump(Formatter *f) const
+{
+ f->open_object_section("header");
+ header.dump(f);
+ f->close_section();
+ map<string, rgw_bucket_dir_entry>::const_iterator iter = m.begin();
+ f->open_array_section("map");
+ for (; iter != m.end(); ++iter) {
+ f->dump_string("key", iter->first);
+ f->open_object_section("dir_entry");
+ iter->second.dump(f);
+ f->close_section();
+ }
+ f->close_section();
+}
+
+void rgw_usage_log_entry::dump(Formatter *f) const
+{
+ f->dump_string("owner", owner.to_str());
+ f->dump_string("payer", payer.to_str());
+ f->dump_string("bucket", bucket);
+ f->dump_unsigned("epoch", epoch);
+
+ f->open_object_section("total_usage");
+ f->dump_unsigned("bytes_sent", total_usage.bytes_sent);
+ f->dump_unsigned("bytes_received", total_usage.bytes_received);
+ f->dump_unsigned("ops", total_usage.ops);
+ f->dump_unsigned("successful_ops", total_usage.successful_ops);
+ f->close_section();
+
+ f->open_array_section("categories");
+ if (!usage_map.empty()) {
+ map<string, rgw_usage_data>::const_iterator it;
+ for (it = usage_map.begin(); it != usage_map.end(); ++it) {
+ const rgw_usage_data& usage = it->second; // per-category usage; named to avoid shadowing the total_usage member
+ f->open_object_section("entry");
+ f->dump_string("category", it->first);
+ f->dump_unsigned("bytes_sent", usage.bytes_sent);
+ f->dump_unsigned("bytes_received", usage.bytes_received);
+ f->dump_unsigned("ops", usage.ops);
+ f->dump_unsigned("successful_ops", usage.successful_ops);
+ f->close_section();
+ }
+ }
+ f->close_section();
+}
+
+void rgw_usage_log_entry::generate_test_instances(list<rgw_usage_log_entry *> &o)
+{
+ rgw_usage_log_entry *entry = new rgw_usage_log_entry;
+ rgw_usage_data usage_data{1024, 2048};
+ entry->owner = rgw_user("owner");
+ entry->payer = rgw_user("payer");
+ entry->bucket = "bucket";
+ entry->epoch = 1234;
+ entry->total_usage.bytes_sent = usage_data.bytes_sent;
+ entry->total_usage.bytes_received = usage_data.bytes_received;
+ entry->total_usage.ops = usage_data.ops;
+ entry->total_usage.successful_ops = usage_data.successful_ops;
+ entry->usage_map["get_obj"] = usage_data;
+ o.push_back(entry);
+ o.push_back(new rgw_usage_log_entry);
+}
+
+void cls_rgw_reshard_entry::generate_key(const string& tenant, const string& bucket_name, string *key)
+{
+ *key = tenant + ":" + bucket_name;
+}
+
+void cls_rgw_reshard_entry::get_key(string *key) const
+{
+ generate_key(tenant, bucket_name, key);
+}
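+
+// For illustration, generate_key("acme", "logs", &key) produces "acme:logs",
+// and an empty tenant degenerates to ":logs", so the reshard queue still keys
+// entries uniquely per bucket name.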
+
+void cls_rgw_reshard_entry::dump(Formatter *f) const
+{
+ utime_t ut(time);
+ encode_json("time",ut, f);
+ encode_json("tenant", tenant, f);
+ encode_json("bucket_name", bucket_name, f);
+ encode_json("bucket_id", bucket_id, f);
+ encode_json("new_instance_id", new_instance_id, f);
+ encode_json("old_num_shards", old_num_shards, f);
+ encode_json("new_num_shards", new_num_shards, f);
+
+}
+
+void cls_rgw_reshard_entry::generate_test_instances(list<cls_rgw_reshard_entry*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_entry);
+ ls.push_back(new cls_rgw_reshard_entry);
+ ls.back()->time = ceph::real_clock::from_ceph_timespec({init_le32(2), init_le32(3)});
+ ls.back()->tenant = "tenant";
+ ls.back()->bucket_name = "bucket1";
+ ls.back()->bucket_id = "bucket_id";
+ ls.back()->new_instance_id = "new_instance_id";
+ ls.back()->old_num_shards = 8;
+ ls.back()->new_num_shards = 64;
+}
+
+void cls_rgw_bucket_instance_entry::dump(Formatter *f) const
+{
+ encode_json("reshard_status", to_string(reshard_status), f);
+ encode_json("new_bucket_instance_id", new_bucket_instance_id, f);
+ encode_json("num_shards", num_shards, f);
+
+}
+
+void cls_rgw_bucket_instance_entry::generate_test_instances(list<cls_rgw_bucket_instance_entry*>& ls)
+{
+ ls.push_back(new cls_rgw_bucket_instance_entry);
+ ls.push_back(new cls_rgw_bucket_instance_entry);
+ ls.back()->reshard_status = CLS_RGW_RESHARD_IN_PROGRESS;
+ ls.back()->new_bucket_instance_id = "new_instance_id";
+}
+
+void cls_rgw_lc_obj_head::dump(Formatter *f) const
+{
+ encode_json("start_date", start_date, f);
+ encode_json("marker", marker, f);
+}
+
+void cls_rgw_lc_obj_head::generate_test_instances(list<cls_rgw_lc_obj_head*>& ls)
+{
+}
diff --git a/src/cls/rgw/cls_rgw_types.h b/src/cls/rgw/cls_rgw_types.h
new file mode 100644
index 00000000..01dbe192
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_types.h
@@ -0,0 +1,1183 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_RGW_TYPES_H
+#define CEPH_CLS_RGW_TYPES_H
+
+#include "common/ceph_time.h"
+#include "common/Formatter.h"
+
+#include "rgw/rgw_basic_types.h"
+
+#define CEPH_RGW_REMOVE 'r'
+#define CEPH_RGW_UPDATE 'u'
+#define CEPH_RGW_TAG_TIMEOUT 120
+#define CEPH_RGW_DIR_SUGGEST_LOG_OP 0x80
+#define CEPH_RGW_DIR_SUGGEST_OP_MASK 0x7f
+
+class JSONObj;
+
+namespace ceph {
+ class Formatter;
+}
+using ceph::operator <<;
+
+using rgw_zone_set = std::set<std::string>;
+
+enum RGWPendingState {
+ CLS_RGW_STATE_PENDING_MODIFY = 0,
+ CLS_RGW_STATE_COMPLETE = 1,
+ CLS_RGW_STATE_UNKNOWN = 2,
+};
+
+enum RGWModifyOp {
+ CLS_RGW_OP_ADD = 0,
+ CLS_RGW_OP_DEL = 1,
+ CLS_RGW_OP_CANCEL = 2,
+ CLS_RGW_OP_UNKNOWN = 3,
+ CLS_RGW_OP_LINK_OLH = 4,
+ CLS_RGW_OP_LINK_OLH_DM = 5, /* creation of delete marker */
+ CLS_RGW_OP_UNLINK_INSTANCE = 6,
+ CLS_RGW_OP_SYNCSTOP = 7,
+ CLS_RGW_OP_RESYNC = 8,
+};
+
+enum RGWBILogFlags {
+ RGW_BILOG_FLAG_VERSIONED_OP = 0x1,
+};
+
+enum RGWCheckMTimeType {
+ CLS_RGW_CHECK_TIME_MTIME_EQ = 0,
+ CLS_RGW_CHECK_TIME_MTIME_LT = 1,
+ CLS_RGW_CHECK_TIME_MTIME_LE = 2,
+ CLS_RGW_CHECK_TIME_MTIME_GT = 3,
+ CLS_RGW_CHECK_TIME_MTIME_GE = 4,
+};
+
+#define ROUND_BLOCK_SIZE 4096
+
+static inline uint64_t cls_rgw_get_rounded_size(uint64_t size)
+{
+ return (size + ROUND_BLOCK_SIZE - 1) & ~(ROUND_BLOCK_SIZE - 1);
+}
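+
+// Rounds a size up to the next 4 KiB boundary with the usual power-of-two mask
+// trick: cls_rgw_get_rounded_size(1) == 4096, cls_rgw_get_rounded_size(5000) == 8192,
+// and 0 stays 0.  The rounded figure feeds total_size_rounded in
+// rgw_bucket_category_stats further down.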
+
+struct rgw_bucket_pending_info {
+ RGWPendingState state;
+ ceph::real_time timestamp;
+ uint8_t op;
+
+ rgw_bucket_pending_info() : state(CLS_RGW_STATE_PENDING_MODIFY), op(0) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(2, 2, bl);
+ uint8_t s = (uint8_t)state;
+ encode(s, bl);
+ encode(timestamp, bl);
+ encode(op, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
+ uint8_t s;
+ decode(s, bl);
+ state = (RGWPendingState)s;
+ decode(timestamp, bl);
+ decode(op, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ void decode_json(JSONObj *obj);
+ static void generate_test_instances(list<rgw_bucket_pending_info*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_pending_info)
+
+
+// categories of objects stored in a bucket index (b-i) and used to
+// differentiate their associated statistics (bucket stats, and in
+// some cases user stats)
+enum class RGWObjCategory : uint8_t {
+ None = 0, // b-i entries for delete markers; also used in
+ // testing and for default values in default
+ // constructors
+
+ Main = 1, // b-i entries for standard objs
+
+ Shadow = 2, // presumably intended for multipart shadow
+ // uploads; not currently used in the codebase
+
+ MultiMeta = 3, // b-i entries for multipart upload metadata objs
+};
+
+
+struct rgw_bucket_dir_entry_meta {
+ RGWObjCategory category;
+ uint64_t size;
+ ceph::real_time mtime;
+ string etag;
+ string owner;
+ string owner_display_name;
+ string content_type;
+ uint64_t accounted_size;
+ string user_data;
+ string storage_class;
+ bool appendable;
+
+ rgw_bucket_dir_entry_meta() :
+ category(RGWObjCategory::None), size(0), accounted_size(0), appendable(false) { }
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(7, 3, bl);
+ encode(category, bl);
+ encode(size, bl);
+ encode(mtime, bl);
+ encode(etag, bl);
+ encode(owner, bl);
+ encode(owner_display_name, bl);
+ encode(content_type, bl);
+ encode(accounted_size, bl);
+ encode(user_data, bl);
+ encode(storage_class, bl);
+ encode(appendable, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(7, 3, 3, bl);
+ decode(category, bl);
+ decode(size, bl);
+ decode(mtime, bl);
+ decode(etag, bl);
+ decode(owner, bl);
+ decode(owner_display_name, bl);
+ if (struct_v >= 2)
+ decode(content_type, bl);
+ if (struct_v >= 4)
+ decode(accounted_size, bl);
+ else
+ accounted_size = size;
+ if (struct_v >= 5)
+ decode(user_data, bl);
+ if (struct_v >= 6)
+ decode(storage_class, bl);
+ if (struct_v >= 7)
+ decode(appendable, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ void decode_json(JSONObj *obj);
+ static void generate_test_instances(list<rgw_bucket_dir_entry_meta*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_dir_entry_meta)
+
+template<class T>
+void encode_packed_val(T val, bufferlist& bl)
+{
+ using ceph::encode;
+ if ((uint64_t)val < 0x80) {
+ encode((uint8_t)val, bl);
+ } else {
+ unsigned char c = 0x80;
+
+ if ((uint64_t)val < 0x100) {
+ c |= 1;
+ encode(c, bl);
+ encode((uint8_t)val, bl);
+ } else if ((uint64_t)val < 0x10000) { // strictly less: 0x10000 itself does not fit in a uint16_t
+ c |= 2;
+ encode(c, bl);
+ encode((uint16_t)val, bl);
+ } else if ((uint64_t)val <= 0x1000000) {
+ c |= 4;
+ encode(c, bl);
+ encode((uint32_t)val, bl);
+ } else {
+ c |= 8;
+ encode(c, bl);
+ encode((uint64_t)val, bl);
+ }
+ }
+}
+
+template<class T>
+void decode_packed_val(T& val, bufferlist::const_iterator& bl)
+{
+ using ceph::decode;
+ unsigned char c;
+ decode(c, bl);
+ if (c < 0x80) {
+ val = c;
+ return;
+ }
+
+ c &= ~0x80;
+
+ switch (c) {
+ case 1:
+ {
+ uint8_t v;
+ decode(v, bl);
+ val = v;
+ }
+ break;
+ case 2:
+ {
+ uint16_t v;
+ decode(v, bl);
+ val = v;
+ }
+ break;
+ case 4:
+ {
+ uint32_t v;
+ decode(v, bl);
+ val = v;
+ }
+ break;
+ case 8:
+ {
+ uint64_t v;
+ decode(v, bl);
+ val = v;
+ }
+ break;
+ default:
+ throw buffer::error();
+ }
+}
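+
+// The packed-val helpers store integers compactly: values below 0x80 go out as
+// a single byte, anything larger gets a tag byte (0x80 | width code) followed
+// by the value in the smallest of uint8/uint16/uint32/uint64 that holds it.
+// For illustration, 5 encodes as the single byte 0x05, 300 as 0x82 plus a
+// 16-bit value (3 bytes), and 100000 as 0x84 plus a 32-bit value (5 bytes);
+// decode_packed_val throws buffer::error on an unrecognized tag.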
+
+struct rgw_bucket_entry_ver {
+ int64_t pool;
+ uint64_t epoch;
+
+ rgw_bucket_entry_ver() : pool(-1), epoch(0) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode_packed_val(pool, bl);
+ encode_packed_val(epoch, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode_packed_val(pool, bl);
+ decode_packed_val(epoch, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ void decode_json(JSONObj *obj);
+ static void generate_test_instances(list<rgw_bucket_entry_ver*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_entry_ver)
+
+struct cls_rgw_obj_key {
+ string name;
+ string instance;
+
+ cls_rgw_obj_key() {}
+ cls_rgw_obj_key(const string &_name) : name(_name) {}
+ cls_rgw_obj_key(const string& n, const string& i) : name(n), instance(i) {}
+
+ void set(const string& _name) {
+ name = _name;
+ }
+
+ bool operator==(const cls_rgw_obj_key& k) const {
+ return (name.compare(k.name) == 0) &&
+ (instance.compare(k.instance) == 0);
+ }
+ bool operator<(const cls_rgw_obj_key& k) const {
+ int r = name.compare(k.name);
+ if (r == 0) {
+ r = instance.compare(k.instance);
+ }
+ return (r < 0);
+ }
+ bool operator>(const cls_rgw_obj_key& k) const {
+ return k < *this;
+ }
+ bool operator<=(const cls_rgw_obj_key& k) const {
+ return !(k < *this);
+ }
+ bool operator>=(const cls_rgw_obj_key& k) const {
+ return !(k > *this);
+ }
+ bool empty() const {
+ return name.empty();
+ }
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(name, bl);
+ encode(instance, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(name, bl);
+ decode(instance, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const {
+ f->dump_string("name", name);
+ f->dump_string("instance", instance);
+ }
+ void decode_json(JSONObj *obj);
+ static void generate_test_instances(list<cls_rgw_obj_key*>& ls) {
+ ls.push_back(new cls_rgw_obj_key);
+ ls.push_back(new cls_rgw_obj_key);
+ ls.back()->name = "name";
+ ls.back()->instance = "instance";
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_obj_key)
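+
+// Keys sort by name and then by instance, and note that empty() only tests the
+// name, so a key with an empty name but a non-empty instance still reports empty.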
+
+
+#define RGW_BUCKET_DIRENT_FLAG_VER 0x1 /* a versioned object instance */
+#define RGW_BUCKET_DIRENT_FLAG_CURRENT 0x2 /* the last object instance of a versioned object */
+#define RGW_BUCKET_DIRENT_FLAG_DELETE_MARKER 0x4 /* delete marker */
+#define RGW_BUCKET_DIRENT_FLAG_VER_MARKER 0x8 /* object is versioned, a placeholder for the plain entry */
+
+struct rgw_bucket_dir_entry {
+ cls_rgw_obj_key key;
+ rgw_bucket_entry_ver ver;
+ std::string locator;
+ bool exists;
+ rgw_bucket_dir_entry_meta meta;
+ multimap<string, rgw_bucket_pending_info> pending_map;
+ uint64_t index_ver;
+ string tag;
+ uint16_t flags;
+ uint64_t versioned_epoch;
+
+ rgw_bucket_dir_entry() :
+ exists(false), index_ver(0), flags(0), versioned_epoch(0) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(8, 3, bl);
+ encode(key.name, bl);
+ encode(ver.epoch, bl);
+ encode(exists, bl);
+ encode(meta, bl);
+ encode(pending_map, bl);
+ encode(locator, bl);
+ encode(ver, bl);
+ encode_packed_val(index_ver, bl);
+ encode(tag, bl);
+ encode(key.instance, bl);
+ encode(flags, bl);
+ encode(versioned_epoch, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(8, 3, 3, bl);
+ decode(key.name, bl);
+ decode(ver.epoch, bl);
+ decode(exists, bl);
+ decode(meta, bl);
+ decode(pending_map, bl);
+ if (struct_v >= 2) {
+ decode(locator, bl);
+ }
+ if (struct_v >= 4) {
+ decode(ver, bl);
+ } else {
+ ver.pool = -1;
+ }
+ if (struct_v >= 5) {
+ decode_packed_val(index_ver, bl);
+ decode(tag, bl);
+ }
+ if (struct_v >= 6) {
+ decode(key.instance, bl);
+ }
+ if (struct_v >= 7) {
+ decode(flags, bl);
+ }
+ if (struct_v >= 8) {
+ decode(versioned_epoch, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ bool is_current() const {
+ int test_flags = RGW_BUCKET_DIRENT_FLAG_VER | RGW_BUCKET_DIRENT_FLAG_CURRENT;
+ return (flags & RGW_BUCKET_DIRENT_FLAG_VER) == 0 ||
+ (flags & test_flags) == test_flags;
+ }
+ bool is_delete_marker() const {
+ return (flags & RGW_BUCKET_DIRENT_FLAG_DELETE_MARKER) != 0;
+ }
+ bool is_visible() const {
+ return is_current() && !is_delete_marker();
+ }
+ bool is_valid() const { return (flags & RGW_BUCKET_DIRENT_FLAG_VER_MARKER) == 0; }
+
+ void dump(Formatter *f) const;
+ void decode_json(JSONObj *obj);
+ static void generate_test_instances(list<rgw_bucket_dir_entry*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_dir_entry)
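+
+// How the RGW_BUCKET_DIRENT_FLAG_* bits combine (illustrative):
+//   flags == 0                      plain, unversioned entry: current and visible
+//   VER                             older instance of a versioned object: not current
+//   VER | CURRENT                   latest instance: current and visible
+//   VER | CURRENT | DELETE_MARKER   latest entry is a delete marker: current, not visible
+//   VER_MARKER                      placeholder plain entry for a versioned object: is_valid() == false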
+
+enum class BIIndexType : uint8_t {
+ Invalid = 0,
+ Plain = 1,
+ Instance = 2,
+ OLH = 3,
+};
+
+struct rgw_bucket_category_stats;
+
+struct rgw_cls_bi_entry {
+ BIIndexType type;
+ string idx;
+ bufferlist data;
+
+ rgw_cls_bi_entry() : type(BIIndexType::Invalid) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(type, bl);
+ encode(idx, bl);
+ encode(data, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ uint8_t c;
+ decode(c, bl);
+ type = (BIIndexType)c;
+ decode(idx, bl);
+ decode(data, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ void decode_json(JSONObj *obj, cls_rgw_obj_key *effective_key = NULL);
+
+ bool get_info(cls_rgw_obj_key *key, RGWObjCategory *category,
+ rgw_bucket_category_stats *accounted_stats);
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_entry)
+
+enum OLHLogOp {
+ CLS_RGW_OLH_OP_UNKNOWN = 0,
+ CLS_RGW_OLH_OP_LINK_OLH = 1,
+ CLS_RGW_OLH_OP_UNLINK_OLH = 2, /* object does not exist */
+ CLS_RGW_OLH_OP_REMOVE_INSTANCE = 3,
+};
+
+struct rgw_bucket_olh_log_entry {
+ uint64_t epoch;
+ OLHLogOp op;
+ string op_tag;
+ cls_rgw_obj_key key;
+ bool delete_marker;
+
+ rgw_bucket_olh_log_entry() : epoch(0), op(CLS_RGW_OLH_OP_UNKNOWN), delete_marker(false) {}
+
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(epoch, bl);
+ encode((__u8)op, bl);
+ encode(op_tag, bl);
+ encode(key, bl);
+ encode(delete_marker, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(epoch, bl);
+ uint8_t c;
+ decode(c, bl);
+ op = (OLHLogOp)c;
+ decode(op_tag, bl);
+ decode(key, bl);
+ decode(delete_marker, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(list<rgw_bucket_olh_log_entry*>& o);
+ void dump(Formatter *f) const;
+ void decode_json(JSONObj *obj);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_olh_log_entry)
+
+struct rgw_bucket_olh_entry {
+ cls_rgw_obj_key key;
+ bool delete_marker;
+ uint64_t epoch;
+ map<uint64_t, vector<struct rgw_bucket_olh_log_entry> > pending_log;
+ string tag;
+ bool exists;
+ bool pending_removal;
+
+ rgw_bucket_olh_entry() : delete_marker(false), epoch(0), exists(false), pending_removal(false) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(key, bl);
+ encode(delete_marker, bl);
+ encode(epoch, bl);
+ encode(pending_log, bl);
+ encode(tag, bl);
+ encode(exists, bl);
+ encode(pending_removal, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(key, bl);
+ decode(delete_marker, bl);
+ decode(epoch, bl);
+ decode(pending_log, bl);
+ decode(tag, bl);
+ decode(exists, bl);
+ decode(pending_removal, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ void decode_json(JSONObj *obj);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_olh_entry)
+
+struct rgw_bi_log_entry {
+ string id;
+ string object;
+ string instance;
+ ceph::real_time timestamp;
+ rgw_bucket_entry_ver ver;
+ RGWModifyOp op;
+ RGWPendingState state;
+ uint64_t index_ver;
+ string tag;
+ uint16_t bilog_flags;
+  string owner; /* only set if it's a delete marker */
+  string owner_display_name; /* only set if it's a delete marker */
+ rgw_zone_set zones_trace;
+
+ rgw_bi_log_entry() : op(CLS_RGW_OP_UNKNOWN), state(CLS_RGW_STATE_PENDING_MODIFY), index_ver(0), bilog_flags(0) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(4, 1, bl);
+ encode(id, bl);
+ encode(object, bl);
+ encode(timestamp, bl);
+ encode(ver, bl);
+ encode(tag, bl);
+ uint8_t c = (uint8_t)op;
+ encode(c, bl);
+ c = (uint8_t)state;
+ encode(c, bl);
+ encode_packed_val(index_ver, bl);
+ encode(instance, bl);
+ encode(bilog_flags, bl);
+ encode(owner, bl);
+ encode(owner_display_name, bl);
+ encode(zones_trace, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START(4, bl);
+ decode(id, bl);
+ decode(object, bl);
+ decode(timestamp, bl);
+ decode(ver, bl);
+ decode(tag, bl);
+ uint8_t c;
+ decode(c, bl);
+ op = (RGWModifyOp)c;
+ decode(c, bl);
+ state = (RGWPendingState)c;
+ decode_packed_val(index_ver, bl);
+ if (struct_v >= 2) {
+ decode(instance, bl);
+ decode(bilog_flags, bl);
+ }
+ if (struct_v >= 3) {
+ decode(owner, bl);
+ decode(owner_display_name, bl);
+ }
+ if (struct_v >= 4) {
+ decode(zones_trace, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ void decode_json(JSONObj *obj);
+ static void generate_test_instances(list<rgw_bi_log_entry*>& o);
+
+ bool is_versioned() {
+ return ((bilog_flags & RGW_BILOG_FLAG_VERSIONED_OP) != 0);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_bi_log_entry)
+
+struct rgw_bucket_category_stats {
+ uint64_t total_size;
+ uint64_t total_size_rounded;
+ uint64_t num_entries;
+  uint64_t actual_size{0}; ///< accounts for compression, encryption
+
+ rgw_bucket_category_stats() : total_size(0), total_size_rounded(0), num_entries(0) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(3, 2, bl);
+ encode(total_size, bl);
+ encode(total_size_rounded, bl);
+ encode(num_entries, bl);
+ encode(actual_size, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(3, 2, 2, bl);
+ decode(total_size, bl);
+ decode(total_size_rounded, bl);
+ decode(num_entries, bl);
+ if (struct_v >= 3) {
+ decode(actual_size, bl);
+ } else {
+ actual_size = total_size;
+ }
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<rgw_bucket_category_stats*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_category_stats)
+
+enum cls_rgw_reshard_status {
+ CLS_RGW_RESHARD_NOT_RESHARDING = 0,
+ CLS_RGW_RESHARD_IN_PROGRESS = 1,
+ CLS_RGW_RESHARD_DONE = 2,
+};
+
+static inline std::string to_string(const enum cls_rgw_reshard_status status)
+{
+  switch (status) {
+  case CLS_RGW_RESHARD_NOT_RESHARDING:
+    return "not-resharding";
+  case CLS_RGW_RESHARD_IN_PROGRESS:
+    return "in-progress";
+  case CLS_RGW_RESHARD_DONE:
+    return "done";
+  default:
+    break;
+  };
+  return "Unknown reshard status";
+}
+
+struct cls_rgw_bucket_instance_entry {
+ cls_rgw_reshard_status reshard_status{CLS_RGW_RESHARD_NOT_RESHARDING};
+ string new_bucket_instance_id;
+ int32_t num_shards{-1};
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode((uint8_t)reshard_status, bl);
+ encode(new_bucket_instance_id, bl);
+ encode(num_shards, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ uint8_t s;
+ decode(s, bl);
+ reshard_status = (cls_rgw_reshard_status)s;
+ decode(new_bucket_instance_id, bl);
+ decode(num_shards, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_rgw_bucket_instance_entry*>& o);
+
+ void clear() {
+ reshard_status = CLS_RGW_RESHARD_NOT_RESHARDING;
+ new_bucket_instance_id.clear();
+ }
+
+ void set_status(const string& new_instance_id, int32_t new_num_shards, cls_rgw_reshard_status s) {
+ reshard_status = s;
+ new_bucket_instance_id = new_instance_id;
+ num_shards = new_num_shards;
+ }
+
+ bool resharding() const {
+ return reshard_status != CLS_RGW_RESHARD_NOT_RESHARDING;
+ }
+ bool resharding_in_progress() const {
+ return reshard_status == CLS_RGW_RESHARD_IN_PROGRESS;
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_bucket_instance_entry)
+
+struct rgw_bucket_dir_header {
+ map<RGWObjCategory, rgw_bucket_category_stats> stats;
+ uint64_t tag_timeout;
+ uint64_t ver;
+ uint64_t master_ver;
+ string max_marker;
+ cls_rgw_bucket_instance_entry new_instance;
+ bool syncstopped;
+
+ rgw_bucket_dir_header() : tag_timeout(0), ver(0), master_ver(0), syncstopped(false) {}
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(7, 2, bl);
+ encode(stats, bl);
+ encode(tag_timeout, bl);
+ encode(ver, bl);
+ encode(master_ver, bl);
+ encode(max_marker, bl);
+ encode(new_instance, bl);
+ encode(syncstopped,bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(6, 2, 2, bl);
+ decode(stats, bl);
+ if (struct_v > 2) {
+ decode(tag_timeout, bl);
+ } else {
+ tag_timeout = 0;
+ }
+ if (struct_v >= 4) {
+ decode(ver, bl);
+ decode(master_ver, bl);
+ } else {
+ ver = 0;
+ }
+ if (struct_v >= 5) {
+ decode(max_marker, bl);
+ }
+ if (struct_v >= 6) {
+ decode(new_instance, bl);
+ } else {
+ new_instance = cls_rgw_bucket_instance_entry();
+ }
+ if (struct_v >= 7) {
+ decode(syncstopped,bl);
+ }
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<rgw_bucket_dir_header*>& o);
+
+ bool resharding() const {
+ return new_instance.resharding();
+ }
+ bool resharding_in_progress() const {
+ return new_instance.resharding_in_progress();
+ }
+};
+WRITE_CLASS_ENCODER(rgw_bucket_dir_header)
+
+struct rgw_bucket_dir {
+ rgw_bucket_dir_header header;
+ std::map<string, rgw_bucket_dir_entry> m;
+
+ void encode(bufferlist &bl) const {
+ ENCODE_START(2, 2, bl);
+ encode(header, bl);
+ encode(m, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
+ decode(header, bl);
+ decode(m, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<rgw_bucket_dir*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_dir)
+
+struct rgw_usage_data {
+ uint64_t bytes_sent;
+ uint64_t bytes_received;
+ uint64_t ops;
+ uint64_t successful_ops;
+
+ rgw_usage_data() : bytes_sent(0), bytes_received(0), ops(0), successful_ops(0) {}
+ rgw_usage_data(uint64_t sent, uint64_t received) : bytes_sent(sent), bytes_received(received), ops(0), successful_ops(0) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(bytes_sent, bl);
+ encode(bytes_received, bl);
+ encode(ops, bl);
+ encode(successful_ops, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(bytes_sent, bl);
+ decode(bytes_received, bl);
+ decode(ops, bl);
+ decode(successful_ops, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void aggregate(const rgw_usage_data& usage) {
+ bytes_sent += usage.bytes_sent;
+ bytes_received += usage.bytes_received;
+ ops += usage.ops;
+ successful_ops += usage.successful_ops;
+ }
+};
+WRITE_CLASS_ENCODER(rgw_usage_data)
+
+
+struct rgw_usage_log_entry {
+ rgw_user owner;
+ rgw_user payer; /* if empty, same as owner */
+ string bucket;
+ uint64_t epoch;
+ rgw_usage_data total_usage; /* this one is kept for backwards compatibility */
+ map<string, rgw_usage_data> usage_map;
+
+ rgw_usage_log_entry() : epoch(0) {}
+ rgw_usage_log_entry(string& o, string& b) : owner(o), bucket(b), epoch(0) {}
+ rgw_usage_log_entry(string& o, string& p, string& b) : owner(o), payer(p), bucket(b), epoch(0) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(3, 1, bl);
+ encode(owner.to_str(), bl);
+ encode(bucket, bl);
+ encode(epoch, bl);
+ encode(total_usage.bytes_sent, bl);
+ encode(total_usage.bytes_received, bl);
+ encode(total_usage.ops, bl);
+ encode(total_usage.successful_ops, bl);
+ encode(usage_map, bl);
+ encode(payer.to_str(), bl);
+ ENCODE_FINISH(bl);
+ }
+
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(3, bl);
+ string s;
+ decode(s, bl);
+ owner.from_str(s);
+ decode(bucket, bl);
+ decode(epoch, bl);
+ decode(total_usage.bytes_sent, bl);
+ decode(total_usage.bytes_received, bl);
+ decode(total_usage.ops, bl);
+ decode(total_usage.successful_ops, bl);
+ if (struct_v < 2) {
+ usage_map[""] = total_usage;
+ } else {
+ decode(usage_map, bl);
+ }
+ if (struct_v >= 3) {
+ string p;
+ decode(p, bl);
+ payer.from_str(p);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ void aggregate(const rgw_usage_log_entry& e, map<string, bool> *categories = NULL) {
+ if (owner.empty()) {
+ owner = e.owner;
+ bucket = e.bucket;
+ epoch = e.epoch;
+ payer = e.payer;
+ }
+
+ map<string, rgw_usage_data>::const_iterator iter;
+ for (iter = e.usage_map.begin(); iter != e.usage_map.end(); ++iter) {
+ if (!categories || !categories->size() || categories->count(iter->first)) {
+ add(iter->first, iter->second);
+ }
+ }
+ }
+
+ void sum(rgw_usage_data& usage, map<string, bool>& categories) const {
+ usage = rgw_usage_data();
+ for (map<string, rgw_usage_data>::const_iterator iter = usage_map.begin(); iter != usage_map.end(); ++iter) {
+ if (!categories.size() || categories.count(iter->first)) {
+ usage.aggregate(iter->second);
+ }
+ }
+ }
+
+ void add(const string& category, const rgw_usage_data& data) {
+ usage_map[category].aggregate(data);
+ total_usage.aggregate(data);
+ }
+
+ void dump(Formatter* f) const;
+ static void generate_test_instances(list<rgw_usage_log_entry*>& o);
+
+};
+WRITE_CLASS_ENCODER(rgw_usage_log_entry)
+
+struct rgw_usage_log_info {
+ vector<rgw_usage_log_entry> entries;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ DECODE_FINISH(bl);
+ }
+
+ rgw_usage_log_info() {}
+};
+WRITE_CLASS_ENCODER(rgw_usage_log_info)
+
+struct rgw_user_bucket {
+ string user;
+ string bucket;
+
+ rgw_user_bucket() {}
+ rgw_user_bucket(const string& u, const string& b) : user(u), bucket(b) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(user, bl);
+ encode(bucket, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(user, bl);
+ decode(bucket, bl);
+ DECODE_FINISH(bl);
+ }
+
+ bool operator<(const rgw_user_bucket& ub2) const {
+ int comp = user.compare(ub2.user);
+ if (comp < 0)
+ return true;
+ else if (!comp)
+ return bucket.compare(ub2.bucket) < 0;
+
+ return false;
+ }
+};
+WRITE_CLASS_ENCODER(rgw_user_bucket)
+
+enum cls_rgw_gc_op {
+ CLS_RGW_GC_DEL_OBJ,
+ CLS_RGW_GC_DEL_BUCKET,
+};
+
+struct cls_rgw_obj {
+ string pool;
+ cls_rgw_obj_key key;
+ string loc;
+
+ cls_rgw_obj() {}
+ cls_rgw_obj(string& _p, cls_rgw_obj_key& _k) : pool(_p), key(_k) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(pool, bl);
+ encode(key.name, bl);
+ encode(loc, bl);
+ encode(key, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(pool, bl);
+ decode(key.name, bl);
+ decode(loc, bl);
+ if (struct_v >= 2) {
+ decode(key, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const {
+ f->dump_string("pool", pool);
+ f->dump_string("oid", key.name);
+ f->dump_string("key", loc);
+ f->dump_string("instance", key.instance);
+ }
+ static void generate_test_instances(list<cls_rgw_obj*>& ls) {
+ ls.push_back(new cls_rgw_obj);
+ ls.push_back(new cls_rgw_obj);
+ ls.back()->pool = "mypool";
+ ls.back()->key.name = "myoid";
+ ls.back()->loc = "mykey";
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_obj)
+
+struct cls_rgw_obj_chain {
+ list<cls_rgw_obj> objs;
+
+ cls_rgw_obj_chain() {}
+
+ void push_obj(const string& pool, const cls_rgw_obj_key& key, const string& loc) {
+ cls_rgw_obj obj;
+ obj.pool = pool;
+ obj.key = key;
+ obj.loc = loc;
+ objs.push_back(obj);
+ }
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(objs, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(objs, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const {
+ f->open_array_section("objs");
+ for (list<cls_rgw_obj>::const_iterator p = objs.begin(); p != objs.end(); ++p) {
+ f->open_object_section("obj");
+ p->dump(f);
+ f->close_section();
+ }
+ f->close_section();
+ }
+ static void generate_test_instances(list<cls_rgw_obj_chain*>& ls) {
+ ls.push_back(new cls_rgw_obj_chain);
+ }
+
+ bool empty() {
+ return objs.empty();
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_obj_chain)
+
+struct cls_rgw_gc_obj_info
+{
+ string tag;
+ cls_rgw_obj_chain chain;
+ ceph::real_time time;
+
+ cls_rgw_gc_obj_info() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(tag, bl);
+ encode(chain, bl);
+ encode(time, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(tag, bl);
+ decode(chain, bl);
+ decode(time, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const {
+ f->dump_string("tag", tag);
+ f->open_object_section("chain");
+ chain.dump(f);
+ f->close_section();
+ f->dump_stream("time") << time;
+ }
+ static void generate_test_instances(list<cls_rgw_gc_obj_info*>& ls) {
+ ls.push_back(new cls_rgw_gc_obj_info);
+ ls.push_back(new cls_rgw_gc_obj_info);
+ ls.back()->tag = "footag";
+ ceph_timespec ts{init_le32(21), init_le32(32)};
+ ls.back()->time = ceph::real_clock::from_ceph_timespec(ts);
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_gc_obj_info)
+
+struct cls_rgw_lc_obj_head
+{
+ time_t start_date = 0;
+ string marker;
+
+ cls_rgw_lc_obj_head() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ uint64_t t = start_date;
+ encode(t, bl);
+ encode(marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ uint64_t t;
+ decode(t, bl);
+ start_date = static_cast<time_t>(t);
+ decode(marker, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_rgw_lc_obj_head*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_obj_head)
+
+struct cls_rgw_reshard_entry
+{
+ ceph::real_time time;
+ string tenant;
+ string bucket_name;
+ string bucket_id;
+ string new_instance_id;
+ uint32_t old_num_shards{0};
+ uint32_t new_num_shards{0};
+
+ cls_rgw_reshard_entry() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(time, bl);
+ encode(tenant, bl);
+ encode(bucket_name, bl);
+ encode(bucket_id, bl);
+ encode(new_instance_id, bl);
+ encode(old_num_shards, bl);
+ encode(new_num_shards, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(time, bl);
+ decode(tenant, bl);
+ decode(bucket_name, bl);
+ decode(bucket_id, bl);
+ decode(new_instance_id, bl);
+ decode(old_num_shards, bl);
+ decode(new_num_shards, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_rgw_reshard_entry*>& o);
+
+ static void generate_key(const string& tenant, const string& bucket_name, string *key);
+ void get_key(string *key) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_entry)
+
+#endif
diff --git a/src/cls/sdk/cls_sdk.cc b/src/cls/sdk/cls_sdk.cc
new file mode 100644
index 00000000..95b5096a
--- /dev/null
+++ b/src/cls/sdk/cls_sdk.cc
@@ -0,0 +1,131 @@
+/*
+ * This is an example RADOS object class built using only the Ceph SDK interface.
+ */
+#include "include/rados/objclass.h"
+
+CLS_VER(1,0)
+CLS_NAME(sdk)
+
+cls_handle_t h_class;
+cls_method_handle_t h_test_coverage_write;
+cls_method_handle_t h_test_coverage_replay;
+
+/**
+ * test_coverage_write - a "write" method that creates an object
+ *
+ * This method modifies the object by making multiple write calls (write,
+ * setxattr and set_val).
+ */
+static int test_coverage_write(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ // create the object
+ int ret = cls_cxx_create(hctx, false);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s(): cls_cxx_create returned %d", __func__, ret);
+ return ret;
+ }
+
+ uint64_t size;
+ // get the size of the object
+ ret = cls_cxx_stat(hctx, &size, NULL);
+ if (ret < 0)
+ return ret;
+
+ std::string c = "test";
+ bufferlist bl;
+ bl.append(c);
+
+ // write to the object
+ ret = cls_cxx_write(hctx, 0, bl.length(), &bl);
+ if (ret < 0)
+ return ret;
+
+ uint64_t new_size;
+ // get the new size of the object
+ ret = cls_cxx_stat(hctx, &new_size, NULL);
+ if (ret < 0)
+ return ret;
+
+ // make some change to the xattr
+ ret = cls_cxx_setxattr(hctx, "foo", &bl);
+ if (ret < 0)
+ return ret;
+
+ // make some change to the omap
+ ret = cls_cxx_map_set_val(hctx, "foo", &bl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * test_coverage_replay - a "read" method to retrieve previously written data
+ *
+ * This method reads the object by making multiple read calls (read, getxattr
+ * and get_val). It also removes the object after reading.
+ */
+
+static int test_coverage_replay(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(0, "reading already written object");
+ uint64_t size;
+ // get the size of the object
+ int ret = cls_cxx_stat(hctx, &size, NULL);
+ if (ret < 0)
+ return ret;
+
+ bufferlist bl;
+ // read the object entry
+ ret = cls_cxx_read(hctx, 0, size, &bl);
+ if (ret < 0)
+ return ret;
+
+ // if the size is incorrect
+ if (bl.length() != size)
+ return -EIO;
+
+ bl.clear();
+
+ // read xattr entry
+ ret = cls_cxx_getxattr(hctx, "foo", &bl);
+ if (ret < 0)
+ return ret;
+
+ // if the size is incorrect
+ if (bl.length() != size)
+ return -EIO;
+
+ bl.clear();
+
+ // read omap entry
+ ret = cls_cxx_map_get_val(hctx, "foo", &bl);
+ if (ret < 0)
+ return ret;
+
+ // if the size is incorrect
+ if (bl.length() != size)
+ return -EIO;
+
+ // remove the object
+ ret = cls_cxx_remove(hctx);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+CLS_INIT(sdk)
+{
+ CLS_LOG(0, "loading cls_sdk");
+
+ cls_register("sdk", &h_class);
+
+ cls_register_cxx_method(h_class, "test_coverage_write",
+ CLS_METHOD_RD|CLS_METHOD_WR,
+ test_coverage_write, &h_test_coverage_write);
+
+ cls_register_cxx_method(h_class, "test_coverage_replay",
+ CLS_METHOD_RD|CLS_METHOD_WR,
+ test_coverage_replay, &h_test_coverage_replay);
+}
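+
+/*
+ * Illustrative sketch (not part of the upstream class): how a librados client
+ * could exercise the two methods registered above. The object name "myobject"
+ * and the surrounding IoCtx are assumptions made for this example only.
+ *
+ *   librados::bufferlist in, out;
+ *   // "write" method: creates the object and touches data, an xattr and an omap value
+ *   int r = io_ctx.exec("myobject", "sdk", "test_coverage_write", in, out);
+ *   if (r >= 0) {
+ *     // "read" method: reads everything back, then removes the object
+ *     r = io_ctx.exec("myobject", "sdk", "test_coverage_replay", in, out);
+ *   }
+ */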
diff --git a/src/cls/timeindex/cls_timeindex.cc b/src/cls/timeindex/cls_timeindex.cc
new file mode 100644
index 00000000..92ea15be
--- /dev/null
+++ b/src/cls/timeindex/cls_timeindex.cc
@@ -0,0 +1,261 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <errno.h>
+
+#include "objclass/objclass.h"
+
+#include "cls_timeindex_ops.h"
+
+#include "include/compat.h"
+
+CLS_VER(1,0)
+CLS_NAME(timeindex)
+
+static const size_t MAX_LIST_ENTRIES = 1000;
+static const size_t MAX_TRIM_ENTRIES = 1000;
+
+static const string TIMEINDEX_PREFIX = "1_";
+
+static void get_index_time_prefix(const utime_t& ts,
+ string& index)
+{
+ char buf[32];
+
+ snprintf(buf, sizeof(buf), "%s%010ld.%06ld_", TIMEINDEX_PREFIX.c_str(),
+ (long)ts.sec(), (long)ts.usec());
+ buf[sizeof(buf) - 1] = '\0';
+
+ index = buf;
+}
+
+static void get_index(cls_method_context_t hctx,
+ const utime_t& key_ts,
+ const string& key_ext,
+ string& index)
+{
+ get_index_time_prefix(key_ts, index);
+ index.append(key_ext);
+}
+
+static int parse_index(const string& index,
+ utime_t& key_ts,
+ string& key_ext)
+{
+ int sec, usec;
+ char keyext[256];
+
+ int ret = sscanf(index.c_str(), "1_%d.%d_%255s", &sec, &usec, keyext);
+
+ key_ts = utime_t(sec, usec);
+ key_ext = string(keyext);
+ return ret;
+}
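+
+/*
+ * Illustrative note (not part of the upstream code): with the helpers above,
+ * an entry with ts = 1600000000.123456 and key_ext = "foo" is stored under the
+ * omap key "1_1600000000.123456_foo" (prefix, zero-padded seconds, microseconds,
+ * then the extension), which parse_index() splits back apart, e.g.:
+ *
+ *   std::string index;
+ *   get_index_time_prefix(utime_t(1600000000, 123456), index);
+ *   index.append("foo");   // index == "1_1600000000.123456_foo"
+ */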
+
+static int cls_timeindex_add(cls_method_context_t hctx,
+ bufferlist * const in,
+ bufferlist * const out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_timeindex_add_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_timeindex_add_op(): failed to decode op");
+ return -EINVAL;
+ }
+
+ for (list<cls_timeindex_entry>::iterator iter = op.entries.begin();
+ iter != op.entries.end();
+ ++iter) {
+ cls_timeindex_entry& entry = *iter;
+
+ string index;
+ get_index(hctx, entry.key_ts, entry.key_ext, index);
+
+ CLS_LOG(20, "storing entry at %s", index.c_str());
+
+ int ret = cls_cxx_map_set_val(hctx, index, &entry.value);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cls_timeindex_list(cls_method_context_t hctx,
+ bufferlist * const in,
+ bufferlist * const out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_timeindex_list_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_timeindex_list_op(): failed to decode op");
+ return -EINVAL;
+ }
+
+ map<string, bufferlist> keys;
+
+ string from_index;
+ string to_index;
+
+ if (op.marker.empty()) {
+ get_index_time_prefix(op.from_time, from_index);
+ } else {
+ from_index = op.marker;
+ }
+ const bool use_time_boundary = (op.to_time >= op.from_time);
+
+ if (use_time_boundary) {
+ get_index_time_prefix(op.to_time, to_index);
+ }
+
+ size_t max_entries = op.max_entries;
+ if (max_entries > MAX_LIST_ENTRIES) {
+ max_entries = MAX_LIST_ENTRIES;
+ }
+
+ cls_timeindex_list_ret ret;
+
+ int rc = cls_cxx_map_get_vals(hctx, from_index, TIMEINDEX_PREFIX,
+ max_entries, &keys, &ret.truncated);
+ if (rc < 0) {
+ return rc;
+ }
+
+ list<cls_timeindex_entry>& entries = ret.entries;
+ map<string, bufferlist>::iterator iter = keys.begin();
+
+ string marker;
+
+ for (; iter != keys.end(); ++iter) {
+ const string& index = iter->first;
+ bufferlist& bl = iter->second;
+
+ if (use_time_boundary && index.compare(0, to_index.size(), to_index) >= 0) {
+ CLS_LOG(20, "DEBUG: cls_timeindex_list: finishing on to_index=%s",
+ to_index.c_str());
+ ret.truncated = false;
+ break;
+ }
+
+ cls_timeindex_entry e;
+
+ if (parse_index(index, e.key_ts, e.key_ext) < 0) {
+ CLS_LOG(0, "ERROR: cls_timeindex_list: could not parse index=%s",
+ index.c_str());
+ } else {
+ CLS_LOG(20, "DEBUG: cls_timeindex_list: index=%s, key_ext=%s, bl.len = %d",
+ index.c_str(), e.key_ext.c_str(), bl.length());
+ e.value = bl;
+ entries.push_back(e);
+ }
+ marker = index;
+ }
+
+ ret.marker = marker;
+
+ encode(ret, *out);
+
+ return 0;
+}
+
+
+static int cls_timeindex_trim(cls_method_context_t hctx,
+ bufferlist * const in,
+ bufferlist * const out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_timeindex_trim_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_timeindex_trim: failed to decode entry");
+ return -EINVAL;
+ }
+
+ map<string, bufferlist> keys;
+
+ string from_index;
+ string to_index;
+
+ if (op.from_marker.empty()) {
+ get_index_time_prefix(op.from_time, from_index);
+ } else {
+ from_index = op.from_marker;
+ }
+
+ if (op.to_marker.empty()) {
+ get_index_time_prefix(op.to_time, to_index);
+ } else {
+ to_index = op.to_marker;
+ }
+
+ bool more;
+
+ int rc = cls_cxx_map_get_vals(hctx, from_index, TIMEINDEX_PREFIX,
+ MAX_TRIM_ENTRIES, &keys, &more);
+ if (rc < 0) {
+ return rc;
+ }
+
+ map<string, bufferlist>::iterator iter = keys.begin();
+
+ bool removed = false;
+ for (; iter != keys.end(); ++iter) {
+ const string& index = iter->first;
+
+ CLS_LOG(20, "index=%s to_index=%s", index.c_str(), to_index.c_str());
+
+ if (index.compare(0, to_index.size(), to_index) > 0) {
+ CLS_LOG(20, "DEBUG: cls_timeindex_trim: finishing on to_index=%s",
+ to_index.c_str());
+ break;
+ }
+
+ CLS_LOG(20, "removing key: index=%s", index.c_str());
+
+ int rc = cls_cxx_map_remove_key(hctx, index);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: cls_cxx_map_remove_key failed rc=%d", rc);
+ return rc;
+ }
+
+ removed = true;
+ }
+
+ if (!removed) {
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+CLS_INIT(timeindex)
+{
+ CLS_LOG(1, "Loaded timeindex class!");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_timeindex_add;
+ cls_method_handle_t h_timeindex_list;
+ cls_method_handle_t h_timeindex_trim;
+
+ cls_register("timeindex", &h_class);
+
+ /* timeindex */
+ cls_register_cxx_method(h_class, "add", CLS_METHOD_RD | CLS_METHOD_WR,
+ cls_timeindex_add, &h_timeindex_add);
+ cls_register_cxx_method(h_class, "list", CLS_METHOD_RD,
+ cls_timeindex_list, &h_timeindex_list);
+ cls_register_cxx_method(h_class, "trim", CLS_METHOD_RD | CLS_METHOD_WR,
+ cls_timeindex_trim, &h_timeindex_trim);
+
+ return;
+}
+
diff --git a/src/cls/timeindex/cls_timeindex_client.cc b/src/cls/timeindex/cls_timeindex_client.cc
new file mode 100644
index 00000000..7a38ff5f
--- /dev/null
+++ b/src/cls/timeindex/cls_timeindex_client.cc
@@ -0,0 +1,120 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <errno.h>
+
+#include "cls/timeindex/cls_timeindex_ops.h"
+#include "cls/timeindex/cls_timeindex_client.h"
+#include "include/compat.h"
+
+void cls_timeindex_add(
+ librados::ObjectWriteOperation& op,
+ std::list<cls_timeindex_entry>& entries)
+{
+ librados::bufferlist in;
+ cls_timeindex_add_op call;
+ call.entries = entries;
+
+ encode(call, in);
+ op.exec("timeindex", "add", in);
+}
+
+void cls_timeindex_add(
+ librados::ObjectWriteOperation& op,
+ cls_timeindex_entry& entry)
+{
+ librados::bufferlist in;
+ cls_timeindex_add_op call;
+ call.entries.push_back(entry);
+
+ encode(call, in);
+ op.exec("timeindex", "add", in);
+}
+
+void cls_timeindex_add_prepare_entry(
+ cls_timeindex_entry& entry,
+ const utime_t& key_timestamp,
+ const std::string& key_ext,
+ const librados::bufferlist& bl)
+{
+ entry.key_ts = key_timestamp;
+ entry.key_ext = key_ext;
+ entry.value = bl;
+}
+
+void cls_timeindex_add(
+ librados::ObjectWriteOperation& op,
+ const utime_t& key_timestamp,
+ const std::string& key_ext,
+ const librados::bufferlist& bl)
+{
+ cls_timeindex_entry entry;
+ cls_timeindex_add_prepare_entry(entry, key_timestamp, key_ext, bl);
+ cls_timeindex_add(op, entry);
+}
+
+void cls_timeindex_trim(
+ librados::ObjectWriteOperation& op,
+ const utime_t& from_time,
+ const utime_t& to_time,
+ const std::string& from_marker,
+ const std::string& to_marker)
+{
+ librados::bufferlist in;
+ cls_timeindex_trim_op call;
+ call.from_time = from_time;
+ call.to_time = to_time;
+ call.from_marker = from_marker;
+ call.to_marker = to_marker;
+
+ encode(call, in);
+
+ op.exec("timeindex", "trim", in);
+}
+
+int cls_timeindex_trim(
+ librados::IoCtx& io_ctx,
+ const std::string& oid,
+ const utime_t& from_time,
+ const utime_t& to_time,
+ const std::string& from_marker,
+ const std::string& to_marker)
+{
+ bool done = false;
+
+ do {
+ librados::ObjectWriteOperation op;
+ cls_timeindex_trim(op, from_time, to_time, from_marker, to_marker);
+ int r = io_ctx.operate(oid, &op);
+
+ if (r == -ENODATA)
+ done = true;
+ else if (r < 0)
+ return r;
+ } while (!done);
+
+ return 0;
+}
+
+void cls_timeindex_list(
+ librados::ObjectReadOperation& op,
+ const utime_t& from,
+ const utime_t& to,
+ const std::string& in_marker,
+ const int max_entries,
+ std::list<cls_timeindex_entry>& entries,
+ std::string *out_marker,
+ bool *truncated)
+{
+ librados::bufferlist in;
+ cls_timeindex_list_op call;
+ call.from_time = from;
+ call.to_time = to;
+ call.marker = in_marker;
+ call.max_entries = max_entries;
+
+ encode(call, in);
+
+ op.exec("timeindex", "list", in,
+ new TimeindexListCtx(&entries, out_marker, truncated));
+}
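+
+/*
+ * Illustrative sketch (not part of the upstream code): typical use of the
+ * helpers above against an index object. The object name "time_index" and the
+ * IoCtx are assumptions made for this example only.
+ *
+ *   // add one entry
+ *   librados::ObjectWriteOperation wop;
+ *   librados::bufferlist value;
+ *   value.append("payload");
+ *   cls_timeindex_add(wop, ceph_clock_now(), "my-key", value);
+ *   int r = io_ctx.operate("time_index", &wop);
+ *
+ *   // list entries back; results are filled in when the op completes
+ *   std::list<cls_timeindex_entry> entries;
+ *   std::string marker;
+ *   bool truncated = false;
+ *   librados::ObjectReadOperation rop;
+ *   cls_timeindex_list(rop, utime_t(), ceph_clock_now(), "", 100,
+ *                      entries, &marker, &truncated);
+ *   r = io_ctx.operate("time_index", &rop, nullptr);
+ */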
diff --git a/src/cls/timeindex/cls_timeindex_client.h b/src/cls/timeindex/cls_timeindex_client.h
new file mode 100644
index 00000000..fc57a160
--- /dev/null
+++ b/src/cls/timeindex/cls_timeindex_client.h
@@ -0,0 +1,93 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_TIMEINDEX_CLIENT_H
+#define CEPH_CLS_TIMEINDEX_CLIENT_H
+
+#include "include/rados/librados.hpp"
+
+#include "cls_timeindex_ops.h"
+
+/**
+ * timeindex objclass
+ */
+class TimeindexListCtx : public librados::ObjectOperationCompletion {
+ std::list<cls_timeindex_entry> *entries;
+ std::string *marker;
+ bool *truncated;
+
+public:
+  /// constructor
+ TimeindexListCtx(
+ std::list<cls_timeindex_entry> *_entries,
+ std::string *_marker,
+ bool *_truncated)
+ : entries(_entries), marker(_marker), truncated(_truncated) {}
+
+  /// destructor
+ ~TimeindexListCtx() {}
+
+ void handle_completion(int r, bufferlist& bl) override {
+ if (r >= 0) {
+ cls_timeindex_list_ret ret;
+ try {
+ auto iter = bl.cbegin();
+ decode(ret, iter);
+ if (entries)
+ *entries = ret.entries;
+ if (truncated)
+ *truncated = ret.truncated;
+ if (marker)
+ *marker = ret.marker;
+ } catch (buffer::error& err) {
+ // nothing we can do about it atm
+ }
+ }
+ }
+};
+
+void cls_timeindex_add_prepare_entry(
+ cls_timeindex_entry& entry,
+ const utime_t& key_timestamp,
+ const std::string& key_ext,
+ bufferlist& bl);
+
+void cls_timeindex_add(
+ librados::ObjectWriteOperation& op,
+ const std::list<cls_timeindex_entry>& entry);
+
+void cls_timeindex_add(
+ librados::ObjectWriteOperation& op,
+ const cls_timeindex_entry& entry);
+
+void cls_timeindex_add(
+ librados::ObjectWriteOperation& op,
+ const utime_t& timestamp,
+ const std::string& name,
+ const bufferlist& bl);
+
+void cls_timeindex_list(
+ librados::ObjectReadOperation& op,
+ const utime_t& from,
+ const utime_t& to,
+ const std::string& in_marker,
+ const int max_entries,
+ std::list<cls_timeindex_entry>& entries,
+ std::string *out_marker,
+ bool *truncated);
+
+void cls_timeindex_trim(
+ librados::ObjectWriteOperation& op,
+ const utime_t& from_time,
+ const utime_t& to_time,
+ const std::string& from_marker = std::string(),
+ const std::string& to_marker = std::string());
+
+int cls_timeindex_trim(
+ librados::IoCtx& io_ctx,
+ const std::string& oid,
+ const utime_t& from_time,
+ const utime_t& to_time,
+ const std::string& from_marker = std::string(),
+ const std::string& to_marker = std::string());
+#endif
diff --git a/src/cls/timeindex/cls_timeindex_ops.h b/src/cls/timeindex/cls_timeindex_ops.h
new file mode 100644
index 00000000..05aa8b19
--- /dev/null
+++ b/src/cls/timeindex/cls_timeindex_ops.h
@@ -0,0 +1,115 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_TIMEINDEX_OPS_H
+#define CEPH_CLS_TIMEINDEX_OPS_H
+
+#include "cls_timeindex_types.h"
+
+struct cls_timeindex_add_op {
+ list<cls_timeindex_entry> entries;
+
+ cls_timeindex_add_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_timeindex_add_op)
+
+struct cls_timeindex_list_op {
+ utime_t from_time;
+ string marker; /* if not empty, overrides from_time */
+ utime_t to_time; /* not inclusive */
+  int max_entries; /* upper bound on the number of entries returned;
+                      the call may return fewer and still be truncated */
+
+ cls_timeindex_list_op() : max_entries(0) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(from_time, bl);
+ encode(marker, bl);
+ encode(to_time, bl);
+ encode(max_entries, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(from_time, bl);
+ decode(marker, bl);
+ decode(to_time, bl);
+ decode(max_entries, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_timeindex_list_op)
+
+struct cls_timeindex_list_ret {
+ list<cls_timeindex_entry> entries;
+ string marker;
+ bool truncated;
+
+ cls_timeindex_list_ret() : truncated(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ encode(marker, bl);
+ encode(truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ decode(marker, bl);
+ decode(truncated, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_timeindex_list_ret)
+
+
+/*
+ * The operation returns 0 when entries were removed but trimming is not yet
+ * complete. It returns -ENODATA once there is nothing left to trim, so the
+ * caller needs to resend the request until -ENODATA is returned.
+ */
+struct cls_timeindex_trim_op {
+ utime_t from_time;
+ utime_t to_time; /* inclusive */
+ string from_marker;
+ string to_marker;
+
+ cls_timeindex_trim_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(from_time, bl);
+ encode(to_time, bl);
+ encode(from_marker, bl);
+ encode(to_marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(from_time, bl);
+ decode(to_time, bl);
+ decode(from_marker, bl);
+ decode(to_marker, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_timeindex_trim_op)
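+
+/*
+ * Illustrative sketch (not part of the upstream header): the retry pattern the
+ * comment above implies, assuming an IoCtx and an index object named "oid".
+ *
+ *   int r;
+ *   do {
+ *     librados::ObjectWriteOperation op;
+ *     cls_timeindex_trim(op, from_time, to_time);
+ *     r = io_ctx.operate(oid, &op);
+ *   } while (r == 0);
+ *   // r == -ENODATA means trimming is complete; any other negative value is an error
+ */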
+
+#endif /* CEPH_CLS_TIMEINDEX_OPS_H */
diff --git a/src/cls/timeindex/cls_timeindex_types.h b/src/cls/timeindex/cls_timeindex_types.h
new file mode 100644
index 00000000..2b381767
--- /dev/null
+++ b/src/cls/timeindex/cls_timeindex_types.h
@@ -0,0 +1,43 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_TIMEINDEX_TYPES_H
+#define CEPH_CLS_TIMEINDEX_TYPES_H
+
+#include "include/encoding.h"
+#include "include/types.h"
+
+#include "include/utime.h"
+
+class JSONObj;
+
+struct cls_timeindex_entry {
+ /* Mandatory timestamp. Will be part of the key. */
+ utime_t key_ts;
+  /* Not mandatory. The key_ext field, if not empty, forms the second
+   * part of the key. */
+  string key_ext;
+  /* Becomes the value of the OMAP-based mapping. */
+ bufferlist value;
+
+ cls_timeindex_entry() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(key_ts, bl);
+ encode(key_ext, bl);
+ encode(value, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(key_ts, bl);
+ decode(key_ext, bl);
+ decode(value, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_timeindex_entry)
+
+#endif /* CEPH_CLS_TIMEINDEX_TYPES_H */
diff --git a/src/cls/user/cls_user.cc b/src/cls/user/cls_user.cc
new file mode 100644
index 00000000..d3015a12
--- /dev/null
+++ b/src/cls/user/cls_user.cc
@@ -0,0 +1,455 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <errno.h>
+
+#include "include/utime.h"
+#include "objclass/objclass.h"
+
+#include "cls_user_ops.h"
+
+CLS_VER(1,0)
+CLS_NAME(user)
+
+static int write_entry(cls_method_context_t hctx, const string& key, const cls_user_bucket_entry& entry)
+{
+ bufferlist bl;
+ encode(entry, bl);
+
+ int ret = cls_cxx_map_set_val(hctx, key, &bl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int remove_entry(cls_method_context_t hctx, const string& key)
+{
+ int ret = cls_cxx_map_remove_key(hctx, key);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void get_key_by_bucket_name(const string& bucket_name, string *key)
+{
+ *key = bucket_name;
+}
+
+static int get_existing_bucket_entry(cls_method_context_t hctx, const string& bucket_name,
+ cls_user_bucket_entry& entry)
+{
+ if (bucket_name.empty()) {
+ return -EINVAL;
+ }
+
+ string key;
+ get_key_by_bucket_name(bucket_name, &key);
+
+ bufferlist bl;
+ int rc = cls_cxx_map_get_val(hctx, key, &bl);
+ if (rc < 0) {
+ CLS_LOG(10, "could not read entry %s", key.c_str());
+ return rc;
+ }
+ try {
+ auto iter = bl.cbegin();
+ decode(entry, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: failed to decode entry %s", key.c_str());
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int read_header(cls_method_context_t hctx, cls_user_header *header)
+{
+ bufferlist bl;
+
+ int ret = cls_cxx_map_read_header(hctx, &bl);
+ if (ret < 0)
+ return ret;
+
+ if (bl.length() == 0) {
+ *header = cls_user_header();
+ return 0;
+ }
+
+ try {
+ decode(*header, bl);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: failed to decode user header");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void add_header_stats(cls_user_stats *stats, cls_user_bucket_entry& entry)
+{
+ stats->total_entries += entry.count;
+ stats->total_bytes += entry.size;
+ stats->total_bytes_rounded += entry.size_rounded;
+}
+
+static void dec_header_stats(cls_user_stats *stats, cls_user_bucket_entry& entry)
+{
+ stats->total_bytes -= entry.size;
+ stats->total_bytes_rounded -= entry.size_rounded;
+ stats->total_entries -= entry.count;
+}
+
+static void apply_entry_stats(const cls_user_bucket_entry& src_entry, cls_user_bucket_entry *target_entry)
+{
+ target_entry->size = src_entry.size;
+ target_entry->size_rounded = src_entry.size_rounded;
+ target_entry->count = src_entry.count;
+}
+
+static int cls_user_set_buckets_info(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_user_set_buckets_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_user_add_op(): failed to decode op");
+ return -EINVAL;
+ }
+
+ cls_user_header header;
+ int ret = read_header(hctx, &header);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: failed to read user info header ret=%d", ret);
+ return ret;
+ }
+
+ for (list<cls_user_bucket_entry>::iterator iter = op.entries.begin();
+ iter != op.entries.end(); ++iter) {
+ cls_user_bucket_entry& update_entry = *iter;
+
+ string key;
+
+ get_key_by_bucket_name(update_entry.bucket.name, &key);
+
+ cls_user_bucket_entry entry;
+ ret = get_existing_bucket_entry(hctx, key, entry);
+
+ if (ret == -ENOENT) {
+ if (!op.add)
+ continue; /* racing bucket removal */
+
+ entry = update_entry;
+
+ ret = 0;
+ } else if (op.add) {
+ // bucket id may have changed (ie reshard)
+ entry.bucket.bucket_id = update_entry.bucket.bucket_id;
+ // creation date may have changed (ie delete/recreate bucket)
+ entry.creation_time = update_entry.creation_time;
+ }
+
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: get_existing_bucket_entry() key=%s returned %d", key.c_str(), ret);
+ return ret;
+ } else if (ret >= 0 && entry.user_stats_sync) {
+ dec_header_stats(&header.stats, entry);
+ }
+
+ CLS_LOG(20, "storing entry for key=%s size=%lld count=%lld",
+ key.c_str(), (long long)update_entry.size, (long long)update_entry.count);
+
+    // Only sync the entry stats when this is not an op.add: for op.add, a new
+    // entry has already been copied from update_entry above, while for an
+    // existing entry we would otherwise clobber its current stats.
+ if (!op.add){
+ apply_entry_stats(update_entry, &entry);
+ }
+ entry.user_stats_sync = true;
+
+ ret = write_entry(hctx, key, entry);
+ if (ret < 0)
+ return ret;
+
+ add_header_stats(&header.stats, entry);
+ }
+
+ bufferlist bl;
+
+ CLS_LOG(20, "header: total bytes=%lld entries=%lld", (long long)header.stats.total_bytes, (long long)header.stats.total_entries);
+
+ if (header.last_stats_update < op.time)
+ header.last_stats_update = op.time;
+
+ encode(header, bl);
+
+ ret = cls_cxx_map_write_header(hctx, &bl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cls_user_complete_stats_sync(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_user_complete_stats_sync_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_user_add_op(): failed to decode op");
+ return -EINVAL;
+ }
+
+ cls_user_header header;
+ int ret = read_header(hctx, &header);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: failed to read user info header ret=%d", ret);
+ return ret;
+ }
+
+ if (header.last_stats_sync < op.time)
+ header.last_stats_sync = op.time;
+
+ bufferlist bl;
+
+ encode(header, bl);
+
+ ret = cls_cxx_map_write_header(hctx, &bl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cls_user_remove_bucket(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_user_remove_bucket_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_user_add_op(): failed to decode op");
+ return -EINVAL;
+ }
+
+ cls_user_header header;
+ int ret = read_header(hctx, &header);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: failed to read user info header ret=%d", ret);
+ return ret;
+ }
+
+ string key;
+
+ get_key_by_bucket_name(op.bucket.name, &key);
+
+ cls_user_bucket_entry entry;
+ ret = get_existing_bucket_entry(hctx, key, entry);
+ if (ret == -ENOENT) {
+ return 0; /* idempotent removal */
+ }
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: get existing bucket entry, key=%s ret=%d", key.c_str(), ret);
+ return ret;
+ }
+
+ CLS_LOG(20, "removing entry at %s", key.c_str());
+
+ ret = remove_entry(hctx, key);
+ if (ret < 0)
+ return ret;
+
+ if (!entry.user_stats_sync) {
+ return 0;
+ }
+
+ dec_header_stats(&header.stats, entry);
+
+ CLS_LOG(20, "header: total bytes=%lld entries=%lld", (long long)header.stats.total_bytes, (long long)header.stats.total_entries);
+
+ bufferlist bl;
+ encode(header, bl);
+ return cls_cxx_map_write_header(hctx, &bl);
+}
+
+static int cls_user_list_buckets(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_user_list_buckets_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_user_list_op(): failed to decode op");
+ return -EINVAL;
+ }
+
+ map<string, bufferlist> keys;
+
+ const string& from_index = op.marker;
+ const string& to_index = op.end_marker;
+ const bool to_index_valid = !to_index.empty();
+
+#define MAX_ENTRIES 1000
+ size_t max_entries = op.max_entries;
+ if (max_entries > MAX_ENTRIES)
+ max_entries = MAX_ENTRIES;
+
+ string match_prefix;
+ cls_user_list_buckets_ret ret;
+
+ int rc = cls_cxx_map_get_vals(hctx, from_index, match_prefix, max_entries, &keys, &ret.truncated);
+ if (rc < 0)
+ return rc;
+
+ CLS_LOG(20, "from_index=%s to_index=%s match_prefix=%s",
+ from_index.c_str(),
+ to_index.c_str(),
+ match_prefix.c_str());
+
+ list<cls_user_bucket_entry>& entries = ret.entries;
+ map<string, bufferlist>::iterator iter = keys.begin();
+
+ string marker;
+
+ for (; iter != keys.end(); ++iter) {
+ const string& index = iter->first;
+ marker = index;
+
+ if (to_index_valid && to_index.compare(index) <= 0) {
+ ret.truncated = false;
+ break;
+ }
+
+ bufferlist& bl = iter->second;
+ auto biter = bl.cbegin();
+ try {
+ cls_user_bucket_entry e;
+ decode(e, biter);
+ entries.push_back(e);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: cls_user_list: could not decode entry, index=%s", index.c_str());
+ }
+ }
+
+ if (ret.truncated) {
+ ret.marker = marker;
+ }
+
+ encode(ret, *out);
+
+ return 0;
+}
+
+static int cls_user_get_header(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_user_get_header_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_user_get_header_op(): failed to decode op");
+ return -EINVAL;
+ }
+
+ cls_user_get_header_ret op_ret;
+
+ int ret = read_header(hctx, &op_ret.header);
+ if (ret < 0)
+ return ret;
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+/// A method to reset the user.buckets header stats in accordance with
+/// the values seen in the user.buckets omap keys. This is not
+/// equivalent to --sync-stats, which also re-calculates the stats for
+/// each bucket.
+static int cls_user_reset_stats(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out /*ignore*/)
+{
+ cls_user_reset_stats_op op;
+
+ try {
+ auto bliter = in->cbegin();
+ decode(op, bliter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s failed to decode op", __func__);
+ return -EINVAL;
+ }
+
+ cls_user_header header;
+ bool truncated = false;
+ string from_index, prefix;
+ do {
+ map<string, bufferlist> keys;
+ int rc = cls_cxx_map_get_vals(hctx, from_index, prefix, MAX_ENTRIES,
+ &keys, &truncated);
+ if (rc < 0) {
+ CLS_LOG(0, "ERROR: %s failed to retrieve omap key-values", __func__);
+ return rc;
+ }
+ CLS_LOG(20, "%s: read %lu key-values, truncated=%d",
+ __func__, keys.size(), truncated);
+
+ for (const auto& kv : keys) {
+ cls_user_bucket_entry e;
+ try {
+ auto bl = kv.second;
+ auto bliter = bl.cbegin();
+ decode(e, bliter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s failed to decode bucket entry for %s",
+ __func__, kv.first.c_str());
+ return -EIO;
+ }
+ add_header_stats(&header.stats, e);
+ }
+ if (!keys.empty()) {
+ from_index = keys.rbegin()->first;
+ }
+ } while (truncated);
+
+ bufferlist bl;
+ header.last_stats_update = op.time;
+ encode(header, bl);
+
+ CLS_LOG(20, "%s: updating header", __func__);
+ return cls_cxx_map_write_header(hctx, &bl);
+}
+
+CLS_INIT(user)
+{
+ CLS_LOG(1, "Loaded user class!");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_user_set_buckets_info;
+ cls_method_handle_t h_user_complete_stats_sync;
+ cls_method_handle_t h_user_remove_bucket;
+ cls_method_handle_t h_user_list_buckets;
+ cls_method_handle_t h_user_get_header;
+ cls_method_handle_t h_user_reset_stats;
+
+ cls_register("user", &h_class);
+
+ /* log */
+ cls_register_cxx_method(h_class, "set_buckets_info", CLS_METHOD_RD | CLS_METHOD_WR,
+ cls_user_set_buckets_info, &h_user_set_buckets_info);
+ cls_register_cxx_method(h_class, "complete_stats_sync", CLS_METHOD_RD | CLS_METHOD_WR,
+ cls_user_complete_stats_sync, &h_user_complete_stats_sync);
+ cls_register_cxx_method(h_class, "remove_bucket", CLS_METHOD_RD | CLS_METHOD_WR, cls_user_remove_bucket, &h_user_remove_bucket);
+ cls_register_cxx_method(h_class, "list_buckets", CLS_METHOD_RD, cls_user_list_buckets, &h_user_list_buckets);
+ cls_register_cxx_method(h_class, "get_header", CLS_METHOD_RD, cls_user_get_header, &h_user_get_header);
+ cls_register_cxx_method(h_class, "reset_user_stats", CLS_METHOD_RD | CLS_METHOD_WR, cls_user_reset_stats, &h_user_reset_stats);
+ return;
+}
+
diff --git a/src/cls/user/cls_user_client.cc b/src/cls/user/cls_user_client.cc
new file mode 100644
index 00000000..c18be4a2
--- /dev/null
+++ b/src/cls/user/cls_user_client.cc
@@ -0,0 +1,158 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <errno.h>
+
+#include "cls/user/cls_user_client.h"
+#include "include/rados/librados.hpp"
+
+
+using namespace librados;
+
+
+void cls_user_set_buckets(librados::ObjectWriteOperation& op, list<cls_user_bucket_entry>& entries, bool add)
+{
+ bufferlist in;
+ cls_user_set_buckets_op call;
+ call.entries = entries;
+ call.add = add;
+ call.time = real_clock::now();
+ encode(call, in);
+ op.exec("user", "set_buckets_info", in);
+}
+
+void cls_user_complete_stats_sync(librados::ObjectWriteOperation& op)
+{
+ bufferlist in;
+ cls_user_complete_stats_sync_op call;
+ call.time = real_clock::now();
+ encode(call, in);
+ op.exec("user", "complete_stats_sync", in);
+}
+
+void cls_user_remove_bucket(librados::ObjectWriteOperation& op, const cls_user_bucket& bucket)
+{
+ bufferlist in;
+ cls_user_remove_bucket_op call;
+ call.bucket = bucket;
+ encode(call, in);
+ op.exec("user", "remove_bucket", in);
+}
+
+class ClsUserListCtx : public ObjectOperationCompletion {
+ list<cls_user_bucket_entry> *entries;
+ string *marker;
+ bool *truncated;
+ int *pret;
+public:
+ ClsUserListCtx(list<cls_user_bucket_entry> *_entries, string *_marker, bool *_truncated, int *_pret) :
+ entries(_entries), marker(_marker), truncated(_truncated), pret(_pret) {}
+ void handle_completion(int r, bufferlist& outbl) override {
+ if (r >= 0) {
+ cls_user_list_buckets_ret ret;
+ try {
+ auto iter = outbl.cbegin();
+ decode(ret, iter);
+ if (entries)
+ *entries = ret.entries;
+ if (truncated)
+ *truncated = ret.truncated;
+ if (marker)
+ *marker = ret.marker;
+ } catch (buffer::error& err) {
+ r = -EIO;
+ }
+ }
+ if (pret) {
+ *pret = r;
+ }
+ }
+};
+
+void cls_user_bucket_list(librados::ObjectReadOperation& op,
+ const string& in_marker,
+ const string& end_marker,
+ int max_entries,
+ list<cls_user_bucket_entry>& entries,
+ string *out_marker,
+ bool *truncated,
+ int *pret)
+{
+ bufferlist inbl;
+ cls_user_list_buckets_op call;
+ call.marker = in_marker;
+ call.end_marker = end_marker;
+ call.max_entries = max_entries;
+
+ encode(call, inbl);
+
+ op.exec("user", "list_buckets", inbl, new ClsUserListCtx(&entries, out_marker, truncated, pret));
+}
+
+class ClsUserGetHeaderCtx : public ObjectOperationCompletion {
+ cls_user_header *header;
+ RGWGetUserHeader_CB *ret_ctx;
+ int *pret;
+public:
+ ClsUserGetHeaderCtx(cls_user_header *_h, RGWGetUserHeader_CB *_ctx, int *_pret) : header(_h), ret_ctx(_ctx), pret(_pret) {}
+ ~ClsUserGetHeaderCtx() override {
+ if (ret_ctx) {
+ ret_ctx->put();
+ }
+ }
+ void handle_completion(int r, bufferlist& outbl) override {
+ if (r >= 0) {
+ cls_user_get_header_ret ret;
+ try {
+ auto iter = outbl.cbegin();
+ decode(ret, iter);
+ if (header)
+ *header = ret.header;
+ } catch (buffer::error& err) {
+ r = -EIO;
+ }
+ if (ret_ctx) {
+ ret_ctx->handle_response(r, ret.header);
+ }
+ }
+ if (pret) {
+ *pret = r;
+ }
+ }
+};
+
+void cls_user_get_header(librados::ObjectReadOperation& op,
+ cls_user_header *header, int *pret)
+{
+ bufferlist inbl;
+ cls_user_get_header_op call;
+
+ encode(call, inbl);
+
+ op.exec("user", "get_header", inbl, new ClsUserGetHeaderCtx(header, NULL, pret));
+}
+
+void cls_user_reset_stats(librados::ObjectWriteOperation &op)
+{
+ bufferlist inbl;
+ cls_user_reset_stats_op call;
+ call.time = real_clock::now();
+ encode(call, inbl);
+ op.exec("user", "reset_user_stats", inbl);
+}
+
+int cls_user_get_header_async(IoCtx& io_ctx, string& oid, RGWGetUserHeader_CB *ctx)
+{
+ bufferlist in, out;
+ cls_user_get_header_op call;
+ encode(call, in);
+ ObjectReadOperation op;
+ op.exec("user", "get_header", in, new ClsUserGetHeaderCtx(NULL, ctx, NULL)); /* no need to pass pret, as we'll call ctx->handle_response() with correct error */
+ AioCompletion *c = librados::Rados::aio_create_completion(NULL, NULL, NULL);
+ int r = io_ctx.aio_operate(oid, c, &op, NULL);
+ c->release();
+ if (r < 0)
+ return r;
+
+ return 0;
+}
diff --git a/src/cls/user/cls_user_client.h b/src/cls/user/cls_user_client.h
new file mode 100644
index 00000000..077c15dc
--- /dev/null
+++ b/src/cls/user/cls_user_client.h
@@ -0,0 +1,36 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_USER_CLIENT_H
+#define CEPH_CLS_USER_CLIENT_H
+
+#include "include/rados/librados_fwd.hpp"
+#include "cls_user_ops.h"
+#include "common/RefCountedObj.h"
+
+class RGWGetUserHeader_CB : public RefCountedObject {
+public:
+ ~RGWGetUserHeader_CB() override {}
+ virtual void handle_response(int r, cls_user_header& header) = 0;
+};
+
+/*
+ * user objclass
+ */
+
+void cls_user_set_buckets(librados::ObjectWriteOperation& op, list<cls_user_bucket_entry>& entries, bool add);
+void cls_user_complete_stats_sync(librados::ObjectWriteOperation& op);
+void cls_user_remove_bucket(librados::ObjectWriteOperation& op, const cls_user_bucket& bucket);
+void cls_user_bucket_list(librados::ObjectReadOperation& op,
+ const string& in_marker,
+ const string& end_marker,
+ int max_entries,
+ list<cls_user_bucket_entry>& entries,
+ string *out_marker,
+ bool *truncated,
+ int *pret);
+void cls_user_get_header(librados::ObjectReadOperation& op, cls_user_header *header, int *pret);
+int cls_user_get_header_async(librados::IoCtx& io_ctx, string& oid, RGWGetUserHeader_CB *ctx);
+void cls_user_reset_stats(librados::ObjectWriteOperation& op);
+
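+/*
+ * Illustrative sketch (not part of the upstream header): reading the user
+ * header synchronously with the helpers above. The object name "user.buckets"
+ * and the IoCtx are assumptions made for this example only.
+ *
+ *   cls_user_header header;
+ *   int ret = 0;
+ *   librados::ObjectReadOperation op;
+ *   cls_user_get_header(op, &header, &ret);
+ *   librados::bufferlist unused;
+ *   int r = io_ctx.operate("user.buckets", &op, &unused);
+ *   if (r >= 0 && ret >= 0) {
+ *     // header.stats now holds the aggregated per-user totals
+ *   }
+ */
+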
+#endif
diff --git a/src/cls/user/cls_user_ops.cc b/src/cls/user/cls_user_ops.cc
new file mode 100644
index 00000000..65d889bb
--- /dev/null
+++ b/src/cls/user/cls_user_ops.cc
@@ -0,0 +1,114 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "cls/user/cls_user_ops.h"
+#include "common/Formatter.h"
+#include "common/ceph_json.h"
+
+void cls_user_set_buckets_op::dump(Formatter *f) const
+{
+ encode_json("entries", entries, f);
+ encode_json("add", add, f);
+ encode_json("time", utime_t(time), f);
+}
+
+void cls_user_set_buckets_op::generate_test_instances(list<cls_user_set_buckets_op*>& ls)
+{
+ ls.push_back(new cls_user_set_buckets_op);
+ cls_user_set_buckets_op *op = new cls_user_set_buckets_op;
+ for (int i = 0; i < 3; i++) {
+ cls_user_bucket_entry e;
+ cls_user_gen_test_bucket_entry(&e, i);
+ op->entries.push_back(e);
+ }
+ op->add = true;
+ op->time = utime_t(1, 0).to_real_time();
+ ls.push_back(op);
+}
+
+void cls_user_remove_bucket_op::dump(Formatter *f) const
+{
+ encode_json("bucket", bucket, f);
+}
+
+void cls_user_remove_bucket_op::generate_test_instances(list<cls_user_remove_bucket_op*>& ls)
+{
+ ls.push_back(new cls_user_remove_bucket_op);
+ cls_user_remove_bucket_op *op = new cls_user_remove_bucket_op;
+ cls_user_gen_test_bucket(&op->bucket, 0);
+ ls.push_back(op);
+}
+
+void cls_user_list_buckets_op::dump(Formatter *f) const
+{
+ encode_json("marker", marker, f);
+ encode_json("max_entries", max_entries, f);
+}
+
+void cls_user_list_buckets_op::generate_test_instances(list<cls_user_list_buckets_op*>& ls)
+{
+ ls.push_back(new cls_user_list_buckets_op);
+ cls_user_list_buckets_op *op = new cls_user_list_buckets_op;
+ op->marker = "marker";
+ op->max_entries = 1000;
+ ls.push_back(op);
+}
+
+void cls_user_list_buckets_ret::dump(Formatter *f) const
+{
+ encode_json("entries", entries, f);
+ encode_json("marker", marker, f);
+ encode_json("truncated", truncated, f);
+}
+
+void cls_user_list_buckets_ret::generate_test_instances(list<cls_user_list_buckets_ret*>& ls)
+{
+ ls.push_back(new cls_user_list_buckets_ret);
+ cls_user_list_buckets_ret *ret = new cls_user_list_buckets_ret;
+ for (int i = 0; i < 3; i++) {
+ cls_user_bucket_entry e;
+ cls_user_gen_test_bucket_entry(&e, i);
+ ret->entries.push_back(e);
+ }
+ ret->marker = "123";
+ ret->truncated = true;
+ ls.push_back(ret);
+}
+
+void cls_user_get_header_op::dump(Formatter *f) const
+{
+ // empty!
+}
+
+void cls_user_get_header_op::generate_test_instances(list<cls_user_get_header_op*>& ls)
+{
+ ls.push_back(new cls_user_get_header_op);
+}
+
+void cls_user_get_header_ret::dump(Formatter *f) const
+{
+ encode_json("header", header, f);
+}
+
+void cls_user_get_header_ret::generate_test_instances(list<cls_user_get_header_ret*>& ls)
+{
+ ls.push_back(new cls_user_get_header_ret);
+ cls_user_get_header_ret *ret = new cls_user_get_header_ret;
+ cls_user_gen_test_header(&ret->header);
+ ls.push_back(ret);
+}
+
+void cls_user_complete_stats_sync_op::dump(Formatter *f) const
+{
+ encode_json("time", utime_t(time), f);
+}
+
+void cls_user_complete_stats_sync_op::generate_test_instances(list<cls_user_complete_stats_sync_op*>& ls)
+{
+ ls.push_back(new cls_user_complete_stats_sync_op);
+ cls_user_complete_stats_sync_op *op = new cls_user_complete_stats_sync_op;
+ op->time = utime_t(12345, 0).to_real_time();
+ ls.push_back(op);
+}
+
+
diff --git a/src/cls/user/cls_user_ops.h b/src/cls/user/cls_user_ops.h
new file mode 100644
index 00000000..fa4b1f31
--- /dev/null
+++ b/src/cls/user/cls_user_ops.h
@@ -0,0 +1,204 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_USER_OPS_H
+#define CEPH_CLS_USER_OPS_H
+
+#include "cls_user_types.h"
+
+struct cls_user_set_buckets_op {
+ list<cls_user_bucket_entry> entries;
+ bool add;
+ real_time time; /* op time */
+
+ cls_user_set_buckets_op() : add(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ encode(add, bl);
+ encode(time, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ decode(add, bl);
+ decode(time, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_user_set_buckets_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_set_buckets_op)
+
+struct cls_user_remove_bucket_op {
+ cls_user_bucket bucket;
+
+ cls_user_remove_bucket_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(bucket, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(bucket, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_user_remove_bucket_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_remove_bucket_op)
+
+struct cls_user_list_buckets_op {
+ string marker;
+ string end_marker;
+ int max_entries; /* upper bound on the number of entries returned; the reply
+                     may carry fewer entries and still be truncated
+                     (see the paging sketch after cls_user_list_buckets_ret) */
+
+ cls_user_list_buckets_op()
+ : max_entries(0) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(marker, bl);
+ encode(max_entries, bl);
+ encode(end_marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(marker, bl);
+ decode(max_entries, bl);
+ if (struct_v >= 2) {
+ decode(end_marker, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_user_list_buckets_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_list_buckets_op)
+
+struct cls_user_list_buckets_ret {
+ list<cls_user_bucket_entry> entries;
+ string marker;
+ bool truncated;
+
+ cls_user_list_buckets_ret() : truncated(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ encode(marker, bl);
+ encode(truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ decode(marker, bl);
+ decode(truncated, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_user_list_buckets_ret*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_list_buckets_ret)
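Listing is cursor-based: the op carries marker/end_marker/max_entries, and the ret reports whether the listing was truncated plus the marker to resume from. The paging sketch below shows the intended loop under stated assumptions: the "user"/"list_buckets" class and method names are taken on faith (the authoritative registration lives in cls_user.cc, not in this hunk), and an already-open librados::IoCtx is assumed.

    #include <cerrno>
    #include <list>
    #include <string>
    #include "include/rados/librados.hpp"
    #include "cls/user/cls_user_ops.h"

    // Page through a user's bucket entries, feeding the returned marker back into
    // the next request until the reply is no longer marked truncated.
    int list_all_buckets(librados::IoCtx& io_ctx, const std::string& oid,
                         std::list<cls_user_bucket_entry>& out)
    {
      cls_user_list_buckets_op op;
      op.max_entries = 1000;             // upper bound per call, not a guarantee

      bool truncated = true;
      while (truncated) {
        bufferlist in, outbl;
        encode(op, in);
        int r = io_ctx.exec(oid, "user", "list_buckets", in, outbl);  // assumed method name
        if (r < 0)
          return r;

        cls_user_list_buckets_ret ret;
        try {
          auto it = outbl.cbegin();
          decode(ret, it);
        } catch (buffer::error&) {
          return -EIO;
        }

        out.insert(out.end(), ret.entries.begin(), ret.entries.end());
        op.marker = ret.marker;          // resume after the last entry we saw
        truncated = ret.truncated;       // fewer than max_entries can still be truncated
      }
      return 0;
    }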
+
+
+struct cls_user_get_header_op {
+ cls_user_get_header_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_user_get_header_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_get_header_op)
+
+struct cls_user_reset_stats_op {
+ real_time time;
+ cls_user_reset_stats_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(time, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(time, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_user_reset_stats_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_reset_stats_op);
+
+struct cls_user_get_header_ret {
+ cls_user_header header;
+
+ cls_user_get_header_ret() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(header, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(header, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_user_get_header_ret*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_get_header_ret)
+
+struct cls_user_complete_stats_sync_op {
+ real_time time;
+
+ cls_user_complete_stats_sync_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(time, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(time, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_user_complete_stats_sync_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_complete_stats_sync_op)
+
+
+#endif
diff --git a/src/cls/user/cls_user_types.cc b/src/cls/user/cls_user_types.cc
new file mode 100644
index 00000000..be3280ca
--- /dev/null
+++ b/src/cls/user/cls_user_types.cc
@@ -0,0 +1,104 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "cls/user/cls_user_types.h"
+#include "common/Formatter.h"
+#include "common/ceph_json.h"
+#include "include/utime.h"
+
+void cls_user_gen_test_bucket(cls_user_bucket *bucket, int i)
+{
+ char buf[16];
+ snprintf(buf, sizeof(buf), ".%d", i);
+
+ bucket->name = string("buck") + buf;
+ bucket->marker = string("mark") + buf;
+ bucket->bucket_id = string("bucket.id") + buf;
+}
+
+void cls_user_bucket::dump(Formatter *f) const
+{
+ encode_json("name", name, f);
+ encode_json("marker", marker,f);
+ encode_json("bucket_id", bucket_id,f);
+}
+
+void cls_user_bucket::generate_test_instances(list<cls_user_bucket*>& ls)
+{
+ ls.push_back(new cls_user_bucket);
+ cls_user_bucket *b = new cls_user_bucket;
+ cls_user_gen_test_bucket(b, 0);
+ ls.push_back(b);
+}
+
+void cls_user_bucket_entry::dump(Formatter *f) const
+{
+ encode_json("bucket", bucket, f);
+ encode_json("size", size, f);
+ encode_json("size_rounded", size_rounded, f);
+ encode_json("creation_time", utime_t(creation_time), f);
+ encode_json("count", count, f);
+ encode_json("user_stats_sync", user_stats_sync, f);
+}
+
+void cls_user_gen_test_bucket_entry(cls_user_bucket_entry *entry, int i)
+{
+ cls_user_gen_test_bucket(&entry->bucket, i);
+ entry->size = i + 1;
+ entry->size_rounded = i + 2;
+ entry->creation_time = real_clock::from_time_t(i + 3);
+ entry->count = i + 4;
+ entry->user_stats_sync = true;
+}
+
+void cls_user_bucket_entry::generate_test_instances(list<cls_user_bucket_entry*>& ls)
+{
+ ls.push_back(new cls_user_bucket_entry);
+ cls_user_bucket_entry *entry = new cls_user_bucket_entry;
+ cls_user_gen_test_bucket_entry(entry, 0);
+ ls.push_back(entry);
+}
+
+void cls_user_gen_test_stats(cls_user_stats *s)
+{
+ s->total_entries = 1;
+ s->total_bytes = 2;
+ s->total_bytes_rounded = 3;
+}
+
+void cls_user_stats::dump(Formatter *f) const
+{
+ f->dump_int("total_entries", total_entries);
+ f->dump_int("total_bytes", total_bytes);
+ f->dump_int("total_bytes_rounded", total_bytes_rounded);
+}
+
+void cls_user_stats::generate_test_instances(list<cls_user_stats*>& ls)
+{
+ ls.push_back(new cls_user_stats);
+ cls_user_stats *s = new cls_user_stats;
+ cls_user_gen_test_stats(s);
+ ls.push_back(s);
+}
+
+void cls_user_gen_test_header(cls_user_header *h)
+{
+ cls_user_gen_test_stats(&h->stats);
+ h->last_stats_sync = utime_t(1, 0).to_real_time();
+ h->last_stats_update = utime_t(2, 0).to_real_time();
+}
+
+void cls_user_header::dump(Formatter *f) const
+{
+ encode_json("stats", stats, f);
+ encode_json("last_stats_sync", utime_t(last_stats_sync), f);
+ encode_json("last_stats_update", utime_t(last_stats_update), f);
+}
+
+void cls_user_header::generate_test_instances(list<cls_user_header*>& ls)
+{
+ ls.push_back(new cls_user_header);
+ cls_user_header *h = new cls_user_header;
+ cls_user_gen_test_header(h);
+ ls.push_back(h);
+}
diff --git a/src/cls/user/cls_user_types.h b/src/cls/user/cls_user_types.h
new file mode 100644
index 00000000..8efd8bc5
--- /dev/null
+++ b/src/cls/user/cls_user_types.h
@@ -0,0 +1,226 @@
+// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_USER_TYPES_H
+#define CEPH_CLS_USER_TYPES_H
+
+#include "include/encoding.h"
+#include "include/types.h"
+#include "include/utime.h"
+#include "common/ceph_time.h"
+
+/*
+ * this needs to be compatible with rgw_bucket, as it replaces it
+ */
+struct cls_user_bucket {
+ std::string name;
+ std::string marker;
+ std::string bucket_id;
+ std::string placement_id;
+ struct {
+ std::string data_pool;
+ std::string index_pool;
+ std::string data_extra_pool;
+ } explicit_placement;
+
+ void encode(bufferlist& bl) const {
+ /* the new version of this structure is not backward compatible, and an
+  * older rgw may still be talking to a newer osd.  Only encode the newer
+  * version when placement_id is non-empty; otherwise keep the old encoding
+  * so legacy decoders can still read it (a round-trip sketch for both
+  * paths follows this struct)
+  */
+ if (!placement_id.empty()) {
+ ENCODE_START(9, 8, bl);
+ encode(name, bl);
+ encode(marker, bl);
+ encode(bucket_id, bl);
+ encode(placement_id, bl);
+ ENCODE_FINISH(bl);
+ } else {
+ ENCODE_START(7, 3, bl);
+ encode(name, bl);
+ encode(explicit_placement.data_pool, bl);
+ encode(marker, bl);
+ encode(bucket_id, bl);
+ encode(explicit_placement.index_pool, bl);
+ encode(explicit_placement.data_extra_pool, bl);
+ ENCODE_FINISH(bl);
+ }
+ }
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(8, 3, 3, bl);
+ decode(name, bl);
+ if (struct_v < 8) {
+ decode(explicit_placement.data_pool, bl);
+ }
+ if (struct_v >= 2) {
+ decode(marker, bl);
+ if (struct_v <= 3) {
+ uint64_t id;
+ decode(id, bl);
+ char buf[16];
+ snprintf(buf, sizeof(buf), "%llu", (long long)id);
+ bucket_id = buf;
+ } else {
+ decode(bucket_id, bl);
+ }
+ }
+ if (struct_v < 8) {
+ if (struct_v >= 5) {
+ decode(explicit_placement.index_pool, bl);
+ } else {
+ explicit_placement.index_pool = explicit_placement.data_pool;
+ }
+ if (struct_v >= 7) {
+ decode(explicit_placement.data_extra_pool, bl);
+ }
+ } else {
+ decode(placement_id, bl);
+ if (struct_v == 8 && placement_id.empty()) {
+ decode(explicit_placement.data_pool, bl);
+ decode(explicit_placement.index_pool, bl);
+ decode(explicit_placement.data_extra_pool, bl);
+ }
+ }
+ DECODE_FINISH(bl);
+ }
+
+ bool operator<(const cls_user_bucket& b) const {
+ return name.compare(b.name) < 0;
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_user_bucket*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_bucket)
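Because encode() above picks one of two wire formats at runtime, a quick way to convince yourself both paths stay decodable is to round-trip each of them. A minimal sketch, assuming a Ceph build environment for bufferlist/encoding; the field values are arbitrary.

    #include <cassert>
    #include "cls/user/cls_user_types.h"

    // Encode and immediately decode a cls_user_bucket, checking key fields survive.
    static void roundtrip(const cls_user_bucket& in)
    {
      bufferlist bl;
      encode(in, bl);

      cls_user_bucket out;
      auto it = bl.cbegin();
      decode(out, it);

      assert(out.name == in.name);
      assert(out.bucket_id == in.bucket_id);
    }

    static void test_both_wire_formats()
    {
      cls_user_bucket legacy;                    // empty placement_id -> legacy (v7) layout
      legacy.name = "buck";
      legacy.marker = "mark";
      legacy.bucket_id = "123";
      legacy.explicit_placement.data_pool = "default.rgw.buckets.data";
      roundtrip(legacy);

      cls_user_bucket modern = legacy;           // non-empty placement_id -> v9 layout
      modern.placement_id = "default-placement";
      roundtrip(modern);
    }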
+
+/*
+ * this structure overrides RGWBucketEnt
+ */
+struct cls_user_bucket_entry {
+ cls_user_bucket bucket;
+ size_t size;
+ size_t size_rounded;
+ ceph::real_time creation_time;
+ uint64_t count;
+ bool user_stats_sync;
+
+ cls_user_bucket_entry() : size(0), size_rounded(0), count(0), user_stats_sync(false) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(9, 5, bl);
+ uint64_t s = size;
+ __u32 mt = ceph::real_clock::to_time_t(creation_time);
+ string empty_str; // originally had the bucket name here, but we encode bucket later
+ encode(empty_str, bl);
+ encode(s, bl);
+ encode(mt, bl);
+ encode(count, bl);
+ encode(bucket, bl);
+ s = size_rounded;
+ encode(s, bl);
+ encode(user_stats_sync, bl);
+ encode(creation_time, bl);
+ //::encode(placement_rule, bl); removed in v9
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(9, 5, 5, bl);
+ __u32 mt;
+ uint64_t s;
+ string empty_str; // backward compatibility
+ decode(empty_str, bl);
+ decode(s, bl);
+ decode(mt, bl);
+ size = s;
+ if (struct_v < 7) {
+ creation_time = ceph::real_clock::from_time_t(mt);
+ }
+ if (struct_v >= 2)
+ decode(count, bl);
+ if (struct_v >= 3)
+ decode(bucket, bl);
+ if (struct_v >= 4)
+ decode(s, bl);
+ size_rounded = s;
+ if (struct_v >= 6)
+ decode(user_stats_sync, bl);
+ if (struct_v >= 7)
+ decode(creation_time, bl);
+ if (struct_v == 8) { // added in v8, removed in v9
+ std::string placement_rule;
+ decode(placement_rule, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_user_bucket_entry*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_bucket_entry)
+
+struct cls_user_stats {
+ uint64_t total_entries;
+ uint64_t total_bytes;
+ uint64_t total_bytes_rounded;
+
+ cls_user_stats()
+ : total_entries(0),
+ total_bytes(0),
+ total_bytes_rounded(0) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(total_entries, bl);
+ encode(total_bytes, bl);
+ encode(total_bytes_rounded, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(total_entries, bl);
+ decode(total_bytes, bl);
+ decode(total_bytes_rounded, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_user_stats*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_stats)
+
+/*
+ * per-user header: aggregate usage stats plus the last stats sync/update times
+ */
+struct cls_user_header {
+ cls_user_stats stats;
+ ceph::real_time last_stats_sync; /* last time a full stats sync completed */
+ ceph::real_time last_stats_update; /* last time a stats update was done */
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(stats, bl);
+ encode(last_stats_sync, bl);
+ encode(last_stats_update, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(stats, bl);
+ decode(last_stats_sync, bl);
+ decode(last_stats_update, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter *f) const;
+ static void generate_test_instances(list<cls_user_header*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_header)
+
+void cls_user_gen_test_bucket(cls_user_bucket *bucket, int i);
+void cls_user_gen_test_bucket_entry(cls_user_bucket_entry *entry, int i);
+void cls_user_gen_test_stats(cls_user_stats *stats);
+void cls_user_gen_test_header(cls_user_header *h);
+
+#endif
+
+
diff --git a/src/cls/version/cls_version.cc b/src/cls/version/cls_version.cc
new file mode 100644
index 00000000..61f9fec0
--- /dev/null
+++ b/src/cls/version/cls_version.cc
@@ -0,0 +1,234 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <errno.h>
+
+#include "objclass/objclass.h"
+
+#include "cls/version/cls_version_ops.h"
+
+#include "include/compat.h"
+
+CLS_VER(1,0)
+CLS_NAME(version)
+
+
+#define VERSION_ATTR "ceph.objclass.version"
+
+static int set_version(cls_method_context_t hctx, struct obj_version *objv)
+{
+ bufferlist bl;
+
+ encode(*objv, bl);
+
+ CLS_LOG(20, "cls_version: set_version %s:%d", objv->tag.c_str(), (int)objv->ver);
+
+ int ret = cls_cxx_setxattr(hctx, VERSION_ATTR, &bl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int init_version(cls_method_context_t hctx, struct obj_version *objv)
+{
+#define TAG_LEN 24
+ char buf[TAG_LEN + 1];
+
+ int ret = cls_gen_rand_base64(buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ objv->ver = 1;
+ objv->tag = buf;
+
+ CLS_LOG(20, "cls_version: init_version %s:%d", objv->tag.c_str(), (int)objv->ver);
+
+ return set_version(hctx, objv);
+}
+
+/* implicit create should be true only if called from a write operation (set, inc), never from a read operation (read, check) */
+static int read_version(cls_method_context_t hctx, obj_version *objv, bool implicit_create)
+{
+ bufferlist bl;
+ int ret = cls_cxx_getxattr(hctx, VERSION_ATTR, &bl);
+ if (ret == -ENOENT || ret == -ENODATA) {
+ objv->ver = 0;
+
+ if (implicit_create) {
+ return init_version(hctx, objv);
+ }
+ return 0;
+ }
+ if (ret < 0)
+ return ret;
+
+ try {
+ auto iter = bl.cbegin();
+ decode(*objv, iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(0, "ERROR: read_version(): failed to decode version entry\n");
+ return -EIO;
+ }
+ CLS_LOG(20, "cls_version: read_version %s:%d", objv->tag.c_str(), (int)objv->ver);
+
+ return 0;
+}
+
+static int cls_version_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_version_set_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_version_get(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ int ret = set_version(hctx, &op.objv);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static bool check_conds(list<obj_version_cond>& conds, obj_version& objv)
+{
+ if (conds.empty())
+ return true;
+
+ for (list<obj_version_cond>::iterator iter = conds.begin(); iter != conds.end(); ++iter) {
+ obj_version_cond& cond = *iter;
+ obj_version& v = cond.ver;
+ CLS_LOG(20, "cls_version: check_version %s:%d (cond=%d)", v.tag.c_str(), (int)v.ver, (int)cond.cond);
+
+ switch (cond.cond) {
+ case VER_COND_NONE:
+ break;
+ case VER_COND_EQ:
+ if (!objv.compare(&v))
+ return false;
+ break;
+ case VER_COND_GT:
+ if (!(objv.ver > v.ver))
+ return false;
+ break;
+ case VER_COND_GE:
+ if (!(objv.ver >= v.ver))
+ return false;
+ break;
+ case VER_COND_LT:
+ if (!(objv.ver < v.ver))
+ return false;
+ break;
+ case VER_COND_LE:
+ if (!(objv.ver <= v.ver))
+ return false;
+ break;
+ case VER_COND_TAG_EQ:
+ if (objv.tag.compare(v.tag) != 0)
+ return false;
+ break;
+ case VER_COND_TAG_NE:
+ if (objv.tag.compare(v.tag) == 0)
+ return false;
+ break;
+ }
+ }
+
+ return true;
+}
+
+static int cls_version_inc(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_version_inc_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_version_get(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ obj_version objv;
+ int ret = read_version(hctx, &objv, true);
+ if (ret < 0)
+ return ret;
+
+ if (!check_conds(op.conds, objv)) {
+ return -ECANCELED;
+ }
+ objv.inc();
+
+ ret = set_version(hctx, &objv);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cls_version_check(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_version_check_op op;
+ try {
+ decode(op, in_iter);
+ } catch (buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_version_get(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ obj_version objv;
+ int ret = read_version(hctx, &objv, false);
+ if (ret < 0)
+ return ret;
+
+ if (!check_conds(op.conds, objv)) {
+ CLS_LOG(20, "cls_version: failed condition check");
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static int cls_version_read(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+
+ cls_version_read_ret read_ret;
+ int ret = read_version(hctx, &read_ret.objv, false);
+ if (ret < 0)
+ return ret;
+
+ encode(read_ret, *out);
+
+ return 0;
+}
+
+CLS_INIT(version)
+{
+ CLS_LOG(1, "Loaded version class!");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_version_set;
+ cls_method_handle_t h_version_inc;
+ cls_method_handle_t h_version_inc_conds;
+ cls_method_handle_t h_version_read;
+ cls_method_handle_t h_version_check_conds;
+
+ cls_register("version", &h_class);
+
+ /* version */
+ cls_register_cxx_method(h_class, "set", CLS_METHOD_RD | CLS_METHOD_WR, cls_version_set, &h_version_set);
+ cls_register_cxx_method(h_class, "inc", CLS_METHOD_RD | CLS_METHOD_WR, cls_version_inc, &h_version_inc);
+ cls_register_cxx_method(h_class, "inc_conds", CLS_METHOD_RD | CLS_METHOD_WR, cls_version_inc, &h_version_inc_conds);
+ cls_register_cxx_method(h_class, "read", CLS_METHOD_RD, cls_version_read, &h_version_read);
+ cls_register_cxx_method(h_class, "check_conds", CLS_METHOD_RD, cls_version_check, &h_version_check_conds);
+
+ return;
+}
+
diff --git a/src/cls/version/cls_version_client.cc b/src/cls/version/cls_version_client.cc
new file mode 100644
index 00000000..3c1792aa
--- /dev/null
+++ b/src/cls/version/cls_version_client.cc
@@ -0,0 +1,101 @@
+#include <errno.h>
+
+#include "cls/version/cls_version_client.h"
+#include "include/rados/librados.hpp"
+
+
+using namespace librados;
+
+
+void cls_version_set(librados::ObjectWriteOperation& op, obj_version& objv)
+{
+ bufferlist in;
+ cls_version_set_op call;
+ call.objv = objv;
+ encode(call, in);
+ op.exec("version", "set", in);
+}
+
+void cls_version_inc(librados::ObjectWriteOperation& op)
+{
+ bufferlist in;
+ cls_version_inc_op call;
+ encode(call, in);
+ op.exec("version", "inc", in);
+}
+
+void cls_version_inc(librados::ObjectWriteOperation& op, obj_version& objv, VersionCond cond)
+{
+ bufferlist in;
+ cls_version_inc_op call;
+ call.objv = objv;
+
+ obj_version_cond c;
+ c.cond = cond;
+ c.ver = objv;
+
+ call.conds.push_back(c);
+
+ encode(call, in);
+ op.exec("version", "inc_conds", in);
+}
+
+void cls_version_check(librados::ObjectOperation& op, obj_version& objv, VersionCond cond)
+{
+ bufferlist in;
+ cls_version_check_op call;
+ call.objv = objv;
+
+ obj_version_cond c;
+ c.cond = cond;
+ c.ver = objv;
+
+ call.conds.push_back(c);
+
+ encode(call, in);
+ op.exec("version", "check_conds", in);
+}
+
+class VersionReadCtx : public ObjectOperationCompletion {
+ obj_version *objv;
+public:
+ explicit VersionReadCtx(obj_version *_objv) : objv(_objv) {}
+ void handle_completion(int r, bufferlist& outbl) override {
+ if (r >= 0) {
+ cls_version_read_ret ret;
+ try {
+ auto iter = outbl.cbegin();
+ decode(ret, iter);
+ *objv = ret.objv;
+ } catch (buffer::error& err) {
+ // decode failed; nothing useful to do here, leave *objv untouched
+ }
+ }
+ }
+};
+
+void cls_version_read(librados::ObjectReadOperation& op, obj_version *objv)
+{
+ bufferlist inbl;
+ op.exec("version", "read", inbl, new VersionReadCtx(objv));
+}
+
+int cls_version_read(librados::IoCtx& io_ctx, string& oid, obj_version *ver)
+{
+ bufferlist in, out;
+ int r = io_ctx.exec(oid, "version", "read", in, out);
+ if (r < 0)
+ return r;
+
+ cls_version_read_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+
+ *ver = ret.objv;
+
+ return r;
+}
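The ObjectOperationCompletion hook above is what lets the version fetch ride along inside a larger compound read. A minimal usage sketch, assuming an already-open librados::IoCtx; the object name and error handling are illustrative.

    #include <string>
    #include "include/rados/librados.hpp"
    #include "cls/version/cls_version_client.h"

    // Queue a "version.read" call on a compound read op; VersionReadCtx fills *objv
    // when the operation completes.
    int fetch_version(librados::IoCtx& io_ctx, const std::string& oid, obj_version *objv)
    {
      librados::ObjectReadOperation op;
      cls_version_read(op, objv);
      // ...other reads (stat, getxattrs, ...) could be queued on the same op here...
      bufferlist unused;
      return io_ctx.operate(oid, &op, &unused);
    }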
diff --git a/src/cls/version/cls_version_client.h b/src/cls/version/cls_version_client.h
new file mode 100644
index 00000000..01e7d52c
--- /dev/null
+++ b/src/cls/version/cls_version_client.h
@@ -0,0 +1,25 @@
+#ifndef CEPH_CLS_VERSION_CLIENT_H
+#define CEPH_CLS_VERSION_CLIENT_H
+
+#include "include/rados/librados_fwd.hpp"
+#include "cls_version_ops.h"
+
+/*
+ * version objclass
+ */
+
+void cls_version_set(librados::ObjectWriteOperation& op, obj_version& ver);
+
+/* increase anyway */
+void cls_version_inc(librados::ObjectWriteOperation& op);
+
+/* conditional increase; the whole operation fails with -ECANCELED if the condition does not hold */
+void cls_version_inc(librados::ObjectWriteOperation& op, obj_version& ver, VersionCond cond);
+
+void cls_version_read(librados::ObjectReadOperation& op, obj_version *objv);
+
+int cls_version_read(librados::IoCtx& io_ctx, string& oid, obj_version *ver);
+
+void cls_version_check(librados::ObjectOperation& op, obj_version& ver, VersionCond cond);
+
+#endif
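Putting the conditional form to work: the typical pattern is optimistic concurrency, where a writer re-reads the stored version, queues its changes, and lets the class reject the whole write with -ECANCELED if someone else bumped the version in between. A sketch under the usual assumptions (open IoCtx, illustrative object name, retry policy left to the caller):

    #include <string>
    #include "include/rados/librados.hpp"
    #include "cls/version/cls_version_client.h"

    // Bump the object's version only if it still matches what we last read;
    // -ECANCELED from operate() means the guard failed and the caller should
    // re-read and retry.
    int guarded_bump(librados::IoCtx& io_ctx, std::string& oid)
    {
      obj_version seen;
      int r = cls_version_read(io_ctx, oid, &seen);   // synchronous read of the current version
      if (r < 0)
        return r;

      librados::ObjectWriteOperation op;
      cls_version_inc(op, seen, VER_COND_EQ);         // require stored ver *and* tag to match
      // ...queue the payload updates this version guards on the same op here...
      return io_ctx.operate(oid, &op);
    }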
diff --git a/src/cls/version/cls_version_ops.h b/src/cls/version/cls_version_ops.h
new file mode 100644
index 00000000..36592fa6
--- /dev/null
+++ b/src/cls/version/cls_version_ops.h
@@ -0,0 +1,92 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_VERSION_OPS_H
+#define CEPH_CLS_VERSION_OPS_H
+
+#include "cls_version_types.h"
+
+struct cls_version_set_op {
+ obj_version objv;
+
+ cls_version_set_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(objv, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(objv, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_version_set_op)
+
+struct cls_version_inc_op {
+ obj_version objv;
+ list<obj_version_cond> conds;
+
+ cls_version_inc_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(objv, bl);
+ encode(conds, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(objv, bl);
+ decode(conds, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_version_inc_op)
+
+struct cls_version_check_op {
+ obj_version objv;
+ list<obj_version_cond> conds;
+
+ cls_version_check_op() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(objv, bl);
+ encode(conds, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(objv, bl);
+ decode(conds, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_version_check_op)
+
+struct cls_version_read_ret {
+ obj_version objv;
+
+ cls_version_read_ret() {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(objv, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(objv, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_version_read_ret)
+
+
+#endif
diff --git a/src/cls/version/cls_version_types.cc b/src/cls/version/cls_version_types.cc
new file mode 100644
index 00000000..4ec67867
--- /dev/null
+++ b/src/cls/version/cls_version_types.cc
@@ -0,0 +1,18 @@
+
+#include "cls/version/cls_version_types.h"
+#include "common/Formatter.h"
+#include "common/ceph_json.h"
+
+
+void obj_version::dump(Formatter *f) const
+{
+ f->dump_int("ver", ver);
+ f->dump_string("tag", tag);
+}
+
+void obj_version::decode_json(JSONObj *obj)
+{
+ JSONDecoder::decode_json("ver", ver, obj);
+ JSONDecoder::decode_json("tag", tag, obj);
+}
+
diff --git a/src/cls/version/cls_version_types.h b/src/cls/version/cls_version_types.h
new file mode 100644
index 00000000..15433b46
--- /dev/null
+++ b/src/cls/version/cls_version_types.h
@@ -0,0 +1,89 @@
+#ifndef CEPH_CLS_VERSION_TYPES_H
+#define CEPH_CLS_VERSION_TYPES_H
+
+#include "include/encoding.h"
+#include "include/types.h"
+
+class JSONObj;
+
+
+struct obj_version {
+ uint64_t ver;
+ string tag;
+
+ obj_version() : ver(0) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(ver, bl);
+ encode(tag, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(ver, bl);
+ decode(tag, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void inc() {
+ ver++;
+ }
+
+ void clear() {
+ ver = 0;
+ tag.clear();
+ }
+
+ bool empty() {
+ return tag.empty();
+ }
+
+ bool compare(struct obj_version *v) {
+ return (ver == v->ver &&
+ tag.compare(v->tag) == 0);
+ }
+
+ void dump(Formatter *f) const;
+ void decode_json(JSONObj *obj);
+};
+WRITE_CLASS_ENCODER(obj_version)
+
+enum VersionCond {
+ VER_COND_NONE = 0,
+ VER_COND_EQ, /* equal */
+ VER_COND_GT, /* greater than */
+ VER_COND_GE, /* greater or equal */
+ VER_COND_LT, /* less than */
+ VER_COND_LE, /* less or equal */
+ VER_COND_TAG_EQ,
+ VER_COND_TAG_NE,
+};
+
+struct obj_version_cond {
+ struct obj_version ver;
+ VersionCond cond;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(ver, bl);
+ uint32_t c = (uint32_t)cond;
+ encode(c, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(ver, bl);
+ uint32_t c;
+ decode(c, bl);
+ cond = (VersionCond)c;
+ DECODE_FINISH(bl);
+ }
+
+};
+WRITE_CLASS_ENCODER(obj_version_cond)
+
+
+#endif