Diffstat (limited to 'src/librbd/deep_copy')
-rw-r--r--   src/librbd/deep_copy/Handler.h                    50
-rw-r--r--   src/librbd/deep_copy/ImageCopyRequest.cc         278
-rw-r--r--   src/librbd/deep_copy/ImageCopyRequest.h          123
-rw-r--r--   src/librbd/deep_copy/MetadataCopyRequest.cc      117
-rw-r--r--   src/librbd/deep_copy/MetadataCopyRequest.h        78
-rw-r--r--   src/librbd/deep_copy/ObjectCopyRequest.cc        839
-rw-r--r--   src/librbd/deep_copy/ObjectCopyRequest.h         163
-rw-r--r--   src/librbd/deep_copy/SetHeadRequest.cc           223
-rw-r--r--   src/librbd/deep_copy/SetHeadRequest.h             87
-rw-r--r--   src/librbd/deep_copy/SnapshotCopyRequest.cc      729
-rw-r--r--   src/librbd/deep_copy/SnapshotCopyRequest.h       151
-rw-r--r--   src/librbd/deep_copy/SnapshotCreateRequest.cc    187
-rw-r--r--   src/librbd/deep_copy/SnapshotCreateRequest.h      98
-rw-r--r--   src/librbd/deep_copy/Types.h                      28
-rw-r--r--   src/librbd/deep_copy/Utils.cc                     61
-rw-r--r--   src/librbd/deep_copy/Utils.h                      29
16 files changed, 3241 insertions, 0 deletions
diff --git a/src/librbd/deep_copy/Handler.h b/src/librbd/deep_copy/Handler.h
new file mode 100644
index 000000000..fea553ee2
--- /dev/null
+++ b/src/librbd/deep_copy/Handler.h
@@ -0,0 +1,50 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_DEEP_COPY_HANDLER_H
+#define CEPH_LIBRBD_DEEP_COPY_HANDLER_H
+
+#include "include/int_types.h"
+#include "include/rbd/librbd.hpp"
+
+namespace librbd {
+namespace deep_copy {
+
+struct Handler {
+ virtual ~Handler() {}
+
+ virtual void handle_read(uint64_t bytes_read) = 0;
+
+ virtual int update_progress(uint64_t object_number,
+ uint64_t object_count) = 0;
+};
+
+struct NoOpHandler : public Handler {
+ void handle_read(uint64_t bytes_read) override {
+ }
+
+ int update_progress(uint64_t object_number,
+ uint64_t object_count) override {
+ return 0;
+ }
+};
+
+class ProgressHandler : public NoOpHandler {
+public:
+ ProgressHandler(ProgressContext* progress_ctx)
+ : m_progress_ctx(progress_ctx) {
+ }
+
+ int update_progress(uint64_t object_number,
+ uint64_t object_count) override {
+ return m_progress_ctx->update_progress(object_number, object_count);
+ }
+
+private:
+ librbd::ProgressContext* m_progress_ctx;
+};
+
+} // namespace deep_copy
+} // namespace librbd
+
+#endif // CEPH_LIBRBD_DEEP_COPY_HANDLER_H
diff --git a/src/librbd/deep_copy/ImageCopyRequest.cc b/src/librbd/deep_copy/ImageCopyRequest.cc
new file mode 100644
index 000000000..08e959dd5
--- /dev/null
+++ b/src/librbd/deep_copy/ImageCopyRequest.cc
@@ -0,0 +1,278 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "ImageCopyRequest.h"
+#include "ObjectCopyRequest.h"
+#include "common/errno.h"
+#include "librbd/Utils.h"
+#include "librbd/asio/ContextWQ.h"
+#include "librbd/deep_copy/Handler.h"
+#include "librbd/deep_copy/Utils.h"
+#include "librbd/object_map/DiffRequest.h"
+#include "osdc/Striper.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::deep_copy::ImageCopyRequest: " \
+ << this << " " << __func__ << ": "
+
+namespace librbd {
+namespace deep_copy {
+
+using librbd::util::create_async_context_callback;
+using librbd::util::create_context_callback;
+using librbd::util::unique_lock_name;
+
+template <typename I>
+ImageCopyRequest<I>::ImageCopyRequest(I *src_image_ctx, I *dst_image_ctx,
+ librados::snap_t src_snap_id_start,
+ librados::snap_t src_snap_id_end,
+ librados::snap_t dst_snap_id_start,
+ bool flatten,
+ const ObjectNumber &object_number,
+ const SnapSeqs &snap_seqs,
+ Handler *handler,
+ Context *on_finish)
+ : RefCountedObject(dst_image_ctx->cct), m_src_image_ctx(src_image_ctx),
+ m_dst_image_ctx(dst_image_ctx), m_src_snap_id_start(src_snap_id_start),
+ m_src_snap_id_end(src_snap_id_end), m_dst_snap_id_start(dst_snap_id_start),
+ m_flatten(flatten), m_object_number(object_number), m_snap_seqs(snap_seqs),
+ m_handler(handler), m_on_finish(on_finish), m_cct(dst_image_ctx->cct),
+ m_lock(ceph::make_mutex(unique_lock_name("ImageCopyRequest::m_lock", this))) {
+}
+
+template <typename I>
+void ImageCopyRequest<I>::send() {
+ m_dst_image_ctx->image_lock.lock_shared();
+ util::compute_snap_map(m_dst_image_ctx->cct, m_src_snap_id_start,
+ m_src_snap_id_end, m_dst_image_ctx->snaps, m_snap_seqs,
+ &m_snap_map);
+ m_dst_image_ctx->image_lock.unlock_shared();
+
+ if (m_snap_map.empty()) {
+ lderr(m_cct) << "failed to map snapshots within boundary" << dendl;
+ finish(-EINVAL);
+ return;
+ }
+
+ compute_diff();
+}
+
+template <typename I>
+void ImageCopyRequest<I>::cancel() {
+ std::lock_guard locker{m_lock};
+
+ ldout(m_cct, 20) << dendl;
+ m_canceled = true;
+}
+
+template <typename I>
+void ImageCopyRequest<I>::map_src_objects(uint64_t dst_object,
+ std::set<uint64_t> *src_objects) {
+ std::vector<std::pair<uint64_t, uint64_t>> image_extents;
+ Striper::extent_to_file(m_cct, &m_dst_image_ctx->layout, dst_object, 0,
+ m_dst_image_ctx->layout.object_size, image_extents);
+
+ for (auto &e : image_extents) {
+ std::map<object_t, std::vector<ObjectExtent>> src_object_extents;
+ Striper::file_to_extents(m_cct, m_src_image_ctx->format_string,
+ &m_src_image_ctx->layout, e.first, e.second, 0,
+ src_object_extents);
+ for (auto &p : src_object_extents) {
+ for (auto &s : p.second) {
+ src_objects->insert(s.objectno);
+ }
+ }
+ }
+
+ ceph_assert(!src_objects->empty());
+
+ ldout(m_cct, 20) << dst_object << " -> " << *src_objects << dendl;
+}
+
+template <typename I>
+void ImageCopyRequest<I>::compute_diff() {
+ if (m_flatten) {
+ send_object_copies();
+ return;
+ }
+
+ ldout(m_cct, 10) << dendl;
+
+ auto ctx = create_context_callback<
+ ImageCopyRequest<I>, &ImageCopyRequest<I>::handle_compute_diff>(this);
+ auto req = object_map::DiffRequest<I>::create(m_src_image_ctx, m_src_snap_id_start,
+ m_src_snap_id_end, &m_object_diff_state,
+ ctx);
+ req->send();
+}
+
+template <typename I>
+void ImageCopyRequest<I>::handle_compute_diff(int r) {
+ ldout(m_cct, 10) << "r=" << r << dendl;
+
+ if (r < 0) {
+ ldout(m_cct, 10) << "fast-diff optimization disabled" << dendl;
+ m_object_diff_state.resize(0);
+ }
+
+ send_object_copies();
+}
+
+template <typename I>
+void ImageCopyRequest<I>::send_object_copies() {
+ m_object_no = 0;
+ if (m_object_number) {
+ m_object_no = *m_object_number + 1;
+ }
+
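+  // size the copy using the largest image size across the head revision and
+  // all source snapshots so that every object that may hold data is visited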
+ uint64_t size;
+ {
+ std::shared_lock image_locker{m_src_image_ctx->image_lock};
+ size = m_src_image_ctx->get_image_size(CEPH_NOSNAP);
+ for (auto snap_id : m_src_image_ctx->snaps) {
+ size = std::max(size, m_src_image_ctx->get_image_size(snap_id));
+ }
+ }
+ m_end_object_no = Striper::get_num_objects(m_dst_image_ctx->layout, size);
+
+ ldout(m_cct, 20) << "start_object=" << m_object_no << ", "
+ << "end_object=" << m_end_object_no << dendl;
+
+ bool complete;
+ {
+ std::lock_guard locker{m_lock};
+ auto max_ops = m_src_image_ctx->config.template get_val<uint64_t>(
+ "rbd_concurrent_management_ops");
+
+ // attempt to schedule at least 'max_ops' initial requests where
+ // some objects might be skipped if fast-diff notes no change
+ for (uint64_t i = 0; i < max_ops; i++) {
+ send_next_object_copy();
+ }
+
+ complete = (m_current_ops == 0) && !m_updating_progress;
+ }
+
+ if (complete) {
+ finish(m_ret_val);
+ }
+}
+
+template <typename I>
+void ImageCopyRequest<I>::send_next_object_copy() {
+ ceph_assert(ceph_mutex_is_locked(m_lock));
+
+ if (m_canceled && m_ret_val == 0) {
+ ldout(m_cct, 10) << "image copy canceled" << dendl;
+ m_ret_val = -ECANCELED;
+ }
+
+ if (m_ret_val < 0 || m_object_no >= m_end_object_no) {
+ return;
+ }
+
+ uint64_t ono = m_object_no++;
+ Context *ctx = new LambdaContext(
+ [this, ono](int r) {
+ handle_object_copy(ono, r);
+ });
+
+ ldout(m_cct, 20) << "object_num=" << ono << dendl;
+ ++m_current_ops;
+
+ uint8_t object_diff_state = object_map::DIFF_STATE_HOLE;
+ if (m_object_diff_state.size() > 0) {
+ std::set<uint64_t> src_objects;
+ map_src_objects(ono, &src_objects);
+
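+    // merge the fast-diff states of every source object that backs this
+    // destination object, preferring data/updated states over holes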
+ for (auto src_ono : src_objects) {
+ if (src_ono >= m_object_diff_state.size()) {
+ object_diff_state = object_map::DIFF_STATE_DATA_UPDATED;
+ } else {
+ auto state = m_object_diff_state[src_ono];
+ if ((state == object_map::DIFF_STATE_HOLE_UPDATED &&
+ object_diff_state != object_map::DIFF_STATE_DATA_UPDATED) ||
+ (state == object_map::DIFF_STATE_DATA &&
+ object_diff_state == object_map::DIFF_STATE_HOLE) ||
+ (state == object_map::DIFF_STATE_DATA_UPDATED)) {
+ object_diff_state = state;
+ }
+ }
+ }
+
+ if (object_diff_state == object_map::DIFF_STATE_HOLE) {
+ ldout(m_cct, 20) << "skipping non-existent object " << ono << dendl;
+ create_async_context_callback(*m_src_image_ctx, ctx)->complete(0);
+ return;
+ }
+ }
+
+ uint32_t flags = 0;
+ if (m_flatten) {
+ flags |= OBJECT_COPY_REQUEST_FLAG_FLATTEN;
+ }
+ if (object_diff_state == object_map::DIFF_STATE_DATA) {
+ // no source objects have been updated and at least one has clean data
+ flags |= OBJECT_COPY_REQUEST_FLAG_EXISTS_CLEAN;
+ }
+
+ auto req = ObjectCopyRequest<I>::create(
+ m_src_image_ctx, m_dst_image_ctx, m_src_snap_id_start, m_dst_snap_id_start,
+ m_snap_map, ono, flags, m_handler, ctx);
+ req->send();
+}
+
+template <typename I>
+void ImageCopyRequest<I>::handle_object_copy(uint64_t object_no, int r) {
+ ldout(m_cct, 20) << "object_no=" << object_no << ", r=" << r << dendl;
+
+ bool complete;
+ {
+ std::lock_guard locker{m_lock};
+ ceph_assert(m_current_ops > 0);
+ --m_current_ops;
+
+ if (r < 0 && r != -ENOENT) {
+ lderr(m_cct) << "object copy failed: " << cpp_strerror(r) << dendl;
+ if (m_ret_val == 0) {
+ m_ret_val = r;
+ }
+ } else {
+ m_copied_objects.push(object_no);
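+      // object copies may complete out of order, so queue completions and
+      // only advance the in-order progress marker once the next expected
+      // object has finished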
+ while (!m_updating_progress && !m_copied_objects.empty() &&
+ m_copied_objects.top() ==
+ (m_object_number ? *m_object_number + 1 : 0)) {
+ m_object_number = m_copied_objects.top();
+ m_copied_objects.pop();
+ uint64_t progress_object_no = *m_object_number + 1;
+ m_updating_progress = true;
+ m_lock.unlock();
+ m_handler->update_progress(progress_object_no, m_end_object_no);
+ m_lock.lock();
+ ceph_assert(m_updating_progress);
+ m_updating_progress = false;
+ }
+ }
+
+ send_next_object_copy();
+ complete = (m_current_ops == 0) && !m_updating_progress;
+ }
+
+ if (complete) {
+ finish(m_ret_val);
+ }
+}
+
+template <typename I>
+void ImageCopyRequest<I>::finish(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ m_on_finish->complete(r);
+ put();
+}
+
+} // namespace deep_copy
+} // namespace librbd
+
+template class librbd::deep_copy::ImageCopyRequest<librbd::ImageCtx>;
diff --git a/src/librbd/deep_copy/ImageCopyRequest.h b/src/librbd/deep_copy/ImageCopyRequest.h
new file mode 100644
index 000000000..cb8b83781
--- /dev/null
+++ b/src/librbd/deep_copy/ImageCopyRequest.h
@@ -0,0 +1,123 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_DEEP_COPY_IMAGE_DEEP_COPY_REQUEST_H
+#define CEPH_LIBRBD_DEEP_COPY_IMAGE_DEEP_COPY_REQUEST_H
+
+#include "include/int_types.h"
+#include "include/rados/librados.hpp"
+#include "common/bit_vector.hpp"
+#include "common/ceph_mutex.h"
+#include "common/RefCountedObj.h"
+#include "librbd/Types.h"
+#include "librbd/deep_copy/Types.h"
+#include <functional>
+#include <map>
+#include <queue>
+#include <set>
+#include <vector>
+#include <boost/optional.hpp>
+
+class Context;
+
+namespace librbd {
+
+class ImageCtx;
+
+namespace deep_copy {
+
+class Handler;
+
+template <typename ImageCtxT = ImageCtx>
+class ImageCopyRequest : public RefCountedObject {
+public:
+ static ImageCopyRequest* create(ImageCtxT *src_image_ctx,
+ ImageCtxT *dst_image_ctx,
+ librados::snap_t src_snap_id_start,
+ librados::snap_t src_snap_id_end,
+ librados::snap_t dst_snap_id_start,
+ bool flatten,
+ const ObjectNumber &object_number,
+ const SnapSeqs &snap_seqs,
+ Handler *handler,
+ Context *on_finish) {
+ return new ImageCopyRequest(src_image_ctx, dst_image_ctx, src_snap_id_start,
+ src_snap_id_end, dst_snap_id_start, flatten,
+ object_number, snap_seqs, handler, on_finish);
+ }
+
+ ImageCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
+ librados::snap_t src_snap_id_start,
+ librados::snap_t src_snap_id_end,
+ librados::snap_t dst_snap_id_start,
+ bool flatten, const ObjectNumber &object_number,
+ const SnapSeqs &snap_seqs, Handler *handler,
+ Context *on_finish);
+
+ void send();
+ void cancel();
+
+private:
+ /**
+ * @verbatim
+ *
+ * <start>
+ * |
+ * v
+ * COMPUTE_DIFF
+ * |
+ * | . . . . .
+ * | . . (parallel execution of
+ * v v . multiple objects at once)
+ * COPY_OBJECT . . . .
+ * |
+ * v
+ * <finish>
+ *
+ * @endverbatim
+ */
+
+ ImageCtxT *m_src_image_ctx;
+ ImageCtxT *m_dst_image_ctx;
+ librados::snap_t m_src_snap_id_start;
+ librados::snap_t m_src_snap_id_end;
+ librados::snap_t m_dst_snap_id_start;
+ bool m_flatten;
+ ObjectNumber m_object_number;
+ SnapSeqs m_snap_seqs;
+ Handler *m_handler;
+ Context *m_on_finish;
+
+ CephContext *m_cct;
+ ceph::mutex m_lock;
+ bool m_canceled = false;
+
+ uint64_t m_object_no = 0;
+ uint64_t m_end_object_no = 0;
+ uint64_t m_current_ops = 0;
+ std::priority_queue<
+ uint64_t, std::vector<uint64_t>, std::greater<uint64_t>> m_copied_objects;
+ bool m_updating_progress = false;
+ SnapMap m_snap_map;
+ int m_ret_val = 0;
+
+ BitVector<2> m_object_diff_state;
+
+ void map_src_objects(uint64_t dst_object, std::set<uint64_t> *src_objects);
+
+ void compute_diff();
+ void handle_compute_diff(int r);
+
+ void send_object_copies();
+ void send_next_object_copy();
+ void handle_object_copy(uint64_t object_no, int r);
+
+ void finish(int r);
+};
+
+} // namespace deep_copy
+} // namespace librbd
+
+extern template class librbd::deep_copy::ImageCopyRequest<librbd::ImageCtx>;
+
+#endif // CEPH_LIBRBD_DEEP_COPY_IMAGE_DEEP_COPY_REQUEST_H
diff --git a/src/librbd/deep_copy/MetadataCopyRequest.cc b/src/librbd/deep_copy/MetadataCopyRequest.cc
new file mode 100644
index 000000000..c584bea54
--- /dev/null
+++ b/src/librbd/deep_copy/MetadataCopyRequest.cc
@@ -0,0 +1,117 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "MetadataCopyRequest.h"
+#include "common/dout.h"
+#include "common/errno.h"
+#include "cls/rbd/cls_rbd_client.h"
+#include "librbd/Utils.h"
+#include "librbd/image/GetMetadataRequest.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::deep_copy::MetadataCopyRequest: " \
+ << this << " " << __func__ << ": "
+
+namespace librbd {
+namespace deep_copy {
+
+namespace {
+
+const uint64_t MAX_METADATA_ITEMS = 128;
+
+} // anonymous namespace
+
+using librbd::util::create_context_callback;
+using librbd::util::create_rados_callback;
+
+template <typename I>
+MetadataCopyRequest<I>::MetadataCopyRequest(I *src_image_ctx, I *dst_image_ctx,
+ Context *on_finish)
+ : m_src_image_ctx(src_image_ctx), m_dst_image_ctx(dst_image_ctx),
+ m_on_finish(on_finish), m_cct(dst_image_ctx->cct) {
+}
+
+template <typename I>
+void MetadataCopyRequest<I>::send() {
+ list_src_metadata();
+}
+
+template <typename I>
+void MetadataCopyRequest<I>::list_src_metadata() {
+ ldout(m_cct, 20) << "start_key=" << m_last_metadata_key << dendl;
+
+ m_metadata.clear();
+ auto ctx = create_context_callback<
+ MetadataCopyRequest<I>,
+ &MetadataCopyRequest<I>::handle_list_src_metadata>(this);
+ auto req = image::GetMetadataRequest<I>::create(
+ m_src_image_ctx->md_ctx, m_src_image_ctx->header_oid, true, "",
+ m_last_metadata_key, MAX_METADATA_ITEMS, &m_metadata, ctx);
+ req->send();
+}
+
+template <typename I>
+void MetadataCopyRequest<I>::handle_list_src_metadata(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to retrieve metadata: " << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+
+ if (m_metadata.empty()) {
+ finish(0);
+ return;
+ }
+
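+  // resume the next listing after the last key returned; a full page
+  // implies more metadata may remain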
+ m_last_metadata_key = m_metadata.rbegin()->first;
+ m_more_metadata = (m_metadata.size() >= MAX_METADATA_ITEMS);
+ set_dst_metadata();
+}
+
+template <typename I>
+void MetadataCopyRequest<I>::set_dst_metadata() {
+ ldout(m_cct, 20) << "count=" << m_metadata.size() << dendl;
+
+ librados::ObjectWriteOperation op;
+ librbd::cls_client::metadata_set(&op, m_metadata);
+
+ librados::AioCompletion *aio_comp = create_rados_callback<
+ MetadataCopyRequest<I>,
+ &MetadataCopyRequest<I>::handle_set_dst_metadata>(this);
+ m_dst_image_ctx->md_ctx.aio_operate(m_dst_image_ctx->header_oid, aio_comp,
+ &op);
+ aio_comp->release();
+}
+
+template <typename I>
+void MetadataCopyRequest<I>::handle_set_dst_metadata(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to set metadata: " << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+
+ if (m_more_metadata) {
+ list_src_metadata();
+ return;
+ }
+
+ finish(0);
+}
+
+template <typename I>
+void MetadataCopyRequest<I>::finish(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+ m_on_finish->complete(r);
+ delete this;
+}
+
+} // namespace deep_copy
+} // namespace librbd
+
+template class librbd::deep_copy::MetadataCopyRequest<librbd::ImageCtx>;
diff --git a/src/librbd/deep_copy/MetadataCopyRequest.h b/src/librbd/deep_copy/MetadataCopyRequest.h
new file mode 100644
index 000000000..8db55db96
--- /dev/null
+++ b/src/librbd/deep_copy/MetadataCopyRequest.h
@@ -0,0 +1,78 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_DEEP_COPY_METADATA_COPY_REQUEST_H
+#define CEPH_LIBRBD_DEEP_COPY_METADATA_COPY_REQUEST_H
+
+#include "include/int_types.h"
+#include "include/buffer.h"
+#include "include/rados/librados.hpp"
+#include "librbd/ImageCtx.h"
+#include <map>
+#include <string>
+
+class Context;
+
+namespace librbd {
+namespace deep_copy {
+
+template <typename ImageCtxT = librbd::ImageCtx>
+class MetadataCopyRequest {
+public:
+ static MetadataCopyRequest* create(ImageCtxT *src_image_ctx,
+ ImageCtxT *dst_image_ctx,
+ Context *on_finish) {
+ return new MetadataCopyRequest(src_image_ctx, dst_image_ctx, on_finish);
+ }
+
+ MetadataCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
+ Context *on_finish);
+
+ void send();
+
+private:
+ /**
+ * @verbatim
+ *
+ * <start>
+ * |
+ * v
+ * LIST_SRC_METADATA <------\
+ * | | (repeat if additional
+ * v | metadata)
+ * SET_DST_METADATA --------/
+ * |
+ * v
+ * <finish>
+ *
+ * @endverbatim
+ */
+ typedef std::map<std::string, bufferlist> Metadata;
+
+ ImageCtxT *m_src_image_ctx;
+ ImageCtxT *m_dst_image_ctx;
+ Context *m_on_finish;
+
+ CephContext *m_cct;
+ bufferlist m_out_bl;
+
+ std::map<std::string, bufferlist> m_metadata;
+ std::string m_last_metadata_key;
+ bool m_more_metadata = false;
+
+ void list_src_metadata();
+ void handle_list_src_metadata(int r);
+
+ void set_dst_metadata();
+ void handle_set_dst_metadata(int r);
+
+ void finish(int r);
+
+};
+
+} // namespace deep_copy
+} // namespace librbd
+
+extern template class librbd::deep_copy::MetadataCopyRequest<librbd::ImageCtx>;
+
+#endif // CEPH_LIBRBD_DEEP_COPY_METADATA_COPY_REQUEST_H
diff --git a/src/librbd/deep_copy/ObjectCopyRequest.cc b/src/librbd/deep_copy/ObjectCopyRequest.cc
new file mode 100644
index 000000000..e8b42b68f
--- /dev/null
+++ b/src/librbd/deep_copy/ObjectCopyRequest.cc
@@ -0,0 +1,839 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "ObjectCopyRequest.h"
+#include "include/neorados/RADOS.hpp"
+#include "common/errno.h"
+#include "librados/snap_set_diff.h"
+#include "librbd/ExclusiveLock.h"
+#include "librbd/ObjectMap.h"
+#include "librbd/Utils.h"
+#include "librbd/asio/ContextWQ.h"
+#include "librbd/deep_copy/Handler.h"
+#include "librbd/io/AioCompletion.h"
+#include "librbd/io/AsyncOperation.h"
+#include "librbd/io/ImageDispatchSpec.h"
+#include "librbd/io/ObjectDispatcherInterface.h"
+#include "librbd/io/ReadResult.h"
+#include "librbd/io/Utils.h"
+#include "osdc/Striper.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::deep_copy::ObjectCopyRequest: " \
+ << this << " " << __func__ << ": "
+
+namespace librbd {
+namespace deep_copy {
+
+using librbd::util::create_async_context_callback;
+using librbd::util::create_context_callback;
+using librbd::util::create_rados_callback;
+using librbd::util::get_image_ctx;
+
+template <typename I>
+ObjectCopyRequest<I>::ObjectCopyRequest(I *src_image_ctx,
+ I *dst_image_ctx,
+ librados::snap_t src_snap_id_start,
+ librados::snap_t dst_snap_id_start,
+ const SnapMap &snap_map,
+ uint64_t dst_object_number,
+ uint32_t flags, Handler* handler,
+ Context *on_finish)
+ : m_src_image_ctx(src_image_ctx),
+ m_dst_image_ctx(dst_image_ctx), m_cct(dst_image_ctx->cct),
+ m_src_snap_id_start(src_snap_id_start),
+ m_dst_snap_id_start(dst_snap_id_start), m_snap_map(snap_map),
+ m_dst_object_number(dst_object_number), m_flags(flags),
+ m_handler(handler), m_on_finish(on_finish) {
+ ceph_assert(src_image_ctx->data_ctx.is_valid());
+ ceph_assert(dst_image_ctx->data_ctx.is_valid());
+ ceph_assert(!m_snap_map.empty());
+
+ m_src_async_op = new io::AsyncOperation();
+ m_src_async_op->start_op(*get_image_ctx(m_src_image_ctx));
+
+ m_src_io_ctx.dup(m_src_image_ctx->data_ctx);
+ m_dst_io_ctx.dup(m_dst_image_ctx->data_ctx);
+
+ m_dst_oid = m_dst_image_ctx->get_object_name(dst_object_number);
+
+ ldout(m_cct, 20) << "dst_oid=" << m_dst_oid << ", "
+ << "src_snap_id_start=" << m_src_snap_id_start << ", "
+ << "dst_snap_id_start=" << m_dst_snap_id_start << ", "
+ << "snap_map=" << m_snap_map << dendl;
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::send() {
+ send_list_snaps();
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::send_list_snaps() {
+ // image extents are consistent across src and dst so compute once
+ std::tie(m_image_extents, m_image_area) = io::util::object_to_area_extents(
+ m_dst_image_ctx, m_dst_object_number,
+ {{0, m_dst_image_ctx->layout.object_size}});
+ ldout(m_cct, 20) << "image_extents=" << m_image_extents
+ << " area=" << m_image_area << dendl;
+
+ auto ctx = create_async_context_callback(
+ *m_src_image_ctx, create_context_callback<
+ ObjectCopyRequest, &ObjectCopyRequest<I>::handle_list_snaps>(this));
+ if ((m_flags & OBJECT_COPY_REQUEST_FLAG_EXISTS_CLEAN) != 0) {
+ // skip listing the snaps if we know the destination exists and is clean,
+ // but we do need to update the object-map
+ ctx->complete(0);
+ return;
+ }
+
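+  // list snaps against the start snapshot plus every mapped source snapshot
+  // newer than it so the delta covers the full copy interval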
+ io::SnapIds snap_ids;
+ snap_ids.reserve(1 + m_snap_map.size());
+ snap_ids.push_back(m_src_snap_id_start);
+ for (auto& [src_snap_id, _] : m_snap_map) {
+ if (m_src_snap_id_start < src_snap_id) {
+ snap_ids.push_back(src_snap_id);
+ }
+ }
+
+ auto list_snaps_flags = io::LIST_SNAPS_FLAG_DISABLE_LIST_FROM_PARENT;
+
+ m_snapshot_delta.clear();
+
+ auto aio_comp = io::AioCompletion::create_and_start(
+ ctx, get_image_ctx(m_src_image_ctx), io::AIO_TYPE_GENERIC);
+ auto req = io::ImageDispatchSpec::create_list_snaps(
+ *m_src_image_ctx, io::IMAGE_DISPATCH_LAYER_NONE, aio_comp,
+ io::Extents{m_image_extents}, m_image_area, std::move(snap_ids),
+ list_snaps_flags, &m_snapshot_delta, {});
+ req->send();
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::handle_list_snaps(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to list snaps: " << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+
+ ldout(m_cct, 20) << "snapshot_delta=" << m_snapshot_delta << dendl;
+
+ compute_dst_object_may_exist();
+ compute_read_ops();
+
+ send_read();
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::send_read() {
+ if (m_read_snaps.empty()) {
+ // all snapshots have been read
+ merge_write_ops();
+ compute_zero_ops();
+
+ send_update_object_map();
+ return;
+ }
+
+ auto index = *m_read_snaps.begin();
+ auto& read_op = m_read_ops[index];
+ if (read_op.image_interval.empty()) {
+ // nothing written to this object for this snapshot (must be trunc/remove)
+ handle_read(0);
+ return;
+ }
+
+ auto io_context = m_src_image_ctx->duplicate_data_io_context();
+ io_context->read_snap(index.second);
+
+ io::Extents image_extents{read_op.image_interval.begin(),
+ read_op.image_interval.end()};
+ io::ReadResult read_result{&read_op.image_extent_map,
+ &read_op.out_bl};
+
+ ldout(m_cct, 20) << "read: src_snap_seq=" << index.second << ", "
+ << "image_extents=" << image_extents << dendl;
+
+ int op_flags = (LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL |
+ LIBRADOS_OP_FLAG_FADVISE_NOCACHE);
+
+ int read_flags = 0;
+ if (index.second != m_src_image_ctx->snap_id) {
+ read_flags |= io::READ_FLAG_DISABLE_CLIPPING;
+ }
+
+ auto ctx = create_context_callback<
+ ObjectCopyRequest<I>, &ObjectCopyRequest<I>::handle_read>(this);
+ auto aio_comp = io::AioCompletion::create_and_start(
+ ctx, get_image_ctx(m_src_image_ctx), io::AIO_TYPE_READ);
+
+ auto req = io::ImageDispatchSpec::create_read(
+ *m_src_image_ctx, io::IMAGE_DISPATCH_LAYER_INTERNAL_START, aio_comp,
+ std::move(image_extents), m_image_area, std::move(read_result),
+ io_context, op_flags, read_flags, {});
+ req->send();
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::handle_read(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to read from source object: " << cpp_strerror(r)
+ << dendl;
+ finish(r);
+ return;
+ }
+
+ if (m_handler != nullptr) {
+ auto index = *m_read_snaps.begin();
+ auto& read_op = m_read_ops[index];
+ m_handler->handle_read(read_op.out_bl.length());
+ }
+
+ ceph_assert(!m_read_snaps.empty());
+ m_read_snaps.erase(m_read_snaps.begin());
+
+ send_read();
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::send_update_object_map() {
+ if (!m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP) ||
+ m_dst_object_state.empty()) {
+ process_copyup();
+ return;
+ }
+
+ m_dst_image_ctx->owner_lock.lock_shared();
+ m_dst_image_ctx->image_lock.lock_shared();
+ if (m_dst_image_ctx->object_map == nullptr) {
+ // possible that exclusive lock was lost in background
+ lderr(m_cct) << "object map is not initialized" << dendl;
+
+ m_dst_image_ctx->image_lock.unlock_shared();
+ m_dst_image_ctx->owner_lock.unlock_shared();
+ finish(-EINVAL);
+ return;
+ }
+
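+  // update the object map one snapshot at a time, mapping each source
+  // snapshot id to its first destination snapshot id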
+ auto &dst_object_state = *m_dst_object_state.begin();
+ auto it = m_snap_map.find(dst_object_state.first);
+ ceph_assert(it != m_snap_map.end());
+ auto dst_snap_id = it->second.front();
+ auto object_state = dst_object_state.second;
+ m_dst_object_state.erase(m_dst_object_state.begin());
+
+ ldout(m_cct, 20) << "dst_snap_id=" << dst_snap_id << ", object_state="
+ << static_cast<uint32_t>(object_state) << dendl;
+
+ int r;
+ auto finish_op_ctx = start_lock_op(m_dst_image_ctx->owner_lock, &r);
+ if (finish_op_ctx == nullptr) {
+ lderr(m_cct) << "lost exclusive lock" << dendl;
+ m_dst_image_ctx->image_lock.unlock_shared();
+ m_dst_image_ctx->owner_lock.unlock_shared();
+ finish(r);
+ return;
+ }
+
+ auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
+ handle_update_object_map(r);
+ finish_op_ctx->complete(0);
+ });
+
+ auto dst_image_ctx = m_dst_image_ctx;
+ bool sent = dst_image_ctx->object_map->template aio_update<
+ Context, &Context::complete>(dst_snap_id, m_dst_object_number, object_state,
+ {}, {}, false, ctx);
+
+ // NOTE: state machine might complete before we reach here
+ dst_image_ctx->image_lock.unlock_shared();
+ dst_image_ctx->owner_lock.unlock_shared();
+ if (!sent) {
+ ceph_assert(dst_snap_id == CEPH_NOSNAP);
+ ctx->complete(0);
+ }
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::handle_update_object_map(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to update object map: " << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+
+ if (!m_dst_object_state.empty()) {
+ send_update_object_map();
+ return;
+ }
+
+ process_copyup();
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::process_copyup() {
+ if (m_snapshot_sparse_bufferlist.empty()) {
+ // no data to copy or truncate/zero. only the copyup state machine cares
+ // about whether the object exists or not, and it always copies from
+ // snap id 0.
+ finish(m_src_snap_id_start > 0 ? 0 : -ENOENT);
+ return;
+ }
+
+ ldout(m_cct, 20) << dendl;
+
+ // let dispatch layers have a chance to process the data but
+ // assume that the dispatch layer will only touch the sparse bufferlist
+ auto r = m_dst_image_ctx->io_object_dispatcher->prepare_copyup(
+ m_dst_object_number, &m_snapshot_sparse_bufferlist);
+ if (r < 0) {
+ lderr(m_cct) << "failed to prepare copyup data: " << cpp_strerror(r)
+ << dendl;
+ finish(r);
+ return;
+ }
+
+ send_write_object();
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::send_write_object() {
+ ceph_assert(!m_snapshot_sparse_bufferlist.empty());
+ auto& sparse_bufferlist = m_snapshot_sparse_bufferlist.begin()->second;
+
+ m_src_image_ctx->image_lock.lock_shared();
+ bool hide_parent = (m_src_snap_id_start == 0 &&
+ m_src_image_ctx->parent != nullptr);
+ m_src_image_ctx->image_lock.unlock_shared();
+
+ // retrieve the destination snap context for the op
+ SnapIds dst_snap_ids;
+ librados::snap_t dst_snap_seq = 0;
+ librados::snap_t src_snap_seq = m_snapshot_sparse_bufferlist.begin()->first;
+ if (src_snap_seq != 0) {
+ auto snap_map_it = m_snap_map.find(src_snap_seq);
+ ceph_assert(snap_map_it != m_snap_map.end());
+
+ auto dst_snap_id = snap_map_it->second.front();
+ auto dst_may_exist_it = m_dst_object_may_exist.find(dst_snap_id);
+ ceph_assert(dst_may_exist_it != m_dst_object_may_exist.end());
+ if (!dst_may_exist_it->second && !sparse_bufferlist.empty()) {
+ // if the object cannot exist, the only valid op is to remove it
+ ldout(m_cct, 20) << "object DNE: src_snap_seq=" << src_snap_seq << dendl;
+ ceph_assert(sparse_bufferlist.ext_count() == 1U);
+ ceph_assert(sparse_bufferlist.begin().get_val().state ==
+ io::SPARSE_EXTENT_STATE_ZEROED &&
+ sparse_bufferlist.begin().get_off() == 0 &&
+ sparse_bufferlist.begin().get_len() ==
+ m_dst_image_ctx->layout.object_size);
+ }
+
+ // write snapshot context should be before actual snapshot
+ ceph_assert(!snap_map_it->second.empty());
+ auto dst_snap_ids_it = snap_map_it->second.begin();
+ ++dst_snap_ids_it;
+
+ dst_snap_ids = SnapIds{dst_snap_ids_it, snap_map_it->second.end()};
+ if (!dst_snap_ids.empty()) {
+ dst_snap_seq = dst_snap_ids.front();
+ }
+ ceph_assert(dst_snap_seq != CEPH_NOSNAP);
+ }
+
+ ldout(m_cct, 20) << "src_snap_seq=" << src_snap_seq << ", "
+ << "dst_snap_seq=" << dst_snap_seq << ", "
+ << "dst_snaps=" << dst_snap_ids << dendl;
+
+ librados::ObjectWriteOperation op;
+
+ bool migration = ((m_flags & OBJECT_COPY_REQUEST_FLAG_MIGRATION) != 0);
+ if (migration) {
+ ldout(m_cct, 20) << "assert_snapc_seq=" << dst_snap_seq << dendl;
+ cls_client::assert_snapc_seq(&op, dst_snap_seq,
+ cls::rbd::ASSERT_SNAPC_SEQ_GT_SNAPSET_SEQ);
+ }
+
+ for (auto& sbe : sparse_bufferlist) {
+ switch (sbe.get_val().state) {
+ case io::SPARSE_EXTENT_STATE_DATA:
+ ldout(m_cct, 20) << "write op: " << sbe.get_off() << "~"
+ << sbe.get_len() << dendl;
+ op.write(sbe.get_off(), std::move(sbe.get_val().bl));
+ op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL |
+ LIBRADOS_OP_FLAG_FADVISE_NOCACHE);
+ break;
+ case io::SPARSE_EXTENT_STATE_ZEROED:
+ if (sbe.get_off() + sbe.get_len() ==
+ m_dst_image_ctx->layout.object_size) {
+ if (sbe.get_off() == 0) {
+ if (hide_parent) {
+ ldout(m_cct, 20) << "create+truncate op" << dendl;
+ op.create(false);
+ op.truncate(0);
+ } else {
+ ldout(m_cct, 20) << "remove op" << dendl;
+ op.remove();
+ }
+ } else {
+ ldout(m_cct, 20) << "trunc op: " << sbe.get_off() << dendl;
+ op.truncate(sbe.get_off());
+ }
+ } else {
+ ldout(m_cct, 20) << "zero op: " << sbe.get_off() << "~"
+ << sbe.get_len() << dendl;
+ op.zero(sbe.get_off(), sbe.get_len());
+ }
+ break;
+ default:
+ ceph_abort();
+ }
+ }
+
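+  // if nothing was queued beyond the optional migration snapc assertion,
+  // there is no write to issue for this snapshot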
+ if (op.size() == (migration ? 1 : 0)) {
+ handle_write_object(0);
+ return;
+ }
+
+ int r;
+ Context *finish_op_ctx;
+ {
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
+ finish_op_ctx = start_lock_op(m_dst_image_ctx->owner_lock, &r);
+ }
+ if (finish_op_ctx == nullptr) {
+ lderr(m_cct) << "lost exclusive lock" << dendl;
+ finish(r);
+ return;
+ }
+
+ auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
+ handle_write_object(r);
+ finish_op_ctx->complete(0);
+ });
+ librados::AioCompletion *comp = create_rados_callback(ctx);
+ r = m_dst_io_ctx.aio_operate(m_dst_oid, comp, &op, dst_snap_seq, dst_snap_ids,
+ nullptr);
+ ceph_assert(r == 0);
+ comp->release();
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::handle_write_object(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
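+  // -ENOENT (e.g. removing an already absent object) is not fatal, and
+  // -ERANGE means the snapc assertion detected a concurrent deep copy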
+ if (r == -ENOENT) {
+ r = 0;
+ } else if (r == -ERANGE) {
+ ldout(m_cct, 10) << "concurrent deep copy" << dendl;
+ r = 0;
+ }
+ if (r < 0) {
+ lderr(m_cct) << "failed to write to destination object: " << cpp_strerror(r)
+ << dendl;
+ finish(r);
+ return;
+ }
+
+ m_snapshot_sparse_bufferlist.erase(m_snapshot_sparse_bufferlist.begin());
+ if (!m_snapshot_sparse_bufferlist.empty()) {
+ send_write_object();
+ return;
+ }
+
+ finish(0);
+}
+
+template <typename I>
+Context *ObjectCopyRequest<I>::start_lock_op(ceph::shared_mutex &owner_lock,
+ int* r) {
+ ceph_assert(ceph_mutex_is_locked(m_dst_image_ctx->owner_lock));
+ if (m_dst_image_ctx->exclusive_lock == nullptr) {
+ return new LambdaContext([](int r) {});
+ }
+ return m_dst_image_ctx->exclusive_lock->start_op(r);
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::compute_read_ops() {
+ ldout(m_cct, 20) << dendl;
+
+ m_src_image_ctx->image_lock.lock_shared();
+ bool read_from_parent = (m_src_snap_id_start == 0 &&
+ m_src_image_ctx->parent != nullptr);
+ m_src_image_ctx->image_lock.unlock_shared();
+
+ bool only_dne_extents = true;
+ interval_set<uint64_t> dne_image_interval;
+
+ // compute read ops for any data sections or for any extents that we need to
+ // read from our parent
+ for (auto& [key, image_intervals] : m_snapshot_delta) {
+ io::WriteReadSnapIds write_read_snap_ids{key};
+
+ // advance the src write snap id to the first valid snap id
+ if (write_read_snap_ids.first > m_src_snap_id_start) {
+ // don't attempt to read from snapshots that shouldn't exist in
+ // case the OSD fails to give a correct snap list
+ auto snap_map_it = m_snap_map.find(write_read_snap_ids.first);
+ ceph_assert(snap_map_it != m_snap_map.end());
+ auto dst_snap_seq = snap_map_it->second.front();
+
+ auto dst_may_exist_it = m_dst_object_may_exist.find(dst_snap_seq);
+ ceph_assert(dst_may_exist_it != m_dst_object_may_exist.end());
+ if (!dst_may_exist_it->second) {
+ ldout(m_cct, 20) << "DNE snapshot: " << write_read_snap_ids.first
+ << dendl;
+ continue;
+ }
+ }
+
+ for (auto& image_interval : image_intervals) {
+ auto state = image_interval.get_val().state;
+ switch (state) {
+ case io::SPARSE_EXTENT_STATE_DNE:
+ if (write_read_snap_ids == io::INITIAL_WRITE_READ_SNAP_IDS &&
+ read_from_parent) {
+ // special-case for DNE initial object-extents since when flattening
+          // we need to read data from the parent image's extents
+ ldout(m_cct, 20) << "DNE extent: "
+ << image_interval.get_off() << "~"
+ << image_interval.get_len() << dendl;
+ dne_image_interval.insert(
+ image_interval.get_off(), image_interval.get_len());
+ }
+ break;
+ case io::SPARSE_EXTENT_STATE_ZEROED:
+ only_dne_extents = false;
+ break;
+ case io::SPARSE_EXTENT_STATE_DATA:
+ ldout(m_cct, 20) << "read op: "
+ << "snap_ids=" << write_read_snap_ids << " "
+ << image_interval.get_off() << "~"
+ << image_interval.get_len() << dendl;
+ m_read_ops[write_read_snap_ids].image_interval.union_insert(
+ image_interval.get_off(), image_interval.get_len());
+ only_dne_extents = false;
+ break;
+ default:
+ ceph_abort();
+ break;
+ }
+ }
+ }
+
+ bool flatten = ((m_flags & OBJECT_COPY_REQUEST_FLAG_FLATTEN) != 0);
+ if (!dne_image_interval.empty() && (!only_dne_extents || flatten)) {
+ auto snap_map_it = m_snap_map.begin();
+ ceph_assert(snap_map_it != m_snap_map.end());
+
+ auto src_snap_seq = snap_map_it->first;
+ WriteReadSnapIds write_read_snap_ids{src_snap_seq, src_snap_seq};
+
+ // prepare to prune the extents to the maximum parent overlap
+ std::shared_lock image_locker(m_src_image_ctx->image_lock);
+ uint64_t raw_overlap = 0;
+ int r = m_src_image_ctx->get_parent_overlap(src_snap_seq, &raw_overlap);
+ if (r < 0) {
+ ldout(m_cct, 5) << "failed getting parent overlap for snap_id: "
+ << src_snap_seq << ": " << cpp_strerror(r) << dendl;
+ } else if (raw_overlap > 0) {
+ ldout(m_cct, 20) << "raw_overlap=" << raw_overlap << dendl;
+ io::Extents parent_extents;
+ for (auto [image_offset, image_length] : dne_image_interval) {
+ parent_extents.emplace_back(image_offset, image_length);
+ }
+ m_src_image_ctx->prune_parent_extents(parent_extents, m_image_area,
+ raw_overlap, false);
+ for (auto [image_offset, image_length] : parent_extents) {
+ ldout(m_cct, 20) << "parent read op: "
+ << "snap_ids=" << write_read_snap_ids << " "
+ << image_offset << "~" << image_length << dendl;
+ m_read_ops[write_read_snap_ids].image_interval.union_insert(
+ image_offset, image_length);
+ }
+ }
+ }
+
+ for (auto& [write_read_snap_ids, _] : m_read_ops) {
+ m_read_snaps.push_back(write_read_snap_ids);
+ }
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::merge_write_ops() {
+ ldout(m_cct, 20) << dendl;
+
+ for (auto& [write_read_snap_ids, read_op] : m_read_ops) {
+ auto src_snap_seq = write_read_snap_ids.first;
+
+    // convert the resulting sparse image extent map to an interval ...
+ auto& image_data_interval = m_dst_data_interval[src_snap_seq];
+ for (auto [image_offset, image_length] : read_op.image_extent_map) {
+ image_data_interval.union_insert(image_offset, image_length);
+ }
+
+ // ... and compute the difference between it and the image extents since
+ // that indicates zeroed extents
+ interval_set<uint64_t> intersection;
+ intersection.intersection_of(read_op.image_interval, image_data_interval);
+ read_op.image_interval.subtract(intersection);
+
+ for (auto& [image_offset, image_length] : read_op.image_interval) {
+ ldout(m_cct, 20) << "src_snap_seq=" << src_snap_seq << ", "
+ << "inserting sparse-read zero " << image_offset << "~"
+ << image_length << dendl;
+ m_dst_zero_interval[src_snap_seq].union_insert(
+ image_offset, image_length);
+ }
+
+ uint64_t buffer_offset = 0;
+ for (auto [image_offset, image_length] : read_op.image_extent_map) {
+ // convert image extents back to object extents for the write op
+ striper::LightweightObjectExtents object_extents;
+ io::util::area_to_object_extents(m_dst_image_ctx, image_offset,
+ image_length, m_image_area,
+ buffer_offset, &object_extents);
+ for (auto& object_extent : object_extents) {
+ ldout(m_cct, 20) << "src_snap_seq=" << src_snap_seq << ", "
+ << "object_offset=" << object_extent.offset << ", "
+ << "object_length=" << object_extent.length << dendl;
+
+ bufferlist sub_bl;
+ sub_bl.substr_of(read_op.out_bl, buffer_offset, object_extent.length);
+
+ m_snapshot_sparse_bufferlist[src_snap_seq].insert(
+ object_extent.offset, object_extent.length,
+            {io::SPARSE_EXTENT_STATE_DATA, object_extent.length,
+ std::move(sub_bl)});
+
+ buffer_offset += object_extent.length;
+ }
+ }
+ }
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::compute_zero_ops() {
+ ldout(m_cct, 20) << dendl;
+
+ m_src_image_ctx->image_lock.lock_shared();
+ bool hide_parent = (m_src_snap_id_start == 0 &&
+ m_src_image_ctx->parent != nullptr);
+ m_src_image_ctx->image_lock.unlock_shared();
+
+ // ensure we have a zeroed interval for each snapshot
+ for (auto& [src_snap_seq, _] : m_snap_map) {
+ if (m_src_snap_id_start < src_snap_seq) {
+ m_dst_zero_interval[src_snap_seq];
+ }
+ }
+
+ // exists if copying from an arbitrary snapshot w/o any deltas in the
+ // start snapshot slot (i.e. DNE)
+ bool object_exists = (
+ m_src_snap_id_start > 0 &&
+ m_snapshot_delta.count({m_src_snap_id_start, m_src_snap_id_start}) == 0);
+ bool fast_diff = m_dst_image_ctx->test_features(RBD_FEATURE_FAST_DIFF);
+ uint64_t prev_end_size = 0;
+
+ // compute zero ops from the zeroed intervals
+ for (auto &it : m_dst_zero_interval) {
+ auto src_snap_seq = it.first;
+ auto &zero_interval = it.second;
+
+ auto snap_map_it = m_snap_map.find(src_snap_seq);
+ ceph_assert(snap_map_it != m_snap_map.end());
+ auto dst_snap_seq = snap_map_it->second.front();
+
+ auto dst_may_exist_it = m_dst_object_may_exist.find(dst_snap_seq);
+ ceph_assert(dst_may_exist_it != m_dst_object_may_exist.end());
+ if (!dst_may_exist_it->second && object_exists) {
+ ldout(m_cct, 5) << "object DNE for snap_id: " << dst_snap_seq << dendl;
+ m_snapshot_sparse_bufferlist[src_snap_seq].insert(
+ 0, m_dst_image_ctx->layout.object_size,
+ {io::SPARSE_EXTENT_STATE_ZEROED, m_dst_image_ctx->layout.object_size});
+ object_exists = false;
+ prev_end_size = 0;
+ continue;
+ }
+
+ if (hide_parent) {
+ std::shared_lock image_locker{m_dst_image_ctx->image_lock};
+ uint64_t raw_overlap = 0;
+ uint64_t object_overlap = 0;
+ int r = m_dst_image_ctx->get_parent_overlap(dst_snap_seq, &raw_overlap);
+ if (r < 0) {
+ ldout(m_cct, 5) << "failed getting parent overlap for snap_id: "
+ << dst_snap_seq << ": " << cpp_strerror(r) << dendl;
+ } else if (raw_overlap > 0) {
+ auto parent_extents = m_image_extents;
+ object_overlap = m_dst_image_ctx->prune_parent_extents(
+ parent_extents, m_image_area, raw_overlap, false);
+ }
+ if (object_overlap == 0) {
+ ldout(m_cct, 20) << "no parent overlap" << dendl;
+ hide_parent = false;
+ }
+ }
+
+ // collect known zeroed extents from the snapshot delta for the current
+ // src snapshot. If this is the first snapshot, we might need to handle
+ // the whiteout case if it overlaps with the parent
+ auto first_src_snap_id = m_snap_map.begin()->first;
+ auto snapshot_delta_it = m_snapshot_delta.lower_bound(
+ {(hide_parent && src_snap_seq == first_src_snap_id ?
+ 0 : src_snap_seq), 0});
+ for (; snapshot_delta_it != m_snapshot_delta.end() &&
+ snapshot_delta_it->first.first <= src_snap_seq;
+ ++snapshot_delta_it) {
+ auto& write_read_snap_ids = snapshot_delta_it->first;
+ auto& image_intervals = snapshot_delta_it->second;
+ for (auto& image_interval : image_intervals) {
+ auto state = image_interval.get_val().state;
+ switch (state) {
+ case io::SPARSE_EXTENT_STATE_ZEROED:
+ if (write_read_snap_ids != io::INITIAL_WRITE_READ_SNAP_IDS) {
+ ldout(m_cct, 20) << "zeroed extent: "
+ << "src_snap_seq=" << src_snap_seq << " "
+ << image_interval.get_off() << "~"
+ << image_interval.get_len() << dendl;
+ zero_interval.union_insert(
+ image_interval.get_off(), image_interval.get_len());
+ } else if (hide_parent &&
+ write_read_snap_ids == io::INITIAL_WRITE_READ_SNAP_IDS) {
+ ldout(m_cct, 20) << "zeroed (hide parent) extent: "
+ << "src_snap_seq=" << src_snap_seq << " "
+ << image_interval.get_off() << "~"
+ << image_interval.get_len() << dendl;
+ zero_interval.union_insert(
+ image_interval.get_off(), image_interval.get_len());
+ }
+ break;
+ case io::SPARSE_EXTENT_STATE_DNE:
+ case io::SPARSE_EXTENT_STATE_DATA:
+ break;
+ default:
+ ceph_abort();
+ break;
+ }
+ }
+ }
+
+ // subtract any data intervals from our zero intervals
+ auto& data_interval = m_dst_data_interval[src_snap_seq];
+ interval_set<uint64_t> intersection;
+ intersection.intersection_of(zero_interval, data_interval);
+ zero_interval.subtract(intersection);
+
+ // update end_size if there are writes into higher offsets
+ uint64_t end_size = prev_end_size;
+ auto iter = m_snapshot_sparse_bufferlist.find(src_snap_seq);
+ if (iter != m_snapshot_sparse_bufferlist.end()) {
+ for (auto &sparse_bufferlist : iter->second) {
+ object_exists = true;
+ end_size = std::max(
+ end_size, sparse_bufferlist.get_off() + sparse_bufferlist.get_len());
+ }
+ }
+
+ ldout(m_cct, 20) << "src_snap_seq=" << src_snap_seq << ", "
+ << "dst_snap_seq=" << dst_snap_seq << ", "
+ << "zero_interval=" << zero_interval << ", "
+ << "end_size=" << end_size << dendl;
+ for (auto z = zero_interval.begin(); z != zero_interval.end(); ++z) {
+ // convert image extents back to object extents for the write op
+ striper::LightweightObjectExtents object_extents;
+ io::util::area_to_object_extents(m_dst_image_ctx, z.get_start(),
+ z.get_len(), m_image_area, 0,
+ &object_extents);
+ for (auto& object_extent : object_extents) {
+ ceph_assert(object_extent.offset + object_extent.length <=
+ m_dst_image_ctx->layout.object_size);
+
+ if (object_extent.offset + object_extent.length >= end_size) {
+ // zero interval at the object end
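+          // truncate only when the object must shrink (or a whiteout is
+          // needed to hide the parent); otherwise it already ends here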
+ if ((object_extent.offset == 0 && hide_parent) ||
+ (object_extent.offset < prev_end_size)) {
+ ldout(m_cct, 20) << "truncate " << object_extent.offset
+ << dendl;
+ auto length =
+ m_dst_image_ctx->layout.object_size - object_extent.offset;
+ m_snapshot_sparse_bufferlist[src_snap_seq].insert(
+ object_extent.offset, length,
+ {io::SPARSE_EXTENT_STATE_ZEROED, length});
+ }
+
+ object_exists = (object_extent.offset > 0 || hide_parent);
+ end_size = std::min(end_size, object_extent.offset);
+ } else {
+ // zero interval inside the object
+ ldout(m_cct, 20) << "zero "
+ << object_extent.offset << "~"
+ << object_extent.length << dendl;
+ m_snapshot_sparse_bufferlist[src_snap_seq].insert(
+ object_extent.offset, object_extent.length,
+ {io::SPARSE_EXTENT_STATE_ZEROED, object_extent.length});
+ object_exists = true;
+ }
+ }
+ }
+
+ uint8_t dst_object_map_state = OBJECT_NONEXISTENT;
+ if (object_exists) {
+ dst_object_map_state = OBJECT_EXISTS;
+ if (fast_diff && m_snapshot_sparse_bufferlist.count(src_snap_seq) == 0) {
+ dst_object_map_state = OBJECT_EXISTS_CLEAN;
+ }
+ m_dst_object_state[src_snap_seq] = dst_object_map_state;
+ }
+
+ ldout(m_cct, 20) << "dst_snap_seq=" << dst_snap_seq << ", "
+ << "end_size=" << end_size << ", "
+ << "dst_object_map_state="
+ << static_cast<uint32_t>(dst_object_map_state) << dendl;
+ prev_end_size = end_size;
+ }
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::finish(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ // ensure IoCtxs are closed prior to proceeding
+ auto on_finish = m_on_finish;
+
+ m_src_async_op->finish_op();
+ delete m_src_async_op;
+ delete this;
+
+ on_finish->complete(r);
+}
+
+template <typename I>
+void ObjectCopyRequest<I>::compute_dst_object_may_exist() {
+ std::shared_lock image_locker{m_dst_image_ctx->image_lock};
+
+ auto snap_ids = m_dst_image_ctx->snaps;
+ snap_ids.push_back(CEPH_NOSNAP);
+
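+  // an object can only exist at a given snapshot if its object number is
+  // within the object count implied by the image size at that snapshot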
+ for (auto snap_id : snap_ids) {
+ m_dst_object_may_exist[snap_id] =
+ (m_dst_object_number < m_dst_image_ctx->get_object_count(snap_id));
+ }
+
+ ldout(m_cct, 20) << "dst_object_may_exist=" << m_dst_object_may_exist
+ << dendl;
+}
+
+} // namespace deep_copy
+} // namespace librbd
+
+template class librbd::deep_copy::ObjectCopyRequest<librbd::ImageCtx>;
diff --git a/src/librbd/deep_copy/ObjectCopyRequest.h b/src/librbd/deep_copy/ObjectCopyRequest.h
new file mode 100644
index 000000000..fc2f58cd3
--- /dev/null
+++ b/src/librbd/deep_copy/ObjectCopyRequest.h
@@ -0,0 +1,163 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_DEEP_COPY_OBJECT_COPY_REQUEST_H
+#define CEPH_LIBRBD_DEEP_COPY_OBJECT_COPY_REQUEST_H
+
+#include "include/int_types.h"
+#include "include/interval_set.h"
+#include "include/rados/librados.hpp"
+#include "common/snap_types.h"
+#include "librbd/ImageCtx.h"
+#include "librbd/deep_copy/Types.h"
+#include "librbd/io/Types.h"
+#include <list>
+#include <map>
+#include <string>
+
+class Context;
+class RWLock;
+
+namespace librbd {
+
+namespace io { class AsyncOperation; }
+
+namespace deep_copy {
+
+struct Handler;
+
+template <typename ImageCtxT = librbd::ImageCtx>
+class ObjectCopyRequest {
+public:
+ static ObjectCopyRequest* create(ImageCtxT *src_image_ctx,
+ ImageCtxT *dst_image_ctx,
+ librados::snap_t src_snap_id_start,
+ librados::snap_t dst_snap_id_start,
+ const SnapMap &snap_map,
+ uint64_t object_number, uint32_t flags,
+ Handler* handler, Context *on_finish) {
+ return new ObjectCopyRequest(src_image_ctx, dst_image_ctx,
+ src_snap_id_start, dst_snap_id_start, snap_map,
+ object_number, flags, handler, on_finish);
+ }
+
+ ObjectCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
+ librados::snap_t src_snap_id_start,
+ librados::snap_t dst_snap_id_start, const SnapMap &snap_map,
+ uint64_t object_number, uint32_t flags, Handler* handler,
+ Context *on_finish);
+
+ void send();
+
+ // testing support
+ inline librados::IoCtx &get_src_io_ctx() {
+ return m_src_io_ctx;
+ }
+ inline librados::IoCtx &get_dst_io_ctx() {
+ return m_dst_io_ctx;
+ }
+
+private:
+ /**
+ * @verbatim
+ *
+ * <start>
+ * |
+ * v
+ * LIST_SNAPS
+ * |
+ * |/---------\
+ * | | (repeat for each snapshot)
+ * v |
+ * READ ---------/
+ * |
+ * | /-----------\
+ * | | | (repeat for each snapshot)
+ * v v |
+ * UPDATE_OBJECT_MAP ---/ (skip if object
+ * | map disabled)
+ * | /-----------\
+ * | | | (repeat for each snapshot)
+ * v v |
+ * WRITE_OBJECT --------/
+ * |
+ * v
+ * <finish>
+ *
+ * @endverbatim
+ */
+
+ enum WriteOpType {
+ WRITE_OP_TYPE_WRITE,
+ WRITE_OP_TYPE_ZERO
+ };
+
+ struct ReadOp {
+ interval_set<uint64_t> image_interval;
+ io::Extents image_extent_map;
+ bufferlist out_bl;
+ };
+
+ typedef std::pair<librados::snap_t, librados::snap_t> WriteReadSnapIds;
+
+ ImageCtxT *m_src_image_ctx;
+ ImageCtxT *m_dst_image_ctx;
+ CephContext *m_cct;
+ librados::snap_t m_src_snap_id_start;
+ librados::snap_t m_dst_snap_id_start;
+ SnapMap m_snap_map;
+ uint64_t m_dst_object_number;
+ uint32_t m_flags;
+ Handler* m_handler;
+ Context *m_on_finish;
+
+ decltype(m_src_image_ctx->data_ctx) m_src_io_ctx;
+ decltype(m_dst_image_ctx->data_ctx) m_dst_io_ctx;
+ std::string m_dst_oid;
+
+ io::Extents m_image_extents;
+ io::ImageArea m_image_area = io::ImageArea::DATA;
+
+ io::SnapshotDelta m_snapshot_delta;
+
+ std::map<WriteReadSnapIds, ReadOp> m_read_ops;
+ std::list<WriteReadSnapIds> m_read_snaps;
+ io::SnapshotSparseBufferlist m_snapshot_sparse_bufferlist;
+
+ std::map<librados::snap_t, interval_set<uint64_t>> m_dst_data_interval;
+ std::map<librados::snap_t, interval_set<uint64_t>> m_dst_zero_interval;
+ std::map<librados::snap_t, uint8_t> m_dst_object_state;
+ std::map<librados::snap_t, bool> m_dst_object_may_exist;
+
+ io::AsyncOperation* m_src_async_op = nullptr;
+
+ void send_list_snaps();
+ void handle_list_snaps(int r);
+
+ void send_read();
+ void handle_read(int r);
+
+ void send_update_object_map();
+ void handle_update_object_map(int r);
+
+ void process_copyup();
+ void send_write_object();
+ void handle_write_object(int r);
+
+ Context *start_lock_op(ceph::shared_mutex &owner_lock, int* r);
+
+ void compute_read_ops();
+ void merge_write_ops();
+ void compute_zero_ops();
+
+ void compute_dst_object_may_exist();
+
+ void finish(int r);
+};
+
+} // namespace deep_copy
+} // namespace librbd
+
+extern template class librbd::deep_copy::ObjectCopyRequest<librbd::ImageCtx>;
+
+#endif // CEPH_LIBRBD_DEEP_COPY_OBJECT_COPY_REQUEST_H
diff --git a/src/librbd/deep_copy/SetHeadRequest.cc b/src/librbd/deep_copy/SetHeadRequest.cc
new file mode 100644
index 000000000..1e056b958
--- /dev/null
+++ b/src/librbd/deep_copy/SetHeadRequest.cc
@@ -0,0 +1,223 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "SetHeadRequest.h"
+#include "common/errno.h"
+#include "cls/rbd/cls_rbd_client.h"
+#include "cls/rbd/cls_rbd_types.h"
+#include "librbd/ExclusiveLock.h"
+#include "librbd/Utils.h"
+#include "librbd/image/AttachParentRequest.h"
+#include "librbd/image/DetachParentRequest.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::deep_copy::SetHeadRequest: " \
+ << this << " " << __func__ << ": "
+
+namespace librbd {
+namespace deep_copy {
+
+using librbd::util::create_context_callback;
+using librbd::util::create_rados_callback;
+
+template <typename I>
+SetHeadRequest<I>::SetHeadRequest(I *image_ctx, uint64_t size,
+ const cls::rbd::ParentImageSpec &spec,
+ uint64_t parent_overlap,
+ Context *on_finish)
+ : m_image_ctx(image_ctx), m_size(size), m_parent_spec(spec),
+ m_parent_overlap(parent_overlap), m_on_finish(on_finish),
+ m_cct(image_ctx->cct) {
+ ceph_assert(m_parent_overlap <= m_size);
+}
+
+template <typename I>
+void SetHeadRequest<I>::send() {
+ send_set_size();
+}
+
+template <typename I>
+void SetHeadRequest<I>::send_set_size() {
+ m_image_ctx->image_lock.lock_shared();
+ if (m_image_ctx->size == m_size) {
+ m_image_ctx->image_lock.unlock_shared();
+ send_detach_parent();
+ return;
+ }
+ m_image_ctx->image_lock.unlock_shared();
+
+ ldout(m_cct, 20) << dendl;
+
+ // Change the image size on disk so that the snapshot picks up
+ // the expected size. We can do this because the last snapshot
+ // we process is the sync snapshot which was created to match the
+ // image size. We also don't need to worry about trimming because
+ // we track the highest possible object number within the sync record
+ librados::ObjectWriteOperation op;
+ librbd::cls_client::set_size(&op, m_size);
+
+ int r;
+ auto finish_op_ctx = start_lock_op(&r);
+ if (finish_op_ctx == nullptr) {
+ lderr(m_cct) << "lost exclusive lock" << dendl;
+ finish(r);
+ return;
+ }
+
+ auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
+ handle_set_size(r);
+ finish_op_ctx->complete(0);
+ });
+ librados::AioCompletion *comp = create_rados_callback(ctx);
+ r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op);
+ ceph_assert(r == 0);
+ comp->release();
+}
+
+template <typename I>
+void SetHeadRequest<I>::handle_set_size(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to update image size: " << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+
+ {
+ // adjust in-memory image size now that it's updated on disk
+ std::unique_lock image_locker{m_image_ctx->image_lock};
+ if (m_image_ctx->size > m_size) {
+ if (m_image_ctx->parent_md.spec.pool_id != -1 &&
+ m_image_ctx->parent_md.overlap > m_size) {
+ m_image_ctx->parent_md.overlap = m_size;
+ }
+ }
+ m_image_ctx->size = m_size;
+ }
+
+ send_detach_parent();
+}
+
+template <typename I>
+void SetHeadRequest<I>::send_detach_parent() {
+ m_image_ctx->image_lock.lock_shared();
+ if (m_image_ctx->parent_md.spec.pool_id == -1 ||
+ (m_image_ctx->parent_md.spec == m_parent_spec &&
+ m_image_ctx->parent_md.overlap == m_parent_overlap)) {
+ m_image_ctx->image_lock.unlock_shared();
+ send_attach_parent();
+ return;
+ }
+ m_image_ctx->image_lock.unlock_shared();
+
+ ldout(m_cct, 20) << dendl;
+
+ int r;
+ auto finish_op_ctx = start_lock_op(&r);
+ if (finish_op_ctx == nullptr) {
+ lderr(m_cct) << "lost exclusive lock" << dendl;
+ finish(r);
+ return;
+ }
+
+ auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
+ handle_detach_parent(r);
+ finish_op_ctx->complete(0);
+ });
+ auto req = image::DetachParentRequest<I>::create(*m_image_ctx, ctx);
+ req->send();
+}
+
+template <typename I>
+void SetHeadRequest<I>::handle_detach_parent(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to remove parent: " << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+
+ {
+ // adjust in-memory parent now that it's updated on disk
+ std::unique_lock image_locker{m_image_ctx->image_lock};
+ m_image_ctx->parent_md.spec = {};
+ m_image_ctx->parent_md.overlap = 0;
+ }
+
+ send_attach_parent();
+}
+
+template <typename I>
+void SetHeadRequest<I>::send_attach_parent() {
+ m_image_ctx->image_lock.lock_shared();
+ if (m_image_ctx->parent_md.spec == m_parent_spec &&
+ m_image_ctx->parent_md.overlap == m_parent_overlap) {
+ m_image_ctx->image_lock.unlock_shared();
+ finish(0);
+ return;
+ }
+ m_image_ctx->image_lock.unlock_shared();
+
+ ldout(m_cct, 20) << dendl;
+
+ int r;
+ auto finish_op_ctx = start_lock_op(&r);
+ if (finish_op_ctx == nullptr) {
+ lderr(m_cct) << "lost exclusive lock" << dendl;
+ finish(r);
+ return;
+ }
+
+ auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
+ handle_attach_parent(r);
+ finish_op_ctx->complete(0);
+ });
+ auto req = image::AttachParentRequest<I>::create(
+ *m_image_ctx, m_parent_spec, m_parent_overlap, false, ctx);
+ req->send();
+}
+
+template <typename I>
+void SetHeadRequest<I>::handle_attach_parent(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to attach parent: " << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+
+ {
+ // adjust in-memory parent now that it's updated on disk
+ std::unique_lock image_locker{m_image_ctx->image_lock};
+ m_image_ctx->parent_md.spec = m_parent_spec;
+ m_image_ctx->parent_md.overlap = m_parent_overlap;
+ }
+
+ finish(0);
+}
+
+template <typename I>
+Context *SetHeadRequest<I>::start_lock_op(int* r) {
+ std::shared_lock owner_locker{m_image_ctx->owner_lock};
+ if (m_image_ctx->exclusive_lock == nullptr) {
+ return new LambdaContext([](int r) {});
+ }
+ return m_image_ctx->exclusive_lock->start_op(r);
+}
+
+template <typename I>
+void SetHeadRequest<I>::finish(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ m_on_finish->complete(r);
+ delete this;
+}
+
+} // namespace deep_copy
+} // namespace librbd
+
+template class librbd::deep_copy::SetHeadRequest<librbd::ImageCtx>;
diff --git a/src/librbd/deep_copy/SetHeadRequest.h b/src/librbd/deep_copy/SetHeadRequest.h
new file mode 100644
index 000000000..9a17c9fd0
--- /dev/null
+++ b/src/librbd/deep_copy/SetHeadRequest.h
@@ -0,0 +1,87 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_DEEP_COPY_SET_HEAD_REQUEST_H
+#define CEPH_LIBRBD_DEEP_COPY_SET_HEAD_REQUEST_H
+
+#include "include/int_types.h"
+#include "include/rados/librados.hpp"
+#include "common/snap_types.h"
+#include "librbd/ImageCtx.h"
+#include "librbd/Types.h"
+#include <map>
+#include <set>
+#include <string>
+#include <tuple>
+
+class Context;
+
+namespace librbd {
+namespace deep_copy {
+
+template <typename ImageCtxT = librbd::ImageCtx>
+class SetHeadRequest {
+public:
+ static SetHeadRequest* create(ImageCtxT *image_ctx, uint64_t size,
+ const cls::rbd::ParentImageSpec &parent_spec,
+ uint64_t parent_overlap,
+ Context *on_finish) {
+ return new SetHeadRequest(image_ctx, size, parent_spec, parent_overlap,
+ on_finish);
+ }
+
+ SetHeadRequest(ImageCtxT *image_ctx, uint64_t size,
+ const cls::rbd::ParentImageSpec &parent_spec,
+ uint64_t parent_overlap, Context *on_finish);
+
+ void send();
+
+private:
+ /**
+ * @verbatim
+ *
+ * <start>
+ * |
+ * v (skip if not needed)
+ * SET_SIZE
+ * |
+ * v (skip if not needed)
+ * DETACH_PARENT
+ * |
+ * v (skip if not needed)
+ * ATTACH_PARENT
+ * |
+ * v
+ * <finish>
+ *
+ * @endverbatim
+ */
+
+ ImageCtxT *m_image_ctx;
+ uint64_t m_size;
+ cls::rbd::ParentImageSpec m_parent_spec;
+ uint64_t m_parent_overlap;
+ Context *m_on_finish;
+
+ CephContext *m_cct;
+
+ void send_set_size();
+ void handle_set_size(int r);
+
+ void send_detach_parent();
+ void handle_detach_parent(int r);
+
+ void send_attach_parent();
+ void handle_attach_parent(int r);
+
+ Context *start_lock_op(int* r);
+
+ void finish(int r);
+};
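+
+// A minimal usage sketch (illustrative only -- the C_SaferCond completion is an
+// assumption; any Context-derived callback can drive the request):
+//
+//   C_SaferCond ctx;
+//   auto req = SetHeadRequest<>::create(dst_image_ctx, new_size, parent_spec,
+//                                       parent_overlap, &ctx);
+//   req->send();
+//   int r = ctx.wait();  // 0 on success, negative errno on failure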
+
+} // namespace deep_copy
+} // namespace librbd
+
+extern template class librbd::deep_copy::SetHeadRequest<librbd::ImageCtx>;
+
+#endif // CEPH_LIBRBD_DEEP_COPY_SET_HEAD_REQUEST_H
diff --git a/src/librbd/deep_copy/SnapshotCopyRequest.cc b/src/librbd/deep_copy/SnapshotCopyRequest.cc
new file mode 100644
index 000000000..1aadd34db
--- /dev/null
+++ b/src/librbd/deep_copy/SnapshotCopyRequest.cc
@@ -0,0 +1,729 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "SnapshotCopyRequest.h"
+#include "SetHeadRequest.h"
+#include "SnapshotCreateRequest.h"
+#include "common/errno.h"
+#include "librbd/ExclusiveLock.h"
+#include "librbd/ObjectMap.h"
+#include "librbd/Operations.h"
+#include "librbd/Utils.h"
+#include "librbd/asio/ContextWQ.h"
+#include "osdc/Striper.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::deep_copy::SnapshotCopyRequest: " \
+ << this << " " << __func__ << ": "
+
+namespace librbd {
+namespace deep_copy {
+
+namespace {
+
+template <typename I>
+const std::string &get_snapshot_name(I *image_ctx, librados::snap_t snap_id) {
+ auto snap_it = std::find_if(image_ctx->snap_ids.begin(),
+ image_ctx->snap_ids.end(),
+ [snap_id](
+ const std::pair<
+ std::pair<cls::rbd::SnapshotNamespace,
+ std::string>,
+ librados::snap_t> &pair) {
+ return pair.second == snap_id;
+ });
+ ceph_assert(snap_it != image_ctx->snap_ids.end());
+ return snap_it->first.second;
+}
+
+} // anonymous namespace
+
+using librbd::util::create_context_callback;
+using librbd::util::unique_lock_name;
+
+template <typename I>
+SnapshotCopyRequest<I>::SnapshotCopyRequest(I *src_image_ctx,
+ I *dst_image_ctx,
+ librados::snap_t src_snap_id_start,
+ librados::snap_t src_snap_id_end,
+ librados::snap_t dst_snap_id_start,
+ bool flatten,
+ asio::ContextWQ *work_queue,
+ SnapSeqs *snap_seqs,
+ Context *on_finish)
+ : RefCountedObject(dst_image_ctx->cct), m_src_image_ctx(src_image_ctx),
+ m_dst_image_ctx(dst_image_ctx), m_src_snap_id_start(src_snap_id_start),
+ m_src_snap_id_end(src_snap_id_end), m_dst_snap_id_start(dst_snap_id_start),
+ m_flatten(flatten), m_work_queue(work_queue), m_snap_seqs_result(snap_seqs),
+ m_snap_seqs(*snap_seqs), m_on_finish(on_finish), m_cct(dst_image_ctx->cct),
+ m_lock(ceph::make_mutex(unique_lock_name("SnapshotCopyRequest::m_lock", this))) {
+ ceph_assert((m_src_snap_id_start == 0 && m_dst_snap_id_start == 0) ||
+ (m_src_snap_id_start > 0 && m_dst_snap_id_start > 0));
+
+ // snap ids ordered from oldest to newest
+ m_src_image_ctx->image_lock.lock_shared();
+ m_src_snap_ids.insert(src_image_ctx->snaps.begin(),
+ src_image_ctx->snaps.end());
+ m_src_image_ctx->image_lock.unlock_shared();
+
+ m_dst_image_ctx->image_lock.lock_shared();
+ m_dst_snap_ids.insert(dst_image_ctx->snaps.begin(),
+ dst_image_ctx->snaps.end());
+ m_dst_image_ctx->image_lock.unlock_shared();
+
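+  // ignore any source snapshots newer than the requested end snapshot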
+ if (m_src_snap_id_end != CEPH_NOSNAP) {
+ m_src_snap_ids.erase(m_src_snap_ids.upper_bound(m_src_snap_id_end),
+ m_src_snap_ids.end());
+ }
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::send() {
+ cls::rbd::ParentImageSpec src_parent_spec;
+ int r = validate_parent(m_src_image_ctx, &src_parent_spec);
+ if (r < 0) {
+ lderr(m_cct) << "source image parent spec mismatch" << dendl;
+ error(r);
+ return;
+ }
+
+ r = validate_parent(m_dst_image_ctx, &m_dst_parent_spec);
+ if (r < 0) {
+ lderr(m_cct) << "destination image parent spec mismatch" << dendl;
+ error(r);
+ return;
+ }
+
+ send_snap_unprotect();
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::cancel() {
+ std::lock_guard locker{m_lock};
+
+ ldout(m_cct, 20) << dendl;
+ m_canceled = true;
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::send_snap_unprotect() {
+ SnapIdSet::iterator snap_id_it = m_dst_snap_ids.begin();
+ if (m_prev_snap_id != CEPH_NOSNAP) {
+ snap_id_it = m_dst_snap_ids.upper_bound(m_prev_snap_id);
+ } else if (m_dst_snap_id_start > 0) {
+ snap_id_it = m_dst_snap_ids.upper_bound(m_dst_snap_id_start);
+ }
+
+ for (; snap_id_it != m_dst_snap_ids.end(); ++snap_id_it) {
+ librados::snap_t dst_snap_id = *snap_id_it;
+
+ m_dst_image_ctx->image_lock.lock_shared();
+
+ bool dst_unprotected;
+ int r = m_dst_image_ctx->is_snap_unprotected(dst_snap_id, &dst_unprotected);
+ if (r < 0) {
+ lderr(m_cct) << "failed to retrieve destination snap unprotect status: "
+ << cpp_strerror(r) << dendl;
+ m_dst_image_ctx->image_lock.unlock_shared();
+ finish(r);
+ return;
+ }
+ m_dst_image_ctx->image_lock.unlock_shared();
+
+ if (dst_unprotected) {
+ // snap is already unprotected -- check next snap
+ continue;
+ }
+
+ // if destination snapshot is protected and (1) it isn't in our mapping
+ // table, or (2) the source snapshot isn't protected, unprotect it
+ auto snap_seq_it = std::find_if(
+ m_snap_seqs.begin(), m_snap_seqs.end(),
+ [dst_snap_id](const SnapSeqs::value_type& pair) {
+ return pair.second == dst_snap_id;
+ });
+
+ if (snap_seq_it != m_snap_seqs.end()) {
+ m_src_image_ctx->image_lock.lock_shared();
+ bool src_unprotected;
+ r = m_src_image_ctx->is_snap_unprotected(snap_seq_it->first,
+ &src_unprotected);
+ ldout(m_cct, 20) << "m_src_image_ctx->is_snap_unprotected("
+ << snap_seq_it->first << "): r=" << r
+ << ", src_unprotected=" << src_unprotected << dendl;
+ if (r == -ENOENT) {
+ src_unprotected = true;
+ r = 0;
+ }
+ if (r < 0) {
+ lderr(m_cct) << "failed to retrieve source snap unprotect status: "
+ << cpp_strerror(r) << dendl;
+ m_src_image_ctx->image_lock.unlock_shared();
+ finish(r);
+ return;
+ }
+ m_src_image_ctx->image_lock.unlock_shared();
+
+ if (src_unprotected) {
+ // source is unprotected -- unprotect destination snap
+ break;
+ }
+ } else {
+ // source snapshot doesn't exist -- unprotect destination snap
+ break;
+ }
+ }
+
+ if (snap_id_it == m_dst_snap_ids.end()) {
+ // no destination snapshots to unprotect
+ m_prev_snap_id = CEPH_NOSNAP;
+ send_snap_remove();
+ return;
+ }
+
+ m_prev_snap_id = *snap_id_it;
+ m_snap_name = get_snapshot_name(m_dst_image_ctx, m_prev_snap_id);
+
+ ldout(m_cct, 20) << "snap_name=" << m_snap_name << ", "
+ << "snap_id=" << m_prev_snap_id << dendl;
+
+ int r;
+ auto finish_op_ctx = start_lock_op(&r);
+ if (finish_op_ctx == nullptr) {
+ lderr(m_cct) << "lost exclusive lock" << dendl;
+ finish(r);
+ return;
+ }
+
+ auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
+ handle_snap_unprotect(r);
+ finish_op_ctx->complete(0);
+ });
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
+ m_dst_image_ctx->operations->execute_snap_unprotect(
+ cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx);
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::handle_snap_unprotect(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to unprotect snapshot '" << m_snap_name << "': "
+ << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+
+ {
+ // avoid the need to refresh to delete the newly unprotected snapshot
+ std::shared_lock image_locker{m_dst_image_ctx->image_lock};
+ auto snap_info_it = m_dst_image_ctx->snap_info.find(m_prev_snap_id);
+ if (snap_info_it != m_dst_image_ctx->snap_info.end()) {
+ snap_info_it->second.protection_status =
+ RBD_PROTECTION_STATUS_UNPROTECTED;
+ }
+ }
+
+ if (handle_cancellation()) {
+ return;
+ }
+
+ send_snap_unprotect();
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::send_snap_remove() {
+ SnapIdSet::iterator snap_id_it = m_dst_snap_ids.begin();
+ if (m_prev_snap_id != CEPH_NOSNAP) {
+ snap_id_it = m_dst_snap_ids.upper_bound(m_prev_snap_id);
+ } else if (m_dst_snap_id_start > 0) {
+ snap_id_it = m_dst_snap_ids.upper_bound(m_dst_snap_id_start);
+ }
+
+ for (; snap_id_it != m_dst_snap_ids.end(); ++snap_id_it) {
+ librados::snap_t dst_snap_id = *snap_id_it;
+
+ cls::rbd::SnapshotNamespace snap_namespace;
+ m_dst_image_ctx->image_lock.lock_shared();
+ int r = m_dst_image_ctx->get_snap_namespace(dst_snap_id, &snap_namespace);
+ m_dst_image_ctx->image_lock.unlock_shared();
+ if (r < 0) {
+ lderr(m_cct) << "failed to retrieve destination snap namespace: "
+ << m_snap_name << dendl;
+ finish(r);
+ return;
+ }
+
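+    // only user snapshots are candidates for removal -- skip all other namespaces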
+ if (!std::holds_alternative<cls::rbd::UserSnapshotNamespace>(snap_namespace)) {
+ continue;
+ }
+
+ // if the destination snapshot isn't in our mapping table, remove it
+ auto snap_seq_it = std::find_if(
+ m_snap_seqs.begin(), m_snap_seqs.end(),
+ [dst_snap_id](const SnapSeqs::value_type& pair) {
+ return pair.second == dst_snap_id;
+ });
+
+ if (snap_seq_it == m_snap_seqs.end()) {
+ break;
+ }
+ }
+
+ if (snap_id_it == m_dst_snap_ids.end()) {
+ // no destination snapshots to delete
+ m_prev_snap_id = CEPH_NOSNAP;
+ send_snap_create();
+ return;
+ }
+
+ m_prev_snap_id = *snap_id_it;
+ m_snap_name = get_snapshot_name(m_dst_image_ctx, m_prev_snap_id);
+
+ ldout(m_cct, 20) << ""
+ << "snap_name=" << m_snap_name << ", "
+ << "snap_id=" << m_prev_snap_id << dendl;
+
+ int r;
+ auto finish_op_ctx = start_lock_op(&r);
+ if (finish_op_ctx == nullptr) {
+ lderr(m_cct) << "lost exclusive lock" << dendl;
+ finish(r);
+ return;
+ }
+
+ auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
+ handle_snap_remove(r);
+ finish_op_ctx->complete(0);
+ });
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
+ m_dst_image_ctx->operations->execute_snap_remove(
+ cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx);
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::handle_snap_remove(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to remove snapshot '" << m_snap_name << "': "
+ << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+ if (handle_cancellation()) {
+ return;
+ }
+
+ send_snap_remove();
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::send_snap_create() {
+ SnapIdSet::iterator snap_id_it = m_src_snap_ids.begin();
+ if (m_prev_snap_id != CEPH_NOSNAP) {
+ snap_id_it = m_src_snap_ids.upper_bound(m_prev_snap_id);
+ } else if (m_src_snap_id_start > 0) {
+ snap_id_it = m_src_snap_ids.upper_bound(m_src_snap_id_start);
+ }
+
+ for (; snap_id_it != m_src_snap_ids.end(); ++snap_id_it) {
+ librados::snap_t src_snap_id = *snap_id_it;
+
+ cls::rbd::SnapshotNamespace snap_namespace;
+ m_src_image_ctx->image_lock.lock_shared();
+ int r = m_src_image_ctx->get_snap_namespace(src_snap_id, &snap_namespace);
+ m_src_image_ctx->image_lock.unlock_shared();
+ if (r < 0) {
+ lderr(m_cct) << "failed to retrieve source snap namespace: "
+ << m_snap_name << dendl;
+ finish(r);
+ return;
+ }
+
+ if (m_snap_seqs.find(src_snap_id) == m_snap_seqs.end()) {
+ // the source snapshot is not in our mapping table, ...
+ if (std::holds_alternative<cls::rbd::UserSnapshotNamespace>(snap_namespace)) {
+ // ... create it since it's a user snapshot
+ break;
+ } else if (src_snap_id == m_src_snap_id_end) {
+ // ... map it to destination HEAD since it's not a user snapshot that we
+ // will create (e.g. MirrorSnapshotNamespace)
+ m_snap_seqs[src_snap_id] = CEPH_NOSNAP;
+ }
+ }
+ }
+
+ if (snap_id_it == m_src_snap_ids.end()) {
+ // no source snapshots to create
+ m_prev_snap_id = CEPH_NOSNAP;
+ send_snap_protect();
+ return;
+ }
+
+ m_prev_snap_id = *snap_id_it;
+ m_snap_name = get_snapshot_name(m_src_image_ctx, m_prev_snap_id);
+
+ m_src_image_ctx->image_lock.lock_shared();
+ auto snap_info_it = m_src_image_ctx->snap_info.find(m_prev_snap_id);
+ if (snap_info_it == m_src_image_ctx->snap_info.end()) {
+ m_src_image_ctx->image_lock.unlock_shared();
+ lderr(m_cct) << "failed to retrieve source snap info: " << m_snap_name
+ << dendl;
+ finish(-ENOENT);
+ return;
+ }
+
+ uint64_t size = snap_info_it->second.size;
+ m_snap_namespace = snap_info_it->second.snap_namespace;
+ cls::rbd::ParentImageSpec parent_spec;
+ uint64_t parent_overlap = 0;
+ if (!m_flatten && snap_info_it->second.parent.spec.pool_id != -1) {
+ parent_spec = m_dst_parent_spec;
+ parent_overlap = snap_info_it->second.parent.overlap;
+ }
+ m_src_image_ctx->image_lock.unlock_shared();
+
+ ldout(m_cct, 20) << "snap_name=" << m_snap_name << ", "
+ << "snap_id=" << m_prev_snap_id << ", "
+ << "size=" << size << ", "
+ << "parent_info=["
+ << "pool_id=" << parent_spec.pool_id << ", "
+ << "image_id=" << parent_spec.image_id << ", "
+ << "snap_id=" << parent_spec.snap_id << ", "
+ << "overlap=" << parent_overlap << "]" << dendl;
+
+ int r;
+ Context *finish_op_ctx = start_lock_op(&r);
+ if (finish_op_ctx == nullptr) {
+ lderr(m_cct) << "lost exclusive lock" << dendl;
+ finish(r);
+ return;
+ }
+
+ auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
+ handle_snap_create(r);
+ finish_op_ctx->complete(0);
+ });
+ SnapshotCreateRequest<I> *req = SnapshotCreateRequest<I>::create(
+ m_dst_image_ctx, m_snap_name, m_snap_namespace, size, parent_spec,
+ parent_overlap, ctx);
+ req->send();
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::handle_snap_create(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to create snapshot '" << m_snap_name << "': "
+ << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+ if (handle_cancellation()) {
+ return;
+ }
+
+ ceph_assert(m_prev_snap_id != CEPH_NOSNAP);
+
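+  // look up the snap id the destination image assigned to the newly created snapshot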
+ auto snap_it = m_dst_image_ctx->snap_ids.find(
+ {cls::rbd::UserSnapshotNamespace(), m_snap_name});
+ ceph_assert(snap_it != m_dst_image_ctx->snap_ids.end());
+ librados::snap_t dst_snap_id = snap_it->second;
+
+ ldout(m_cct, 20) << "mapping source snap id " << m_prev_snap_id << " to "
+ << dst_snap_id << dendl;
+ m_snap_seqs[m_prev_snap_id] = dst_snap_id;
+
+ send_snap_create();
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::send_snap_protect() {
+ SnapIdSet::iterator snap_id_it = m_src_snap_ids.begin();
+ if (m_prev_snap_id != CEPH_NOSNAP) {
+ snap_id_it = m_src_snap_ids.upper_bound(m_prev_snap_id);
+ } else if (m_src_snap_id_start > 0) {
+ snap_id_it = m_src_snap_ids.upper_bound(m_src_snap_id_start);
+ }
+
+ for (; snap_id_it != m_src_snap_ids.end(); ++snap_id_it) {
+ librados::snap_t src_snap_id = *snap_id_it;
+
+ m_src_image_ctx->image_lock.lock_shared();
+
+ bool src_protected;
+ int r = m_src_image_ctx->is_snap_protected(src_snap_id, &src_protected);
+ if (r < 0) {
+ lderr(m_cct) << "failed to retrieve source snap protect status: "
+ << cpp_strerror(r) << dendl;
+ m_src_image_ctx->image_lock.unlock_shared();
+ finish(r);
+ return;
+ }
+ m_src_image_ctx->image_lock.unlock_shared();
+
+ if (!src_protected) {
+ // snap is not protected -- check next snap
+ continue;
+ }
+
+ // if destination snapshot is not protected, protect it
+ auto snap_seq_it = m_snap_seqs.find(src_snap_id);
+ ceph_assert(snap_seq_it != m_snap_seqs.end());
+ if (snap_seq_it->second == CEPH_NOSNAP) {
+ // implies src end snapshot is mapped to a non-copyable snapshot
+ ceph_assert(src_snap_id == m_src_snap_id_end);
+ break;
+ }
+
+ m_dst_image_ctx->image_lock.lock_shared();
+ bool dst_protected;
+ r = m_dst_image_ctx->is_snap_protected(snap_seq_it->second, &dst_protected);
+ if (r < 0) {
+ lderr(m_cct) << "failed to retrieve destination snap protect status: "
+ << cpp_strerror(r) << dendl;
+ m_dst_image_ctx->image_lock.unlock_shared();
+ finish(r);
+ return;
+ }
+ m_dst_image_ctx->image_lock.unlock_shared();
+
+ if (!dst_protected) {
+ break;
+ }
+ }
+
+ if (snap_id_it == m_src_snap_ids.end()) {
+ // no destination snapshots to protect
+ m_prev_snap_id = CEPH_NOSNAP;
+ send_set_head();
+ return;
+ }
+
+ m_prev_snap_id = *snap_id_it;
+ m_snap_name = get_snapshot_name(m_src_image_ctx, m_prev_snap_id);
+
+ ldout(m_cct, 20) << "snap_name=" << m_snap_name << ", "
+ << "snap_id=" << m_prev_snap_id << dendl;
+
+ int r;
+ auto finish_op_ctx = start_lock_op(&r);
+ if (finish_op_ctx == nullptr) {
+ lderr(m_cct) << "lost exclusive lock" << dendl;
+ finish(r);
+ return;
+ }
+
+ auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
+ handle_snap_protect(r);
+ finish_op_ctx->complete(0);
+ });
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
+ m_dst_image_ctx->operations->execute_snap_protect(
+ cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx);
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::handle_snap_protect(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to protect snapshot '" << m_snap_name << "': "
+ << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+ if (handle_cancellation()) {
+ return;
+ }
+
+ send_snap_protect();
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::send_set_head() {
+ auto snap_seq_it = m_snap_seqs.find(m_src_snap_id_end);
+ if (m_src_snap_id_end != CEPH_NOSNAP &&
+ (snap_seq_it == m_snap_seqs.end() ||
+ snap_seq_it->second != CEPH_NOSNAP)) {
+    // not copying to the source or destination HEAD revision
+ finish(0);
+ return;
+ }
+
+ ldout(m_cct, 20) << dendl;
+
+ uint64_t size;
+ cls::rbd::ParentImageSpec parent_spec;
+ uint64_t parent_overlap = 0;
+ {
+ std::shared_lock src_locker{m_src_image_ctx->image_lock};
+ auto snap_info_it = m_src_image_ctx->snap_info.find(m_src_snap_id_end);
+ if (snap_info_it != m_src_image_ctx->snap_info.end()) {
+ auto& snap_info = snap_info_it->second;
+ size = snap_info.size;
+ if (!m_flatten && snap_info.parent.spec.pool_id != -1) {
+ parent_spec = m_dst_parent_spec;
+ parent_overlap = snap_info.parent.overlap;
+ }
+ } else {
+ size = m_src_image_ctx->size;
+ if (!m_flatten) {
+ parent_spec = m_dst_image_ctx->parent_md.spec;
+ parent_overlap = m_src_image_ctx->parent_md.overlap;
+ }
+ }
+ }
+
+ auto ctx = create_context_callback<
+ SnapshotCopyRequest<I>, &SnapshotCopyRequest<I>::handle_set_head>(this);
+ auto req = SetHeadRequest<I>::create(m_dst_image_ctx, size, parent_spec,
+ parent_overlap, ctx);
+ req->send();
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::handle_set_head(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to set head: " << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+
+ if (handle_cancellation()) {
+ return;
+ }
+
+ send_resize_object_map();
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::send_resize_object_map() {
+ int r = 0;
+
+ if (m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP)) {
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
+ std::shared_lock image_locker{m_dst_image_ctx->image_lock};
+
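+    // resize only if the object map no longer covers the current number of objects in the image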
+ if (m_dst_image_ctx->object_map != nullptr &&
+ Striper::get_num_objects(m_dst_image_ctx->layout,
+ m_dst_image_ctx->size) !=
+ m_dst_image_ctx->object_map->size()) {
+
+ ldout(m_cct, 20) << dendl;
+
+ auto finish_op_ctx = start_lock_op(m_dst_image_ctx->owner_lock, &r);
+ if (finish_op_ctx != nullptr) {
+ auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
+ handle_resize_object_map(r);
+ finish_op_ctx->complete(0);
+ });
+
+ m_dst_image_ctx->object_map->aio_resize(m_dst_image_ctx->size,
+ OBJECT_NONEXISTENT, ctx);
+ return;
+ }
+
+ lderr(m_cct) << "lost exclusive lock" << dendl;
+ }
+ }
+
+ finish(r);
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::handle_resize_object_map(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to resize object map: " << cpp_strerror(r)
+ << dendl;
+ finish(r);
+ return;
+ }
+
+ finish(0);
+}
+
+template <typename I>
+bool SnapshotCopyRequest<I>::handle_cancellation() {
+ {
+ std::lock_guard locker{m_lock};
+ if (!m_canceled) {
+ return false;
+ }
+ }
+ ldout(m_cct, 10) << "snapshot copy canceled" << dendl;
+ finish(-ECANCELED);
+ return true;
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::error(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ m_work_queue->queue(new LambdaContext([this, r](int r1) { finish(r); }));
+}
+
+template <typename I>
+int SnapshotCopyRequest<I>::validate_parent(I *image_ctx,
+ cls::rbd::ParentImageSpec *spec) {
+ std::shared_lock owner_locker{image_ctx->owner_lock};
+ std::shared_lock image_locker{image_ctx->image_lock};
+
+ // ensure source image's parent specs are still consistent
+ *spec = image_ctx->parent_md.spec;
+ for (auto &snap_info_pair : image_ctx->snap_info) {
+ auto &parent_spec = snap_info_pair.second.parent.spec;
+ if (parent_spec.pool_id == -1) {
+ continue;
+ } else if (spec->pool_id == -1) {
+ *spec = parent_spec;
+ continue;
+ }
+
+ if (*spec != parent_spec) {
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+template <typename I>
+Context *SnapshotCopyRequest<I>::start_lock_op(int* r) {
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
+ return start_lock_op(m_dst_image_ctx->owner_lock, r);
+}
+
+template <typename I>
+Context *SnapshotCopyRequest<I>::start_lock_op(ceph::shared_mutex &owner_lock, int* r) {
+ ceph_assert(ceph_mutex_is_locked(m_dst_image_ctx->owner_lock));
+ if (m_dst_image_ctx->exclusive_lock == nullptr) {
+ return new LambdaContext([](int r) {});
+ }
+ return m_dst_image_ctx->exclusive_lock->start_op(r);
+}
+
+template <typename I>
+void SnapshotCopyRequest<I>::finish(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r == 0) {
+ *m_snap_seqs_result = m_snap_seqs;
+ }
+
+ m_on_finish->complete(r);
+ put();
+}
+
+} // namespace deep_copy
+} // namespace librbd
+
+template class librbd::deep_copy::SnapshotCopyRequest<librbd::ImageCtx>;
diff --git a/src/librbd/deep_copy/SnapshotCopyRequest.h b/src/librbd/deep_copy/SnapshotCopyRequest.h
new file mode 100644
index 000000000..9c6abdf73
--- /dev/null
+++ b/src/librbd/deep_copy/SnapshotCopyRequest.h
@@ -0,0 +1,151 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_COPY_REQUEST_H
+#define CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_COPY_REQUEST_H
+
+#include "include/int_types.h"
+#include "include/rados/librados.hpp"
+#include "common/RefCountedObj.h"
+#include "common/snap_types.h"
+#include "librbd/ImageCtx.h"
+#include "librbd/Types.h"
+#include <map>
+#include <set>
+#include <string>
+#include <tuple>
+
+class Context;
+
+namespace librbd {
+
+namespace asio { struct ContextWQ; }
+
+namespace deep_copy {
+
+template <typename ImageCtxT = librbd::ImageCtx>
+class SnapshotCopyRequest : public RefCountedObject {
+public:
+ static SnapshotCopyRequest* create(ImageCtxT *src_image_ctx,
+ ImageCtxT *dst_image_ctx,
+ librados::snap_t src_snap_id_start,
+ librados::snap_t src_snap_id_end,
+ librados::snap_t dst_snap_id_start,
+ bool flatten, asio::ContextWQ *work_queue,
+ SnapSeqs *snap_seqs, Context *on_finish) {
+ return new SnapshotCopyRequest(src_image_ctx, dst_image_ctx,
+ src_snap_id_start, src_snap_id_end,
+ dst_snap_id_start, flatten, work_queue,
+ snap_seqs, on_finish);
+ }
+
+ SnapshotCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
+ librados::snap_t src_snap_id_start,
+ librados::snap_t src_snap_id_end,
+ librados::snap_t dst_snap_id_start,
+ bool flatten, asio::ContextWQ *work_queue,
+ SnapSeqs *snap_seqs, Context *on_finish);
+
+ void send();
+ void cancel();
+
+private:
+ /**
+ * @verbatim
+ *
+ * <start>
+ * |
+ * | /-----------\
+ * | | |
+ * v v | (repeat as needed)
+ * UNPROTECT_SNAP ----/
+ * |
+ * | /-----------\
+ * | | |
+ * v v | (repeat as needed)
+ * REMOVE_SNAP -------/
+ * |
+ * | /-----------\
+ * | | |
+ * v v | (repeat as needed)
+ * CREATE_SNAP -------/
+ * |
+ * | /-----------\
+ * | | |
+ * v v | (repeat as needed)
+ * PROTECT_SNAP ------/
+ * |
+ * v
+ * SET_HEAD (skip if not needed)
+ * |
+ * v
+ * RESIZE_OBJECT_MAP (skip if not needed)
+ * |
+ * v
+ * <finish>
+ *
+ * @endverbatim
+ */
+
+ typedef std::set<librados::snap_t> SnapIdSet;
+
+ ImageCtxT *m_src_image_ctx;
+ ImageCtxT *m_dst_image_ctx;
+ librados::snap_t m_src_snap_id_start;
+ librados::snap_t m_src_snap_id_end;
+ librados::snap_t m_dst_snap_id_start;
+ bool m_flatten;
+ asio::ContextWQ *m_work_queue;
+ SnapSeqs *m_snap_seqs_result;
+ SnapSeqs m_snap_seqs;
+ Context *m_on_finish;
+
+ CephContext *m_cct;
+ SnapIdSet m_src_snap_ids;
+ SnapIdSet m_dst_snap_ids;
+ librados::snap_t m_prev_snap_id = CEPH_NOSNAP;
+
+ std::string m_snap_name;
+ cls::rbd::SnapshotNamespace m_snap_namespace;
+
+ cls::rbd::ParentImageSpec m_dst_parent_spec;
+
+ ceph::mutex m_lock;
+ bool m_canceled = false;
+
+ void send_snap_unprotect();
+ void handle_snap_unprotect(int r);
+
+ void send_snap_remove();
+ void handle_snap_remove(int r);
+
+ void send_snap_create();
+ void handle_snap_create(int r);
+
+ void send_snap_protect();
+ void handle_snap_protect(int r);
+
+ void send_set_head();
+ void handle_set_head(int r);
+
+ void send_resize_object_map();
+ void handle_resize_object_map(int r);
+
+ bool handle_cancellation();
+
+ void error(int r);
+
+ int validate_parent(ImageCtxT *image_ctx, cls::rbd::ParentImageSpec *spec);
+
+ Context *start_lock_op(int* r);
+ Context *start_lock_op(ceph::shared_mutex &owner_locki, int* r);
+
+ void finish(int r);
+};
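+
+// A minimal usage sketch (illustrative only -- the completion, work queue and
+// image contexts are assumed to be supplied by the caller, e.g. an
+// ImageCopyRequest):
+//
+//   SnapSeqs snap_seqs;
+//   C_SaferCond ctx;
+//   auto req = SnapshotCopyRequest<>::create(
+//     src_image_ctx, dst_image_ctx, 0, CEPH_NOSNAP, 0, false /* flatten */,
+//     work_queue, &snap_seqs, &ctx);
+//   req->send();          // cancel() may be invoked concurrently to abort
+//   int r = ctx.wait();   // on success, snap_seqs holds the src -> dst mapping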
+
+} // namespace deep_copy
+} // namespace librbd
+
+extern template class librbd::deep_copy::SnapshotCopyRequest<librbd::ImageCtx>;
+
+#endif // CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_COPY_REQUEST_H
diff --git a/src/librbd/deep_copy/SnapshotCreateRequest.cc b/src/librbd/deep_copy/SnapshotCreateRequest.cc
new file mode 100644
index 000000000..d437bd355
--- /dev/null
+++ b/src/librbd/deep_copy/SnapshotCreateRequest.cc
@@ -0,0 +1,187 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "SetHeadRequest.h"
+#include "SnapshotCreateRequest.h"
+#include "common/errno.h"
+#include "cls/rbd/cls_rbd_client.h"
+#include "cls/rbd/cls_rbd_types.h"
+#include "librbd/ExclusiveLock.h"
+#include "librbd/ObjectMap.h"
+#include "librbd/Operations.h"
+#include "librbd/Utils.h"
+#include "osdc/Striper.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::deep_copy::SnapshotCreateRequest: " \
+ << this << " " << __func__ << ": "
+
+namespace librbd {
+namespace deep_copy {
+
+using librbd::util::create_context_callback;
+using librbd::util::create_rados_callback;
+
+template <typename I>
+SnapshotCreateRequest<I>::SnapshotCreateRequest(
+ I *dst_image_ctx, const std::string &snap_name,
+ const cls::rbd::SnapshotNamespace &snap_namespace,
+ uint64_t size, const cls::rbd::ParentImageSpec &spec,
+ uint64_t parent_overlap, Context *on_finish)
+ : m_dst_image_ctx(dst_image_ctx), m_snap_name(snap_name),
+ m_snap_namespace(snap_namespace), m_size(size),
+ m_parent_spec(spec), m_parent_overlap(parent_overlap),
+ m_on_finish(on_finish), m_cct(dst_image_ctx->cct) {
+}
+
+template <typename I>
+void SnapshotCreateRequest<I>::send() {
+ send_set_head();
+}
+
+template <typename I>
+void SnapshotCreateRequest<I>::send_set_head() {
+ ldout(m_cct, 20) << dendl;
+
+ auto ctx = create_context_callback<
+ SnapshotCreateRequest<I>, &SnapshotCreateRequest<I>::handle_set_head>(this);
+ auto req = SetHeadRequest<I>::create(m_dst_image_ctx, m_size, m_parent_spec,
+ m_parent_overlap, ctx);
+ req->send();
+}
+
+template <typename I>
+void SnapshotCreateRequest<I>::handle_set_head(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to set head: " << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+
+ send_create_snap();
+}
+
+template <typename I>
+void SnapshotCreateRequest<I>::send_create_snap() {
+ ldout(m_cct, 20) << "snap_name=" << m_snap_name << dendl;
+
+ int r;
+ auto finish_op_ctx = start_lock_op(&r);
+ if (finish_op_ctx == nullptr) {
+ lderr(m_cct) << "lost exclusive lock" << dendl;
+ finish(r);
+ return;
+ }
+
+ auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
+ handle_create_snap(r);
+ finish_op_ctx->complete(0);
+ });
+ uint64_t flags = SNAP_CREATE_FLAG_SKIP_OBJECT_MAP |
+ SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE;
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
+ m_dst_image_ctx->operations->execute_snap_create(
+ m_snap_namespace, m_snap_name.c_str(), ctx, 0U, flags, m_prog_ctx);
+}
+
+template <typename I>
+void SnapshotCreateRequest<I>::handle_create_snap(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to create snapshot '" << m_snap_name << "': "
+ << cpp_strerror(r) << dendl;
+ finish(r);
+ return;
+ }
+
+ send_create_object_map();
+}
+
+template <typename I>
+void SnapshotCreateRequest<I>::send_create_object_map() {
+ if (!m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP)) {
+ finish(0);
+ return;
+ }
+
+ m_dst_image_ctx->image_lock.lock_shared();
+ auto snap_it = m_dst_image_ctx->snap_ids.find(
+ {cls::rbd::UserSnapshotNamespace(), m_snap_name});
+ if (snap_it == m_dst_image_ctx->snap_ids.end()) {
+ lderr(m_cct) << "failed to locate snap: " << m_snap_name << dendl;
+ m_dst_image_ctx->image_lock.unlock_shared();
+ finish(-ENOENT);
+ return;
+ }
+ librados::snap_t local_snap_id = snap_it->second;
+ m_dst_image_ctx->image_lock.unlock_shared();
+
+ std::string object_map_oid(librbd::ObjectMap<>::object_map_name(
+ m_dst_image_ctx->id, local_snap_id));
+ uint64_t object_count = Striper::get_num_objects(m_dst_image_ctx->layout,
+ m_size);
+ ldout(m_cct, 20) << "object_map_oid=" << object_map_oid << ", "
+ << "object_count=" << object_count << dendl;
+
+ // initialize an empty object map of the correct size (object sync
+ // will populate the object map)
+ librados::ObjectWriteOperation op;
+ librbd::cls_client::object_map_resize(&op, object_count, OBJECT_NONEXISTENT);
+
+ int r;
+ auto finish_op_ctx = start_lock_op(&r);
+ if (finish_op_ctx == nullptr) {
+ lderr(m_cct) << "lost exclusive lock" << dendl;
+ finish(r);
+ return;
+ }
+
+ auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
+ handle_create_object_map(r);
+ finish_op_ctx->complete(0);
+ });
+ librados::AioCompletion *comp = create_rados_callback(ctx);
+ r = m_dst_image_ctx->md_ctx.aio_operate(object_map_oid, comp, &op);
+ ceph_assert(r == 0);
+ comp->release();
+}
+
+template <typename I>
+void SnapshotCreateRequest<I>::handle_create_object_map(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to create object map: " << cpp_strerror(r)
+ << dendl;
+ finish(r);
+ return;
+ }
+
+ finish(0);
+}
+
+template <typename I>
+Context *SnapshotCreateRequest<I>::start_lock_op(int* r) {
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
+ if (m_dst_image_ctx->exclusive_lock == nullptr) {
+ return new LambdaContext([](int r) {});
+ }
+ return m_dst_image_ctx->exclusive_lock->start_op(r);
+}
+
+template <typename I>
+void SnapshotCreateRequest<I>::finish(int r) {
+ ldout(m_cct, 20) << "r=" << r << dendl;
+
+ m_on_finish->complete(r);
+ delete this;
+}
+
+} // namespace deep_copy
+} // namespace librbd
+
+template class librbd::deep_copy::SnapshotCreateRequest<librbd::ImageCtx>;
diff --git a/src/librbd/deep_copy/SnapshotCreateRequest.h b/src/librbd/deep_copy/SnapshotCreateRequest.h
new file mode 100644
index 000000000..41f7f54e4
--- /dev/null
+++ b/src/librbd/deep_copy/SnapshotCreateRequest.h
@@ -0,0 +1,98 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_CREATE_REQUEST_H
+#define CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_CREATE_REQUEST_H
+
+#include "include/int_types.h"
+#include "include/rados/librados.hpp"
+#include "common/snap_types.h"
+#include "librbd/ImageCtx.h"
+#include "librbd/Types.h"
+#include "librbd/internal.h"
+
+#include <map>
+#include <set>
+#include <string>
+#include <tuple>
+
+class Context;
+
+namespace librbd {
+namespace deep_copy {
+
+template <typename ImageCtxT = librbd::ImageCtx>
+class SnapshotCreateRequest {
+public:
+ static SnapshotCreateRequest* create(ImageCtxT *dst_image_ctx,
+ const std::string &snap_name,
+ const cls::rbd::SnapshotNamespace &snap_namespace,
+ uint64_t size,
+ const cls::rbd::ParentImageSpec &parent_spec,
+ uint64_t parent_overlap,
+ Context *on_finish) {
+ return new SnapshotCreateRequest(dst_image_ctx, snap_name, snap_namespace, size,
+ parent_spec, parent_overlap, on_finish);
+ }
+
+ SnapshotCreateRequest(ImageCtxT *dst_image_ctx,
+ const std::string &snap_name,
+ const cls::rbd::SnapshotNamespace &snap_namespace,
+ uint64_t size,
+ const cls::rbd::ParentImageSpec &parent_spec,
+ uint64_t parent_overlap, Context *on_finish);
+
+ void send();
+
+private:
+ /**
+ * @verbatim
+ *
+ * <start>
+ * |
+ * v
+ * SET_HEAD
+ * |
+ * v
+ * CREATE_SNAP
+ * |
+ * v (skip if not needed)
+ * CREATE_OBJECT_MAP
+ * |
+ * v
+ * <finish>
+ *
+ * @endverbatim
+ */
+
+ ImageCtxT *m_dst_image_ctx;
+ std::string m_snap_name;
+ cls::rbd::SnapshotNamespace m_snap_namespace;
+ uint64_t m_size;
+ cls::rbd::ParentImageSpec m_parent_spec;
+ uint64_t m_parent_overlap;
+ Context *m_on_finish;
+
+ CephContext *m_cct;
+ NoOpProgressContext m_prog_ctx;
+
+ void send_set_head();
+ void handle_set_head(int r);
+
+ void send_create_snap();
+ void handle_create_snap(int r);
+
+ void send_create_object_map();
+ void handle_create_object_map(int r);
+
+ Context *start_lock_op(int* r);
+
+ void finish(int r);
+};
+
+} // namespace deep_copy
+} // namespace librbd
+
+extern template class librbd::deep_copy::SnapshotCreateRequest<librbd::ImageCtx>;
+
+#endif // CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_CREATE_REQUEST_H
diff --git a/src/librbd/deep_copy/Types.h b/src/librbd/deep_copy/Types.h
new file mode 100644
index 000000000..9cd4835b3
--- /dev/null
+++ b/src/librbd/deep_copy/Types.h
@@ -0,0 +1,28 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_DEEP_COPY_TYPES_H
+#define CEPH_LIBRBD_DEEP_COPY_TYPES_H
+
+#include "include/int_types.h"
+#include "include/rados/librados.hpp"
+#include <boost/optional.hpp>
+
+namespace librbd {
+namespace deep_copy {
+
+enum {
+ OBJECT_COPY_REQUEST_FLAG_FLATTEN = 1U << 0,
+ OBJECT_COPY_REQUEST_FLAG_MIGRATION = 1U << 1,
+ OBJECT_COPY_REQUEST_FLAG_EXISTS_CLEAN = 1U << 2,
+};
+
+typedef std::vector<librados::snap_t> SnapIds;
+typedef std::map<librados::snap_t, SnapIds> SnapMap;
+
+typedef boost::optional<uint64_t> ObjectNumber;
+
+} // namespace deep_copy
+} // namespace librbd
+
+#endif // CEPH_LIBRBD_DEEP_COPY_TYPES_H
diff --git a/src/librbd/deep_copy/Utils.cc b/src/librbd/deep_copy/Utils.cc
new file mode 100644
index 000000000..c2dd25020
--- /dev/null
+++ b/src/librbd/deep_copy/Utils.cc
@@ -0,0 +1,61 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "common/debug.h"
+#include "Utils.h"
+#include <set>
+
+namespace librbd {
+namespace deep_copy {
+namespace util {
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::deep_copy::util::" << __func__ << ": "
+
+void compute_snap_map(CephContext* cct,
+ librados::snap_t src_snap_id_start,
+ librados::snap_t src_snap_id_end,
+ const SnapIds& dst_snap_ids,
+ const SnapSeqs &snap_seqs,
+ SnapMap *snap_map) {
+ std::set<librados::snap_t> ordered_dst_snap_ids{
+ dst_snap_ids.begin(), dst_snap_ids.end()};
+ auto dst_snap_id_it = ordered_dst_snap_ids.begin();
+
+ SnapIds snap_ids;
+ for (auto &it : snap_seqs) {
+ // ensure all dst snap ids are included in the mapping table since
+ // deep copy will skip non-user snapshots
+ while (dst_snap_id_it != ordered_dst_snap_ids.end()) {
+ if (*dst_snap_id_it < it.second) {
+ snap_ids.insert(snap_ids.begin(), *dst_snap_id_it);
+ } else if (*dst_snap_id_it > it.second) {
+ break;
+ }
+ ++dst_snap_id_it;
+ }
+
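+    // snap_ids is kept newest-first: each mapped dst snap id is prepended ahead of the older ids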
+    // we should only have the HEAD revision in the last snap seq
+ ceph_assert(snap_ids.empty() || snap_ids[0] != CEPH_NOSNAP);
+ snap_ids.insert(snap_ids.begin(), it.second);
+
+ if (it.first < src_snap_id_start) {
+ continue;
+ } else if (it.first > src_snap_id_end) {
+ break;
+ }
+
+ (*snap_map)[it.first] = snap_ids;
+ }
+
+ ldout(cct, 10) << "src_snap_id_start=" << src_snap_id_start << ", "
+ << "src_snap_id_end=" << src_snap_id_end << ", "
+ << "dst_snap_ids=" << dst_snap_ids << ", "
+ << "snap_seqs=" << snap_seqs << ", "
+ << "snap_map=" << *snap_map << dendl;
+}
+
+} // namespace util
+} // namespace deep_copy
+} // namespace librbd
diff --git a/src/librbd/deep_copy/Utils.h b/src/librbd/deep_copy/Utils.h
new file mode 100644
index 000000000..268a39daf
--- /dev/null
+++ b/src/librbd/deep_copy/Utils.h
@@ -0,0 +1,29 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_DEEP_COPY_UTILS_H
+#define CEPH_LIBRBD_DEEP_COPY_UTILS_H
+
+#include "include/common_fwd.h"
+#include "include/rados/librados.hpp"
+#include "librbd/Types.h"
+#include "librbd/deep_copy/Types.h"
+
+#include <boost/optional.hpp>
+
+namespace librbd {
+namespace deep_copy {
+namespace util {
+
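+// Build, for each source snap id within [src_snap_id_start, src_snap_id_end],
+// the newest-first list of destination snap ids up to and including its mapped
+// destination snapshot.
+//
+// Illustrative example (values are hypothetical): with
+//   snap_seqs    = {1 -> 11, 3 -> 13}
+//   dst_snap_ids = {11, 12, 13}
+// and the full range [0, CEPH_NOSNAP], the result is
+//   snap_map     = {1 -> [11], 3 -> [13, 12, 11]}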
+void compute_snap_map(CephContext* cct,
+ librados::snap_t src_snap_id_start,
+ librados::snap_t src_snap_id_end,
+ const SnapIds& dst_snap_ids,
+ const SnapSeqs &snap_seqs,
+ SnapMap *snap_map);
+
+} // namespace util
+} // namespace deep_copy
+} // namespace librbd
+
+#endif // CEPH_LIBRBD_DEEP_COPY_UTILS_H