From 19fcec84d8d7d21e796c7624e521b60d28ee21ed Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:45:59 +0200
Subject: Adding upstream version 16.2.11+ds.

Signed-off-by: Daniel Baumann
---
 src/librbd/exclusive_lock/AutomaticPolicy.cc    |  29 ++
 src/librbd/exclusive_lock/AutomaticPolicy.h     |  34 +++
 src/librbd/exclusive_lock/ImageDispatch.cc      | 318 ++++++++++++++++++++
 src/librbd/exclusive_lock/ImageDispatch.h       | 134 +++++++++
 src/librbd/exclusive_lock/Policy.h              |  31 ++
 src/librbd/exclusive_lock/PostAcquireRequest.cc | 368 ++++++++++++++++++++++++
 src/librbd/exclusive_lock/PostAcquireRequest.h  | 124 ++++++++
 src/librbd/exclusive_lock/PreAcquireRequest.cc  |  95 ++++++
 src/librbd/exclusive_lock/PreAcquireRequest.h   |  75 +++++
 src/librbd/exclusive_lock/PreReleaseRequest.cc  | 363 +++++++++++++++++++++++
 src/librbd/exclusive_lock/PreReleaseRequest.h   | 139 +++++++++
 src/librbd/exclusive_lock/StandardPolicy.cc     |  29 ++
 src/librbd/exclusive_lock/StandardPolicy.h      |  37 +++
 13 files changed, 1776 insertions(+)
 create mode 100644 src/librbd/exclusive_lock/AutomaticPolicy.cc
 create mode 100644 src/librbd/exclusive_lock/AutomaticPolicy.h
 create mode 100644 src/librbd/exclusive_lock/ImageDispatch.cc
 create mode 100644 src/librbd/exclusive_lock/ImageDispatch.h
 create mode 100644 src/librbd/exclusive_lock/Policy.h
 create mode 100644 src/librbd/exclusive_lock/PostAcquireRequest.cc
 create mode 100644 src/librbd/exclusive_lock/PostAcquireRequest.h
 create mode 100644 src/librbd/exclusive_lock/PreAcquireRequest.cc
 create mode 100644 src/librbd/exclusive_lock/PreAcquireRequest.h
 create mode 100644 src/librbd/exclusive_lock/PreReleaseRequest.cc
 create mode 100644 src/librbd/exclusive_lock/PreReleaseRequest.h
 create mode 100644 src/librbd/exclusive_lock/StandardPolicy.cc
 create mode 100644 src/librbd/exclusive_lock/StandardPolicy.h

(limited to 'src/librbd/exclusive_lock')

diff --git a/src/librbd/exclusive_lock/AutomaticPolicy.cc b/src/librbd/exclusive_lock/AutomaticPolicy.cc
new file mode 100644
index 000000000..bfaddc1b2
--- /dev/null
+++ b/src/librbd/exclusive_lock/AutomaticPolicy.cc
@@ -0,0 +1,29 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "librbd/exclusive_lock/AutomaticPolicy.h"
+#include "librbd/ImageCtx.h"
+#include "librbd/ExclusiveLock.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::ExclusiveLock::AutomaticPolicy "
+
+namespace librbd {
+namespace exclusive_lock {
+
+int AutomaticPolicy::lock_requested(bool force) {
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock));
+  ceph_assert(m_image_ctx->exclusive_lock != nullptr);
+
+  ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force
+                              << dendl;
+
+  // release the lock upon request (ignore forced requests)
+  m_image_ctx->exclusive_lock->release_lock(nullptr);
+  return 0;
+}
+
+} // namespace exclusive_lock
+} // namespace librbd
+
diff --git a/src/librbd/exclusive_lock/AutomaticPolicy.h b/src/librbd/exclusive_lock/AutomaticPolicy.h
new file mode 100644
index 000000000..12ba9b6c4
--- /dev/null
+++ b/src/librbd/exclusive_lock/AutomaticPolicy.h
@@ -0,0 +1,34 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_AUTOMATIC_POLICY_H
+#define CEPH_LIBRBD_EXCLUSIVE_LOCK_AUTOMATIC_POLICY_H
+
+#include "librbd/exclusive_lock/Policy.h"
+
+namespace librbd {
+
+struct ImageCtx;
+
+namespace exclusive_lock {
+
+class
AutomaticPolicy : public Policy { +public: + AutomaticPolicy(ImageCtx *image_ctx) : m_image_ctx(image_ctx) { + } + + bool may_auto_request_lock() override { + return true; + } + + int lock_requested(bool force) override; + +private: + ImageCtx *m_image_ctx; + +}; + +} // namespace exclusive_lock +} // namespace librbd + +#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_AUTOMATIC_POLICY_H diff --git a/src/librbd/exclusive_lock/ImageDispatch.cc b/src/librbd/exclusive_lock/ImageDispatch.cc new file mode 100644 index 000000000..cd7f450f2 --- /dev/null +++ b/src/librbd/exclusive_lock/ImageDispatch.cc @@ -0,0 +1,318 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "librbd/exclusive_lock/ImageDispatch.h" +#include "include/Context.h" +#include "common/dout.h" +#include "common/errno.h" +#include "librbd/ExclusiveLock.h" +#include "librbd/ImageCtx.h" +#include "librbd/Utils.h" +#include "librbd/exclusive_lock/Policy.h" +#include "librbd/io/AioCompletion.h" +#include "librbd/io/ImageDispatchSpec.h" +#include "librbd/io/ImageDispatcherInterface.h" + +#define dout_subsys ceph_subsys_rbd +#undef dout_prefix +#define dout_prefix *_dout << "librbd::exclusive_lock::ImageDispatch: " \ + << this << " " << __func__ << ": " + +namespace librbd { +namespace exclusive_lock { + +using util::create_context_callback; + +template +ImageDispatch::ImageDispatch(I* image_ctx) + : m_image_ctx(image_ctx), + m_lock(ceph::make_shared_mutex( + util::unique_lock_name("librbd::exclusve_lock::ImageDispatch::m_lock", + this))) { +} + +template +void ImageDispatch::shut_down(Context* on_finish) { + // release any IO waiting on exclusive lock + Contexts on_dispatches; + { + std::unique_lock locker{m_lock}; + std::swap(on_dispatches, m_on_dispatches); + } + + for (auto ctx : on_dispatches) { + ctx->complete(0); + } + + on_finish->complete(0); +} + +template +void ImageDispatch::set_require_lock(bool init_shutdown, + io::Direction direction, + Context* on_finish) { + // pause any matching IO from proceeding past this layer + set_require_lock(direction, true); + + if (direction == io::DIRECTION_READ) { + on_finish->complete(0); + return; + } + + // push through a flush for any in-flight writes at lower levels + auto aio_comp = io::AioCompletion::create_and_start( + on_finish, util::get_image_ctx(m_image_ctx), io::AIO_TYPE_FLUSH); + auto req = io::ImageDispatchSpec::create_flush( + *m_image_ctx, io::IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK, aio_comp, + (init_shutdown ? 
+ io::FLUSH_SOURCE_EXCLUSIVE_LOCK_SKIP_REFRESH : + io::FLUSH_SOURCE_EXCLUSIVE_LOCK), {}); + req->send(); +} + +template +void ImageDispatch::unset_require_lock(io::Direction direction) { + set_require_lock(direction, false); +} + +template +bool ImageDispatch::set_require_lock(io::Direction direction, bool enabled) { + auto cct = m_image_ctx->cct; + ldout(cct, 20) << "direction=" << direction << ", enabled=" << enabled + << dendl; + + std::unique_lock locker{m_lock}; + auto prev_require_lock = (m_require_lock_on_read || m_require_lock_on_write); + + switch (direction) { + case io::DIRECTION_READ: + m_require_lock_on_read = enabled; + break; + case io::DIRECTION_WRITE: + m_require_lock_on_write = enabled; + break; + case io::DIRECTION_BOTH: + m_require_lock_on_read = enabled; + m_require_lock_on_write = enabled; + break; + } + + bool require_lock = (m_require_lock_on_read || m_require_lock_on_write); + return ((enabled && !prev_require_lock && require_lock) || + (!enabled && prev_require_lock && !require_lock)); +} + +template +bool ImageDispatch::read( + io::AioCompletion* aio_comp, io::Extents &&image_extents, + io::ReadResult &&read_result, IOContext io_context, int op_flags, + int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid, + std::atomic* image_dispatch_flags, + io::DispatchResult* dispatch_result, Context** on_finish, + Context* on_dispatched) { + auto cct = m_image_ctx->cct; + ldout(cct, 20) << "image_extents=" << image_extents << dendl; + + if (needs_exclusive_lock(true, tid, dispatch_result, on_dispatched)) { + return true; + } + + return false; +} + +template +bool ImageDispatch::write( + io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl, + IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, + uint64_t tid, std::atomic* image_dispatch_flags, + io::DispatchResult* dispatch_result, Context** on_finish, + Context* on_dispatched) { + auto cct = m_image_ctx->cct; + ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents + << dendl; + + if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) { + return true; + } + + return false; +} + +template +bool ImageDispatch::discard( + io::AioCompletion* aio_comp, io::Extents &&image_extents, + uint32_t discard_granularity_bytes, IOContext io_context, + const ZTracer::Trace &parent_trace, uint64_t tid, + std::atomic* image_dispatch_flags, + io::DispatchResult* dispatch_result, Context** on_finish, + Context* on_dispatched) { + auto cct = m_image_ctx->cct; + ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents + << dendl; + + if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) { + return true; + } + + return false; +} + +template +bool ImageDispatch::write_same( + io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl, + IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, + uint64_t tid, std::atomic* image_dispatch_flags, + io::DispatchResult* dispatch_result, Context** on_finish, + Context* on_dispatched) { + auto cct = m_image_ctx->cct; + ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents + << dendl; + + if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) { + return true; + } + + return false; +} + +template +bool ImageDispatch::compare_and_write( + io::AioCompletion* aio_comp, io::Extents &&image_extents, + bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset, + IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, + 
uint64_t tid, std::atomic* image_dispatch_flags, + io::DispatchResult* dispatch_result, Context** on_finish, + Context* on_dispatched) { + auto cct = m_image_ctx->cct; + ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents + << dendl; + + if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) { + return true; + } + + return false; +} + +template +bool ImageDispatch::flush( + io::AioCompletion* aio_comp, io::FlushSource flush_source, + const ZTracer::Trace &parent_trace, uint64_t tid, + std::atomic* image_dispatch_flags, + io::DispatchResult* dispatch_result, Context** on_finish, + Context* on_dispatched) { + auto cct = m_image_ctx->cct; + ldout(cct, 20) << "tid=" << tid << dendl; + + // don't attempt to grab the exclusive lock if were are just internally + // clearing out our in-flight IO queue + if (flush_source != io::FLUSH_SOURCE_USER) { + return false; + } + + if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) { + return true; + } + + return false; +} + +template +bool ImageDispatch::is_lock_required(bool read_op) const { + ceph_assert(ceph_mutex_is_locked(m_lock)); + return ((read_op && m_require_lock_on_read) || + (!read_op && m_require_lock_on_write)); +} + +template +bool ImageDispatch::needs_exclusive_lock(bool read_op, uint64_t tid, + io::DispatchResult* dispatch_result, + Context* on_dispatched) { + auto cct = m_image_ctx->cct; + bool lock_required = false; + { + std::shared_lock locker{m_lock}; + lock_required = is_lock_required(read_op); + } + + if (lock_required) { + std::shared_lock owner_locker{m_image_ctx->owner_lock}; + if (m_image_ctx->exclusive_lock == nullptr) { + // raced with the exclusive lock being disabled + return false; + } + + ldout(cct, 5) << "exclusive lock required: delaying IO" << dendl; + if (!m_image_ctx->get_exclusive_lock_policy()->may_auto_request_lock()) { + lderr(cct) << "op requires exclusive lock" << dendl; + + *dispatch_result = io::DISPATCH_RESULT_CONTINUE; + on_dispatched->complete( + m_image_ctx->exclusive_lock->get_unlocked_op_error()); + return true; + } + + // block potential races with other incoming IOs + std::unique_lock locker{m_lock}; + bool retesting_lock = ( + !m_on_dispatches.empty() && m_on_dispatches.front() == on_dispatched); + if (!m_on_dispatches.empty() && !retesting_lock) { + *dispatch_result = io::DISPATCH_RESULT_RESTART; + m_on_dispatches.push_back(on_dispatched); + return true; + } + + if (!is_lock_required(read_op)) { + return false; + } + + ceph_assert(m_on_dispatches.empty() || retesting_lock); + m_on_dispatches.push_back(on_dispatched); + locker.unlock(); + + *dispatch_result = io::DISPATCH_RESULT_RESTART; + auto ctx = create_context_callback< + ImageDispatch, &ImageDispatch::handle_acquire_lock>(this); + m_image_ctx->exclusive_lock->acquire_lock(ctx); + return true; + } + + return false; +} + +template +void ImageDispatch::handle_acquire_lock(int r) { + auto cct = m_image_ctx->cct; + ldout(cct, 5) << "r=" << r << dendl; + + std::unique_lock locker{m_lock}; + ceph_assert(!m_on_dispatches.empty()); + + Context* failed_dispatch = nullptr; + Contexts on_dispatches; + if (r == -ESHUTDOWN) { + ldout(cct, 5) << "IO raced with exclusive lock shutdown" << dendl; + } else if (r < 0) { + lderr(cct) << "failed to acquire exclusive lock: " << cpp_strerror(r) + << dendl; + failed_dispatch = m_on_dispatches.front(); + m_on_dispatches.pop_front(); + } + + // re-test if lock is still required (i.e. 
it wasn't acquired/lost) via a + // restart dispatch + std::swap(on_dispatches, m_on_dispatches); + locker.unlock(); + + if (failed_dispatch != nullptr) { + failed_dispatch->complete(r); + } + for (auto ctx : on_dispatches) { + ctx->complete(0); + } +} + +} // namespace exclusive_lock +} // namespace librbd + +template class librbd::exclusive_lock::ImageDispatch; diff --git a/src/librbd/exclusive_lock/ImageDispatch.h b/src/librbd/exclusive_lock/ImageDispatch.h new file mode 100644 index 000000000..77c101973 --- /dev/null +++ b/src/librbd/exclusive_lock/ImageDispatch.h @@ -0,0 +1,134 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H +#define CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H + +#include "librbd/io/ImageDispatchInterface.h" +#include "include/int_types.h" +#include "include/buffer.h" +#include "common/ceph_mutex.h" +#include "common/zipkin_trace.h" +#include "librbd/io/ReadResult.h" +#include "librbd/io/Types.h" +#include +#include +#include + +struct Context; + +namespace librbd { + +struct ImageCtx; + +namespace io { +struct AioCompletion; +} + +namespace exclusive_lock { + +template +class ImageDispatch : public io::ImageDispatchInterface { +public: + static ImageDispatch* create(ImageCtxT* image_ctx) { + return new ImageDispatch(image_ctx); + } + void destroy() { + delete this; + } + + ImageDispatch(ImageCtxT* image_ctx); + + io::ImageDispatchLayer get_dispatch_layer() const override { + return io::IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK; + } + + void set_require_lock(bool init_shutdown, + io::Direction direction, Context* on_finish); + void unset_require_lock(io::Direction direction); + + void shut_down(Context* on_finish) override; + + bool read( + io::AioCompletion* aio_comp, io::Extents &&image_extents, + io::ReadResult &&read_result, IOContext io_context, int op_flags, + int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid, + std::atomic* image_dispatch_flags, + io::DispatchResult* dispatch_result, Context** on_finish, + Context* on_dispatched) override; + bool write( + io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl, + IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, + uint64_t tid, std::atomic* image_dispatch_flags, + io::DispatchResult* dispatch_result, Context** on_finish, + Context* on_dispatched) override; + bool discard( + io::AioCompletion* aio_comp, io::Extents &&image_extents, + uint32_t discard_granularity_bytes, IOContext io_context, + const ZTracer::Trace &parent_trace, uint64_t tid, + std::atomic* image_dispatch_flags, + io::DispatchResult* dispatch_result, Context** on_finish, + Context* on_dispatched) override; + bool write_same( + io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl, + IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, + uint64_t tid, std::atomic* image_dispatch_flags, + io::DispatchResult* dispatch_result, Context** on_finish, + Context* on_dispatched) override; + bool compare_and_write( + io::AioCompletion* aio_comp, io::Extents &&image_extents, + bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset, + IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, + uint64_t tid, std::atomic* image_dispatch_flags, + io::DispatchResult* dispatch_result, Context** on_finish, + Context* on_dispatched) override; + bool flush( + io::AioCompletion* aio_comp, io::FlushSource flush_source, + const ZTracer::Trace 
&parent_trace, uint64_t tid, + std::atomic* image_dispatch_flags, + io::DispatchResult* dispatch_result, Context** on_finish, + Context* on_dispatched) override; + + bool list_snaps( + io::AioCompletion* aio_comp, io::Extents&& image_extents, + io::SnapIds&& snap_ids, int list_snaps_flags, + io::SnapshotDelta* snapshot_delta, const ZTracer::Trace &parent_trace, + uint64_t tid, std::atomic* image_dispatch_flags, + io::DispatchResult* dispatch_result, Context** on_finish, + Context* on_dispatched) override { + return false; + } + + bool invalidate_cache(Context* on_finish) override { + return false; + } + +private: + typedef std::list Contexts; + typedef std::unordered_set Tids; + + ImageCtxT* m_image_ctx; + mutable ceph::shared_mutex m_lock; + + bool m_require_lock_on_read = false; + bool m_require_lock_on_write = false; + + Contexts m_on_dispatches; + + bool set_require_lock(io::Direction direction, bool enabled); + + bool is_lock_required(bool read_op) const; + + bool needs_exclusive_lock(bool read_op, uint64_t tid, + io::DispatchResult* dispatch_result, + Context* on_dispatched); + + void handle_acquire_lock(int r); +}; + +} // namespace exclusiv_lock +} // namespace librbd + +extern template class librbd::exclusive_lock::ImageDispatch; + +#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H diff --git a/src/librbd/exclusive_lock/Policy.h b/src/librbd/exclusive_lock/Policy.h new file mode 100644 index 000000000..7064a6515 --- /dev/null +++ b/src/librbd/exclusive_lock/Policy.h @@ -0,0 +1,31 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_POLICY_H +#define CEPH_LIBRBD_EXCLUSIVE_LOCK_POLICY_H + +namespace librbd { +namespace exclusive_lock { + +enum OperationRequestType { + OPERATION_REQUEST_TYPE_GENERAL = 0, + OPERATION_REQUEST_TYPE_TRASH_SNAP_REMOVE = 1, + OPERATION_REQUEST_TYPE_FORCE_PROMOTION = 2, +}; + +struct Policy { + virtual ~Policy() { + } + + virtual bool may_auto_request_lock() = 0; + virtual int lock_requested(bool force) = 0; + + virtual bool accept_blocked_request(OperationRequestType) { + return false; + } +}; + +} // namespace exclusive_lock +} // namespace librbd + +#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_POLICY_H diff --git a/src/librbd/exclusive_lock/PostAcquireRequest.cc b/src/librbd/exclusive_lock/PostAcquireRequest.cc new file mode 100644 index 000000000..4553b2158 --- /dev/null +++ b/src/librbd/exclusive_lock/PostAcquireRequest.cc @@ -0,0 +1,368 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "librbd/exclusive_lock/PostAcquireRequest.h" +#include "cls/lock/cls_lock_client.h" +#include "cls/lock/cls_lock_types.h" +#include "common/dout.h" +#include "common/errno.h" +#include "include/stringify.h" +#include "librbd/ExclusiveLock.h" +#include "librbd/ImageCtx.h" +#include "librbd/ImageState.h" +#include "librbd/ImageWatcher.h" +#include "librbd/Journal.h" +#include "librbd/ObjectMap.h" +#include "librbd/Utils.h" +#include "librbd/image/RefreshRequest.h" +#include "librbd/journal/Policy.h" +#include "librbd/PluginRegistry.h" + +#define dout_subsys ceph_subsys_rbd +#undef dout_prefix +#define dout_prefix *_dout << "librbd::exclusive_lock::PostAcquireRequest: " \ + << this << " " << __func__ << ": " + +namespace librbd { +namespace exclusive_lock { + +using util::create_async_context_callback; +using util::create_context_callback; +using util::create_rados_callback; + +template +PostAcquireRequest* 
PostAcquireRequest::create(I &image_ctx, + Context *on_acquire, + Context *on_finish) { + return new PostAcquireRequest(image_ctx, on_acquire, on_finish); +} + +template +PostAcquireRequest::PostAcquireRequest(I &image_ctx, Context *on_acquire, + Context *on_finish) + : m_image_ctx(image_ctx), + m_on_acquire(on_acquire), + m_on_finish(create_async_context_callback(image_ctx, on_finish)), + m_object_map(nullptr), m_journal(nullptr), m_error_result(0) { +} + +template +PostAcquireRequest::~PostAcquireRequest() { + if (!m_prepare_lock_completed) { + m_image_ctx.state->handle_prepare_lock_complete(); + } + delete m_on_acquire; +} + +template +void PostAcquireRequest::send() { + send_refresh(); +} + +template +void PostAcquireRequest::send_refresh() { + if (!m_image_ctx.state->is_refresh_required()) { + send_open_object_map(); + return; + } + + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + using klass = PostAcquireRequest; + Context *ctx = create_async_context_callback( + m_image_ctx, create_context_callback(this)); + + // ImageState is blocked waiting for lock to complete -- safe to directly + // refresh + image::RefreshRequest *req = image::RefreshRequest::create( + m_image_ctx, true, false, ctx); + req->send(); +} + +template +void PostAcquireRequest::handle_refresh(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + if (r == -ERESTART) { + // next issued IO or op will (re)-refresh the image and shut down lock + ldout(cct, 5) << "exclusive lock dynamically disabled" << dendl; + r = 0; + } else if (r < 0) { + lderr(cct) << "failed to refresh image: " << cpp_strerror(r) << dendl; + save_result(r); + revert(); + finish(); + return; + } + + send_open_object_map(); +} + +template +void PostAcquireRequest::send_open_journal() { + // alert caller that we now own the exclusive lock + m_on_acquire->complete(0); + m_on_acquire = nullptr; + + bool journal_enabled; + { + std::shared_lock image_locker{m_image_ctx.image_lock}; + journal_enabled = (m_image_ctx.test_features(RBD_FEATURE_JOURNALING, + m_image_ctx.image_lock) && + !m_image_ctx.get_journal_policy()->journal_disabled()); + } + if (!journal_enabled) { + apply(); + send_process_plugin_acquire_lock(); + return; + } + + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + using klass = PostAcquireRequest; + Context *ctx = create_context_callback( + this); + m_journal = m_image_ctx.create_journal(); + + // journal playback requires object map (if enabled) and itself + apply(); + + m_journal->open(ctx); +} + +template +void PostAcquireRequest::handle_open_journal(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + save_result(r); + if (r < 0) { + lderr(cct) << "failed to open journal: " << cpp_strerror(r) << dendl; + send_close_journal(); + return; + } + + send_allocate_journal_tag(); +} + +template +void PostAcquireRequest::send_allocate_journal_tag() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + std::shared_lock image_locker{m_image_ctx.image_lock}; + using klass = PostAcquireRequest; + Context *ctx = create_context_callback< + klass, &klass::handle_allocate_journal_tag>(this, m_journal); + m_image_ctx.get_journal_policy()->allocate_tag_on_lock(ctx); +} + +template +void PostAcquireRequest::handle_allocate_journal_tag(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + save_result(r); + if (r < 0) { + lderr(cct) << "failed to allocate journal tag: " << cpp_strerror(r) + << 
dendl; + send_close_journal(); + return; + } + + send_process_plugin_acquire_lock(); +} + +template +void PostAcquireRequest::send_process_plugin_acquire_lock() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + using klass = PostAcquireRequest; + Context *ctx = create_context_callback< + klass, &klass::handle_process_plugin_acquire_lock>(this); + m_image_ctx.plugin_registry->acquired_exclusive_lock(ctx); +} + +template +void PostAcquireRequest::handle_process_plugin_acquire_lock(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + save_result(r); + if (r < 0) { + lderr(cct) << "failed to process plugins: " << cpp_strerror(r) + << dendl; + send_process_plugin_release_lock(); + return; + } + + finish(); +} + +template +void PostAcquireRequest::send_process_plugin_release_lock() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + using klass = PostAcquireRequest; + Context *ctx = create_context_callback< + klass, &klass::handle_process_plugin_release_lock>(this); + m_image_ctx.plugin_registry->prerelease_exclusive_lock(ctx); +} + +template +void PostAcquireRequest::handle_process_plugin_release_lock(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + save_result(r); + if (r < 0) { + lderr(cct) << "failed to release plugins: " << cpp_strerror(r) + << dendl; + } + send_close_journal(); +} + +template +void PostAcquireRequest::send_close_journal() { + if (m_journal == nullptr) { + send_close_object_map(); + return; + } + + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + using klass = PostAcquireRequest; + Context *ctx = create_context_callback( + this); + m_journal->close(ctx); +} + +template +void PostAcquireRequest::handle_close_journal(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + save_result(r); + if (r < 0) { + lderr(cct) << "failed to close journal: " << cpp_strerror(r) << dendl; + } + + send_close_object_map(); +} + +template +void PostAcquireRequest::send_open_object_map() { + if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) { + send_open_journal(); + return; + } + + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + using klass = PostAcquireRequest; + Context *ctx = create_context_callback( + this); + + m_object_map = m_image_ctx.create_object_map(CEPH_NOSNAP); + m_object_map->open(ctx); +} + +template +void PostAcquireRequest::handle_open_object_map(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + if (r < 0) { + lderr(cct) << "failed to open object map: " << cpp_strerror(r) << dendl; + m_object_map->put(); + m_object_map = nullptr; + + if (r != -EFBIG) { + save_result(r); + revert(); + finish(); + return; + } + } + + send_open_journal(); +} + +template +void PostAcquireRequest::send_close_object_map() { + if (m_object_map == nullptr) { + revert(); + finish(); + return; + } + + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + using klass = PostAcquireRequest; + Context *ctx = create_context_callback< + klass, &klass::handle_close_object_map>(this); + m_object_map->close(ctx); +} + +template +void PostAcquireRequest::handle_close_object_map(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + if (r < 0) { + lderr(cct) << "failed to close object map: " << cpp_strerror(r) << dendl; + } + + revert(); + finish(); +} + +template +void PostAcquireRequest::apply() { + { + std::unique_lock 
image_locker{m_image_ctx.image_lock}; + ceph_assert(m_image_ctx.object_map == nullptr); + m_image_ctx.object_map = m_object_map; + + ceph_assert(m_image_ctx.journal == nullptr); + m_image_ctx.journal = m_journal; + } + + m_prepare_lock_completed = true; + m_image_ctx.state->handle_prepare_lock_complete(); +} + +template +void PostAcquireRequest::revert() { + std::unique_lock image_locker{m_image_ctx.image_lock}; + m_image_ctx.object_map = nullptr; + m_image_ctx.journal = nullptr; + + if (m_object_map) { + m_object_map->put(); + } + if (m_journal) { + m_journal->put(); + } + + ceph_assert(m_error_result < 0); +} + +template +void PostAcquireRequest::finish() { + m_on_finish->complete(m_error_result); + delete this; +} + +} // namespace exclusive_lock +} // namespace librbd + +template class librbd::exclusive_lock::PostAcquireRequest; diff --git a/src/librbd/exclusive_lock/PostAcquireRequest.h b/src/librbd/exclusive_lock/PostAcquireRequest.h new file mode 100644 index 000000000..2f7efdf07 --- /dev/null +++ b/src/librbd/exclusive_lock/PostAcquireRequest.h @@ -0,0 +1,124 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_POST_ACQUIRE_REQUEST_H +#define CEPH_LIBRBD_EXCLUSIVE_LOCK_POST_ACQUIRE_REQUEST_H + +#include "include/int_types.h" +#include "include/buffer.h" +#include "librbd/ImageCtx.h" +#include "msg/msg_types.h" +#include + +class Context; + +namespace librbd { + +namespace exclusive_lock { + +template +class PostAcquireRequest { +public: + static PostAcquireRequest* create(ImageCtxT &image_ctx, Context *on_acquire, + Context *on_finish); + + ~PostAcquireRequest(); + void send(); + +private: + + /** + * @verbatim + * + * + * | + * | + * v + * REFRESH (skip if not + * | needed) + * v + * OPEN_OBJECT_MAP (skip if + * | disabled) + * v + * OPEN_JOURNAL (skip if + * | * disabled) + * | * + * | * * * * * * * * + * v * + * ALLOCATE_JOURNAL_TAG * + * | * * + * | * * + * v * * + * PROCESS_PLUGIN_ACQUIRE* + * | * * + * | * * + * | v v v + * | PROCESS_PLUGIN_RELEASE + * | | + * | v + * | CLOSE_JOURNAL + * | | + * | v + * | CLOSE_OBJECT_MAP + * | | + * v | + * <----------/ + * + * @endverbatim + */ + + PostAcquireRequest(ImageCtxT &image_ctx, Context *on_acquire, + Context *on_finish); + + ImageCtxT &m_image_ctx; + Context *m_on_acquire; + Context *m_on_finish; + + decltype(m_image_ctx.object_map) m_object_map; + decltype(m_image_ctx.journal) m_journal; + + bool m_prepare_lock_completed = false; + int m_error_result; + + void send_refresh(); + void handle_refresh(int r); + + void send_open_journal(); + void handle_open_journal(int r); + + void send_allocate_journal_tag(); + void handle_allocate_journal_tag(int r); + + void send_open_object_map(); + void handle_open_object_map(int r); + + void send_close_journal(); + void handle_close_journal(int r); + + void send_close_object_map(); + void handle_close_object_map(int r); + + void send_process_plugin_acquire_lock(); + void handle_process_plugin_acquire_lock(int r); + + void send_process_plugin_release_lock(); + void handle_process_plugin_release_lock(int r); + + void apply(); + void revert(); + + void finish(); + + void save_result(int result) { + if (m_error_result == 0 && result < 0) { + m_error_result = result; + } + } +}; + +} // namespace exclusive_lock +} // namespace librbd + +extern template class librbd::exclusive_lock::PostAcquireRequest; + +#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_POST_ACQUIRE_REQUEST_H diff --git 
a/src/librbd/exclusive_lock/PreAcquireRequest.cc b/src/librbd/exclusive_lock/PreAcquireRequest.cc new file mode 100644 index 000000000..feb0913d7 --- /dev/null +++ b/src/librbd/exclusive_lock/PreAcquireRequest.cc @@ -0,0 +1,95 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "librbd/exclusive_lock/PreAcquireRequest.h" +#include "librbd/Utils.h" +#include "common/dout.h" +#include "common/errno.h" +#include "librbd/ImageCtx.h" +#include "librbd/ImageWatcher.h" +#include "librbd/ImageState.h" +#include "librbd/asio/ContextWQ.h" + +#define dout_subsys ceph_subsys_rbd +#undef dout_prefix +#define dout_prefix *_dout << "librbd::exclusive_lock::PreAcquireRequest: " \ + << this << " " << __func__ << ": " + +namespace librbd { +namespace exclusive_lock { + +using util::create_async_context_callback; +using util::create_context_callback; +using util::create_rados_callback; + +template +PreAcquireRequest* PreAcquireRequest::create(I &image_ctx, + Context *on_finish) { + return new PreAcquireRequest(image_ctx, on_finish); +} + +template +PreAcquireRequest::PreAcquireRequest(I &image_ctx, Context *on_finish) + : m_image_ctx(image_ctx), + m_on_finish(create_async_context_callback(image_ctx, on_finish)), + m_error_result(0) { +} + +template +PreAcquireRequest::~PreAcquireRequest() { +} + +template +void PreAcquireRequest::send() { + send_prepare_lock(); +} + +template +void PreAcquireRequest::send_prepare_lock() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + // acquire the lock if the image is not busy performing other actions + Context *ctx = create_context_callback< + PreAcquireRequest, &PreAcquireRequest::handle_prepare_lock>(this); + m_image_ctx.state->prepare_lock(ctx); +} + +template +void PreAcquireRequest::handle_prepare_lock(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + send_flush_notifies(); +} + +template +void PreAcquireRequest::send_flush_notifies() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + using klass = PreAcquireRequest; + Context *ctx = create_context_callback( + this); + m_image_ctx.image_watcher->flush(ctx); +} + +template +void PreAcquireRequest::handle_flush_notifies(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + ceph_assert(r == 0); + finish(); +} + +template +void PreAcquireRequest::finish() { + m_on_finish->complete(m_error_result); + delete this; +} + +} // namespace exclusive_lock +} // namespace librbd + +template class librbd::exclusive_lock::PreAcquireRequest; diff --git a/src/librbd/exclusive_lock/PreAcquireRequest.h b/src/librbd/exclusive_lock/PreAcquireRequest.h new file mode 100644 index 000000000..15d4b2c12 --- /dev/null +++ b/src/librbd/exclusive_lock/PreAcquireRequest.h @@ -0,0 +1,75 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_ACQUIRE_REQUEST_H +#define CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_ACQUIRE_REQUEST_H + +#include "include/int_types.h" +#include "include/buffer.h" +#include "librbd/ImageCtx.h" +#include "msg/msg_types.h" +#include + +class Context; + +namespace librbd { + +namespace exclusive_lock { + +template +class PreAcquireRequest { +public: + static PreAcquireRequest* create(ImageCtxT &image_ctx, Context *on_finish); + + ~PreAcquireRequest(); + void send(); + +private: + + /** + * @verbatim + * + * + * | + * v + * PREPARE_LOCK + * | + * v + * FLUSH_NOTIFIES + * | + * | + * 
| + v + * + * + * @endverbatim + */ + + PreAcquireRequest(ImageCtxT &image_ctx, Context *on_finish); + + ImageCtxT &m_image_ctx; + Context *m_on_finish; + + int m_error_result; + + void send_prepare_lock(); + void handle_prepare_lock(int r); + + void send_flush_notifies(); + void handle_flush_notifies(int r); + + void finish(); + + void save_result(int result) { + if (m_error_result == 0 && result < 0) { + m_error_result = result; + } + } +}; + +} // namespace exclusive_lock +} // namespace librbd + +extern template class librbd::exclusive_lock::PreAcquireRequest; + +#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_ACQUIRE_REQUEST_H diff --git a/src/librbd/exclusive_lock/PreReleaseRequest.cc b/src/librbd/exclusive_lock/PreReleaseRequest.cc new file mode 100644 index 000000000..a9cd1248a --- /dev/null +++ b/src/librbd/exclusive_lock/PreReleaseRequest.cc @@ -0,0 +1,363 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "librbd/exclusive_lock/PreReleaseRequest.h" +#include "common/AsyncOpTracker.h" +#include "common/dout.h" +#include "common/errno.h" +#include "librbd/ExclusiveLock.h" +#include "librbd/ImageState.h" +#include "librbd/ImageWatcher.h" +#include "librbd/Journal.h" +#include "librbd/ObjectMap.h" +#include "librbd/Utils.h" +#include "librbd/exclusive_lock/ImageDispatch.h" +#include "librbd/io/AioCompletion.h" +#include "librbd/io/ImageDispatchSpec.h" +#include "librbd/io/ImageDispatcherInterface.h" +#include "librbd/io/ObjectDispatcherInterface.h" +#include "librbd/io/Types.h" +#include "librbd/PluginRegistry.h" + +#define dout_subsys ceph_subsys_rbd +#undef dout_prefix +#define dout_prefix *_dout << "librbd::exclusive_lock::PreReleaseRequest: " \ + << this << " " << __func__ << ": " + +namespace librbd { +namespace exclusive_lock { + +using util::create_async_context_callback; +using util::create_context_callback; + +template +PreReleaseRequest* PreReleaseRequest::create( + I &image_ctx, ImageDispatch* image_dispatch, bool shutting_down, + AsyncOpTracker &async_op_tracker, Context *on_finish) { + return new PreReleaseRequest(image_ctx, image_dispatch, shutting_down, + async_op_tracker, on_finish); +} + +template +PreReleaseRequest::PreReleaseRequest(I &image_ctx, + ImageDispatch* image_dispatch, + bool shutting_down, + AsyncOpTracker &async_op_tracker, + Context *on_finish) + : m_image_ctx(image_ctx), m_image_dispatch(image_dispatch), + m_shutting_down(shutting_down), m_async_op_tracker(async_op_tracker), + m_on_finish(create_async_context_callback(image_ctx, on_finish)) { +} + +template +PreReleaseRequest::~PreReleaseRequest() { + if (!m_shutting_down) { + m_image_ctx.state->handle_prepare_lock_complete(); + } +} + +template +void PreReleaseRequest::send() { + send_cancel_op_requests(); +} + +template +void PreReleaseRequest::send_cancel_op_requests() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + using klass = PreReleaseRequest; + Context *ctx = create_context_callback< + klass, &klass::handle_cancel_op_requests>(this); + m_image_ctx.cancel_async_requests(ctx); +} + +template +void PreReleaseRequest::handle_cancel_op_requests(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + ceph_assert(r == 0); + + send_set_require_lock(); +} + +template +void PreReleaseRequest::send_set_require_lock() { + if (!m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { + // exclusive-lock was disabled, no need to block IOs + send_wait_for_ops(); + return; + } + + CephContext *cct 
= m_image_ctx.cct; + ldout(cct, 10) << dendl; + + using klass = PreReleaseRequest; + Context *ctx = create_context_callback< + klass, &klass::handle_set_require_lock>(this); + + // setting the lock as required will automatically cause the IO + // queue to re-request the lock if any IO is queued + if (m_image_ctx.clone_copy_on_read || + m_image_ctx.test_features(RBD_FEATURE_JOURNALING) || + m_image_ctx.test_features(RBD_FEATURE_DIRTY_CACHE)) { + m_image_dispatch->set_require_lock(m_shutting_down, + io::DIRECTION_BOTH, ctx); + } else { + m_image_dispatch->set_require_lock(m_shutting_down, + io::DIRECTION_WRITE, ctx); + } +} + +template +void PreReleaseRequest::handle_set_require_lock(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + if (r < 0) { + // IOs are still flushed regardless of the error + lderr(cct) << "failed to set lock: " << cpp_strerror(r) << dendl; + } + + send_wait_for_ops(); +} + +template +void PreReleaseRequest::send_wait_for_ops() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + Context *ctx = create_context_callback< + PreReleaseRequest, &PreReleaseRequest::handle_wait_for_ops>(this); + m_async_op_tracker.wait_for_ops(ctx); +} + +template +void PreReleaseRequest::handle_wait_for_ops(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + send_prepare_lock(); +} + +template +void PreReleaseRequest::send_prepare_lock() { + if (m_shutting_down) { + send_process_plugin_release_lock(); + return; + } + + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + // release the lock if the image is not busy performing other actions + Context *ctx = create_context_callback< + PreReleaseRequest, &PreReleaseRequest::handle_prepare_lock>(this); + m_image_ctx.state->prepare_lock(ctx); +} + +template +void PreReleaseRequest::handle_prepare_lock(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + send_process_plugin_release_lock(); +} + +template +void PreReleaseRequest::send_process_plugin_release_lock() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + std::shared_lock owner_lock{m_image_ctx.owner_lock}; + Context *ctx = create_async_context_callback(m_image_ctx, create_context_callback< + PreReleaseRequest, + &PreReleaseRequest::handle_process_plugin_release_lock>(this)); + m_image_ctx.plugin_registry->prerelease_exclusive_lock(ctx); +} + +template +void PreReleaseRequest::handle_process_plugin_release_lock(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + if (r < 0) { + lderr(cct) << "failed to handle plugins before releasing lock: " + << cpp_strerror(r) << dendl; + m_image_dispatch->unset_require_lock(io::DIRECTION_BOTH); + save_result(r); + finish(); + return; + } + + send_invalidate_cache(); +} + +template +void PreReleaseRequest::send_invalidate_cache() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + Context *ctx = create_context_callback< + PreReleaseRequest, + &PreReleaseRequest::handle_invalidate_cache>(this); + m_image_ctx.io_image_dispatcher->invalidate_cache(ctx); +} + +template +void PreReleaseRequest::handle_invalidate_cache(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + if (r < 0 && r != -EBLOCKLISTED && r != -EBUSY) { + lderr(cct) << "failed to invalidate cache: " << cpp_strerror(r) + << dendl; + m_image_dispatch->unset_require_lock(io::DIRECTION_BOTH); + save_result(r); + finish(); + return; + } + + 
send_flush_io(); +} + +template +void PreReleaseRequest::send_flush_io() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + // ensure that all in-flight IO is flushed -- skipping the refresh layer + // since it should have been flushed when the lock was required and now + // refreshes are disabled / interlocked w/ this state machine. + auto ctx = create_context_callback< + PreReleaseRequest, &PreReleaseRequest::handle_flush_io>(this); + auto aio_comp = io::AioCompletion::create_and_start( + ctx, util::get_image_ctx(&m_image_ctx), librbd::io::AIO_TYPE_FLUSH); + auto req = io::ImageDispatchSpec::create_flush( + m_image_ctx, io::IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK, aio_comp, + io::FLUSH_SOURCE_EXCLUSIVE_LOCK_SKIP_REFRESH, {}); + req->send(); +} + +template +void PreReleaseRequest::handle_flush_io(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + if (r < 0) { + lderr(cct) << "failed to flush IO: " << cpp_strerror(r) << dendl; + } + + send_flush_notifies(); +} + +template +void PreReleaseRequest::send_flush_notifies() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + using klass = PreReleaseRequest; + Context *ctx = + create_context_callback(this); + m_image_ctx.image_watcher->flush(ctx); +} + +template +void PreReleaseRequest::handle_flush_notifies(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + ceph_assert(r == 0); + send_close_journal(); +} + +template +void PreReleaseRequest::send_close_journal() { + { + std::unique_lock image_locker{m_image_ctx.image_lock}; + std::swap(m_journal, m_image_ctx.journal); + } + + if (m_journal == nullptr) { + send_close_object_map(); + return; + } + + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + using klass = PreReleaseRequest; + Context *ctx = create_context_callback( + this); + m_journal->close(ctx); +} + +template +void PreReleaseRequest::handle_close_journal(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + if (r < 0) { + // error implies some journal events were not flushed -- continue + lderr(cct) << "failed to close journal: " << cpp_strerror(r) << dendl; + } + + m_journal->put(); + m_journal = nullptr; + + send_close_object_map(); +} + +template +void PreReleaseRequest::send_close_object_map() { + { + std::unique_lock image_locker{m_image_ctx.image_lock}; + std::swap(m_object_map, m_image_ctx.object_map); + } + + if (m_object_map == nullptr) { + send_unlock(); + return; + } + + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + using klass = PreReleaseRequest; + Context *ctx = create_context_callback< + klass, &klass::handle_close_object_map>(this, m_object_map); + m_object_map->close(ctx); +} + +template +void PreReleaseRequest::handle_close_object_map(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << "r=" << r << dendl; + + if (r < 0) { + lderr(cct) << "failed to close object map: " << cpp_strerror(r) << dendl; + } + m_object_map->put(); + + send_unlock(); +} + +template +void PreReleaseRequest::send_unlock() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << dendl; + + finish(); +} + +template +void PreReleaseRequest::finish() { + m_on_finish->complete(m_error_result); + delete this; +} + +} // namespace exclusive_lock +} // namespace librbd + +template class librbd::exclusive_lock::PreReleaseRequest; diff --git a/src/librbd/exclusive_lock/PreReleaseRequest.h b/src/librbd/exclusive_lock/PreReleaseRequest.h new file mode 100644 index 
000000000..426337943 --- /dev/null +++ b/src/librbd/exclusive_lock/PreReleaseRequest.h @@ -0,0 +1,139 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H +#define CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H + +#include "librbd/ImageCtx.h" +#include + +class AsyncOpTracker; +class Context; + +namespace librbd { + +struct ImageCtx; + +namespace exclusive_lock { + +template struct ImageDispatch; + +template +class PreReleaseRequest { +public: + static PreReleaseRequest* create(ImageCtxT &image_ctx, + ImageDispatch* image_dispatch, + bool shutting_down, + AsyncOpTracker &async_op_tracker, + Context *on_finish); + + ~PreReleaseRequest(); + void send(); + +private: + /** + * @verbatim + * + * + * | + * v + * CANCEL_OP_REQUESTS + * | + * v + * SET_REQUIRE_LOCK + * | + * v + * WAIT_FOR_OPS + * | + * v + * PREPARE_LOCK + * | + * v + * PROCESS_PLUGIN_RELEASE + * | + * v + * SHUT_DOWN_IMAGE_CACHE + * | + * v + * INVALIDATE_CACHE + * | + * v + * FLUSH_IO + * | + * v + * FLUSH_NOTIFIES . . . . . . . . . . . . . . + * | . + * v . + * CLOSE_JOURNAL . + * | (journal disabled, . + * v object map enabled) . + * CLOSE_OBJECT_MAP < . . . . . . . . . . . . + * | . + * v (object map disabled) . + * < . . . . . . . . . . . . . . . . . + * + * @endverbatim + */ + + PreReleaseRequest(ImageCtxT &image_ctx, + ImageDispatch* image_dispatch, + bool shutting_down, AsyncOpTracker &async_op_tracker, + Context *on_finish); + + ImageCtxT &m_image_ctx; + ImageDispatch* m_image_dispatch; + bool m_shutting_down; + AsyncOpTracker &m_async_op_tracker; + Context *m_on_finish; + + int m_error_result = 0; + + decltype(m_image_ctx.object_map) m_object_map = nullptr; + decltype(m_image_ctx.journal) m_journal = nullptr; + + void send_cancel_op_requests(); + void handle_cancel_op_requests(int r); + + void send_set_require_lock(); + void handle_set_require_lock(int r); + + void send_wait_for_ops(); + void handle_wait_for_ops(int r); + + void send_prepare_lock(); + void handle_prepare_lock(int r); + + void send_process_plugin_release_lock(); + void handle_process_plugin_release_lock(int r); + + void send_invalidate_cache(); + void handle_invalidate_cache(int r); + + void send_flush_io(); + void handle_flush_io(int r); + + void send_flush_notifies(); + void handle_flush_notifies(int r); + + void send_close_journal(); + void handle_close_journal(int r); + + void send_close_object_map(); + void handle_close_object_map(int r); + + void send_unlock(); + + void finish(); + + void save_result(int result) { + if (m_error_result == 0 && result < 0) { + m_error_result = result; + } + } + +}; + +} // namespace exclusive_lock +} // namespace librbd + +#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H diff --git a/src/librbd/exclusive_lock/StandardPolicy.cc b/src/librbd/exclusive_lock/StandardPolicy.cc new file mode 100644 index 000000000..519e9618e --- /dev/null +++ b/src/librbd/exclusive_lock/StandardPolicy.cc @@ -0,0 +1,29 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "librbd/exclusive_lock/StandardPolicy.h" +#include "librbd/ImageCtx.h" +#include "librbd/ExclusiveLock.h" + +#define dout_subsys ceph_subsys_rbd +#undef dout_prefix +#define dout_prefix *_dout << "librbd::ExclusiveLock::StandardPolicy " + +namespace librbd { +namespace exclusive_lock { + +template +int StandardPolicy::lock_requested(bool force) { + 
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock));
+  ceph_assert(m_image_ctx->exclusive_lock != nullptr);
+
+  ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force
+                              << dendl;
+
+  return -EROFS;
+}
+
+} // namespace exclusive_lock
+} // namespace librbd
+
+template class librbd::exclusive_lock::StandardPolicy<librbd::ImageCtx>;
diff --git a/src/librbd/exclusive_lock/StandardPolicy.h b/src/librbd/exclusive_lock/StandardPolicy.h
new file mode 100644
index 000000000..dd4e19050
--- /dev/null
+++ b/src/librbd/exclusive_lock/StandardPolicy.h
@@ -0,0 +1,37 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_STANDARD_POLICY_H
+#define CEPH_LIBRBD_EXCLUSIVE_LOCK_STANDARD_POLICY_H
+
+#include "librbd/exclusive_lock/Policy.h"
+
+namespace librbd {
+
+struct ImageCtx;
+
+namespace exclusive_lock {
+
+template <typename ImageCtxT = ImageCtx>
+class StandardPolicy : public Policy {
+public:
+  StandardPolicy(ImageCtxT* image_ctx) : m_image_ctx(image_ctx) {
+  }
+
+  bool may_auto_request_lock() override {
+    return false;
+  }
+
+  int lock_requested(bool force) override;
+
+private:
+  ImageCtxT* m_image_ctx;
+
+};
+
+} // namespace exclusive_lock
+} // namespace librbd
+
+extern template class librbd::exclusive_lock::StandardPolicy<librbd::ImageCtx>;
+
+#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_STANDARD_POLICY_H
--
cgit v1.2.3
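
A brief usage sketch of the Policy hook added by this patch. It is illustrative only and not part of the patch or of upstream Ceph: the class name ForcedOnlyPolicy and its force-only behaviour are assumptions. It relies solely on interfaces shown in the diff: may_auto_request_lock(), which ImageDispatch consults before queuing IO that is blocked on the exclusive lock, and lock_requested(force), through which a policy decides whether to give up the lock when another client asks for it, as AutomaticPolicy and StandardPolicy do above.

#include <cerrno>

#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/exclusive_lock/Policy.h"

namespace example {

// Hypothetical policy: never auto-acquires the lock for queued IO (like
// StandardPolicy) but hands the lock over when the requester sets the
// force flag (re-using AutomaticPolicy's release path).
class ForcedOnlyPolicy : public librbd::exclusive_lock::Policy {
public:
  explicit ForcedOnlyPolicy(librbd::ImageCtx *image_ctx)
    : m_image_ctx(image_ctx) {
  }

  bool may_auto_request_lock() override {
    // blocked IO should not transparently re-acquire the lock
    return false;
  }

  int lock_requested(bool force) override {
    ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock));
    ceph_assert(m_image_ctx->exclusive_lock != nullptr);

    if (!force) {
      // cooperative hand-off refused, mirroring StandardPolicy
      return -EROFS;
    }

    // forced request: yield the lock, mirroring AutomaticPolicy
    m_image_ctx->exclusive_lock->release_lock(nullptr);
    return 0;
  }

private:
  librbd::ImageCtx *m_image_ctx;
};

} // namespace example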