diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-21 11:54:28 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-21 11:54:28 +0000 |
commit | e6918187568dbd01842d8d1d2c808ce16a894239 (patch) | |
tree | 64f88b554b444a49f656b6c656111a145cbbaa28 /src/librbd/managed_lock | |
parent | Initial commit. (diff) | |
download | ceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip |
Adding upstream version 18.2.2.upstream/18.2.2
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/librbd/managed_lock')
-rw-r--r-- | src/librbd/managed_lock/AcquireRequest.cc | 184 | ||||
-rw-r--r-- | src/librbd/managed_lock/AcquireRequest.h | 102 | ||||
-rw-r--r-- | src/librbd/managed_lock/BreakRequest.cc | 249 | ||||
-rw-r--r-- | src/librbd/managed_lock/BreakRequest.h | 120 | ||||
-rw-r--r-- | src/librbd/managed_lock/GetLockerRequest.cc | 131 | ||||
-rw-r--r-- | src/librbd/managed_lock/GetLockerRequest.h | 58 | ||||
-rw-r--r-- | src/librbd/managed_lock/ReacquireRequest.cc | 79 | ||||
-rw-r--r-- | src/librbd/managed_lock/ReacquireRequest.h | 69 | ||||
-rw-r--r-- | src/librbd/managed_lock/ReleaseRequest.cc | 97 | ||||
-rw-r--r-- | src/librbd/managed_lock/ReleaseRequest.h | 72 | ||||
-rw-r--r-- | src/librbd/managed_lock/Types.h | 46 | ||||
-rw-r--r-- | src/librbd/managed_lock/Utils.cc | 43 | ||||
-rw-r--r-- | src/librbd/managed_lock/Utils.h | 23 |
13 files changed, 1273 insertions, 0 deletions
diff --git a/src/librbd/managed_lock/AcquireRequest.cc b/src/librbd/managed_lock/AcquireRequest.cc new file mode 100644 index 000000000..79be0f25a --- /dev/null +++ b/src/librbd/managed_lock/AcquireRequest.cc @@ -0,0 +1,184 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "librbd/managed_lock/AcquireRequest.h" +#include "librbd/Watcher.h" +#include "cls/lock/cls_lock_client.h" +#include "cls/lock/cls_lock_types.h" +#include "common/dout.h" +#include "common/errno.h" +#include "include/stringify.h" +#include "librbd/AsioEngine.h" +#include "librbd/ImageCtx.h" +#include "librbd/Utils.h" +#include "librbd/asio/ContextWQ.h" +#include "librbd/managed_lock/BreakRequest.h" +#include "librbd/managed_lock/GetLockerRequest.h" +#include "librbd/managed_lock/Utils.h" + +#define dout_subsys ceph_subsys_rbd +#undef dout_prefix +#define dout_prefix *_dout << "librbd::managed_lock::AcquireRequest: " << this \ + << " " << __func__ << ": " + +using std::string; + +namespace librbd { + +using librbd::util::detail::C_AsyncCallback; +using librbd::util::create_context_callback; +using librbd::util::create_rados_callback; + +namespace managed_lock { + +template <typename I> +AcquireRequest<I>* AcquireRequest<I>::create(librados::IoCtx& ioctx, + Watcher *watcher, + AsioEngine& asio_engine, + const string& oid, + const string& cookie, + bool exclusive, + bool blocklist_on_break_lock, + uint32_t blocklist_expire_seconds, + Context *on_finish) { + return new AcquireRequest(ioctx, watcher, asio_engine, oid, cookie, + exclusive, blocklist_on_break_lock, + blocklist_expire_seconds, on_finish); +} + +template <typename I> +AcquireRequest<I>::AcquireRequest(librados::IoCtx& ioctx, Watcher *watcher, + AsioEngine& asio_engine, + const string& oid, + const string& cookie, bool exclusive, + bool blocklist_on_break_lock, + uint32_t blocklist_expire_seconds, + Context *on_finish) + : m_ioctx(ioctx), m_watcher(watcher), + 
m_cct(reinterpret_cast<CephContext *>(m_ioctx.cct())), + m_asio_engine(asio_engine), m_oid(oid), m_cookie(cookie), + m_exclusive(exclusive), + m_blocklist_on_break_lock(blocklist_on_break_lock), + m_blocklist_expire_seconds(blocklist_expire_seconds), + m_on_finish(new C_AsyncCallback<asio::ContextWQ>( + asio_engine.get_work_queue(), on_finish)) { +} + +template <typename I> +AcquireRequest<I>::~AcquireRequest() { +} + +template <typename I> +void AcquireRequest<I>::send() { + send_get_locker(); +} + +template <typename I> +void AcquireRequest<I>::send_get_locker() { + ldout(m_cct, 10) << dendl; + + Context *ctx = create_context_callback< + AcquireRequest<I>, &AcquireRequest<I>::handle_get_locker>(this); + auto req = GetLockerRequest<I>::create(m_ioctx, m_oid, m_exclusive, + &m_locker, ctx); + req->send(); +} + +template <typename I> +void AcquireRequest<I>::handle_get_locker(int r) { + ldout(m_cct, 10) << "r=" << r << dendl; + + if (r == -ENOENT) { + ldout(m_cct, 20) << "no lockers detected" << dendl; + m_locker = {}; + } else if (r == -EBUSY) { + ldout(m_cct, 5) << "incompatible lock detected" << dendl; + finish(r); + return; + } else if (r < 0) { + lderr(m_cct) << "failed to retrieve lockers: " << cpp_strerror(r) << dendl; + finish(r); + return; + } + + send_lock(); +} + +template <typename I> +void AcquireRequest<I>::send_lock() { + ldout(m_cct, 10) << "entity=client." << m_ioctx.get_instance_id() << ", " + << "cookie=" << m_cookie << dendl; + + librados::ObjectWriteOperation op; + rados::cls::lock::lock(&op, RBD_LOCK_NAME, + m_exclusive ? 
ClsLockType::EXCLUSIVE : ClsLockType::SHARED, m_cookie, + util::get_watcher_lock_tag(), "", utime_t(), 0); + + using klass = AcquireRequest; + librados::AioCompletion *rados_completion = + create_rados_callback<klass, &klass::handle_lock>(this); + int r = m_ioctx.aio_operate(m_oid, rados_completion, &op); + ceph_assert(r == 0); + rados_completion->release(); +} + +template <typename I> +void AcquireRequest<I>::handle_lock(int r) { + ldout(m_cct, 10) << "r=" << r << dendl; + + if (r == 0) { + finish(0); + return; + } else if (r == -EBUSY && m_locker.cookie.empty()) { + ldout(m_cct, 5) << "already locked, refreshing locker" << dendl; + send_get_locker(); + return; + } else if (r != -EBUSY) { + lderr(m_cct) << "failed to lock: " << cpp_strerror(r) << dendl; + finish(r); + return; + } + + send_break_lock(); +} + +template <typename I> +void AcquireRequest<I>::send_break_lock() { + ldout(m_cct, 10) << dendl; + + Context *ctx = create_context_callback< + AcquireRequest<I>, &AcquireRequest<I>::handle_break_lock>(this); + auto req = BreakRequest<I>::create( + m_ioctx, m_asio_engine, m_oid, m_locker, m_exclusive, + m_blocklist_on_break_lock, m_blocklist_expire_seconds, false, ctx); + req->send(); +} + +template <typename I> +void AcquireRequest<I>::handle_break_lock(int r) { + ldout(m_cct, 10) << "r=" << r << dendl; + + if (r == -EAGAIN) { + ldout(m_cct, 5) << "lock owner is still alive" << dendl; + finish(r); + return; + } else if (r < 0) { + lderr(m_cct) << "failed to break lock : " << cpp_strerror(r) << dendl; + finish(r); + return; + } + + m_locker = {}; + send_lock(); +} + +template <typename I> +void AcquireRequest<I>::finish(int r) { + m_on_finish->complete(r); + delete this; +} + +} // namespace managed_lock +} // namespace librbd + +template class librbd::managed_lock::AcquireRequest<librbd::ImageCtx>; diff --git a/src/librbd/managed_lock/AcquireRequest.h b/src/librbd/managed_lock/AcquireRequest.h new file mode 100644 index 000000000..19424a422 --- /dev/null +++ 
b/src/librbd/managed_lock/AcquireRequest.h @@ -0,0 +1,102 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_LIBRBD_MANAGED_LOCK_ACQUIRE_REQUEST_H +#define CEPH_LIBRBD_MANAGED_LOCK_ACQUIRE_REQUEST_H + +#include "include/rados/librados.hpp" +#include "include/int_types.h" +#include "include/buffer.h" +#include "msg/msg_types.h" +#include "librbd/managed_lock/Types.h" +#include "librbd/watcher/Types.h" +#include <string> + +class Context; + +namespace librbd { + +class AsioEngine; +class Watcher; + +namespace managed_lock { + +template <typename ImageCtxT> +class AcquireRequest { +private: + typedef watcher::Traits<ImageCtxT> TypeTraits; + typedef typename TypeTraits::Watcher Watcher; + +public: + static AcquireRequest* create(librados::IoCtx& ioctx, Watcher *watcher, + AsioEngine& asio_engine, + const std::string& oid, + const std::string& cookie, + bool exclusive, + bool blocklist_on_break_lock, + uint32_t blocklist_expire_seconds, + Context *on_finish); + + ~AcquireRequest(); + void send(); + +private: + + /** + * @verbatim + * + * <start> + * | + * v + * GET_LOCKER + * | ^ + * | . (EBUSY && no cached locker) + * | . + * | . (EBUSY && cached locker) + * \--> LOCK_IMAGE * * * * * * * * > BREAK_LOCK . . . . . + * | ^ | . + * | | | (success) . + * | \-------------------------/ . + * v . + * <finish> < . . . . . . . . . . . . . . . . . . . 
+ * + * @endverbatim + */ + + AcquireRequest(librados::IoCtx& ioctx, Watcher *watcher, + AsioEngine& asio_engine, const std::string& oid, + const std::string& cookie, bool exclusive, + bool blocklist_on_break_lock, + uint32_t blocklist_expire_seconds, Context *on_finish); + + librados::IoCtx& m_ioctx; + Watcher *m_watcher; + CephContext *m_cct; + AsioEngine& m_asio_engine; + std::string m_oid; + std::string m_cookie; + bool m_exclusive; + bool m_blocklist_on_break_lock; + uint32_t m_blocklist_expire_seconds; + Context *m_on_finish; + + bufferlist m_out_bl; + + Locker m_locker; + + void send_get_locker(); + void handle_get_locker(int r); + + void send_lock(); + void handle_lock(int r); + + void send_break_lock(); + void handle_break_lock(int r); + + void finish(int r); +}; + +} // namespace managed_lock +} // namespace librbd + +#endif // CEPH_LIBRBD_MANAGED_LOCK_ACQUIRE_REQUEST_H diff --git a/src/librbd/managed_lock/BreakRequest.cc b/src/librbd/managed_lock/BreakRequest.cc new file mode 100644 index 000000000..69cd35301 --- /dev/null +++ b/src/librbd/managed_lock/BreakRequest.cc @@ -0,0 +1,249 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "librbd/managed_lock/BreakRequest.h" +#include "common/dout.h" +#include "common/errno.h" +#include "include/neorados/RADOS.hpp" +#include "include/stringify.h" +#include "cls/lock/cls_lock_client.h" +#include "cls/lock/cls_lock_types.h" +#include "librbd/AsioEngine.h" +#include "librbd/ImageCtx.h" +#include "librbd/Utils.h" +#include "librbd/asio/ContextWQ.h" +#include "librbd/asio/Utils.h" +#include "librbd/managed_lock/GetLockerRequest.h" + +#define dout_subsys ceph_subsys_rbd +#undef dout_prefix +#define dout_prefix *_dout << "librbd::managed_lock::BreakRequest: " << this \ + << " " << __func__ << ": " + +namespace librbd { +namespace managed_lock { + +using util::create_context_callback; +using util::create_rados_callback; + +template <typename I> 
+BreakRequest<I>::BreakRequest(librados::IoCtx& ioctx, + AsioEngine& asio_engine, + const std::string& oid, const Locker &locker, + bool exclusive, bool blocklist_locker, + uint32_t blocklist_expire_seconds, + bool force_break_lock, Context *on_finish) + : m_ioctx(ioctx), m_cct(reinterpret_cast<CephContext *>(m_ioctx.cct())), + m_asio_engine(asio_engine), m_oid(oid), m_locker(locker), + m_exclusive(exclusive), m_blocklist_locker(blocklist_locker), + m_blocklist_expire_seconds(blocklist_expire_seconds), + m_force_break_lock(force_break_lock), m_on_finish(on_finish) { +} + +template <typename I> +void BreakRequest<I>::send() { + send_get_watchers(); +} + +template <typename I> +void BreakRequest<I>::send_get_watchers() { + ldout(m_cct, 10) << dendl; + + librados::ObjectReadOperation op; + op.list_watchers(&m_watchers, &m_watchers_ret_val); + + using klass = BreakRequest<I>; + librados::AioCompletion *rados_completion = + create_rados_callback<klass, &klass::handle_get_watchers>(this); + m_out_bl.clear(); + int r = m_ioctx.aio_operate(m_oid, rados_completion, &op, &m_out_bl); + ceph_assert(r == 0); + rados_completion->release(); +} + +template <typename I> +void BreakRequest<I>::handle_get_watchers(int r) { + ldout(m_cct, 10) << "r=" << r << dendl; + + if (r == 0) { + r = m_watchers_ret_val; + } + if (r < 0) { + lderr(m_cct) << "failed to retrieve watchers: " << cpp_strerror(r) + << dendl; + finish(r); + return; + } + + bool found_alive_locker = false; + for (auto &watcher : m_watchers) { + ldout(m_cct, 20) << "watcher=[" + << "addr=" << watcher.addr << ", " + << "entity=client." 
<< watcher.watcher_id << "]" << dendl; + + if ((strncmp(m_locker.address.c_str(), + watcher.addr, sizeof(watcher.addr)) == 0) && + (m_locker.handle == watcher.cookie)) { + ldout(m_cct, 10) << "lock owner is still alive" << dendl; + found_alive_locker = true; + } + } + + if (!m_force_break_lock && found_alive_locker) { + finish(-EAGAIN); + return; + } + + send_get_locker(); +} + +template <typename I> +void BreakRequest<I>::send_get_locker() { + ldout(m_cct, 10) << dendl; + + using klass = BreakRequest<I>; + Context *ctx = create_context_callback<klass, &klass::handle_get_locker>( + this); + auto req = GetLockerRequest<I>::create(m_ioctx, m_oid, m_exclusive, + &m_refreshed_locker, ctx); + req->send(); +} + +template <typename I> +void BreakRequest<I>::handle_get_locker(int r) { + ldout(m_cct, 10) << "r=" << r << dendl; + + if (r == -ENOENT) { + ldout(m_cct, 5) << "no lock owner" << dendl; + finish(0); + return; + } else if (r < 0 && r != -EBUSY) { + lderr(m_cct) << "failed to retrieve lockers: " << cpp_strerror(r) << dendl; + finish(r); + return; + } else if (r < 0) { + m_refreshed_locker = {}; + } + + if (m_refreshed_locker != m_locker || m_refreshed_locker == Locker{}) { + ldout(m_cct, 5) << "no longer lock owner" << dendl; + finish(-EAGAIN); + return; + } + + send_blocklist(); +} + +template <typename I> +void BreakRequest<I>::send_blocklist() { + if (!m_blocklist_locker) { + send_break_lock(); + return; + } + + entity_name_t entity_name = entity_name_t::CLIENT(m_ioctx.get_instance_id()); + ldout(m_cct, 10) << "local entity=" << entity_name << ", " + << "locker entity=" << m_locker.entity << dendl; + + if (m_locker.entity == entity_name) { + lderr(m_cct) << "attempting to self-blocklist" << dendl; + finish(-EINVAL); + return; + } + + entity_addr_t locker_addr; + if (!locker_addr.parse(m_locker.address)) { + lderr(m_cct) << "unable to parse locker address: " << m_locker.address + << dendl; + finish(-EINVAL); + return; + } + + std::optional<std::chrono::seconds> 
expire; + if (m_blocklist_expire_seconds != 0) { + expire = std::chrono::seconds(m_blocklist_expire_seconds); + } + m_asio_engine.get_rados_api().blocklist_add( + m_locker.address, expire, + librbd::asio::util::get_callback_adapter( + [this](int r) { handle_blocklist(r); })); +} + +template <typename I> +void BreakRequest<I>::handle_blocklist(int r) { + ldout(m_cct, 10) << "r=" << r << dendl; + + if (r < 0) { + lderr(m_cct) << "failed to blocklist lock owner: " << cpp_strerror(r) + << dendl; + finish(r); + return; + } + + wait_for_osd_map(); +} + +template <typename I> +void BreakRequest<I>::wait_for_osd_map() { + ldout(m_cct, 10) << dendl; + + m_asio_engine.get_rados_api().wait_for_latest_osd_map( + librbd::asio::util::get_callback_adapter( + [this](int r) { handle_wait_for_osd_map(r); })); +} + +template <typename I> +void BreakRequest<I>::handle_wait_for_osd_map(int r) { + ldout(m_cct, 10) << "r=" << r << dendl; + + if (r < 0) { + lderr(m_cct) << "failed to wait for updated OSD map: " << cpp_strerror(r) + << dendl; + finish(r); + return; + } + + send_break_lock(); +} + +template <typename I> +void BreakRequest<I>::send_break_lock() { + ldout(m_cct, 10) << dendl; + + librados::ObjectWriteOperation op; + rados::cls::lock::break_lock(&op, RBD_LOCK_NAME, m_locker.cookie, + m_locker.entity); + + using klass = BreakRequest<I>; + librados::AioCompletion *rados_completion = + create_rados_callback<klass, &klass::handle_break_lock>(this); + int r = m_ioctx.aio_operate(m_oid, rados_completion, &op); + ceph_assert(r == 0); + rados_completion->release(); +} + +template <typename I> +void BreakRequest<I>::handle_break_lock(int r) { + ldout(m_cct, 10) << "r=" << r << dendl; + + if (r < 0 && r != -ENOENT) { + lderr(m_cct) << "failed to break lock: " << cpp_strerror(r) << dendl; + finish(r); + return; + } + + finish(0); +} + +template <typename I> +void BreakRequest<I>::finish(int r) { + ldout(m_cct, 10) << "r=" << r << dendl; + + m_on_finish->complete(r); + delete this; +} + 
+} // namespace managed_lock +} // namespace librbd + +template class librbd::managed_lock::BreakRequest<librbd::ImageCtx>; diff --git a/src/librbd/managed_lock/BreakRequest.h b/src/librbd/managed_lock/BreakRequest.h new file mode 100644 index 000000000..dd46bbcc5 --- /dev/null +++ b/src/librbd/managed_lock/BreakRequest.h @@ -0,0 +1,120 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_LIBRBD_MANAGED_LOCK_BREAK_REQUEST_H +#define CEPH_LIBRBD_MANAGED_LOCK_BREAK_REQUEST_H + +#include "include/int_types.h" +#include "include/buffer_fwd.h" +#include "include/rados/librados_fwd.hpp" +#include "msg/msg_types.h" +#include <list> +#include <string> +#include <boost/optional.hpp> +#include "librbd/managed_lock/Types.h" + +class Context; +class ContextWQ; +class obj_watch_t; + +namespace librbd { + +class AsioEngine; +class ImageCtx; +template <typename> class Journal; +namespace asio { struct ContextWQ; } + +namespace managed_lock { + +template <typename ImageCtxT = ImageCtx> +class BreakRequest { +public: + static BreakRequest* create(librados::IoCtx& ioctx, + AsioEngine& asio_engine, + const std::string& oid, const Locker &locker, + bool exclusive, bool blocklist_locker, + uint32_t blocklist_expire_seconds, + bool force_break_lock, Context *on_finish) { + return new BreakRequest(ioctx, asio_engine, oid, locker, exclusive, + blocklist_locker, blocklist_expire_seconds, + force_break_lock, on_finish); + } + + void send(); + +private: + /** + * @verbatim + * + * <start> + * | + * v + * GET_WATCHERS + * | + * v + * GET_LOCKER + * | + * v + * BLOCKLIST (skip if disabled) + * | + * v + * WAIT_FOR_OSD_MAP + * | + * v + * BREAK_LOCK + * | + * v + * <finish> + * + * @endvertbatim + */ + + librados::IoCtx &m_ioctx; + CephContext *m_cct; + AsioEngine& m_asio_engine; + std::string m_oid; + Locker m_locker; + bool m_exclusive; + bool m_blocklist_locker; + uint32_t m_blocklist_expire_seconds; + bool m_force_break_lock; 
+ Context *m_on_finish; + + bufferlist m_out_bl; + + std::list<obj_watch_t> m_watchers; + int m_watchers_ret_val; + + Locker m_refreshed_locker; + + BreakRequest(librados::IoCtx& ioctx, AsioEngine& asio_engine, + const std::string& oid, const Locker &locker, + bool exclusive, bool blocklist_locker, + uint32_t blocklist_expire_seconds, bool force_break_lock, + Context *on_finish); + + void send_get_watchers(); + void handle_get_watchers(int r); + + void send_get_locker(); + void handle_get_locker(int r); + + void send_blocklist(); + void handle_blocklist(int r); + + void wait_for_osd_map(); + void handle_wait_for_osd_map(int r); + + void send_break_lock(); + void handle_break_lock(int r); + + void finish(int r); + +}; + +} // namespace managed_lock +} // namespace librbd + +extern template class librbd::managed_lock::BreakRequest<librbd::ImageCtx>; + +#endif // CEPH_LIBRBD_MANAGED_LOCK_BREAK_REQUEST_H diff --git a/src/librbd/managed_lock/GetLockerRequest.cc b/src/librbd/managed_lock/GetLockerRequest.cc new file mode 100644 index 000000000..ea898ab96 --- /dev/null +++ b/src/librbd/managed_lock/GetLockerRequest.cc @@ -0,0 +1,131 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "librbd/managed_lock/GetLockerRequest.h" +#include "cls/lock/cls_lock_client.h" +#include "cls/lock/cls_lock_types.h" +#include "common/dout.h" +#include "common/errno.h" +#include "include/stringify.h" +#include "librbd/ImageCtx.h" +#include "librbd/Utils.h" +#include "librbd/managed_lock/Types.h" +#include "librbd/managed_lock/Utils.h" + +#define dout_subsys ceph_subsys_rbd +#undef dout_prefix +#define dout_prefix *_dout << "librbd::managed_lock::GetLockerRequest: " \ + << this << " " << __func__ << ": " + +namespace librbd { +namespace managed_lock { + +using librbd::util::create_rados_callback; + +template <typename I> +GetLockerRequest<I>::GetLockerRequest(librados::IoCtx& ioctx, + const std::string& oid, bool exclusive, + 
Locker *locker, Context *on_finish) + : m_ioctx(ioctx), m_cct(reinterpret_cast<CephContext *>(m_ioctx.cct())), + m_oid(oid), m_exclusive(exclusive), m_locker(locker), + m_on_finish(on_finish) { +} + +template <typename I> +void GetLockerRequest<I>::send() { + send_get_lockers(); +} + +template <typename I> +void GetLockerRequest<I>::send_get_lockers() { + ldout(m_cct, 10) << dendl; + + librados::ObjectReadOperation op; + rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME); + + using klass = GetLockerRequest<I>; + librados::AioCompletion *rados_completion = + create_rados_callback<klass, &klass::handle_get_lockers>(this); + m_out_bl.clear(); + int r = m_ioctx.aio_operate(m_oid, rados_completion, &op, &m_out_bl); + ceph_assert(r == 0); + rados_completion->release(); +} + +template <typename I> +void GetLockerRequest<I>::handle_get_lockers(int r) { + ldout(m_cct, 10) << "r=" << r << dendl; + + std::map<rados::cls::lock::locker_id_t, + rados::cls::lock::locker_info_t> lockers; + ClsLockType lock_type = ClsLockType::NONE; + std::string lock_tag; + if (r == 0) { + auto it = m_out_bl.cbegin(); + r = rados::cls::lock::get_lock_info_finish(&it, &lockers, &lock_type, + &lock_tag); + } + + if (r < 0) { + lderr(m_cct) << "failed to retrieve lockers: " << cpp_strerror(r) << dendl; + finish(r); + return; + } + + if (lockers.empty()) { + ldout(m_cct, 20) << "no lockers detected" << dendl; + finish(-ENOENT); + return; + } + + if (lock_tag != util::get_watcher_lock_tag()) { + ldout(m_cct, 5) <<"locked by external mechanism: tag=" << lock_tag << dendl; + finish(-EBUSY); + return; + } + + if (m_exclusive && lock_type == ClsLockType::SHARED) { + ldout(m_cct, 5) << "incompatible shared lock type detected" << dendl; + finish(-EBUSY); + return; + } else if (!m_exclusive && lock_type == ClsLockType::EXCLUSIVE) { + ldout(m_cct, 5) << "incompatible exclusive lock type detected" << dendl; + finish(-EBUSY); + return; + } + + std::map<rados::cls::lock::locker_id_t, + 
rados::cls::lock::locker_info_t>::iterator iter = lockers.begin(); + if (!util::decode_lock_cookie(iter->first.cookie, &m_locker->handle)) { + ldout(m_cct, 5) << "locked by external mechanism: " + << "cookie=" << iter->first.cookie << dendl; + finish(-EBUSY); + return; + } + + if (iter->second.addr.is_blank_ip()) { + ldout(m_cct, 5) << "locker has a blank address" << dendl; + finish(-EBUSY); + return; + } + m_locker->entity = iter->first.locker; + m_locker->cookie = iter->first.cookie; + m_locker->address = iter->second.addr.get_legacy_str(); + + ldout(m_cct, 10) << "retrieved exclusive locker: " + << m_locker->entity << "@" << m_locker->address << dendl; + finish(0); +} + +template <typename I> +void GetLockerRequest<I>::finish(int r) { + ldout(m_cct, 10) << "r=" << r << dendl; + + m_on_finish->complete(r); + delete this; +} + +} // namespace managed_lock +} // namespace librbd + +template class librbd::managed_lock::GetLockerRequest<librbd::ImageCtx>; diff --git a/src/librbd/managed_lock/GetLockerRequest.h b/src/librbd/managed_lock/GetLockerRequest.h new file mode 100644 index 000000000..b8fd08f6e --- /dev/null +++ b/src/librbd/managed_lock/GetLockerRequest.h @@ -0,0 +1,58 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_LIBRBD_MANAGED_LOCK_GET_LOCKER_REQUEST_H +#define CEPH_LIBRBD_MANAGED_LOCK_GET_LOCKER_REQUEST_H + +#include "include/int_types.h" +#include "include/buffer.h" +#include "include/rados/librados_fwd.hpp" +#include <string> + +class Context; + +namespace librbd { + +struct ImageCtx; + +namespace managed_lock { + +struct Locker; + +template <typename ImageCtxT = ImageCtx> +class GetLockerRequest { +public: + static GetLockerRequest* create(librados::IoCtx& ioctx, + const std::string& oid, bool exclusive, + Locker *locker, Context *on_finish) { + return new GetLockerRequest(ioctx, oid, exclusive, locker, on_finish); + } + + void send(); + +private: + librados::IoCtx &m_ioctx; + 
CephContext *m_cct; + std::string m_oid; + bool m_exclusive; + Locker *m_locker; + Context *m_on_finish; + + bufferlist m_out_bl; + + GetLockerRequest(librados::IoCtx& ioctx, const std::string& oid, + bool exclusive, Locker *locker, Context *on_finish); + + void send_get_lockers(); + void handle_get_lockers(int r); + + void finish(int r); + +}; + +} // namespace managed_lock +} // namespace librbd + +extern template class librbd::managed_lock::GetLockerRequest<librbd::ImageCtx>; + +#endif // CEPH_LIBRBD_MANAGED_LOCK_GET_LOCKER_REQUEST_H diff --git a/src/librbd/managed_lock/ReacquireRequest.cc b/src/librbd/managed_lock/ReacquireRequest.cc new file mode 100644 index 000000000..9eaa51569 --- /dev/null +++ b/src/librbd/managed_lock/ReacquireRequest.cc @@ -0,0 +1,79 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "librbd/managed_lock/ReacquireRequest.h" +#include "librbd/Watcher.h" +#include "cls/lock/cls_lock_client.h" +#include "cls/lock/cls_lock_types.h" +#include "common/dout.h" +#include "common/errno.h" +#include "librbd/ImageCtx.h" +#include "librbd/Utils.h" +#include "librbd/managed_lock/Utils.h" + +#define dout_subsys ceph_subsys_rbd +#undef dout_prefix +#define dout_prefix *_dout << "librbd::managed_lock::ReacquireRequest: " \ + << this << ": " << __func__ + +using std::string; + +namespace librbd { +namespace managed_lock { + +using librbd::util::create_rados_callback; + +template <typename I> +ReacquireRequest<I>::ReacquireRequest(librados::IoCtx& ioctx, + const string& oid, + const string& old_cookie, + const string &new_cookie, + bool exclusive, + Context *on_finish) + : m_ioctx(ioctx), m_oid(oid), m_old_cookie(old_cookie), + m_new_cookie(new_cookie), m_exclusive(exclusive), m_on_finish(on_finish) { +} + + +template <typename I> +void ReacquireRequest<I>::send() { + set_cookie(); +} + +template <typename I> +void ReacquireRequest<I>::set_cookie() { + CephContext *cct = 
reinterpret_cast<CephContext *>(m_ioctx.cct()); + ldout(cct, 10) << dendl; + + librados::ObjectWriteOperation op; + rados::cls::lock::set_cookie(&op, RBD_LOCK_NAME, + m_exclusive ? ClsLockType::EXCLUSIVE : ClsLockType::SHARED, + m_old_cookie, util::get_watcher_lock_tag(), + m_new_cookie); + + librados::AioCompletion *rados_completion = create_rados_callback< + ReacquireRequest, &ReacquireRequest::handle_set_cookie>(this); + int r = m_ioctx.aio_operate(m_oid, rados_completion, &op); + ceph_assert(r == 0); + rados_completion->release(); +} + +template <typename I> +void ReacquireRequest<I>::handle_set_cookie(int r) { + CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct()); + ldout(cct, 10) << ": r=" << r << dendl; + + if (r == -EOPNOTSUPP) { + ldout(cct, 10) << ": OSD doesn't support updating lock" << dendl; + } else if (r < 0) { + lderr(cct) << ": failed to update lock: " << cpp_strerror(r) << dendl; + } + + m_on_finish->complete(r); + delete this; +} + +} // namespace managed_lock +} // namespace librbd + +template class librbd::managed_lock::ReacquireRequest<librbd::ImageCtx>; diff --git a/src/librbd/managed_lock/ReacquireRequest.h b/src/librbd/managed_lock/ReacquireRequest.h new file mode 100644 index 000000000..3f2b7d7e2 --- /dev/null +++ b/src/librbd/managed_lock/ReacquireRequest.h @@ -0,0 +1,69 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_LIBRBD_MANAGED_LOCK_REACQUIRE_REQUEST_H +#define CEPH_LIBRBD_MANAGED_LOCK_REACQUIRE_REQUEST_H + +#include "include/rados/librados.hpp" +#include "include/int_types.h" +#include <string> + +class Context; + +namespace librbd { + +class Watcher; + +namespace managed_lock { + +template <typename ImageCtxT> +class ReacquireRequest { +public: + + static ReacquireRequest *create(librados::IoCtx& ioctx, + const std::string& oid, + const std::string& old_cookie, + const std::string &new_cookie, + bool exclusive, + Context *on_finish) { + return new 
ReacquireRequest(ioctx, oid, old_cookie, new_cookie, exclusive, + on_finish); + } + + ReacquireRequest(librados::IoCtx& ioctx, const std::string& oid, + const std::string& old_cookie, + const std::string &new_cookie, bool exclusive, + Context *on_finish); + + void send(); + +private: + /** + * @verbatim + * + * <start> + * | + * v + * SET_COOKIE + * | + * v + * <finish> + * + * @endverbatim + */ + librados::IoCtx& m_ioctx; + std::string m_oid; + std::string m_old_cookie; + std::string m_new_cookie; + bool m_exclusive; + Context *m_on_finish; + + void set_cookie(); + void handle_set_cookie(int r); + +}; + +} // namespace managed_lock +} // namespace librbd + +#endif // CEPH_LIBRBD_MANAGED_LOCK_REACQUIRE_REQUEST_H diff --git a/src/librbd/managed_lock/ReleaseRequest.cc b/src/librbd/managed_lock/ReleaseRequest.cc new file mode 100644 index 000000000..6707a149f --- /dev/null +++ b/src/librbd/managed_lock/ReleaseRequest.cc @@ -0,0 +1,97 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "librbd/managed_lock/ReleaseRequest.h" +#include "cls/lock/cls_lock_client.h" +#include "cls/lock/cls_lock_types.h" +#include "common/dout.h" +#include "common/errno.h" +#include "librbd/ImageCtx.h" +#include "librbd/Utils.h" +#include "librbd/Watcher.h" +#include "librbd/asio/ContextWQ.h" + +#define dout_subsys ceph_subsys_rbd +#undef dout_prefix +#define dout_prefix *_dout << "librbd::managed_lock::ReleaseRequest: " \ + << this << " " << __func__ << ": " + +using std::string; + +namespace librbd { +namespace managed_lock { + +using util::detail::C_AsyncCallback; +using util::create_context_callback; +using util::create_rados_callback; + +template <typename I> +ReleaseRequest<I>* ReleaseRequest<I>::create(librados::IoCtx& ioctx, + Watcher *watcher, + asio::ContextWQ *work_queue, + const string& oid, + const string& cookie, + Context *on_finish) { + return new ReleaseRequest(ioctx, watcher, work_queue, oid, cookie, + 
on_finish); +} + +template <typename I> +ReleaseRequest<I>::ReleaseRequest(librados::IoCtx& ioctx, Watcher *watcher, + asio::ContextWQ *work_queue, + const string& oid, const string& cookie, + Context *on_finish) + : m_ioctx(ioctx), m_watcher(watcher), m_oid(oid), m_cookie(cookie), + m_on_finish(new C_AsyncCallback<asio::ContextWQ>(work_queue, on_finish)) { +} + +template <typename I> +ReleaseRequest<I>::~ReleaseRequest() { +} + + +template <typename I> +void ReleaseRequest<I>::send() { + send_unlock(); +} + +template <typename I> +void ReleaseRequest<I>::send_unlock() { + CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct()); + ldout(cct, 10) << "entity=client." << m_ioctx.get_instance_id() << ", " + << "cookie=" << m_cookie << dendl; + + librados::ObjectWriteOperation op; + rados::cls::lock::unlock(&op, RBD_LOCK_NAME, m_cookie); + + using klass = ReleaseRequest; + librados::AioCompletion *rados_completion = + create_rados_callback<klass, &klass::handle_unlock>(this); + int r = m_ioctx.aio_operate(m_oid, rados_completion, &op); + ceph_assert(r == 0); + rados_completion->release(); +} + +template <typename I> +void ReleaseRequest<I>::handle_unlock(int r) { + CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct()); + ldout(cct, 10) << "r=" << r << dendl; + + if (r < 0 && r != -ENOENT) { + lderr(cct) << "failed to unlock: " << cpp_strerror(r) << dendl; + } + + finish(); +} + +template <typename I> +void ReleaseRequest<I>::finish() { + m_on_finish->complete(0); + delete this; +} + +} // namespace managed_lock +} // namespace librbd + +template class librbd::managed_lock::ReleaseRequest<librbd::ImageCtx>; + diff --git a/src/librbd/managed_lock/ReleaseRequest.h b/src/librbd/managed_lock/ReleaseRequest.h new file mode 100644 index 000000000..91d922282 --- /dev/null +++ b/src/librbd/managed_lock/ReleaseRequest.h @@ -0,0 +1,72 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef 
CEPH_LIBRBD_MANAGED_LOCK_RELEASE_REQUEST_H
#define CEPH_LIBRBD_MANAGED_LOCK_RELEASE_REQUEST_H

#include "include/rados/librados.hpp"
#include "librbd/watcher/Types.h"
#include <string>

class Context;
// NOTE(review): this global ContextWQ forward declaration appears unused here
// (the code below uses librbd::asio::ContextWQ) — confirm before removing.
class ContextWQ;

namespace librbd {

class Watcher;
namespace asio { struct ContextWQ; }

namespace managed_lock {

// Asynchronous state machine that releases a cls_lock advisory lock held on
// a single rados object.  Allocate via create(), start with send(); the
// request deletes itself after completing the on_finish callback, which is
// dispatched on the supplied work queue and always receives r=0.
template <typename ImageCtxT>
class ReleaseRequest {
private:
  // Indirection so unit tests can substitute a mock watcher type.
  typedef watcher::Traits<ImageCtxT> TypeTraits;
  typedef typename TypeTraits::Watcher Watcher;

public:
  // ioctx/oid identify the lock object; cookie must match the cookie used
  // when the lock was acquired.
  static ReleaseRequest* create(librados::IoCtx& ioctx, Watcher *watcher,
                                asio::ContextWQ *work_queue,
                                const std::string& oid,
                                const std::string& cookie,
                                Context *on_finish);

  ~ReleaseRequest();
  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * UNLOCK
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  ReleaseRequest(librados::IoCtx& ioctx, Watcher *watcher,
                 asio::ContextWQ *work_queue, const std::string& oid,
                 const std::string& cookie, Context *on_finish);

  librados::IoCtx& m_ioctx;
  Watcher *m_watcher;
  std::string m_oid;      // lock object name
  std::string m_cookie;   // cookie used at acquisition time
  Context *m_on_finish;   // wrapped in C_AsyncCallback by the constructor

  void send_unlock();
  void handle_unlock(int r);

  void finish();

};

} // namespace managed_lock
} // namespace librbd

#endif // CEPH_LIBRBD_MANAGED_LOCK_RELEASE_REQUEST_H

// ---- src/librbd/managed_lock/Types.h (new file in this commit) ----
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_MANAGED_LOCK_TYPES_H
#define CEPH_LIBRBD_MANAGED_LOCK_TYPES_H

#include "msg/msg_types.h"
#include <string>

namespace librbd {
namespace managed_lock {

// Identity of the current lock owner, as reported by cls_lock.
struct Locker {
  entity_name_t entity;   // rados entity (e.g. client.<id>) holding the lock
  std::string cookie;     // lock cookie supplied at acquisition time
  std::string address;    // owner's network address
  uint64_t handle = 0;    // watch handle decoded from the cookie (0 if none)

  Locker() {
  }
  Locker(const entity_name_t& entity, const std::string &cookie, 
+ const std::string &address, uint64_t handle) + : entity(entity), cookie(cookie), address(address), handle(handle) { + } + + inline bool operator==(const Locker &rhs) const { + return (entity == rhs.entity && + cookie == rhs.cookie && + address == rhs.address && + handle == rhs.handle); + } + inline bool operator!=(const Locker &rhs) const { + return !(*this == rhs); + } +}; + +enum Mode { + EXCLUSIVE, + SHARED +}; + + +} // namespace managed_lock +} // namespace librbd + +#endif // CEPH_LIBRBD_MANAGED_LOCK_TYPES_H diff --git a/src/librbd/managed_lock/Utils.cc b/src/librbd/managed_lock/Utils.cc new file mode 100644 index 000000000..0b4f908dd --- /dev/null +++ b/src/librbd/managed_lock/Utils.cc @@ -0,0 +1,43 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "include/ceph_assert.h" +#include "librbd/managed_lock/Utils.h" +#include <sstream> + +namespace librbd { +namespace managed_lock { +namespace util { + +namespace { + +const std::string WATCHER_LOCK_COOKIE_PREFIX = "auto"; +const std::string WATCHER_LOCK_TAG("internal"); + +} // anonymous namespace + +const std::string &get_watcher_lock_tag() { + return WATCHER_LOCK_TAG; +} + +bool decode_lock_cookie(const std::string &tag, uint64_t *handle) { + std::string prefix; + std::istringstream ss(tag); + if (!(ss >> prefix >> *handle) || prefix != WATCHER_LOCK_COOKIE_PREFIX) { + return false; + } + return true; +} + +std::string encode_lock_cookie(uint64_t watch_handle) { + ceph_assert(watch_handle != 0); + std::ostringstream ss; + ss << WATCHER_LOCK_COOKIE_PREFIX << " " << watch_handle; + return ss.str(); +} + +} // namespace util +} // namespace managed_lock +} // namespace librbd + + diff --git a/src/librbd/managed_lock/Utils.h b/src/librbd/managed_lock/Utils.h new file mode 100644 index 000000000..679cbfe8e --- /dev/null +++ b/src/librbd/managed_lock/Utils.h @@ -0,0 +1,23 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// 
vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_MANAGED_LOCK_UTILS_H
#define CEPH_LIBRBD_MANAGED_LOCK_UTILS_H

#include "include/int_types.h"
#include <string>

namespace librbd {
namespace managed_lock {
namespace util {

// Tag shared by all automatically-managed (watcher-based) locks.
const std::string &get_watcher_lock_tag();

// Parse a lock cookie previously built by encode_lock_cookie(); on success
// stores the embedded watch handle in *handle and returns true.
bool decode_lock_cookie(const std::string &tag, uint64_t *handle);
// Build the cookie string for a (non-zero) watch handle.
std::string encode_lock_cookie(uint64_t watch_handle);

} // namespace util
} // namespace managed_lock
} // namespace librbd

#endif // CEPH_LIBRBD_MANAGED_LOCK_UTILS_H