Diffstat (limited to 'src/cls')
113 files changed, 44880 insertions, 0 deletions
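The bulk of the insertions below add the new 2pc (two-phase commit) queue class and its client wrappers. A minimal usage sketch of the intended flow (not part of the diff; it assumes an open librados::IoCtx io_ctx, a queue object "my_queue" created earlier with cls_2pc_queue_init(), and a payload bufferlist data — RGW itself would go through the ObjectWriteOperation overloads and rgw_rados_operate(), per the header comments):

  cls_2pc_reservation::id_t res_id = cls_2pc_reservation::NO_ID;
  // phase 1: reserve space; fails with -ENOSPC when reservations exceed the remaining capacity
  int r = cls_2pc_queue_reserve(io_ctx, "my_queue", data.length(), 1 /*entries*/, res_id);
  if (r < 0) return r;
  // phase 2: commit the payload under the reservation id (or cls_2pc_queue_abort() to release it)
  librados::ObjectWriteOperation op;
  cls_2pc_queue_commit(op, {data}, res_id);
  r = io_ctx.operate("my_queue", &op);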
diff --git a/src/cls/2pc_queue/cls_2pc_queue.cc b/src/cls/2pc_queue/cls_2pc_queue.cc new file mode 100644 index 000000000..fba763955 --- /dev/null +++ b/src/cls/2pc_queue/cls_2pc_queue.cc @@ -0,0 +1,602 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "include/types.h" + +#include "cls/2pc_queue/cls_2pc_queue_types.h" +#include "cls/2pc_queue/cls_2pc_queue_ops.h" +#include "cls/2pc_queue/cls_2pc_queue_const.h" +#include "cls/queue/cls_queue_ops.h" +#include "cls/queue/cls_queue_src.h" +#include "objclass/objclass.h" + +CLS_VER(1,0) +CLS_NAME(2pc_queue) + +using ceph::bufferlist; +using ceph::decode; +using ceph::encode; + +constexpr auto CLS_QUEUE_URGENT_DATA_XATTR_NAME = "cls_queue_urgent_data"; + +static int cls_2pc_queue_init(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + auto in_iter = in->cbegin(); + + cls_queue_init_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_init: failed to decode entry: %s", err.what()); + return -EINVAL; + } + + cls_2pc_urgent_data urgent_data; + + cls_queue_init_op init_op; + + CLS_LOG(20, "INFO: cls_2pc_queue_init: max size is %lu (bytes)", op.queue_size); + + init_op.queue_size = op.queue_size; + init_op.max_urgent_data_size = 23552; // overall head is 24KB ~ pending 1K reservations ops + encode(urgent_data, init_op.bl_urgent_data); + + return queue_init(hctx, init_op); +} + +static int cls_2pc_queue_get_capacity(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + cls_queue_get_capacity_ret op_ret; + auto ret = queue_get_capacity(hctx, op_ret); + if (ret < 0) { + return ret; + } + + encode(op_ret, *out); + return 0; +} + +static int cls_2pc_queue_reserve(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + cls_2pc_queue_reserve_op res_op; + try { + auto in_iter = in->cbegin(); + decode(res_op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: failed to decode entry: %s", err.what()); + return -EINVAL; + } + + if (res_op.size == 0) { + CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: cannot reserve zero bytes"); + return -EINVAL; + } + if (res_op.entries == 0) { + CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: cannot reserve zero entries"); + return -EINVAL; + } + + // get head + cls_queue_head head; + int ret = queue_read_head(hctx, head); + if (ret < 0) { + return ret; + } + + cls_2pc_urgent_data urgent_data; + try { + auto in_iter = head.bl_urgent_data.cbegin(); + decode(urgent_data, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: failed to decode entry: %s", err.what()); + return -EINVAL; + } + + const auto overhead = res_op.entries*QUEUE_ENTRY_OVERHEAD; + const auto remaining_size = (head.tail.offset >= head.front.offset) ? 
+ (head.queue_size - head.tail.offset) + (head.front.offset - head.max_head_size) : + head.front.offset - head.tail.offset; + + + if (res_op.size + urgent_data.reserved_size + overhead > remaining_size) { + CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: reservations exceeded maximum capacity"); + CLS_LOG(10, "INFO: cls_2pc_queue_reserve: remaining size: %lu (bytes)", remaining_size); + CLS_LOG(10, "INFO: cls_2pc_queue_reserve: current reservations: %lu (bytes)", urgent_data.reserved_size); + CLS_LOG(10, "INFO: cls_2pc_queue_reserve: requested size: %lu (bytes)", res_op.size); + return -ENOSPC; + } + + urgent_data.reserved_size += res_op.size + overhead; + // note that last id is incremented regadless of failures + // to avoid "old reservation" issues below + ++urgent_data.last_id; + bool result; + cls_2pc_reservations::iterator last_reservation; + std::tie(last_reservation, result) = urgent_data.reservations.emplace(std::piecewise_construct, + std::forward_as_tuple(urgent_data.last_id), + std::forward_as_tuple(res_op.size, ceph::coarse_real_clock::now())); + if (!result) { + // an old reservation that was never committed or aborted is in the map + // caller should try again assuming other IDs are ok + CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: reservation id conflict after rollover: %u", urgent_data.last_id); + return -EAGAIN; + } + + // write back head + head.bl_urgent_data.clear(); + encode(urgent_data, head.bl_urgent_data); + + const uint64_t urgent_data_length = head.bl_urgent_data.length(); + + if (head.max_urgent_data_size < urgent_data_length) { + CLS_LOG(10, "INFO: cls_2pc_queue_reserve: urgent data size: %lu exceeded maximum: %lu using xattrs", urgent_data_length, head.max_urgent_data_size); + // add the last reservation to xattrs + bufferlist bl_xattrs; + auto ret = cls_cxx_getxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); + if (ret < 0 && (ret != -ENOENT && ret != -ENODATA)) { + CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: failed to read xattrs with: %d", ret); + return ret; + } + cls_2pc_reservations xattr_reservations; + if (ret >= 0) { + // xattrs exist + auto iter = bl_xattrs.cbegin(); + try { + decode(xattr_reservations, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: failed to decode xattrs urgent data map"); + return -EINVAL; + } //end - catch + } + std::tie(std::ignore, result) = xattr_reservations.emplace(std::piecewise_construct, + std::forward_as_tuple(urgent_data.last_id), + std::forward_as_tuple(res_op.size, ceph::coarse_real_clock::now())); + if (!result) { + // an old reservation that was never committed or aborted is in the map + // caller should try again assuming other IDs are ok + CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: reservation id conflict inside xattrs after rollover: %u", urgent_data.last_id); + return -EAGAIN; + } + bl_xattrs.clear(); + encode(xattr_reservations, bl_xattrs); + ret = cls_cxx_setxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); + if (ret < 0) { + CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: failed to write xattrs with: %d", ret); + return ret; + } + // remove the last reservation from the reservation list + // and indicate that spillover happened + urgent_data.has_xattrs = true; + urgent_data.reservations.erase(last_reservation); + head.bl_urgent_data.clear(); + encode(urgent_data, head.bl_urgent_data); + } + + ret = queue_write_head(hctx, head); + if (ret < 0) { + return ret; + } + + CLS_LOG(20, "INFO: cls_2pc_queue_reserve: remaining size: %lu (bytes)", remaining_size); + CLS_LOG(20, 
"INFO: cls_2pc_queue_reserve: current reservations: %lu (bytes)", urgent_data.reserved_size); + CLS_LOG(20, "INFO: cls_2pc_queue_reserve: requested size: %lu (bytes)", res_op.size); + CLS_LOG(20, "INFO: cls_2pc_queue_reserve: urgent data size: %lu (bytes)", urgent_data_length); + + cls_2pc_queue_reserve_ret op_ret; + op_ret.id = urgent_data.last_id; + encode(op_ret, *out); + + return 0; +} + +static int cls_2pc_queue_commit(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + cls_2pc_queue_commit_op commit_op; + try { + auto in_iter = in->cbegin(); + decode(commit_op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_commit: failed to decode entry: %s", err.what()); + return -EINVAL; + } + + // get head + cls_queue_head head; + int ret = queue_read_head(hctx, head); + if (ret < 0) { + return ret; + } + + cls_2pc_urgent_data urgent_data; + try { + auto in_iter = head.bl_urgent_data.cbegin(); + decode(urgent_data, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_commit: failed to decode entry: %s", err.what()); + return -EINVAL; + } + + auto it = urgent_data.reservations.find(commit_op.id); + cls_2pc_reservations xattr_reservations; + bufferlist bl_xattrs; + if (it == urgent_data.reservations.end()) { + if (!urgent_data.has_xattrs) { + CLS_LOG(1, "ERROR: cls_2pc_queue_commit: reservation does not exist: %u", commit_op.id); + return -ENOENT; + } + // try to look for the reservation in xattrs + auto ret = cls_cxx_getxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); + if (ret < 0) { + if (ret == -ENOENT || ret == -ENODATA) { + // no xattrs, reservation does not exists + CLS_LOG(1, "ERROR: cls_2pc_queue_commit: reservation does not exist: %u", commit_op.id); + return -ENOENT; + } + CLS_LOG(1, "ERROR: cls_2pc_queue_commit: failed to read xattrs with: %d", ret); + return ret; + } + auto iter = bl_xattrs.cbegin(); + try { + decode(xattr_reservations, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_commit: failed to decode xattrs urgent data map"); + return -EINVAL; + } //end - catch + it = xattr_reservations.find(commit_op.id); + if (it == urgent_data.reservations.end()) { + CLS_LOG(1, "ERROR: cls_2pc_queue_commit: reservation does not exist: %u", commit_op.id); + return -ENOENT; + } + } + + auto& res = it->second; + const auto actual_size = std::accumulate(commit_op.bl_data_vec.begin(), + commit_op.bl_data_vec.end(), 0UL, [] (uint64_t sum, const bufferlist& bl) { + return sum + bl.length(); + }); + + if (res.size < actual_size) { + CLS_LOG(1, "ERROR: cls_2pc_queue_commit: trying to commit %lu bytes to a %lu bytes reservation", + actual_size, + res.size); + return -EINVAL; + } + + // commit the data to the queue + cls_queue_enqueue_op enqueue_op; + enqueue_op.bl_data_vec = std::move(commit_op.bl_data_vec); + ret = queue_enqueue(hctx, enqueue_op, head); + if (ret < 0) { + return ret; + } + + urgent_data.reserved_size -= res.size; + + if (xattr_reservations.empty()) { + // remove the reservation from urgent data + urgent_data.reservations.erase(it); + } else { + // remove the reservation from xattrs + xattr_reservations.erase(it); + bl_xattrs.clear(); + encode(xattr_reservations, bl_xattrs); + ret = cls_cxx_setxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); + if (ret < 0) { + CLS_LOG(1, "ERROR: cls_2pc_queue_commit: failed to write xattrs with: %d", ret); + return ret; + } + } + + CLS_LOG(20, "INFO: cls_2pc_queue_commit: current reservations: %lu (bytes)", 
urgent_data.reserved_size); + CLS_LOG(20, "INFO: cls_2pc_queue_commit: current reservation entries: %lu", + urgent_data.reservations.size() + xattr_reservations.size()); + + // write back head + head.bl_urgent_data.clear(); + encode(urgent_data, head.bl_urgent_data); + return queue_write_head(hctx, head); +} + +static int cls_2pc_queue_abort(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + cls_2pc_queue_abort_op abort_op; + try { + auto in_iter = in->cbegin(); + decode(abort_op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_abort: failed to decode entry: %s", err.what()); + return -EINVAL; + } + + // get head + cls_queue_head head; + int ret = queue_read_head(hctx, head); + if (ret < 0) { + return ret; + } + + cls_2pc_urgent_data urgent_data; + try { + auto in_iter = head.bl_urgent_data.cbegin(); + decode(urgent_data, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_abort: failed to decode entry: %s", err.what()); + return -EINVAL; + } + + auto it = urgent_data.reservations.find(abort_op.id); + uint64_t reservation_size; + if (it == urgent_data.reservations.end()) { + if (!urgent_data.has_xattrs) { + CLS_LOG(20, "INFO: cls_2pc_queue_abort: reservation does not exist: %u", abort_op.id); + return 0; + } + // try to look for the reservation in xattrs + bufferlist bl_xattrs; + auto ret = cls_cxx_getxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); + if (ret < 0) { + if (ret == -ENOENT || ret == -ENODATA) { + // no xattrs, reservation does not exists + CLS_LOG(20, "INFO: cls_2pc_queue_abort: reservation does not exist: %u", abort_op.id); + return 0; + } + CLS_LOG(1, "ERROR: cls_2pc_queue_abort: failed to read xattrs with: %d", ret); + return ret; + } + auto iter = bl_xattrs.cbegin(); + cls_2pc_reservations xattr_reservations; + try { + decode(xattr_reservations, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_abort: failed to decode xattrs urgent data map"); + return -EINVAL; + } //end - catch + it = xattr_reservations.find(abort_op.id); + if (it == xattr_reservations.end()) { + CLS_LOG(20, "INFO: cls_2pc_queue_abort: reservation does not exist: %u", abort_op.id); + return 0; + } + reservation_size = it->second.size; + xattr_reservations.erase(it); + bl_xattrs.clear(); + encode(xattr_reservations, bl_xattrs); + ret = cls_cxx_setxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); + if (ret < 0) { + CLS_LOG(1, "ERROR: cls_2pc_queue_abort: failed to write xattrs with: %d", ret); + return ret; + } + } else { + reservation_size = it->second.size; + urgent_data.reservations.erase(it); + } + + // remove the reservation + urgent_data.reserved_size -= reservation_size; + + CLS_LOG(20, "INFO: cls_2pc_queue_abort: current reservations: %lu (bytes)", urgent_data.reserved_size); + + // write back head + head.bl_urgent_data.clear(); + encode(urgent_data, head.bl_urgent_data); + return queue_write_head(hctx, head); +} + +static int cls_2pc_queue_list_reservations(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + //get head + cls_queue_head head; + auto ret = queue_read_head(hctx, head); + if (ret < 0) { + return ret; + } + + cls_2pc_urgent_data urgent_data; + try { + auto in_iter = head.bl_urgent_data.cbegin(); + decode(urgent_data, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_list_reservations: failed to decode entry: %s", err.what()); + return -EINVAL; + } + + CLS_LOG(20, "INFO: cls_2pc_queue_list_reservations: %lu 
reservation entries found", urgent_data.reservations.size()); + cls_2pc_queue_reservations_ret op_ret; + op_ret.reservations = std::move(urgent_data.reservations); + if (urgent_data.has_xattrs) { + // try to look for the reservation in xattrs + cls_2pc_reservations xattr_reservations; + bufferlist bl_xattrs; + ret = cls_cxx_getxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); + if (ret < 0 && (ret != -ENOENT && ret != -ENODATA)) { + CLS_LOG(1, "ERROR: cls_2pc_queue_list_reservations: failed to read xattrs with: %d", ret); + return ret; + } + if (ret >= 0) { + auto iter = bl_xattrs.cbegin(); + try { + decode(xattr_reservations, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_list_reservations: failed to decode xattrs urgent data map"); + return -EINVAL; + } //end - catch + CLS_LOG(20, "INFO: cls_2pc_queue_list_reservations: %lu reservation entries found in xatts", xattr_reservations.size()); + op_ret.reservations.merge(xattr_reservations); + } + } + encode(op_ret, *out); + + return 0; +} + +static int cls_2pc_queue_expire_reservations(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + cls_2pc_queue_expire_op expire_op; + try { + auto in_iter = in->cbegin(); + decode(expire_op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_expire_reservations: failed to decode entry: %s", err.what()); + return -EINVAL; + } + + //get head + cls_queue_head head; + auto ret = queue_read_head(hctx, head); + if (ret < 0) { + return ret; + } + + cls_2pc_urgent_data urgent_data; + try { + auto in_iter = head.bl_urgent_data.cbegin(); + decode(urgent_data, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_expire_reservations: failed to decode entry: %s", err.what()); + return -EINVAL; + } + + CLS_LOG(20, "INFO: cls_2pc_queue_expire_reservations: %lu reservation entries found", urgent_data.reservations.size()); + CLS_LOG(20, "INFO: cls_2pc_queue_expire_reservations: current reservations: %lu (bytes)", urgent_data.reserved_size); + + uint64_t reservation_size = 0U; + auto stale_found = false; + auto xattr_stale_found = false; + + for (auto it = urgent_data.reservations.begin(); it != urgent_data.reservations.end();) { + if (it->second.timestamp < expire_op.stale_time) { + CLS_LOG(5, "WARNING: cls_2pc_queue_expire_reservations: stale reservation %u will be removed", it->first); + reservation_size += it->second.size; + it = urgent_data.reservations.erase(it); + stale_found = true; + } else { + ++it; + } + } + + if (urgent_data.has_xattrs) { + // try to look for the reservation in xattrs + cls_2pc_reservations xattr_reservations; + bufferlist bl_xattrs; + ret = cls_cxx_getxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); + if (ret < 0 && (ret != -ENOENT && ret != -ENODATA)) { + CLS_LOG(1, "ERROR: cls_2pc_queue_expire_reservations: failed to read xattrs with: %d", ret); + return ret; + } + if (ret >= 0) { + auto iter = bl_xattrs.cbegin(); + try { + decode(xattr_reservations, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_expire_reservations: failed to decode xattrs urgent data map"); + return -EINVAL; + } //end - catch + CLS_LOG(20, "INFO: cls_2pc_queue_expire_reservations: %lu reservation entries found in xatts", xattr_reservations.size()); + for (auto it = xattr_reservations.begin(); it != xattr_reservations.end();) { + if (it->second.timestamp < expire_op.stale_time) { + CLS_LOG(5, "WARNING: cls_2pc_queue_expire_reservations: stale reservation %u will 
be removed", it->first); + reservation_size += it->second.size; + it = xattr_reservations.erase(it); + xattr_stale_found = true; + } else { + ++it; + } + } + if (xattr_stale_found) { + // write xattr back without stale reservations + bl_xattrs.clear(); + encode(xattr_reservations, bl_xattrs); + ret = cls_cxx_setxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); + if (ret < 0) { + CLS_LOG(1, "ERROR: cls_2pc_queue_expire_reservations: failed to write xattrs with: %d", ret); + return ret; + } + } + } + } + + if (stale_found || xattr_stale_found) { + urgent_data.reserved_size -= reservation_size; + CLS_LOG(20, "INFO: cls_2pc_queue_expire_reservations: reservations after cleanup: %lu (bytes)", urgent_data.reserved_size); + // write back head without stale reservations + head.bl_urgent_data.clear(); + encode(urgent_data, head.bl_urgent_data); + return queue_write_head(hctx, head); + } + + return 0; +} + +static int cls_2pc_queue_list_entries(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + cls_queue_list_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_list_entries: failed to decode entry: %s", err.what()); + return -EINVAL; + } + + cls_queue_head head; + auto ret = queue_read_head(hctx, head); + if (ret < 0) { + return ret; + } + + cls_queue_list_ret op_ret; + ret = queue_list_entries(hctx, op, op_ret, head); + if (ret < 0) { + return ret; + } + + encode(op_ret, *out); + return 0; +} + +static int cls_2pc_queue_remove_entries(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + cls_queue_remove_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_2pc_queue_remove_entries: failed to decode entry: %s", err.what()); + return -EINVAL; + } + + cls_queue_head head; + auto ret = queue_read_head(hctx, head); + if (ret < 0) { + return ret; + } + ret = queue_remove_entries(hctx, op, head); + if (ret < 0) { + return ret; + } + return queue_write_head(hctx, head); +} + +CLS_INIT(2pc_queue) +{ + CLS_LOG(1, "Loaded 2pc queue class!"); + + cls_handle_t h_class; + cls_method_handle_t h_2pc_queue_init; + cls_method_handle_t h_2pc_queue_get_capacity; + cls_method_handle_t h_2pc_queue_reserve; + cls_method_handle_t h_2pc_queue_commit; + cls_method_handle_t h_2pc_queue_abort; + cls_method_handle_t h_2pc_queue_list_reservations; + cls_method_handle_t h_2pc_queue_list_entries; + cls_method_handle_t h_2pc_queue_remove_entries; + cls_method_handle_t h_2pc_queue_expire_reservations; + + cls_register(TPC_QUEUE_CLASS, &h_class); + + cls_register_cxx_method(h_class, TPC_QUEUE_INIT, CLS_METHOD_RD | CLS_METHOD_WR, cls_2pc_queue_init, &h_2pc_queue_init); + cls_register_cxx_method(h_class, TPC_QUEUE_GET_CAPACITY, CLS_METHOD_RD, cls_2pc_queue_get_capacity, &h_2pc_queue_get_capacity); + cls_register_cxx_method(h_class, TPC_QUEUE_RESERVE, CLS_METHOD_RD | CLS_METHOD_WR, cls_2pc_queue_reserve, &h_2pc_queue_reserve); + cls_register_cxx_method(h_class, TPC_QUEUE_COMMIT, CLS_METHOD_RD | CLS_METHOD_WR, cls_2pc_queue_commit, &h_2pc_queue_commit); + cls_register_cxx_method(h_class, TPC_QUEUE_ABORT, CLS_METHOD_RD | CLS_METHOD_WR, cls_2pc_queue_abort, &h_2pc_queue_abort); + cls_register_cxx_method(h_class, TPC_QUEUE_LIST_RESERVATIONS, CLS_METHOD_RD, cls_2pc_queue_list_reservations, &h_2pc_queue_list_reservations); + cls_register_cxx_method(h_class, TPC_QUEUE_LIST_ENTRIES, CLS_METHOD_RD, cls_2pc_queue_list_entries, 
&h_2pc_queue_list_entries); + cls_register_cxx_method(h_class, TPC_QUEUE_REMOVE_ENTRIES, CLS_METHOD_RD | CLS_METHOD_WR, cls_2pc_queue_remove_entries, &h_2pc_queue_remove_entries); + cls_register_cxx_method(h_class, TPC_QUEUE_EXPIRE_RESERVATIONS, CLS_METHOD_RD | CLS_METHOD_WR, cls_2pc_queue_expire_reservations, &h_2pc_queue_expire_reservations); + + return; +} + diff --git a/src/cls/2pc_queue/cls_2pc_queue_client.cc b/src/cls/2pc_queue/cls_2pc_queue_client.cc new file mode 100644 index 000000000..6868b2b6f --- /dev/null +++ b/src/cls/2pc_queue/cls_2pc_queue_client.cc @@ -0,0 +1,210 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "cls/2pc_queue/cls_2pc_queue_client.h" +#include "cls/2pc_queue/cls_2pc_queue_ops.h" +#include "cls/2pc_queue/cls_2pc_queue_const.h" +#include "cls/queue/cls_queue_ops.h" +#include "cls/queue/cls_queue_const.h" + +using namespace librados; + +void cls_2pc_queue_init(ObjectWriteOperation& op, const std::string& queue_name, uint64_t size) { + bufferlist in; + cls_queue_init_op call; + call.queue_size = size; + encode(call, in); + op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_INIT, in); +} + +int cls_2pc_queue_get_capacity_result(const bufferlist& bl, uint64_t& size) { + cls_queue_get_capacity_ret op_ret; + auto iter = bl.cbegin(); + try { + decode(op_ret, iter); + } catch (buffer::error& err) { + return -EIO; + } + + size = op_ret.queue_capacity; + + return 0; +} + +#ifndef CLS_CLIENT_HIDE_IOCTX +int cls_2pc_queue_get_capacity(IoCtx& io_ctx, const std::string& queue_name, uint64_t& size) { + bufferlist in, out; + const auto r = io_ctx.exec(queue_name, TPC_QUEUE_CLASS, TPC_QUEUE_GET_CAPACITY, in, out); + if (r < 0 ) { + return r; + } + + return cls_2pc_queue_get_capacity_result(out, size); +} +#endif + +// optionally async method for getting capacity (bytes) +// after answer is received, call cls_2pc_queue_get_capacity_result() to prase the results +void cls_2pc_queue_get_capacity(ObjectReadOperation& op, bufferlist* obl, int* prval) { + bufferlist in; + op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_GET_CAPACITY, in, obl, prval); +} + + +int cls_2pc_queue_reserve_result(const bufferlist& bl, cls_2pc_reservation::id_t& res_id) { + cls_2pc_queue_reserve_ret op_ret; + auto iter = bl.cbegin(); + try { + decode(op_ret, iter); + } catch (buffer::error& err) { + return -EIO; + } + res_id = op_ret.id; + + return 0; +} + +int cls_2pc_queue_reserve(IoCtx& io_ctx, const std::string& queue_name, + uint64_t res_size, uint32_t entries, cls_2pc_reservation::id_t& res_id) { + bufferlist in, out; + cls_2pc_queue_reserve_op reserve_op; + reserve_op.size = res_size; + reserve_op.entries = entries; + + encode(reserve_op, in); + int rval; + ObjectWriteOperation op; + op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_RESERVE, in, &out, &rval); + const auto r = io_ctx.operate(queue_name, &op, librados::OPERATION_RETURNVEC); + + if (r < 0) { + return r; + } + + return cls_2pc_queue_reserve_result(out, res_id); +} + +void cls_2pc_queue_reserve(ObjectWriteOperation& op, uint64_t res_size, + uint32_t entries, bufferlist* obl, int* prval) { + bufferlist in; + cls_2pc_queue_reserve_op reserve_op; + reserve_op.size = res_size; + reserve_op.entries = entries; + encode(reserve_op, in); + op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_RESERVE, in, obl, prval); +} + +void cls_2pc_queue_commit(ObjectWriteOperation& op, std::vector<bufferlist> bl_data_vec, + cls_2pc_reservation::id_t res_id) { + bufferlist in; + cls_2pc_queue_commit_op commit_op; + commit_op.id = res_id; + 
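// note: bl_data_vec was taken by value above so it can be moved into the op without copying the payload buffers +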
commit_op.bl_data_vec = std::move(bl_data_vec); + encode(commit_op, in); + op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_COMMIT, in); +} + +void cls_2pc_queue_abort(ObjectWriteOperation& op, cls_2pc_reservation::id_t res_id) { + bufferlist in; + cls_2pc_queue_abort_op abort_op; + abort_op.id = res_id; + encode(abort_op, in); + op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_ABORT, in); +} + +int cls_2pc_queue_list_entries_result(const bufferlist& bl, std::vector<cls_queue_entry>& entries, + bool *truncated, std::string& next_marker) { + cls_queue_list_ret ret; + auto iter = bl.cbegin(); + try { + decode(ret, iter); + } catch (buffer::error& err) { + return -EIO; + } + + entries = std::move(ret.entries); + *truncated = ret.is_truncated; + + next_marker = std::move(ret.next_marker); + + return 0; +} + +#ifndef CLS_CLIENT_HIDE_IOCTX +int cls_2pc_queue_list_entries(IoCtx& io_ctx, + const std::string& queue_name, + const std::string& marker, uint32_t max, + std::vector<cls_queue_entry>& entries, + bool *truncated, std::string& next_marker) { + bufferlist in, out; + cls_queue_list_op op; + op.start_marker = marker; + op.max = max; + encode(op, in); + + const auto r = io_ctx.exec(queue_name, TPC_QUEUE_CLASS, TPC_QUEUE_LIST_ENTRIES, in, out); + if (r < 0) { + return r; + } + return cls_2pc_queue_list_entries_result(out, entries, truncated, next_marker); +} +#endif + +void cls_2pc_queue_list_entries(ObjectReadOperation& op, const std::string& marker, uint32_t max, bufferlist* obl, int* prval) { + bufferlist in; + cls_queue_list_op list_op; + list_op.start_marker = marker; + list_op.max = max; + encode(list_op, in); + + op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_LIST_ENTRIES, in, obl, prval); +} + +int cls_2pc_queue_list_reservations_result(const bufferlist& bl, cls_2pc_reservations& reservations) { + cls_2pc_queue_reservations_ret ret; + auto iter = bl.cbegin(); + try { + decode(ret, iter); + } catch (buffer::error& err) { + return -EIO; + } + + reservations = std::move(ret.reservations); + + return 0; +} + +#ifndef CLS_CLIENT_HIDE_IOCTX +int cls_2pc_queue_list_reservations(IoCtx& io_ctx, const std::string& queue_name, cls_2pc_reservations& reservations) { + bufferlist in, out; + + const auto r = io_ctx.exec(queue_name, TPC_QUEUE_CLASS, TPC_QUEUE_LIST_RESERVATIONS, in, out); + if (r < 0) { + return r; + } + return cls_2pc_queue_list_reservations_result(out, reservations); +} +#endif + +void cls_2pc_queue_list_reservations(ObjectReadOperation& op, bufferlist* obl, int* prval) { + bufferlist in; + + op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_LIST_RESERVATIONS, in, obl, prval); +} + +void cls_2pc_queue_remove_entries(ObjectWriteOperation& op, const std::string& end_marker) { + bufferlist in; + cls_queue_remove_op rem_op; + rem_op.end_marker = end_marker; + encode(rem_op, in); + op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_REMOVE_ENTRIES, in); +} + +void cls_2pc_queue_expire_reservations(librados::ObjectWriteOperation& op, ceph::coarse_real_time stale_time) { + bufferlist in; + cls_2pc_queue_expire_op expire_op; + expire_op.stale_time = stale_time; + encode(expire_op, in); + op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_EXPIRE_RESERVATIONS, in); +} + diff --git a/src/cls/2pc_queue/cls_2pc_queue_client.h b/src/cls/2pc_queue/cls_2pc_queue_client.h new file mode 100644 index 000000000..e0bdeafd5 --- /dev/null +++ b/src/cls/2pc_queue/cls_2pc_queue_client.h @@ -0,0 +1,84 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#pragma once + +#include <string> +#include <vector> +#include "include/rados/librados.hpp" 
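+// thin client-side wrappers for the 2pc_queue object class; the server-side methods live in cls_2pc_queue.cc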
+#include "cls/queue/cls_queue_types.h" +#include "cls/2pc_queue/cls_2pc_queue_types.h" + +// initialize the queue with maximum size (bytes) +// note that the actual size of the queue will be larger, as 24K bytes will be allocated in the head object +// and more may be allocated as xattrs of the object (depending with the number of concurrent reservations) +void cls_2pc_queue_init(librados::ObjectWriteOperation& op, const std::string& queue_name, uint64_t size); + +// these overloads which call io_ctx.operate() or io_ctx.exec() should not be called in the rgw. +// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()/exec() +#ifndef CLS_CLIENT_HIDE_IOCTX +// return capacity (bytes) +int cls_2pc_queue_get_capacity(librados::IoCtx& io_ctx, const std::string& queue_name, uint64_t& size); + +// make a reservation on the queue (in bytes) and number of expected entries (to calculate overhead) +// return a reservation id if reservations is possible, 0 otherwise +int cls_2pc_queue_reserve(librados::IoCtx& io_ctx, const std::string& queue_name, + uint64_t res_size, uint32_t entries, cls_2pc_reservation::id_t& res_id); + +// incremental listing of all entries in the queue +int cls_2pc_queue_list_entries(librados::IoCtx& io_ctx, const std::string& queue_name, const std::string& marker, uint32_t max, + std::vector<cls_queue_entry>& entries, bool *truncated, std::string& next_marker); + +// list all pending reservations in the queue +int cls_2pc_queue_list_reservations(librados::IoCtx& io_ctx, const std::string& queue_name, cls_2pc_reservations& reservations); +#endif + +// optionally async method for getting capacity (bytes) +// after answer is received, call cls_2pc_queue_get_capacity_result() to parse the results +void cls_2pc_queue_get_capacity(librados::ObjectReadOperation& op, bufferlist* obl, int* prval); + +int cls_2pc_queue_get_capacity_result(const bufferlist& bl, uint64_t& size); + +// optionally async method for making a reservation on the queue (in bytes) and number of expected entries (to calculate overhead) +// notes: +// (1) make sure that librados::OPERATION_RETURNVEC is passed to the executing function +// (2) multiple operations cannot be executed in a batch (operations both read and write) +// after answer is received, call cls_2pc_queue_reserve_result() to parse the results +void cls_2pc_queue_reserve(librados::ObjectWriteOperation& op, uint64_t res_size, + uint32_t entries, bufferlist* obl, int* prval); + +int cls_2pc_queue_reserve_result(const bufferlist& bl, cls_2pc_reservation::id_t& res_id); + +// commit data using a reservation done beforehand +// res_id must be allocated using cls_2pc_queue_reserve, and could be either committed or aborted once +// the size of bl_data_vec must be equal or smaller to the size reserved for the res_id +// note that the number of entries in bl_data_vec does not have to match the number of entries reserved +// only size (including the overhead of the entries) is checked +void cls_2pc_queue_commit(librados::ObjectWriteOperation& op, std::vector<bufferlist> bl_data_vec, + cls_2pc_reservation::id_t res_id); + +// abort a reservation +// res_id must be allocated using cls_2pc_queue_reserve +void cls_2pc_queue_abort(librados::ObjectWriteOperation& op, + cls_2pc_reservation::id_t res_id); + +// optionally async incremental listing of all entries in the queue +// after answer is received, call cls_2pc_queue_list_entries_result() to parse the results +void cls_2pc_queue_list_entries(librados::ObjectReadOperation& op, 
const std::string& marker, uint32_t max, bufferlist* obl, int* prval); + +int cls_2pc_queue_list_entries_result(const bufferlist& bl, std::vector<cls_queue_entry>& entries, + bool *truncated, std::string& next_marker); + +// optionally async listing of all pending reservations in the queue +// after answer is received, call cls_2pc_queue_list_reservations_result() to parse the results +void cls_2pc_queue_list_reservations(librados::ObjectReadOperation& op, bufferlist* obl, int* prval); + +int cls_2pc_queue_list_reservations_result(const librados::bufferlist& bl, cls_2pc_reservations& reservations); + +// expire stale reservations (older than the given time) +void cls_2pc_queue_expire_reservations(librados::ObjectWriteOperation& op, + ceph::coarse_real_time stale_time); + +// remove all entries up to the given marker +void cls_2pc_queue_remove_entries(librados::ObjectWriteOperation& op, const std::string& end_marker); + diff --git a/src/cls/2pc_queue/cls_2pc_queue_const.h b/src/cls/2pc_queue/cls_2pc_queue_const.h new file mode 100644 index 000000000..160c5b66e --- /dev/null +++ b/src/cls/2pc_queue/cls_2pc_queue_const.h @@ -0,0 +1,14 @@ +#pragma once + +#define TPC_QUEUE_CLASS "2pc_queue" + +#define TPC_QUEUE_INIT "2pc_queue_init" +#define TPC_QUEUE_GET_CAPACITY "2pc_queue_get_capacity" +#define TPC_QUEUE_RESERVE "2pc_queue_reserve" +#define TPC_QUEUE_COMMIT "2pc_queue_commit" +#define TPC_QUEUE_ABORT "2pc_queue_abort" +#define TPC_QUEUE_LIST_RESERVATIONS "2pc_queue_list_reservations" +#define TPC_QUEUE_LIST_ENTRIES "2pc_queue_list_entries" +#define TPC_QUEUE_REMOVE_ENTRIES "2pc_queue_remove_entries" +#define TPC_QUEUE_EXPIRE_RESERVATIONS "2pc_queue_expire_reservations" + diff --git a/src/cls/2pc_queue/cls_2pc_queue_ops.h b/src/cls/2pc_queue/cls_2pc_queue_ops.h new file mode 100644 index 000000000..d0b84193d --- /dev/null +++ b/src/cls/2pc_queue/cls_2pc_queue_ops.h @@ -0,0 +1,117 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#pragma once + +#include "include/types.h" +#include "cls_2pc_queue_types.h" + +struct cls_2pc_queue_reserve_op { + uint64_t size; + uint32_t entries; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(size, bl); + encode(entries, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(size, bl); + decode(entries, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_2pc_queue_reserve_op) + +struct cls_2pc_queue_reserve_ret { + cls_2pc_reservation::id_t id; // allocated reservation id + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(id, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(id, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_2pc_queue_reserve_ret) + +struct cls_2pc_queue_commit_op { + cls_2pc_reservation::id_t id; // reservation to commit + std::vector<ceph::buffer::list> bl_data_vec; // the data to enqueue + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(id, bl); + encode(bl_data_vec, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(id, bl); + decode(bl_data_vec, bl); + DECODE_FINISH(bl); + } + +}; +WRITE_CLASS_ENCODER(cls_2pc_queue_commit_op) + +struct cls_2pc_queue_abort_op { + cls_2pc_reservation::id_t id; // reservation to abort + + void encode(ceph::buffer::list& bl) const { + 
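// only the reservation id needs to be encoded for an abort +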
ENCODE_START(1, 1, bl); + encode(id, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(id, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_2pc_queue_abort_op) + +struct cls_2pc_queue_expire_op { + // any reservation older than this time should expire + ceph::coarse_real_time stale_time; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(stale_time, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(stale_time, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_2pc_queue_expire_op) + +struct cls_2pc_queue_reservations_ret { + cls_2pc_reservations reservations; // reservation list (keyed by id) + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(reservations, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(reservations, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_2pc_queue_reservations_ret) diff --git a/src/cls/2pc_queue/cls_2pc_queue_types.h b/src/cls/2pc_queue/cls_2pc_queue_types.h new file mode 100644 index 000000000..7c94cdebf --- /dev/null +++ b/src/cls/2pc_queue/cls_2pc_queue_types.h @@ -0,0 +1,62 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +#pragma once + +#include "include/types.h" + +struct cls_2pc_reservation +{ + using id_t = uint32_t; + inline static const id_t NO_ID{0}; + uint64_t size; // how many entries are reserved + ceph::coarse_real_time timestamp; // when the reservation was done (used for cleaning stale reservations) + + cls_2pc_reservation(uint64_t _size, ceph::coarse_real_time _timestamp) : + size(_size), timestamp(_timestamp) {} + + cls_2pc_reservation() = default; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(size, bl); + encode(timestamp, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(size, bl); + decode(timestamp, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_2pc_reservation) + +using cls_2pc_reservations = ceph::unordered_map<cls_2pc_reservation::id_t, cls_2pc_reservation>; + +struct cls_2pc_urgent_data +{ + uint64_t reserved_size{0}; // pending reservations size in bytes + cls_2pc_reservation::id_t last_id{cls_2pc_reservation::NO_ID}; // last allocated id + cls_2pc_reservations reservations; // reservation list (keyed by id) + bool has_xattrs{false}; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(reserved_size, bl); + encode(last_id, bl); + encode(reservations, bl); + encode(has_xattrs, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(reserved_size, bl); + decode(last_id, bl); + decode(reservations, bl); + decode(has_xattrs, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_2pc_urgent_data) diff --git a/src/cls/CMakeLists.txt b/src/cls/CMakeLists.txt new file mode 100644 index 000000000..7981fd347 --- /dev/null +++ b/src/cls/CMakeLists.txt @@ -0,0 +1,373 @@ +## Rados object classes + +set(cls_dir ${CMAKE_INSTALL_LIBDIR}/rados-classes) + +# cls_sdk +add_library(cls_sdk SHARED sdk/cls_sdk.cc) +set_target_properties(cls_sdk PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_sdk DESTINATION ${cls_dir}) + +# 
cls_hello +set(cls_hello_srcs hello/cls_hello.cc) +add_library(cls_hello SHARED ${cls_hello_srcs}) +set_target_properties(cls_hello PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_hello DESTINATION ${cls_dir}) + +# cls_numops +set(cls_numops_srcs numops/cls_numops.cc) +add_library(cls_numops SHARED ${cls_numops_srcs}) +set_target_properties(cls_numops PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_numops DESTINATION ${cls_dir}) + +set(cls_numops_client_srcs numops/cls_numops_client.cc) +add_library(cls_numops_client STATIC ${cls_numops_client_srcs}) + + +# cls_rbd +if (WITH_RBD) + set(cls_rbd_srcs rbd/cls_rbd.cc rbd/cls_rbd_types.cc) + add_library(cls_rbd SHARED ${cls_rbd_srcs}) + set_target_properties(cls_rbd PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) + install(TARGETS cls_rbd DESTINATION ${cls_dir}) + + set(cls_rbd_client_srcs rbd/cls_rbd_client.cc rbd/cls_rbd_types.cc) + add_library(cls_rbd_client STATIC ${cls_rbd_client_srcs}) + target_link_libraries(cls_rbd_client cls_lock_client) + +endif (WITH_RBD) + +# cls_lock +set(cls_lock_srcs lock/cls_lock.cc) +add_library(cls_lock SHARED ${cls_lock_srcs}) +set_target_properties(cls_lock PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_lock DESTINATION ${cls_dir}) + +set(cls_lock_client_srcs + lock/cls_lock_client.cc + lock/cls_lock_types.cc + lock/cls_lock_ops.cc) +add_library(cls_lock_client STATIC ${cls_lock_client_srcs}) + + +# cls_otp +if (WITH_RADOSGW) + set(cls_otp_srcs otp/cls_otp.cc) + add_library(cls_otp SHARED ${cls_otp_srcs}) + target_link_libraries(cls_otp OATH::OATH) + target_include_directories(cls_otp + PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw/driver/rados" + PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw" + PUBLIC "${CMAKE_SOURCE_DIR}/src/spawn/include") + set_target_properties(cls_otp PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) + install(TARGETS cls_otp DESTINATION ${cls_dir}) + + set(cls_otp_client_srcs + otp/cls_otp_client.cc + otp/cls_otp_types.cc + ) + add_library(cls_otp_client STATIC ${cls_otp_client_srcs}) +endif (WITH_RADOSGW) + +# cls_refcount +set(cls_refcount_srcs + refcount/cls_refcount.cc + refcount/cls_refcount_ops.cc + ${CMAKE_SOURCE_DIR}/src/common/ceph_json.cc) +add_library(cls_refcount SHARED ${cls_refcount_srcs}) +target_link_libraries(cls_refcount json_spirit) +set_target_properties(cls_refcount PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_refcount DESTINATION ${cls_dir}) + +set(cls_refcount_client_srcs + refcount/cls_refcount_client.cc + refcount/cls_refcount_ops.cc) +add_library(cls_refcount_client STATIC ${cls_refcount_client_srcs}) + + +# cls_version +set(cls_version_srcs version/cls_version.cc) +add_library(cls_version SHARED ${cls_version_srcs}) +set_target_properties(cls_version PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_version DESTINATION ${cls_dir}) + +set(cls_version_client_srcs + version/cls_version_client.cc + version/cls_version_types.cc) +add_library(cls_version_client STATIC ${cls_version_client_srcs}) + + +# cls_log +set(cls_log_srcs log/cls_log.cc) +add_library(cls_log SHARED ${cls_log_srcs}) +set_target_properties(cls_log PROPERTIES + VERSION "1.0.0" + 
SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_log DESTINATION ${cls_dir}) + +set(cls_log_client_srcs log/cls_log_client.cc) +add_library(cls_log_client STATIC ${cls_log_client_srcs}) + + +# cls_timeindex +set(cls_timeindex_srcs timeindex/cls_timeindex.cc) +add_library(cls_timeindex SHARED ${cls_timeindex_srcs}) +set_target_properties(cls_timeindex PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_timeindex DESTINATION ${cls_dir}) + +set(cls_timeindex_client_srcs + timeindex/cls_timeindex_types.cc + timeindex/cls_timeindex_client.cc) +add_library(cls_timeindex_client STATIC ${cls_timeindex_client_srcs}) + + +# cls_user +set(cls_user_srcs user/cls_user.cc) +add_library(cls_user SHARED ${cls_user_srcs}) +set_target_properties(cls_user PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_user DESTINATION ${cls_dir}) + +set(cls_user_client_srcs + user/cls_user_client.cc + user/cls_user_types.cc + user/cls_user_ops.cc) +add_library(cls_user_client STATIC ${cls_user_client_srcs}) + + +# cls_journal +set(cls_journal_srcs + journal/cls_journal.cc + journal/cls_journal_types.cc) +add_library(cls_journal SHARED ${cls_journal_srcs}) +set_target_properties(cls_journal PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_journal DESTINATION ${cls_dir}) + +set(cls_journal_client_srcs + journal/cls_journal_client.cc + journal/cls_journal_types.cc) +add_library(cls_journal_client STATIC ${cls_journal_client_srcs}) + + +# cls_rgw +if (WITH_RADOSGW) + set(cls_rgw_srcs + rgw/cls_rgw.cc + rgw/cls_rgw_ops.cc + rgw/cls_rgw_types.cc + ${CMAKE_SOURCE_DIR}/src/common/ceph_json.cc) + add_library(cls_rgw SHARED ${cls_rgw_srcs}) + target_link_libraries(cls_rgw fmt json_spirit) + target_include_directories(cls_rgw + PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw/driver/rados" + PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw" + PUBLIC "${CMAKE_SOURCE_DIR}/src/spawn/include") + set_target_properties(cls_rgw PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) + install(TARGETS cls_rgw DESTINATION ${cls_dir}) + + set(cls_rgw_client_srcs + rgw/cls_rgw_client.cc + rgw/cls_rgw_types.cc + rgw/cls_rgw_ops.cc) + add_library(cls_rgw_client STATIC ${cls_rgw_client_srcs}) + target_include_directories(cls_rgw_client + PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw/driver/rados" + PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw" + PUBLIC "${CMAKE_SOURCE_DIR}/src/spawn/include") + +endif (WITH_RADOSGW) + +# cls_cephfs +if (WITH_CEPHFS) + set(cls_cephfs_srcs + cephfs/cls_cephfs.cc) + add_library(cls_cephfs SHARED ${cls_cephfs_srcs}) + set_target_properties(cls_cephfs PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) + install(TARGETS cls_cephfs DESTINATION ${cls_dir}) + + set(cls_cephfs_client_srcs + cephfs/cls_cephfs_client.cc) + add_library(cls_cephfs_client STATIC ${cls_cephfs_client_srcs}) + +endif (WITH_CEPHFS) + +if (NOT WIN32) + # cls_lua + set(cls_lua_srcs + lua/cls_lua.cc + lua/lua_bufferlist.cc) + add_library(cls_lua SHARED ${cls_lua_srcs}) + set_target_properties(cls_lua PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) + install(TARGETS cls_lua DESTINATION ${cls_dir}) + target_link_libraries(cls_lua + ${LUA_LIBRARIES} + json_spirit) + target_include_directories(cls_lua PRIVATE "${LUA_INCLUDE_DIR}") 
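+  # note: only the cls_lua plugin itself is gated on WIN32; the client library below is built on all platforms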
+endif (NOT WIN32) + +set(cls_lua_client_srcs + lua/cls_lua_client.cc) +add_library(cls_lua_client STATIC ${cls_lua_client_srcs}) + +# cls_cas +set(cls_cas_client_srcs + cas/cls_cas_client.cc) +add_library(cls_cas_client STATIC ${cls_cas_client_srcs}) + +set(cls_cas_internal_srcs + cas/cls_cas_internal.cc) +add_library(cls_cas_internal STATIC ${cls_cas_internal_srcs}) + +set(cls_cas_srcs + cas/cls_cas.cc) +add_library(cls_cas SHARED ${cls_cas_srcs}) +target_link_libraries(cls_cas cls_cas_internal) +set_target_properties(cls_cas PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_cas DESTINATION ${cls_dir}) + + + +#cls_queue +set(cls_queue_srcs + queue/cls_queue.cc + queue/cls_queue_src.cc + ${CMAKE_SOURCE_DIR}/src/common/ceph_json.cc) +add_library(cls_queue SHARED ${cls_queue_srcs}) +set_target_properties(cls_queue PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_queue DESTINATION ${cls_dir}) + +set(cls_queue_client_srcs + queue/cls_queue_client.cc) +add_library(cls_queue_client STATIC ${cls_queue_client_srcs}) + +# cls_rgw_gc +if (WITH_RADOSGW) + set(cls_rgw_gc_srcs + rgw_gc/cls_rgw_gc.cc + queue/cls_queue_src.cc + ${CMAKE_SOURCE_DIR}/src/common/ceph_json.cc) + add_library(cls_rgw_gc SHARED ${cls_rgw_gc_srcs}) + target_include_directories(cls_rgw_gc + PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw/driver/rados" + PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw" + PUBLIC "${CMAKE_SOURCE_DIR}/src/spawn/include") + set_target_properties(cls_rgw_gc PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) + install(TARGETS cls_rgw_gc DESTINATION ${cls_dir}) + + set(cls_rgw_gc_client_srcs + rgw_gc/cls_rgw_gc_client.cc) + add_library(cls_rgw_gc_client STATIC ${cls_rgw_gc_client_srcs}) + target_include_directories(cls_rgw_gc_client + PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw/driver/rados" + PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw" + PUBLIC "${CMAKE_SOURCE_DIR}/src/spawn/include") +endif (WITH_RADOSGW) + + +#cls_2pc_queue +set(cls_2pc_queue_srcs + 2pc_queue/cls_2pc_queue.cc + queue/cls_queue_src.cc + ${CMAKE_SOURCE_DIR}/src/common/ceph_json.cc) +add_library(cls_2pc_queue SHARED ${cls_2pc_queue_srcs}) +set_target_properties(cls_2pc_queue PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_2pc_queue DESTINATION ${cls_dir}) +set(cls_2pc_queue_client_srcs + 2pc_queue/cls_2pc_queue_client.cc) +add_library(cls_2pc_queue_client STATIC ${cls_2pc_queue_client_srcs}) + + +add_subdirectory(cmpomap) + +# cls_fifo +set(cls_fifo_srcs fifo/cls_fifo.cc) +add_library(cls_fifo SHARED ${cls_fifo_srcs}) +set_target_properties(cls_fifo PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +target_link_libraries(cls_fifo fmt) +install(TARGETS cls_fifo DESTINATION ${cls_dir}) + +# cls_test_remote_reads +set(cls_test_remote_reads_srcs test_remote_reads/cls_test_remote_reads.cc) +add_library(cls_test_remote_reads SHARED ${cls_test_remote_reads_srcs}) +set_target_properties(cls_test_remote_reads PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_test_remote_reads DESTINATION ${cls_dir}) diff --git a/src/cls/cas/cls_cas.cc b/src/cls/cas/cls_cas.cc new file mode 100644 index 000000000..26aecd894 --- /dev/null +++ b/src/cls/cas/cls_cas.cc @@ -0,0 +1,239 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; 
indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include <errno.h> + +#include "objclass/objclass.h" +#include "cls_cas_ops.h" +#include "cls_cas_internal.h" + +#include "include/compat.h" +#include "osd/osd_types.h" + +using ceph::bufferlist; +using ceph::decode; + +CLS_VER(1,0) +CLS_NAME(cas) + + +// +// helpers +// + +static int chunk_read_refcount( + cls_method_context_t hctx, + chunk_refs_t *objr) +{ + bufferlist bl; + objr->clear(); + int ret = cls_cxx_getxattr(hctx, CHUNK_REFCOUNT_ATTR, &bl); + if (ret == -ENODATA) { + return 0; + } + if (ret < 0) + return ret; + + try { + auto iter = bl.cbegin(); + decode(*objr, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: chunk_read_refcount(): failed to decode refcount entry\n"); + return -EIO; + } + + return 0; +} + +static int chunk_set_refcount( + cls_method_context_t hctx, + const struct chunk_refs_t& objr) +{ + bufferlist bl; + + encode(objr, bl); + + int ret = cls_cxx_setxattr(hctx, CHUNK_REFCOUNT_ATTR, &bl); + if (ret < 0) + return ret; + + return 0; +} + + +// +// methods +// + +static int chunk_create_or_get_ref(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + + cls_cas_chunk_create_or_get_ref_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: failed to decode entry\n"); + return -EINVAL; + } + + chunk_refs_t objr; + int ret = chunk_read_refcount(hctx, &objr); + if (ret == -ENOENT) { + // new chunk; init refs + CLS_LOG(10, "create oid=%s\n", + op.source.oid.name.c_str()); + ret = cls_cxx_write_full(hctx, &op.data); + if (ret < 0) { + return ret; + } + objr.get(op.source); + ret = chunk_set_refcount(hctx, objr); + if (ret < 0) { + return ret; + } + } else if (ret < 0) { + return ret; + } else { + // existing chunk; inc ref + if (op.flags & cls_cas_chunk_create_or_get_ref_op::FLAG_VERIFY) { + bufferlist old; + cls_cxx_read(hctx, 0, 0, &old); + if (!old.contents_equal(op.data)) { + return -ENOMSG; + } + } + CLS_LOG(10, "inc ref oid=%s\n", + op.source.oid.name.c_str()); + + objr.get(op.source); + + ret = chunk_set_refcount(hctx, objr); + if (ret < 0) { + return ret; + } + } + return 0; +} + +static int chunk_get_ref(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + + cls_cas_chunk_get_ref_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: failed to decode entry\n"); + return -EINVAL; + } + + chunk_refs_t objr; + int ret = chunk_read_refcount(hctx, &objr); + if (ret < 0) { + CLS_LOG(1, "ERROR: failed to read attr\n"); + return ret; + } + + // existing chunk; inc ref + CLS_LOG(10, "oid=%s\n", op.source.oid.name.c_str()); + + objr.get(op.source); + + ret = chunk_set_refcount(hctx, objr); + if (ret < 0) { + return ret; + } + return 0; +} + +static int chunk_put_ref(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + + cls_cas_chunk_put_ref_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: failed to decode entry\n"); + return -EINVAL; + } + + chunk_refs_t objr; + int ret = chunk_read_refcount(hctx, &objr); + if (ret < 0) + return ret; + + if (!objr.put(op.source)) { + CLS_LOG(10, "oid=%s (no ref)\n", op.source.oid.name.c_str()); + return -ENOLINK; + } + + if (objr.empty()) { + CLS_LOG(10, "oid=%s (last ref)\n", op.source.oid.name.c_str()); + return cls_cxx_remove(hctx); + } + + CLS_LOG(10, "oid=%s (dec)\n", op.source.oid.name.c_str()); + 
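// other refs remain, so persist the decremented refcount instead of removing the object +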
ret = chunk_set_refcount(hctx, objr); + if (ret < 0) + return ret; + + return 0; +} + +static int references_chunk(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + std::string fp_oid; + bufferlist indata, outdata; + try { + decode (fp_oid, in_iter); + } + catch (ceph::buffer::error& e) { + return -EINVAL; + } + CLS_LOG(10, "fp_oid: %s \n", fp_oid.c_str()); + + int ret = cls_get_manifest_ref_count(hctx, fp_oid); + if (ret) { + return ret; + } + return -ENOLINK; +} + +CLS_INIT(cas) +{ + CLS_LOG(1, "Loaded cas class!"); + + cls_handle_t h_class; + cls_method_handle_t h_chunk_create_or_get_ref; + cls_method_handle_t h_chunk_get_ref; + cls_method_handle_t h_chunk_put_ref; + cls_method_handle_t h_references_chunk; + + cls_register("cas", &h_class); + + cls_register_cxx_method(h_class, "chunk_create_or_get_ref", + CLS_METHOD_RD | CLS_METHOD_WR, + chunk_create_or_get_ref, + &h_chunk_create_or_get_ref); + cls_register_cxx_method(h_class, "chunk_get_ref", + CLS_METHOD_RD | CLS_METHOD_WR, + chunk_get_ref, + &h_chunk_get_ref); + cls_register_cxx_method(h_class, "chunk_put_ref", + CLS_METHOD_RD | CLS_METHOD_WR, + chunk_put_ref, + &h_chunk_put_ref); + cls_register_cxx_method(h_class, "references_chunk", CLS_METHOD_RD, + references_chunk, + &h_references_chunk); + + return; +} + diff --git a/src/cls/cas/cls_cas_client.cc b/src/cls/cas/cls_cas_client.cc new file mode 100644 index 000000000..085d9e52a --- /dev/null +++ b/src/cls/cas/cls_cas_client.cc @@ -0,0 +1,65 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include <errno.h> + +#include "cls/cas/cls_cas_client.h" +#include "cls/cas/cls_cas_ops.h" +#include "include/rados/librados.hpp" + +using std::set; +using std::string; + +using ceph::bufferlist; +using ceph::decode; +using ceph::encode; + +void cls_cas_chunk_create_or_get_ref( + librados::ObjectWriteOperation& op, + const hobject_t& soid, + const bufferlist& data, + bool verify) +{ + bufferlist in; + cls_cas_chunk_create_or_get_ref_op call; + call.source = soid; + if (verify) { + call.flags |= cls_cas_chunk_create_or_get_ref_op::FLAG_VERIFY; + } + call.data = data; + encode(call, in); + op.exec("cas", "chunk_create_or_get_ref", in); +} + +void cls_cas_chunk_get_ref( + librados::ObjectWriteOperation& op, + const hobject_t& soid) +{ + bufferlist in; + cls_cas_chunk_get_ref_op call; + call.source = soid; + encode(call, in); + op.exec("cas", "chunk_get_ref", in); +} + +void cls_cas_chunk_put_ref( + librados::ObjectWriteOperation& op, + const hobject_t& soid) +{ + bufferlist in; + cls_cas_chunk_put_ref_op call; + call.source = soid; + encode(call, in); + op.exec("cas", "chunk_put_ref", in); +} + +int cls_cas_references_chunk( + librados::IoCtx& io_ctx, + const string& oid, + const string& chunk_oid) +{ + bufferlist in, out; + encode(chunk_oid, in); + int r = io_ctx.exec(oid, "cas", "references_chunk", in, out); + return r; +} diff --git a/src/cls/cas/cls_cas_client.h b/src/cls/cas/cls_cas_client.h new file mode 100644 index 000000000..0abbf045b --- /dev/null +++ b/src/cls/cas/cls_cas_client.h @@ -0,0 +1,43 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_CAS_CLIENT_H +#define CEPH_CLS_CAS_CLIENT_H + +#include "include/types.h" +#include "include/rados/librados_fwd.hpp" +#include "common/hobject.h" + +// +// basic methods +// + +/// create a chunk, or get additional reference if it already exists +void 
cls_cas_chunk_create_or_get_ref( + librados::ObjectWriteOperation& op, + const hobject_t& soid, + const bufferlist& data, + bool verify=false); + +/// get ref on existing chunk +void cls_cas_chunk_get_ref( + librados::ObjectWriteOperation& op, + const hobject_t& soid); + +/// drop reference on existing chunk +void cls_cas_chunk_put_ref( + librados::ObjectWriteOperation& op, + const hobject_t& soid); + + +// +// advanced (used for scrub, repair, etc.) +// + +/// check if a tiered rados object links to a chunk +int cls_cas_references_chunk( + librados::IoCtx& io_ctx, + const std::string& oid, + const std::string& chunk_oid); + +#endif diff --git a/src/cls/cas/cls_cas_internal.cc b/src/cls/cas/cls_cas_internal.cc new file mode 100644 index 000000000..891b1c311 --- /dev/null +++ b/src/cls/cas/cls_cas_internal.cc @@ -0,0 +1,136 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "cls_cas_internal.h" + + +chunk_refs_t::chunk_refs_t(const chunk_refs_t& other) +{ + *this = other; +} + +chunk_refs_t& chunk_refs_t::operator=(const chunk_refs_t& other) +{ + // this is inefficient, but easy. + bufferlist bl; + other.encode(bl); + auto p = bl.cbegin(); + decode(p); + return *this; +} + +void chunk_refs_t::clear() +{ + // default to most precise impl + r.reset(new chunk_refs_by_object_t); +} + + +void chunk_refs_t::encode(ceph::buffer::list& bl) const +{ + bufferlist t; + _encode_r(t); + _encode_final(bl, t); +} + +void chunk_refs_t::_encode_r(ceph::bufferlist& bl) const +{ + using ceph::encode; + switch (r->get_type()) { + case TYPE_BY_OBJECT: + encode(*(chunk_refs_by_object_t*)r.get(), bl); + break; + case TYPE_BY_HASH: + encode(*(chunk_refs_by_hash_t*)r.get(), bl); + break; + case TYPE_BY_POOL: + encode(*(chunk_refs_by_pool_t*)r.get(), bl); + break; + case TYPE_COUNT: + encode(*(chunk_refs_count_t*)r.get(), bl); + break; + default: + ceph_abort("unrecognized ref type"); + } +} + +void chunk_refs_t::dynamic_encode(ceph::buffer::list& bl, size_t max) +{ + bufferlist t; + while (true) { + _encode_r(t); + // account for the additional overhead in _encode_final + if (t.length() + 8 <= max) { + break; + } + // downgrade resolution + std::unique_ptr<refs_t> n; + switch (r->get_type()) { + case TYPE_BY_OBJECT: + r.reset(new chunk_refs_by_hash_t( + static_cast<chunk_refs_by_object_t*>(r.get()))); + break; + case TYPE_BY_HASH: + if (!static_cast<chunk_refs_by_hash_t*>(r.get())->shrink()) { + r.reset(new chunk_refs_by_pool_t( + static_cast<chunk_refs_by_hash_t*>(r.get()))); + } + break; + case TYPE_BY_POOL: + r.reset(new chunk_refs_count_t(r.get())); + break; + } + t.clear(); + } + _encode_final(bl, t); +} + +void chunk_refs_t::_encode_final(bufferlist& bl, bufferlist& t) const +{ + ENCODE_START(1, 1, bl); + encode(r->get_type(), bl); + bl.claim_append(t); + ENCODE_FINISH(bl); +} + +void chunk_refs_t::decode(ceph::buffer::list::const_iterator& p) +{ + DECODE_START(1, p); + uint8_t t; + decode(t, p); + switch (t) { + case TYPE_BY_OBJECT: + { + auto n = new chunk_refs_by_object_t(); + decode(*n, p); + r.reset(n); + } + break; + case TYPE_BY_HASH: + { + auto n = new chunk_refs_by_hash_t(); + decode(*n, p); + r.reset(n); + } + break; + case TYPE_BY_POOL: + { + auto n = new chunk_refs_by_pool_t(); + decode(*n, p); + r.reset(n); + } + break; + case TYPE_COUNT: + { + auto n = new chunk_refs_count_t(); + decode(*n, p); + r.reset(n); + } + break; + default: + throw ceph::buffer::malformed_input( + std::string("unrecognized chunk ref encoding type ") + + 
stringify((int)t)); + } + DECODE_FINISH(p); +} diff --git a/src/cls/cas/cls_cas_internal.h b/src/cls/cas/cls_cas_internal.h new file mode 100644 index 000000000..09e7f9f1f --- /dev/null +++ b/src/cls/cas/cls_cas_internal.h @@ -0,0 +1,391 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#pragma once + +#include <string> + +#include "boost/variant.hpp" + +#include "include/stringify.h" +#include "common/Formatter.h" +#include "common/hobject.h" + +#define CHUNK_REFCOUNT_ATTR "chunk_refs" + + +// public type + +struct chunk_refs_t { + enum { + TYPE_BY_OBJECT = 1, + TYPE_BY_HASH = 2, + TYPE_BY_PARTIAL = 3, + TYPE_BY_POOL = 4, + TYPE_COUNT = 5, + }; + static const char *type_name(int t) { + switch (t) { + case TYPE_BY_OBJECT: return "by_object"; + case TYPE_BY_HASH: return "by_hash"; + case TYPE_BY_POOL: return "by_pool"; + case TYPE_COUNT: return "count"; + default: return "???"; + } + } + + struct refs_t { + virtual ~refs_t() {} + virtual uint8_t get_type() const = 0; + virtual bool empty() const = 0; + virtual uint64_t count() const = 0; + virtual void get(const hobject_t& o) = 0; + virtual bool put(const hobject_t& o) = 0; + virtual void dump(Formatter *f) const = 0; + virtual std::string describe_encoding() const { + return type_name(get_type()); + } + }; + + std::unique_ptr<refs_t> r; + + chunk_refs_t() { + clear(); + } + chunk_refs_t(const chunk_refs_t& other); + + chunk_refs_t& operator=(const chunk_refs_t&); + + void clear(); + + int get_type() const { + return r->get_type(); + } + std::string describe_encoding() const { + return r->describe_encoding(); + } + + bool empty() const { + return r->empty(); + } + uint64_t count() const { + return r->count(); + } + + void get(const hobject_t& o) { + r->get(o); + } + bool put(const hobject_t& o) { + bool ret = r->put(o); + if (r->get_type() != TYPE_BY_OBJECT && + r->count() == 0) { + clear(); // reset to full resolution, yay + } + return ret; + } + + void _encode_r(bufferlist& bl) const; + void _encode_final(bufferlist& bl, bufferlist& t) const; + void dynamic_encode(ceph::buffer::list& bl, size_t max); + void encode(ceph::buffer::list& bl) const; + void decode(ceph::buffer::list::const_iterator& p); + + void dump(Formatter *f) const { + r->dump(f); + } + static void generate_test_instances(std::list<chunk_refs_t*>& ls) { + ls.push_back(new chunk_refs_t()); + } +}; +WRITE_CLASS_ENCODER(chunk_refs_t) + + +// encoding specific types +// these are internal and should generally not be used directly + +struct chunk_refs_by_object_t : public chunk_refs_t::refs_t { + std::multiset<hobject_t> by_object; + + uint8_t get_type() const { + return chunk_refs_t::TYPE_BY_OBJECT; + } + bool empty() const override { + return by_object.empty(); + } + uint64_t count() const override { + return by_object.size(); + } + void get(const hobject_t& o) override { + by_object.insert(o); + } + bool put(const hobject_t& o) override { + auto p = by_object.find(o); + if (p == by_object.end()) { + return false; + } + by_object.erase(p); + return true; + } + void encode(bufferlist& bl) const { + ENCODE_START(1, 1, bl); + encode(by_object, bl); + ENCODE_FINISH(bl); + } + void decode(bufferlist::const_iterator& p) { + DECODE_START(1, p); + decode(by_object, p); + DECODE_FINISH(p); + } + void dump(Formatter *f) const override { + f->dump_string("type", "by_object"); + f->dump_unsigned("count", by_object.size()); + f->open_array_section("refs"); + for (auto& i : by_object) { + f->dump_object("ref", i); + } + 
f->close_section();
+  }
+};
+WRITE_CLASS_ENCODER(chunk_refs_by_object_t)
+
+struct chunk_refs_by_hash_t : public chunk_refs_t::refs_t {
+  uint64_t total = 0;
+  uint32_t hash_bits = 32;  ///< how many bits of mask to encode
+  std::map<std::pair<int64_t,uint32_t>,uint64_t> by_hash;
+
+  chunk_refs_by_hash_t() {}
+  chunk_refs_by_hash_t(const chunk_refs_by_object_t *o) {
+    total = o->count();
+    for (auto& i : o->by_object) {
+      by_hash[std::make_pair(i.pool, i.get_hash())]++;
+    }
+  }
+
+  std::string describe_encoding() const {
+    using namespace std::literals;
+    return "by_hash("s + stringify(hash_bits) + " bits)";
+  }
+
+  uint32_t mask() {
+    // with the hobject_t reverse-bitwise sort, the least significant
+    // hash values are actually the most significant, so preserve them
+    // as we lose resolution.
+    return 0xffffffff >> (32 - hash_bits);
+  }
+
+  bool shrink() {
+    if (hash_bits <= 1) {
+      return false;
+    }
+    hash_bits--;
+    std::map<std::pair<int64_t,uint32_t>,uint64_t> old;
+    old.swap(by_hash);
+    auto m = mask();
+    for (auto& i : old) {
+      // buckets that collapse onto the same masked key must be summed,
+      // not overwritten, or we would silently drop references
+      by_hash[std::make_pair(i.first.first, i.first.second & m)] += i.second;
+    }
+    return true;
+  }
+
+  uint8_t get_type() const {
+    return chunk_refs_t::TYPE_BY_HASH;
+  }
+  bool empty() const override {
+    return by_hash.empty();
+  }
+  uint64_t count() const override {
+    return total;
+  }
+  void get(const hobject_t& o) override {
+    by_hash[std::make_pair(o.pool, o.get_hash() & mask())]++;
+    ++total;
+  }
+  bool put(const hobject_t& o) override {
+    auto p = by_hash.find(std::make_pair(o.pool, o.get_hash() & mask()));
+    if (p == by_hash.end()) {
+      return false;
+    }
+    if (--p->second == 0) {
+      by_hash.erase(p);
+    }
+    --total;
+    return true;
+  }
+  DENC_HELPERS
+  void bound_encode(size_t& p) const {
+    p += 6 + sizeof(uint64_t) + by_hash.size() * (10 + 10);
+  }
+  void encode(::ceph::buffer::list::contiguous_appender& p) const {
+    DENC_START(1, 1, p);
+    denc_varint(total, p);
+    denc_varint(hash_bits, p);
+    denc_varint(by_hash.size(), p);
+    int hash_bytes = (hash_bits + 7) / 8;
+    for (auto& i : by_hash) {
+      denc_signed_varint(i.first.first, p);
+      // this may write some bytes past where we move the cursor to; harmless!
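+      // (the map key holds a full uint32_t, but only the low hash_bits
+      // survive mask(); ceph_le32 is little-endian, so the hash_bytes
+      // low-order bytes written first are exactly the ones we keep, and
+      // anything spilled past the cursor is overwritten by the next append
+      // or falls outside the final buffer length)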
+      *(ceph_le32*)p.get_pos_add(hash_bytes) = i.first.second;
+      denc_varint(i.second, p);
+    }
+    DENC_FINISH(p);
+  }
+  void decode(::ceph::buffer::ptr::const_iterator& p) {
+    DENC_START(1, 1, p);
+    denc_varint(total, p);
+    denc_varint(hash_bits, p);
+    uint64_t n;
+    denc_varint(n, p);
+    int hash_bytes = (hash_bits + 7) / 8;
+    while (n--) {
+      int64_t poolid;
+      ceph_le32 hash;
+      uint64_t count;
+      denc_signed_varint(poolid, p);
+      // zero first: when hash_bits < 32 we copy only hash_bytes bytes, and
+      // the high-order bytes must not be left uninitialized
+      hash = 0;
+      memcpy(&hash, p.get_pos_add(hash_bytes), hash_bytes);
+      denc_varint(count, p);
+      by_hash[std::make_pair(poolid, (uint32_t)hash)] = count;
+    }
+    DENC_FINISH(p);
+  }
+  void dump(Formatter *f) const override {
+    f->dump_string("type", "by_hash");
+    f->dump_unsigned("count", total);
+    f->dump_unsigned("hash_bits", hash_bits);
+    f->open_array_section("refs");
+    for (auto& i : by_hash) {
+      f->open_object_section("hash");
+      f->dump_int("pool", i.first.first);
+      f->dump_unsigned("hash", i.first.second);
+      f->dump_unsigned("count", i.second);
+      f->close_section();
+    }
+    f->close_section();
+  }
+};
+WRITE_CLASS_DENC(chunk_refs_by_hash_t)
+
+struct chunk_refs_by_pool_t : public chunk_refs_t::refs_t {
+  uint64_t total = 0;
+  std::map<int64_t,uint64_t> by_pool;
+
+  chunk_refs_by_pool_t() {}
+  chunk_refs_by_pool_t(const chunk_refs_by_hash_t *o) {
+    total = o->count();
+    for (auto& i : o->by_hash) {
+      by_pool[i.first.first] += i.second;
+    }
+  }
+
+  uint8_t get_type() const {
+    return chunk_refs_t::TYPE_BY_POOL;
+  }
+  bool empty() const override {
+    return by_pool.empty();
+  }
+  uint64_t count() const override {
+    return total;
+  }
+  void get(const hobject_t& o) override {
+    ++by_pool[o.pool];
+    ++total;
+  }
+  bool put(const hobject_t& o) override {
+    auto p = by_pool.find(o.pool);
+    if (p == by_pool.end()) {
+      return false;
+    }
+    --p->second;
+    if (p->second == 0) {
+      by_pool.erase(p);
+    }
+    --total;
+    return true;
+  }
+  void bound_encode(size_t& p) const {
+    p += 6 + sizeof(uint64_t) + by_pool.size() * (9 + 9);
+  }
+  DENC_HELPERS
+  void encode(::ceph::buffer::list::contiguous_appender& p) const {
+    DENC_START(1, 1, p);
+    denc_varint(total, p);
+    denc_varint(by_pool.size(), p);
+    for (auto& i : by_pool) {
+      denc_signed_varint(i.first, p);
+      denc_varint(i.second, p);
+    }
+    DENC_FINISH(p);
+  }
+  void decode(::ceph::buffer::ptr::const_iterator& p) {
+    DENC_START(1, 1, p);
+    denc_varint(total, p);
+    uint64_t n;
+    denc_varint(n, p);
+    while (n--) {
+      int64_t poolid;
+      uint64_t count;
+      denc_signed_varint(poolid, p);
+      denc_varint(count, p);
+      by_pool[poolid] = count;
+    }
+    DENC_FINISH(p);
+  }
+  void dump(Formatter *f) const override {
+    f->dump_string("type", "by_pool");
+    f->dump_unsigned("count", total);
+    f->open_array_section("pools");
+    for (auto& i : by_pool) {
+      f->open_object_section("pool");
+      f->dump_unsigned("pool_id", i.first);
+      f->dump_unsigned("count", i.second);
+      f->close_section();
+    }
+    f->close_section();
+  }
+};
+WRITE_CLASS_DENC(chunk_refs_by_pool_t)
+
+
+struct chunk_refs_count_t : public chunk_refs_t::refs_t {
+  uint64_t total = 0;
+
+  chunk_refs_count_t() {}
+  chunk_refs_count_t(const refs_t *old) {
+    total = old->count();
+  }
+
+  uint8_t get_type() const {
+    return chunk_refs_t::TYPE_COUNT;
+  }
+  bool empty() const override {
+    return total == 0;
+  }
+  uint64_t count() const override {
+    return total;
+  }
+  void get(const hobject_t& o) override {
+    ++total;
+  }
+  bool put(const hobject_t& o) override {
+    if (!total) {
+      return false;
+    }
+    --total;
+    return true;
+  }
+  void encode(bufferlist& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(total, bl);
+
ENCODE_FINISH(bl); + } + void decode(bufferlist::const_iterator& p) { + DECODE_START(1, p); + decode(total, p); + DECODE_FINISH(p); + } + void dump(Formatter *f) const override { + f->dump_string("type", "count"); + f->dump_unsigned("count", total); + } +}; +WRITE_CLASS_ENCODER(chunk_refs_count_t) + diff --git a/src/cls/cas/cls_cas_ops.h b/src/cls/cas/cls_cas_ops.h new file mode 100644 index 000000000..a79013f0e --- /dev/null +++ b/src/cls/cas/cls_cas_ops.h @@ -0,0 +1,101 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_CAS_OPS_H +#define CEPH_CLS_CAS_OPS_H + +#include "include/types.h" +#include "common/hobject.h" +#include "common/Formatter.h" + +struct cls_cas_chunk_create_or_get_ref_op { + enum { + FLAG_VERIFY = 1, // verify content bit-for-bit if chunk already exists + }; + + hobject_t source; + uint64_t flags = 0; + bufferlist data; + + cls_cas_chunk_create_or_get_ref_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(source, bl); + encode(flags, bl); + encode(data, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(source, bl); + decode(flags, bl); + decode(data, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const { + f->dump_object("source", source); + f->dump_unsigned("flags", flags); + f->dump_unsigned("data_len", data.length()); + } + static void generate_test_instances(std::list<cls_cas_chunk_create_or_get_ref_op*>& ls) { + ls.push_back(new cls_cas_chunk_create_or_get_ref_op()); + } +}; +WRITE_CLASS_ENCODER(cls_cas_chunk_create_or_get_ref_op) + + +struct cls_cas_chunk_get_ref_op { + hobject_t source; + + cls_cas_chunk_get_ref_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(source, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(source, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const { + f->dump_object("source", source); + } + static void generate_test_instances(std::list<cls_cas_chunk_get_ref_op*>& ls) { + ls.push_back(new cls_cas_chunk_get_ref_op()); + } +}; +WRITE_CLASS_ENCODER(cls_cas_chunk_get_ref_op) + + +struct cls_cas_chunk_put_ref_op { + hobject_t source; + + cls_cas_chunk_put_ref_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(source, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(source, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const { + f->dump_object("source", source); + } + static void generate_test_instances(std::list<cls_cas_chunk_put_ref_op*>& ls) { + ls.push_back(new cls_cas_chunk_put_ref_op()); + } +}; +WRITE_CLASS_ENCODER(cls_cas_chunk_put_ref_op) + +#endif diff --git a/src/cls/cephfs/cls_cephfs.cc b/src/cls/cephfs/cls_cephfs.cc new file mode 100644 index 000000000..dcd07b4bb --- /dev/null +++ b/src/cls/cephfs/cls_cephfs.cc @@ -0,0 +1,214 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2015 Red Hat + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ *
+ */
+
+
+#include <string>
+#include <errno.h>
+
+#include "objclass/objclass.h"
+#include "osd/osd_types.h"
+
+#include "cls_cephfs.h"
+
+CLS_VER(1,0)
+CLS_NAME(cephfs)
+
+using ceph::bufferlist;
+using ceph::decode;
+using ceph::encode;
+
+std::ostream &operator<<(std::ostream &out, const ObjCeiling &in)
+{
+  out << "id: " << in.id << " size: " << in.size;
+  return out;
+}
+
+
+/**
+ * Set a named xattr to a given value, if and only if the xattr
+ * is not already set to an equal or greater value.
+ *
+ * If the xattr is missing, then it is set to the input integer.
+ *
+ * @param xattr_name: name of xattr to compare against and set
+ * @param input_val: candidate new value, of encode()'able type
+ * @returns 0 on success (irrespective of whether our new value
+ *          was used) else an error code
+ */
+template <typename A>
+static int set_if_greater(cls_method_context_t hctx,
+    const std::string &xattr_name, const A input_val)
+{
+  bufferlist existing_val_bl;
+
+  bool set_val = false;
+  int r = cls_cxx_getxattr(hctx, xattr_name.c_str(), &existing_val_bl);
+  if (r == -ENOENT || existing_val_bl.length() == 0) {
+    set_val = true;
+  } else if (r >= 0) {
+    auto existing_p = existing_val_bl.cbegin();
+    try {
+      A existing_val;
+      decode(existing_val, existing_p);
+      if (!existing_p.end()) {
+        // Trailing junk?  Consider it invalid and overwrite
+        set_val = true;
+      } else {
+        // Valid existing value, do comparison
+        set_val = input_val > existing_val;
+      }
+    } catch (const ceph::buffer::error &err) {
+      // Corrupt or empty existing value, overwrite it
+      set_val = true;
+    }
+  } else {
+    return r;
+  }
+
+  // Conditionally set the new xattr
+  if (set_val) {
+    bufferlist set_bl;
+    encode(input_val, set_bl);
+    return cls_cxx_setxattr(hctx, xattr_name.c_str(), &set_bl);
+  } else {
+    return 0;
+  }
+}
+
+static int accumulate_inode_metadata(cls_method_context_t hctx,
+                                     bufferlist *in, bufferlist *out)
+{
+  ceph_assert(in != NULL);
+  ceph_assert(out != NULL);
+
+  int r = 0;
+
+  // Decode `in`
+  auto q = in->cbegin();
+  AccumulateArgs args;
+  try {
+    args.decode(q);
+  } catch (const ceph::buffer::error &err) {
+    return -EINVAL;
+  }
+
+  ObjCeiling ceiling(args.obj_index, args.obj_size);
+  r = set_if_greater(hctx, args.obj_xattr_name, ceiling);
+  if (r < 0) {
+    return r;
+  }
+
+  r = set_if_greater(hctx, args.mtime_xattr_name, args.mtime);
+  if (r < 0) {
+    return r;
+  }
+
+  r = set_if_greater(hctx, args.obj_size_xattr_name, args.obj_size);
+  if (r < 0) {
+    return r;
+  }
+
+  return 0;
+}
+
+// I want to select objects that have a name ending 00000000
+// and an xattr (scrub_tag) not equal to a specific value.
+// This is such a special case that we can't really pretend it's
+// generic, so just fess up and call this the cephfs filter.
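+//
+// For orientation only, a hedged client-side sketch of how this filter is
+// meant to be driven (it assumes a librados IoCtx on the data pool and the
+// filtered nobjects_begin() overload; "some_tag" is a placeholder value).
+// The filter bufferlist comes from ClsCephFSClient::build_tag_filter():
+//
+//   bufferlist filter_bl;
+//   ClsCephFSClient::build_tag_filter("some_tag", &filter_bl);
+//   for (auto i = ioctx.nobjects_begin(filter_bl);
+//        i != ioctx.nobjects_end(); ++i) {
+//     // yields only *.00000000 objects whose _scrub_tag xattr does not
+//     // already decode to "some_tag"
+//   }
+//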
+class PGLSCephFSFilter : public PGLSFilter { +protected: + std::string scrub_tag; +public: + int init(bufferlist::const_iterator& params) override { + try { + InodeTagFilterArgs args; + args.decode(params); + scrub_tag = args.scrub_tag; + } catch (ceph::buffer::error &e) { + return -EINVAL; + } + + if (scrub_tag.empty()) { + xattr = ""; + } else { + xattr = "_scrub_tag"; + } + + return 0; + } + + ~PGLSCephFSFilter() override {} + bool reject_empty_xattr() const override { return false; } + bool filter(const hobject_t& obj, + const bufferlist& xattr_data) const override; +}; + +bool PGLSCephFSFilter::filter(const hobject_t &obj, + const bufferlist& xattr_data) const +{ + const std::string need_ending = ".00000000"; + const std::string &obj_name = obj.oid.name; + + if (obj_name.length() < need_ending.length()) { + return false; + } + + const bool match = obj_name.compare (obj_name.length() - need_ending.length(), need_ending.length(), need_ending) == 0; + if (!match) { + return false; + } + + if (!scrub_tag.empty() && xattr_data.length() > 0) { + std::string tag_ondisk; + auto q = xattr_data.cbegin(); + try { + decode(tag_ondisk, q); + if (tag_ondisk == scrub_tag) + return false; + } catch (const ceph::buffer::error &err) { + } + } + + return true; +} + +PGLSFilter *inode_tag_filter() +{ + return new PGLSCephFSFilter(); +} + +/** + * initialize class + * + * We do two things here: we register the new class, and then register + * all of the class's methods. + */ +CLS_INIT(cephfs) +{ + // this log message, at level 0, will always appear in the ceph-osd + // log file. + CLS_LOG(0, "loading cephfs"); + + cls_handle_t h_class; + cls_method_handle_t h_accumulate_inode_metadata; + + cls_register("cephfs", &h_class); + cls_register_cxx_method(h_class, "accumulate_inode_metadata", + CLS_METHOD_WR | CLS_METHOD_RD, + accumulate_inode_metadata, &h_accumulate_inode_metadata); + + // A PGLS filter + cls_register_cxx_filter(h_class, "inode_tag", inode_tag_filter); +} + diff --git a/src/cls/cephfs/cls_cephfs.h b/src/cls/cephfs/cls_cephfs.h new file mode 100644 index 000000000..8a595ae1f --- /dev/null +++ b/src/cls/cephfs/cls_cephfs.h @@ -0,0 +1,150 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2015 Red Hat + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ + +#include "include/encoding.h" + +/** + * Value class for the xattr we'll use to accumulate + * the highest object seen for a given inode + */ +class ObjCeiling { + public: + uint64_t id; + uint64_t size; + + ObjCeiling() + : id(0), size(0) + {} + + ObjCeiling(uint64_t id_, uint64_t size_) + : id(id_), size(size_) + {} + + bool operator >(ObjCeiling const &rhs) const + { + return id > rhs.id; + } + + void encode(ceph::buffer::list &bl) const + { + ENCODE_START(1, 1, bl); + encode(id, bl); + encode(size, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator &p) + { + DECODE_START(1, p); + decode(id, p); + decode(size, p); + DECODE_FINISH(p); + } +}; +WRITE_CLASS_ENCODER(ObjCeiling) + +class AccumulateArgs +{ +public: + uint64_t obj_index; + uint64_t obj_size; + int64_t mtime; + std::string obj_xattr_name; + std::string mtime_xattr_name; + std::string obj_size_xattr_name; + + AccumulateArgs( + uint64_t obj_index_, + uint64_t obj_size_, + time_t mtime_, + const std::string &obj_xattr_name_, + const std::string &mtime_xattr_name_, + const std::string &obj_size_xattr_name_) + : obj_index(obj_index_), + obj_size(obj_size_), + mtime(mtime_), + obj_xattr_name(obj_xattr_name_), + mtime_xattr_name(mtime_xattr_name_), + obj_size_xattr_name(obj_size_xattr_name_) + {} + + AccumulateArgs() + : obj_index(0), obj_size(0), mtime(0) + {} + + void encode(ceph::buffer::list &bl) const + { + ENCODE_START(1, 1, bl); + encode(obj_xattr_name, bl); + encode(mtime_xattr_name, bl); + encode(obj_size_xattr_name, bl); + encode(obj_index, bl); + encode(obj_size, bl); + encode(mtime, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator &bl) + { + DECODE_START(1, bl); + decode(obj_xattr_name, bl); + decode(mtime_xattr_name, bl); + decode(obj_size_xattr_name, bl); + decode(obj_index, bl); + decode(obj_size, bl); + decode(mtime, bl); + DECODE_FINISH(bl); + } +}; + +class InodeTagFilterArgs +{ + public: + std::string scrub_tag; + + void encode(ceph::buffer::list &bl) const + { + ENCODE_START(1, 1, bl); + encode(scrub_tag, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator &bl) + { + DECODE_START(1, bl); + decode(scrub_tag, bl); + DECODE_FINISH(bl); + } +}; + +class AccumulateResult +{ +public: + // Index of the highest-indexed object seen + uint64_t ceiling_obj_index; + // Size of the highest-index object seen + uint64_t ceiling_obj_size; + // Largest object seen + uint64_t max_obj_size; + // Non-default object pool id seen + int64_t obj_pool_id; + // Highest mtime seen + int64_t max_mtime; + + AccumulateResult() + : ceiling_obj_index(0), ceiling_obj_size(0), max_obj_size(0), + obj_pool_id(-1), max_mtime(0) + {} +}; + diff --git a/src/cls/cephfs/cls_cephfs_client.cc b/src/cls/cephfs/cls_cephfs_client.cc new file mode 100644 index 000000000..00c57cc48 --- /dev/null +++ b/src/cls/cephfs/cls_cephfs_client.cc @@ -0,0 +1,221 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2015 Red Hat + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ + + + +#include "include/rados/librados.hpp" +#include "mds/CInode.h" + +#include "cls_cephfs_client.h" + +using ceph::bufferlist; +using ceph::decode; + +#define XATTR_CEILING "scan_ceiling" +#define XATTR_MAX_MTIME "scan_max_mtime" +#define XATTR_MAX_SIZE "scan_max_size" +#define XATTR_POOL_ID "scan_pool_id" + +int ClsCephFSClient::accumulate_inode_metadata( + librados::IoCtx &ctx, + inodeno_t inode_no, + const uint64_t obj_index, + const uint64_t obj_size, + const int64_t obj_pool_id, + const time_t mtime) +{ + AccumulateArgs args( + obj_index, + obj_size, + mtime, + XATTR_CEILING, + XATTR_MAX_MTIME, + XATTR_MAX_SIZE); + + // Generate 0th object name, where we will accumulate sizes/mtimes + object_t zeroth_object = InodeStore::get_object_name(inode_no, frag_t(), ""); + + // Construct a librados operation invoking our class method + librados::ObjectWriteOperation op; + bufferlist inbl; + args.encode(inbl); + op.exec("cephfs", "accumulate_inode_metadata", inbl); + + if (obj_pool_id != -1) { + bufferlist bl; + encode(obj_pool_id, bl); + op.setxattr(XATTR_POOL_ID, bl); + } + + // Execute op + return ctx.operate(zeroth_object.name, &op); +} + +int ClsCephFSClient::delete_inode_accumulate_result( + librados::IoCtx &ctx, + const std::string &oid) +{ + librados::ObjectWriteOperation op; + + // Remove xattrs from object + // + op.rmxattr(XATTR_CEILING); + op.rmxattr(XATTR_MAX_SIZE); + op.rmxattr(XATTR_MAX_MTIME); + op.rmxattr(XATTR_POOL_ID); + op.set_op_flags2(librados::OP_FAILOK); + + return (ctx.operate(oid, &op)); +} + +int ClsCephFSClient::fetch_inode_accumulate_result( + librados::IoCtx &ctx, + const std::string &oid, + inode_backtrace_t *backtrace, + file_layout_t *layout, + std::string *symlink, + AccumulateResult *result) +{ + ceph_assert(backtrace != NULL); + ceph_assert(result != NULL); + + librados::ObjectReadOperation op; + + int scan_ceiling_r = 0; + bufferlist scan_ceiling_bl; + op.getxattr(XATTR_CEILING, &scan_ceiling_bl, &scan_ceiling_r); + + int scan_max_size_r = 0; + bufferlist scan_max_size_bl; + op.getxattr(XATTR_MAX_SIZE, &scan_max_size_bl, &scan_max_size_r); + + int scan_max_mtime_r = 0; + bufferlist scan_max_mtime_bl; + op.getxattr(XATTR_MAX_MTIME, &scan_max_mtime_bl, &scan_max_mtime_r); + + int scan_pool_id_r = 0; + bufferlist scan_pool_id_bl; + op.getxattr(XATTR_POOL_ID, &scan_pool_id_bl, &scan_pool_id_r); + op.set_op_flags2(librados::OP_FAILOK); + + int parent_r = 0; + bufferlist parent_bl; + op.getxattr("parent", &parent_bl, &parent_r); + op.set_op_flags2(librados::OP_FAILOK); + + int layout_r = 0; + bufferlist layout_bl; + op.getxattr("layout", &layout_bl, &layout_r); + op.set_op_flags2(librados::OP_FAILOK); + + int symlink_r = 0; + bufferlist symlink_bl; + op.getxattr("symlink", &symlink_bl, &symlink_r); + op.set_op_flags2(librados::OP_FAILOK); + + bufferlist op_bl; + int r = ctx.operate(oid, &op, &op_bl); + if (r < 0) { + return r; + } + + // Load scan_ceiling + try { + auto scan_ceiling_bl_iter = scan_ceiling_bl.cbegin(); + ObjCeiling ceiling; + ceiling.decode(scan_ceiling_bl_iter); + result->ceiling_obj_index = ceiling.id; + result->ceiling_obj_size = ceiling.size; + } catch (const ceph::buffer::error &err) { + //dout(4) << "Invalid ceiling attr on '" << oid << "'" << dendl; + return -EINVAL; + } + + // Load scan_max_size + try { + auto scan_max_size_bl_iter = scan_max_size_bl.cbegin(); + decode(result->max_obj_size, scan_max_size_bl_iter); + } catch (const ceph::buffer::error &err) { + //dout(4) << "Invalid size attr on '" << oid << "'" << dendl; + 
return -EINVAL; + } + + // Load scan_pool_id + if (scan_pool_id_bl.length()) { + try { + auto scan_pool_id_bl_iter = scan_pool_id_bl.cbegin(); + decode(result->obj_pool_id, scan_pool_id_bl_iter); + } catch (const ceph::buffer::error &err) { + //dout(4) << "Invalid pool_id attr on '" << oid << "'" << dendl; + return -EINVAL; + } + } + + // Load scan_max_mtime + try { + auto scan_max_mtime_bl_iter = scan_max_mtime_bl.cbegin(); + decode(result->max_mtime, scan_max_mtime_bl_iter); + } catch (const ceph::buffer::error &err) { + //dout(4) << "Invalid mtime attr on '" << oid << "'" << dendl; + return -EINVAL; + } + + // Deserialize backtrace + if (parent_bl.length()) { + try { + auto q = parent_bl.cbegin(); + backtrace->decode(q); + } catch (ceph::buffer::error &e) { + //dout(4) << "Corrupt backtrace on '" << oid << "': " << e << dendl; + return -EINVAL; + } + } + + // Deserialize layout + if (layout_bl.length()) { + try { + auto q = layout_bl.cbegin(); + decode(*layout, q); + } catch (ceph::buffer::error &e) { + return -EINVAL; + } + } + + // Deserialize symlink + if (symlink_bl.length()) { + try { + auto q = symlink_bl.cbegin(); + decode(*symlink, q); + } catch (ceph::buffer::error &e) { + return -EINVAL; + } + } + + return 0; +} + +void ClsCephFSClient::build_tag_filter( + const std::string &scrub_tag, + bufferlist *out_bl) +{ + ceph_assert(out_bl != NULL); + + // Leading part of bl is un-versioned string naming the filter + encode(std::string("cephfs.inode_tag"), *out_bl); + + // Filter-specific part of the bl: in our case this is a versioned structure + InodeTagFilterArgs args; + args.scrub_tag = scrub_tag; + args.encode(*out_bl); +} diff --git a/src/cls/cephfs/cls_cephfs_client.h b/src/cls/cephfs/cls_cephfs_client.h new file mode 100644 index 000000000..5c33f77f2 --- /dev/null +++ b/src/cls/cephfs/cls_cephfs_client.h @@ -0,0 +1,36 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "include/rados/librados_fwd.hpp" +#include "mds/mdstypes.h" +#include "cls_cephfs.h" + +class AccumulateArgs; + +class ClsCephFSClient +{ + public: + static int accumulate_inode_metadata( + librados::IoCtx &ctx, + inodeno_t inode_no, + const uint64_t obj_index, + const uint64_t obj_size, + const int64_t obj_pool_id, + const time_t mtime); + + static int fetch_inode_accumulate_result( + librados::IoCtx &ctx, + const std::string &oid, + inode_backtrace_t *backtrace, + file_layout_t *layout, + std::string *symlink, + AccumulateResult *result); + + static int delete_inode_accumulate_result( + librados::IoCtx &ctx, + const std::string &oid); + + static void build_tag_filter( + const std::string &scrub_tag, + ceph::buffer::list *out_bl); +}; diff --git a/src/cls/cmpomap/CMakeLists.txt b/src/cls/cmpomap/CMakeLists.txt new file mode 100644 index 000000000..de8ca278c --- /dev/null +++ b/src/cls/cmpomap/CMakeLists.txt @@ -0,0 +1,9 @@ +add_library(cls_cmpomap SHARED server.cc) +set_target_properties(cls_cmpomap PROPERTIES + VERSION "1.0.0" + SOVERSION "1" + INSTALL_RPATH "" + CXX_VISIBILITY_PRESET hidden) +install(TARGETS cls_cmpomap DESTINATION ${cls_dir}) + +add_library(cls_cmpomap_client STATIC client.cc) diff --git a/src/cls/cmpomap/client.cc b/src/cls/cmpomap/client.cc new file mode 100644 index 000000000..e0fbbff18 --- /dev/null +++ b/src/cls/cmpomap/client.cc @@ -0,0 +1,76 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab ft=cpp + +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 
2020 Red Hat, Inc + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + */ + +#include "include/rados/librados.hpp" +#include "client.h" +#include "ops.h" + +namespace cls::cmpomap { + +int cmp_vals(librados::ObjectReadOperation& op, + Mode mode, Op comparison, ComparisonMap values, + std::optional<ceph::bufferlist> default_value) +{ + if (values.size() > max_keys) { + return -E2BIG; + } + cmp_vals_op call; + call.mode = mode; + call.comparison = comparison; + call.values = std::move(values); + call.default_value = std::move(default_value); + + bufferlist in; + encode(call, in); + op.exec("cmpomap", "cmp_vals", in); + return 0; +} + +int cmp_set_vals(librados::ObjectWriteOperation& op, + Mode mode, Op comparison, ComparisonMap values, + std::optional<ceph::bufferlist> default_value) +{ + if (values.size() > max_keys) { + return -E2BIG; + } + cmp_set_vals_op call; + call.mode = mode; + call.comparison = comparison; + call.values = std::move(values); + call.default_value = std::move(default_value); + + bufferlist in; + encode(call, in); + op.exec("cmpomap", "cmp_set_vals", in); + return 0; +} + +int cmp_rm_keys(librados::ObjectWriteOperation& op, + Mode mode, Op comparison, ComparisonMap values) +{ + if (values.size() > max_keys) { + return -E2BIG; + } + cmp_rm_keys_op call; + call.mode = mode; + call.comparison = comparison; + call.values = std::move(values); + + bufferlist in; + encode(call, in); + op.exec("cmpomap", "cmp_rm_keys", in); + return 0; +} + +} // namespace cls::cmpomap diff --git a/src/cls/cmpomap/client.h b/src/cls/cmpomap/client.h new file mode 100644 index 000000000..013d85cc7 --- /dev/null +++ b/src/cls/cmpomap/client.h @@ -0,0 +1,68 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab ft=cpp + +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2020 Red Hat, Inc + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + */ + +#pragma once + +#include <optional> +#include "include/rados/librados_fwd.hpp" +#include "types.h" + +namespace cls::cmpomap { + +/// requests with too many key comparisons will be rejected with -E2BIG +static constexpr uint32_t max_keys = 1000; + +/// process each of the omap value comparisons according to the same rules as +/// cmpxattr(), and return -ECANCELED if a comparison is unsuccessful. for +/// comparisons with Mode::U64, failure to decode an input value is reported +/// as -EINVAL, an empty stored value is compared as 0, and failure to decode +/// a stored value is reported as -EIO +[[nodiscard]] int cmp_vals(librados::ObjectReadOperation& op, + Mode mode, Op comparison, ComparisonMap values, + std::optional<ceph::bufferlist> default_value); + +/// process each of the omap value comparisons according to the same rules as +/// cmpxattr(). any key/value pairs that compare successfully are overwritten +/// with the corresponding input value. for comparisons with Mode::U64, failure +/// to decode an input value is reported as -EINVAL. 
an empty stored value is +/// compared as 0, while decode failure of a stored value is treated as an +/// unsuccessful comparison and is not reported as an error +[[nodiscard]] int cmp_set_vals(librados::ObjectWriteOperation& writeop, + Mode mode, Op comparison, ComparisonMap values, + std::optional<ceph::bufferlist> default_value); + +/// process each of the omap value comparisons according to the same rules as +/// cmpxattr(). any key/value pairs that compare successfully are removed. for +/// comparisons with Mode::U64, failure to decode an input value is reported as +/// -EINVAL. an empty stored value is compared as 0, while decode failure of a +/// stored value is treated as an unsuccessful comparison and is not reported +/// as an error +[[nodiscard]] int cmp_rm_keys(librados::ObjectWriteOperation& writeop, + Mode mode, Op comparison, ComparisonMap values); + + +// bufferlist factories for comparison values +inline ceph::bufferlist string_buffer(std::string_view value) { + ceph::bufferlist bl; + bl.append(value); + return bl; +} +inline ceph::bufferlist u64_buffer(uint64_t value) { + ceph::bufferlist bl; + using ceph::encode; + encode(value, bl); + return bl; +} + +} // namespace cls::cmpomap diff --git a/src/cls/cmpomap/ops.h b/src/cls/cmpomap/ops.h new file mode 100644 index 000000000..39b1049e8 --- /dev/null +++ b/src/cls/cmpomap/ops.h @@ -0,0 +1,100 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab ft=cpp + +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2020 Red Hat, Inc + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ */ + +#pragma once + +#include "types.h" +#include "include/encoding.h" + +namespace cls::cmpomap { + +struct cmp_vals_op { + Mode mode; + Op comparison; + ComparisonMap values; + std::optional<ceph::bufferlist> default_value; +}; + +inline void encode(const cmp_vals_op& o, ceph::bufferlist& bl, uint64_t f=0) +{ + ENCODE_START(1, 1, bl); + encode(o.mode, bl); + encode(o.comparison, bl); + encode(o.values, bl); + encode(o.default_value, bl); + ENCODE_FINISH(bl); +} + +inline void decode(cmp_vals_op& o, ceph::bufferlist::const_iterator& bl) +{ + DECODE_START(1, bl); + decode(o.mode, bl); + decode(o.comparison, bl); + decode(o.values, bl); + decode(o.default_value, bl); + DECODE_FINISH(bl); +} + +struct cmp_set_vals_op { + Mode mode; + Op comparison; + ComparisonMap values; + std::optional<ceph::bufferlist> default_value; +}; + +inline void encode(const cmp_set_vals_op& o, ceph::bufferlist& bl, uint64_t f=0) +{ + ENCODE_START(1, 1, bl); + encode(o.mode, bl); + encode(o.comparison, bl); + encode(o.values, bl); + encode(o.default_value, bl); + ENCODE_FINISH(bl); +} + +inline void decode(cmp_set_vals_op& o, ceph::bufferlist::const_iterator& bl) +{ + DECODE_START(1, bl); + decode(o.mode, bl); + decode(o.comparison, bl); + decode(o.values, bl); + decode(o.default_value, bl); + DECODE_FINISH(bl); +} + +struct cmp_rm_keys_op { + Mode mode; + Op comparison; + ComparisonMap values; +}; + +inline void encode(const cmp_rm_keys_op& o, ceph::bufferlist& bl, uint64_t f=0) +{ + ENCODE_START(1, 1, bl); + encode(o.mode, bl); + encode(o.comparison, bl); + encode(o.values, bl); + ENCODE_FINISH(bl); +} + +inline void decode(cmp_rm_keys_op& o, ceph::bufferlist::const_iterator& bl) +{ + DECODE_START(1, bl); + decode(o.mode, bl); + decode(o.comparison, bl); + decode(o.values, bl); + DECODE_FINISH(bl); +} + +} // namespace cls::cmpomap diff --git a/src/cls/cmpomap/server.cc b/src/cls/cmpomap/server.cc new file mode 100644 index 000000000..86e16d940 --- /dev/null +++ b/src/cls/cmpomap/server.cc @@ -0,0 +1,302 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab ft=cpp + +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2020 Red Hat, Inc + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ */ + +#include "objclass/objclass.h" +#include "ops.h" + +CLS_VER(1,0) +CLS_NAME(cmpomap) + +using namespace cls::cmpomap; + +// returns negative error codes or 0/1 for failed/successful comparisons +template <typename T> +static int compare_values(Op op, const T& lhs, const T& rhs) +{ + switch (op) { + case Op::EQ: return (lhs == rhs); + case Op::NE: return (lhs != rhs); + case Op::GT: return (lhs > rhs); + case Op::GTE: return (lhs >= rhs); + case Op::LT: return (lhs < rhs); + case Op::LTE: return (lhs <= rhs); + default: return -EINVAL; + } +} + +static int compare_values_u64(Op op, uint64_t lhs, const bufferlist& value) +{ + // empty values compare as 0 for backward compat + uint64_t rhs = 0; + if (value.length()) { + try { + // decode existing value as rhs + auto p = value.cbegin(); + using ceph::decode; + decode(rhs, p); + } catch (const buffer::error&) { + // failures to decode existing values are reported as EIO + return -EIO; + } + } + return compare_values(op, lhs, rhs); +} + +static int compare_value(Mode mode, Op op, const bufferlist& input, + const bufferlist& value) +{ + switch (mode) { + case Mode::String: + return compare_values(op, input, value); + case Mode::U64: + try { + // decode input value as lhs + uint64_t lhs; + auto p = input.cbegin(); + using ceph::decode; + decode(lhs, p); + return compare_values_u64(op, lhs, value); + } catch (const buffer::error&) { + // failures to decode input values are reported as EINVAL + return -EINVAL; + } + default: + return -EINVAL; + } +} + +static int cmp_vals(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + cmp_vals_op op; + try { + auto p = in->cbegin(); + decode(op, p); + } catch (const buffer::error&) { + CLS_LOG(1, "ERROR: cmp_vals(): failed to decode input"); + return -EINVAL; + } + + // collect the keys we need to read + std::set<std::string> keys; + for (const auto& kv : op.values) { + keys.insert(kv.first); + } + + // read the values for each key to compare + std::map<std::string, bufferlist> values; + int r = cls_cxx_map_get_vals_by_keys(hctx, keys, &values); + if (r < 0) { + CLS_LOG(4, "ERROR: cmp_vals() failed to read values r=%d", r); + return r; + } + + auto v = values.cbegin(); + for (const auto& [key, input] : op.values) { + bufferlist value; + if (v != values.end() && v->first == key) { + value = std::move(v->second); + ++v; + CLS_LOG(20, "cmp_vals() comparing key=%s mode=%d op=%d", + key.c_str(), (int)op.mode, (int)op.comparison); + } else if (!op.default_value) { + CLS_LOG(20, "cmp_vals() missing key=%s", key.c_str()); + return -ECANCELED; + } else { + // use optional default for missing keys + value = *op.default_value; + CLS_LOG(20, "cmp_vals() comparing missing key=%s mode=%d op=%d", + key.c_str(), (int)op.mode, (int)op.comparison); + } + + r = compare_value(op.mode, op.comparison, input, value); + if (r < 0) { + CLS_LOG(10, "cmp_vals() failed to compare key=%s r=%d", key.c_str(), r); + return r; + } + if (r == 0) { + CLS_LOG(10, "cmp_vals() comparison at key=%s returned false", key.c_str()); + return -ECANCELED; + } + CLS_LOG(20, "cmp_vals() comparison at key=%s returned true", key.c_str()); + } + + return 0; +} + +static int cmp_set_vals(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + cmp_set_vals_op op; + try { + auto p = in->cbegin(); + decode(op, p); + } catch (const buffer::error&) { + CLS_LOG(1, "ERROR: cmp_set_vals(): failed to decode input"); + return -EINVAL; + } + + // collect the keys we need to read + std::set<std::string> keys; + for (const auto& kv : op.values) 
{ + keys.insert(kv.first); + } + + // read the values for each key to compare + std::map<std::string, bufferlist> values; + int r = cls_cxx_map_get_vals_by_keys(hctx, keys, &values); + if (r < 0) { + CLS_LOG(4, "ERROR: cmp_set_vals() failed to read values r=%d", r); + return r; + } + + auto v = values.begin(); + for (const auto& [key, input] : op.values) { + auto k = values.end(); + bufferlist value; + if (v != values.end() && v->first == key) { + value = std::move(v->second); + k = v++; + CLS_LOG(20, "cmp_set_vals() comparing key=%s mode=%d op=%d", + key.c_str(), (int)op.mode, (int)op.comparison); + } else if (!op.default_value) { + CLS_LOG(20, "cmp_set_vals() missing key=%s", key.c_str()); + continue; + } else { + // use optional default for missing keys + value = *op.default_value; + CLS_LOG(20, "cmp_set_vals() comparing missing key=%s mode=%d op=%d", + key.c_str(), (int)op.mode, (int)op.comparison); + } + + r = compare_value(op.mode, op.comparison, input, value); + if (r == -EIO) { + r = 0; // treat EIO as a failed comparison + } + if (r < 0) { + CLS_LOG(10, "cmp_set_vals() failed to compare key=%s r=%d", + key.c_str(), r); + return r; + } + if (r == 0) { + // unsuccessful comparison + if (k != values.end()) { + values.erase(k); // remove this key from the values to overwrite + CLS_LOG(20, "cmp_set_vals() not overwriting key=%s", key.c_str()); + } else { + CLS_LOG(20, "cmp_set_vals() not writing missing key=%s", key.c_str()); + } + } else { + // successful comparison + if (k != values.end()) { + // overwrite the value + k->second = std::move(input); + CLS_LOG(20, "cmp_set_vals() overwriting key=%s", key.c_str()); + } else { + // insert the value + values.emplace(key, std::move(input)); + CLS_LOG(20, "cmp_set_vals() overwriting missing key=%s", key.c_str()); + } + } + } + + if (values.empty()) { + CLS_LOG(20, "cmp_set_vals() has no values to overwrite"); + return 0; + } + + CLS_LOG(20, "cmp_set_vals() overwriting count=%d", (int)values.size()); + return cls_cxx_map_set_vals(hctx, &values); +} + +static int cmp_rm_keys(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + cmp_rm_keys_op op; + try { + auto p = in->cbegin(); + decode(op, p); + } catch (const buffer::error&) { + CLS_LOG(1, "ERROR: cmp_rm_keys(): failed to decode input"); + return -EINVAL; + } + + // collect the keys we need to read + std::set<std::string> keys; + for (const auto& kv : op.values) { + keys.insert(kv.first); + } + + // read the values for each key to compare + std::map<std::string, bufferlist> values; + int r = cls_cxx_map_get_vals_by_keys(hctx, keys, &values); + if (r < 0) { + CLS_LOG(4, "ERROR: cmp_rm_keys() failed to read values r=%d", r); + return r; + } + + auto v = values.cbegin(); + for (const auto& [key, input] : op.values) { + if (v == values.end() || v->first != key) { + CLS_LOG(20, "cmp_rm_keys() missing key=%s", key.c_str()); + continue; + } + CLS_LOG(20, "cmp_rm_keys() comparing key=%s mode=%d op=%d", + key.c_str(), (int)op.mode, (int)op.comparison); + + const bufferlist& value = v->second; + ++v; + + r = compare_value(op.mode, op.comparison, input, value); + if (r == -EIO) { + r = 0; // treat EIO as a failed comparison + } + if (r < 0) { + CLS_LOG(10, "cmp_rm_keys() failed to compare key=%s r=%d", + key.c_str(), r); + return r; + } + if (r == 0) { + // unsuccessful comparison + CLS_LOG(20, "cmp_rm_keys() preserving key=%s", key.c_str()); + } else { + // successful comparison + CLS_LOG(20, "cmp_rm_keys() removing key=%s", key.c_str()); + r = cls_cxx_map_remove_key(hctx, key); + if (r < 
0) { + CLS_LOG(1, "ERROR: cmp_rm_keys() failed to remove key=%s r=%d", + key.c_str(), r); + return r; + } + } + } + + return 0; +} + +CLS_INIT(cmpomap) +{ + CLS_LOG(1, "Loaded cmpomap class!"); + + cls_handle_t h_class; + cls_method_handle_t h_cmp_vals; + cls_method_handle_t h_cmp_set_vals; + cls_method_handle_t h_cmp_rm_keys; + + cls_register("cmpomap", &h_class); + + cls_register_cxx_method(h_class, "cmp_vals", CLS_METHOD_RD, + cmp_vals, &h_cmp_vals); + cls_register_cxx_method(h_class, "cmp_set_vals", CLS_METHOD_RD | CLS_METHOD_WR, + cmp_set_vals, &h_cmp_set_vals); + cls_register_cxx_method(h_class, "cmp_rm_keys", CLS_METHOD_RD | CLS_METHOD_WR, + cmp_rm_keys, &h_cmp_rm_keys); +} diff --git a/src/cls/cmpomap/types.h b/src/cls/cmpomap/types.h new file mode 100644 index 000000000..11e39575f --- /dev/null +++ b/src/cls/cmpomap/types.h @@ -0,0 +1,44 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab ft=cpp + +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2020 Red Hat, Inc + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + */ + +#pragma once + +#include <string> +#include <boost/container/flat_map.hpp> +#include "include/rados.h" // CEPH_OSD_CMPXATTR_* +#include "include/encoding.h" + +namespace cls::cmpomap { + +/// comparison operand type +enum class Mode : uint8_t { + String = CEPH_OSD_CMPXATTR_MODE_STRING, + U64 = CEPH_OSD_CMPXATTR_MODE_U64, +}; + +/// comparison operation, where the left-hand operand is the input value and +/// the right-hand operand is the stored value (or the optional default) +enum class Op : uint8_t { + EQ = CEPH_OSD_CMPXATTR_OP_EQ, + NE = CEPH_OSD_CMPXATTR_OP_NE, + GT = CEPH_OSD_CMPXATTR_OP_GT, + GTE = CEPH_OSD_CMPXATTR_OP_GTE, + LT = CEPH_OSD_CMPXATTR_OP_LT, + LTE = CEPH_OSD_CMPXATTR_OP_LTE, +}; + +/// mapping of omap keys to value comparisons +using ComparisonMap = boost::container::flat_map<std::string, ceph::bufferlist>; + +} // namespace cls::cmpomap diff --git a/src/cls/fifo/cls_fifo.cc b/src/cls/fifo/cls_fifo.cc new file mode 100644 index 000000000..85022eeb0 --- /dev/null +++ b/src/cls/fifo/cls_fifo.cc @@ -0,0 +1,958 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +/** \file + * + * This is an OSD class that implements methods for management + * and use of fifo + * + */ + +#include <cerrno> +#include <optional> +#include <string> + +#include <fmt/format.h> + +#include "include/buffer.h" +#include "include/types.h" + +#include "objclass/objclass.h" + +#include "cls/fifo/cls_fifo_ops.h" +#include "cls/fifo/cls_fifo_types.h" + +CLS_VER(1,0) +CLS_NAME(fifo) + +namespace rados::cls::fifo { + +static constexpr auto CLS_FIFO_MAX_PART_HEADER_SIZE = 512; + +static std::uint32_t part_entry_overhead; + +struct entry_header_pre { + ceph_le64 magic; + ceph_le64 pre_size; + ceph_le64 header_size; + ceph_le64 data_size; + ceph_le64 index; + ceph_le32 reserved; +} __attribute__ ((packed)); + +struct entry_header { + ceph::real_time mtime; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(mtime, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(mtime, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(entry_header) + +namespace { + +std::string new_oid_prefix(std::string id, 
std::optional<std::string>& val) +{ + static constexpr auto PREFIX_RND_SIZE = 12; + if (val) { + return *val; + } + + char buf[PREFIX_RND_SIZE + 1]; + buf[PREFIX_RND_SIZE] = 0; + + cls_gen_rand_base64(buf, sizeof(buf) - 1); + + return fmt::format("{}.{}", id, buf); +} + +int write_header(cls_method_context_t hctx, + info& header) +{ + static constexpr auto HEADER_INSTANCE_SIZE = 16; + if (header.version.instance.empty()) { + char buf[HEADER_INSTANCE_SIZE + 1]; + buf[HEADER_INSTANCE_SIZE] = 0; + cls_gen_rand_base64(buf, sizeof(buf) - 1); + header.version.instance = buf; + } + ceph::buffer::list bl; + encode(header, bl); + return cls_cxx_write_full(hctx, &bl); +} + +int read_part_header(cls_method_context_t hctx, + part_header* part_header) +{ + ceph::buffer::list bl; + int r = cls_cxx_read2(hctx, 0, CLS_FIFO_MAX_PART_HEADER_SIZE, &bl, + CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); + if (r < 0) { + CLS_ERR("ERROR: %s: cls_cxx_read2() on obj returned %d", __PRETTY_FUNCTION__, r); + return r; + } + + auto iter = bl.cbegin(); + try { + decode(*part_header, iter); + } catch (const ceph::buffer::error& err) { + CLS_ERR("ERROR: %s: failed decoding part header", __PRETTY_FUNCTION__); + return -EIO; + } + + using ceph::operator <<; + std::ostringstream ss; + ss << part_header->max_time; + CLS_LOG(5, "%s:%d read part_header:\n" + "\tmagic=0x%" PRIx64 "\n" + "\tmin_ofs=%" PRId64 "\n" + "\tlast_ofs=%" PRId64 "\n" + "\tnext_ofs=%" PRId64 "\n" + "\tmin_index=%" PRId64 "\n" + "\tmax_index=%" PRId64 "\n" + "\tmax_time=%s\n", + __PRETTY_FUNCTION__, __LINE__, + part_header->magic, + part_header->min_ofs, + part_header->last_ofs, + part_header->next_ofs, + part_header->min_index, + part_header->max_index, + ss.str().c_str()); + + return 0; +} + +int write_part_header(cls_method_context_t hctx, + part_header& part_header) +{ + ceph::buffer::list bl; + encode(part_header, bl); + + if (bl.length() > CLS_FIFO_MAX_PART_HEADER_SIZE) { + CLS_ERR("%s: cannot write part header, buffer exceeds max size", __PRETTY_FUNCTION__); + return -EIO; + } + + int r = cls_cxx_write2(hctx, 0, bl.length(), + &bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); + if (r < 0) { + CLS_ERR("%s: failed to write part header: r=%d", + __PRETTY_FUNCTION__, r); + return r; + } + + return 0; +} + +int read_header(cls_method_context_t hctx, + std::optional<objv> objv, + info* info, bool get_info = false) +{ + std::uint64_t size; + + int r = cls_cxx_stat2(hctx, &size, nullptr); + if (r < 0) { + CLS_ERR("ERROR: %s: cls_cxx_stat2() on obj returned %d", __PRETTY_FUNCTION__, r); + return r; + } + + ceph::buffer::list bl; + r = cls_cxx_read2(hctx, 0, size, &bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); + if (r < 0) { + CLS_ERR("ERROR: %s: cls_cxx_read2() on obj returned %d", __PRETTY_FUNCTION__, r); + return r; + } + + if (r == 0) { + if (get_info) { + CLS_LOG(5, "%s: Zero length object, likely probe, returning ENODATA", __PRETTY_FUNCTION__); + } else { + CLS_ERR("ERROR: %s: Zero length object, returning ENODATA", __PRETTY_FUNCTION__); + } + return -ENODATA; + } + + try { + auto iter = bl.cbegin(); + decode(*info, iter); + } catch (const ceph::buffer::error& err) { + CLS_ERR("ERROR: %s: failed decoding header", __PRETTY_FUNCTION__); + return -EIO; + } + + if (objv && !(info->version== *objv)) { + auto s1 = info->version.to_str(); + auto s2 = objv->to_str(); + CLS_ERR("%s: version mismatch (header=%s, req=%s), canceled operation", + __PRETTY_FUNCTION__, s1.c_str(), s2.c_str()); + return -ECANCELED; + } + + return 0; +} + +int create_meta(cls_method_context_t hctx, + 
ceph::buffer::list* in, ceph::buffer::list* out) +{ + CLS_LOG(5, "%s", __PRETTY_FUNCTION__); + + op::create_meta op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error& err) { + CLS_ERR("ERROR: %s: failed to decode request: %s", __PRETTY_FUNCTION__, + err.what()); + return -EINVAL; + } + + if (op.id.empty()) { + CLS_ERR("%s: ID cannot be empty", __PRETTY_FUNCTION__); + return -EINVAL; + } + + if (op.max_part_size == 0 || + op.max_entry_size == 0 || + op.max_entry_size > op.max_part_size) { + CLS_ERR("ERROR: %s: invalid dimensions.", __PRETTY_FUNCTION__); + return -EINVAL; + } + + std::uint64_t size; + + int r = cls_cxx_stat2(hctx, &size, nullptr); + if (r < 0 && r != -ENOENT) { + CLS_ERR("ERROR: %s: cls_cxx_stat2() on obj returned %d", + __PRETTY_FUNCTION__, r); + return r; + } + if (op.exclusive && r == 0) { + CLS_ERR("%s: exclusive create but queue already exists", + __PRETTY_FUNCTION__); + return -EEXIST; + } + + if (r == 0) { + CLS_LOG(5, "%s: FIFO already exists, reading from disk and comparing.", + __PRETTY_FUNCTION__); + ceph::buffer::list bl; + r = cls_cxx_read2(hctx, 0, size, &bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); + if (r < 0) { + CLS_ERR("ERROR: %s: cls_cxx_read2() on obj returned %d", + __PRETTY_FUNCTION__, r); + return r; + } + + info header; + try { + auto iter = bl.cbegin(); + decode(header, iter); + } catch (const ceph::buffer::error& err) { + CLS_ERR("ERROR: %s: failed decoding header: %s", + __PRETTY_FUNCTION__, err.what()); + return -EIO; + } + + if (!(header.id == op.id && + (!op.oid_prefix || + header.oid_prefix == *op.oid_prefix) && + (!op.version || + header.version == *op.version))) { + CLS_ERR("%s: failed to re-create existing queue " + "with different params", __PRETTY_FUNCTION__); + return -EEXIST; + } + + return 0; /* already exists */ + } + info header; + + header.id = op.id; + if (op.version) { + header.version = *op.version; + } else { + static constexpr auto DEFAULT_INSTANCE_SIZE = 16; + char buf[DEFAULT_INSTANCE_SIZE + 1]; + cls_gen_rand_base64(buf, sizeof(buf)); + buf[DEFAULT_INSTANCE_SIZE] = '\0'; + header.version.instance = buf; + header.version.ver = 1; + } + header.oid_prefix = new_oid_prefix(op.id, op.oid_prefix); + + header.params.max_part_size = op.max_part_size; + header.params.max_entry_size = op.max_entry_size; + header.params.full_size_threshold = op.max_part_size - op.max_entry_size - part_entry_overhead; + + r = write_header(hctx, header); + if (r < 0) { + CLS_ERR("%s: failed to write header: r=%d", __PRETTY_FUNCTION__, r); + return r; + } + + return 0; +} + +int update_meta(cls_method_context_t hctx, ceph::buffer::list* in, + ceph::buffer::list* out) +{ + CLS_LOG(5, "%s", __PRETTY_FUNCTION__); + + op::update_meta op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error& err) { + CLS_ERR("ERROR: %s: failed to decode request", __PRETTY_FUNCTION__); + return -EINVAL; + } + + if (op.version.empty()) { + CLS_ERR("%s: no version supplied", __PRETTY_FUNCTION__); + return -EINVAL; + } + + info header; + + int r = read_header(hctx, op.version, &header); + if (r < 0) { + return r; + } + + auto u = fifo::update().tail_part_num(op.tail_part_num) + .head_part_num(op.head_part_num) + .min_push_part_num(op.min_push_part_num) + .max_push_part_num(op.max_push_part_num) + .journal_entries_add( + std::move(op.journal_entries_add)) + .journal_entries_rm( + std::move(op.journal_entries_rm)); + + auto changed = header.apply_update(u); + if (changed) { + r = write_header(hctx, 
header); + if (r < 0) { + CLS_ERR("%s: failed to write header: r=%d", __PRETTY_FUNCTION__, r); + return r; + } + } else { + CLS_LOG(10, "%s: No change, nothing to write.", + __PRETTY_FUNCTION__); + } + + return 0; +} + +int get_meta(cls_method_context_t hctx, ceph::buffer::list* in, + ceph::buffer::list* out) +{ + CLS_LOG(5, "%s", __PRETTY_FUNCTION__); + + op::get_meta op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("ERROR: %s: failed to decode request", __PRETTY_FUNCTION__); + return -EINVAL; + } + + op::get_meta_reply reply; + int r = read_header(hctx, op.version, &reply.info, true); + if (r < 0) { + return r; + } + + reply.part_header_size = CLS_FIFO_MAX_PART_HEADER_SIZE; + reply.part_entry_overhead = part_entry_overhead; + + encode(reply, *out); + + return 0; +} + +int init_part(cls_method_context_t hctx, ceph::buffer::list* in, + ceph::buffer::list *out) +{ + CLS_LOG(5, "%s", __PRETTY_FUNCTION__); + + op::init_part op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("ERROR: %s: failed to decode request", __PRETTY_FUNCTION__); + return -EINVAL; + } + + std::uint64_t size; + + int r = cls_cxx_stat2(hctx, &size, nullptr); + if (r < 0 && r != -ENOENT) { + CLS_ERR("ERROR: %s: cls_cxx_stat2() on obj returned %d", __PRETTY_FUNCTION__, r); + return r; + } + if (r == 0 && size > 0) { + part_header part_header; + r = read_part_header(hctx, &part_header); + if (r < 0) { + CLS_ERR("%s: failed to read part header", __PRETTY_FUNCTION__); + return r; + } + + if (!(part_header.params == op.params)) { + CLS_ERR("%s: failed to re-create existing part with different " + "params", __PRETTY_FUNCTION__); + return -EEXIST; + } + + return 0; /* already exists */ + } + + part_header part_header; + + part_header.params = op.params; + + part_header.min_ofs = CLS_FIFO_MAX_PART_HEADER_SIZE; + part_header.last_ofs = 0; + part_header.next_ofs = part_header.min_ofs; + part_header.max_time = ceph::real_clock::now(); + + cls_gen_random_bytes(reinterpret_cast<char *>(&part_header.magic), + sizeof(part_header.magic)); + + r = write_part_header(hctx, part_header); + if (r < 0) { + CLS_ERR("%s: failed to write header: r=%d", __PRETTY_FUNCTION__, r); + return r; + } + + return 0; +} + +bool full_part(const part_header& part_header) +{ + return (part_header.next_ofs > part_header.params.full_size_threshold); +} + +int push_part(cls_method_context_t hctx, ceph::buffer::list* in, + ceph::buffer::list* out) +{ + CLS_LOG(5, "%s", __PRETTY_FUNCTION__); + + op::push_part op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error& err) { + CLS_ERR("ERROR: %s: failed to decode request", __PRETTY_FUNCTION__); + return -EINVAL; + } + + part_header part_header; + int r = read_part_header(hctx, &part_header); + if (r < 0) { + CLS_ERR("%s: failed to read part header", __PRETTY_FUNCTION__); + return r; + } + + std::uint64_t effective_len = op.total_len + op.data_bufs.size() * + part_entry_overhead; + + if (effective_len > part_header.params.max_part_size) { + return -EINVAL; + } + + if (full_part(part_header)) { + return -ERANGE; + } + + auto now = ceph::real_clock::now(); + struct entry_header entry_header = { now }; + ceph::buffer::list entry_header_bl; + encode(entry_header, entry_header_bl); + + auto max_index = part_header.max_index; + const auto write_ofs = part_header.next_ofs; + auto ofs = part_header.next_ofs; + + entry_header_pre pre_header; + pre_header.magic = 
part_header.magic; + pre_header.pre_size = sizeof(pre_header); + pre_header.reserved = 0; + + std::uint64_t total_data = 0; + for (auto& data : op.data_bufs) { + total_data += data.length(); + } + if (total_data != op.total_len) { + CLS_ERR("%s: length mismatch: op.total_len=%" PRId64 + " total data received=%" PRId64, + __PRETTY_FUNCTION__, op.total_len, total_data); + return -EINVAL; + } + + + int entries_pushed = 0; + ceph::buffer::list all_data; + for (auto& data : op.data_bufs) { + if (full_part(part_header)) + break; + + pre_header.header_size = entry_header_bl.length(); + pre_header.data_size = data.length(); + pre_header.index = max_index; + + bufferptr pre(reinterpret_cast<char*>(&pre_header), sizeof(pre_header)); + auto entry_write_len = pre.length() + entry_header_bl.length() + data.length(); + all_data.append(pre); + all_data.append(entry_header_bl); + all_data.claim_append(data); + + part_header.last_ofs = ofs; + ofs += entry_write_len; + ++max_index; + ++entries_pushed; + part_header.max_index = max_index; + part_header.next_ofs = ofs; + } + part_header.max_time = now; + + auto write_len = all_data.length(); + + r = cls_cxx_write2(hctx, write_ofs, write_len, + &all_data, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); + + if (r < 0) { + CLS_ERR("%s: failed to write entries (ofs=%" PRIu64 + " len=%u): r=%d", __PRETTY_FUNCTION__, write_ofs, + write_len, r); + return r; + } + + + r = write_part_header(hctx, part_header); + if (r < 0) { + CLS_ERR("%s: failed to write header: r=%d", __PRETTY_FUNCTION__, r); + return r; + } + + if (entries_pushed == 0) { + CLS_ERR("%s: pushed no entries? Can't happen!", __PRETTY_FUNCTION__); + return -EFAULT; + } + + return entries_pushed; +} + +class EntryReader { + static constexpr std::uint64_t prefetch_len = (128 * 1024); + + cls_method_context_t hctx; + + const fifo::part_header& part_header; + + std::uint64_t ofs; + ceph::buffer::list data; + + int fetch(std::uint64_t num_bytes); + int read(std::uint64_t num_bytes, ceph::buffer::list* pbl); + int peek(std::uint64_t num_bytes, char *dest); + int seek(std::uint64_t num_bytes); + +public: + EntryReader(cls_method_context_t hctx, + const fifo::part_header& part_header, + uint64_t ofs) : hctx(hctx), + part_header(part_header), + ofs(ofs < part_header.min_ofs ? 
+ part_header.min_ofs : + ofs) {} + + std::uint64_t get_ofs() const { + return ofs; + } + + bool end() const { + return (ofs >= part_header.next_ofs); + } + + int peek_pre_header(entry_header_pre* pre_header); + int get_next_entry(ceph::buffer::list* pbl, + std::uint64_t* pofs, + ceph::real_time* pmtime); +}; + + +int EntryReader::fetch(std::uint64_t num_bytes) +{ + CLS_LOG(5, "%s: fetch %d bytes, ofs=%d data.length()=%d", __PRETTY_FUNCTION__, (int)num_bytes, (int)ofs, (int)data.length()); + if (data.length() < num_bytes) { + ceph::buffer::list bl; + CLS_LOG(5, "%s: reading % " PRId64 " bytes at ofs=%" PRId64, __PRETTY_FUNCTION__, + prefetch_len, ofs + data.length()); + int r = cls_cxx_read2(hctx, ofs + data.length(), prefetch_len, &bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); + if (r < 0) { + CLS_ERR("ERROR: %s: cls_cxx_read2() on obj returned %d", __PRETTY_FUNCTION__, r); + return r; + } + data.claim_append(bl); + } + + if (static_cast<unsigned>(num_bytes) > data.length()) { + CLS_ERR("%s: requested %" PRId64 " bytes, but only " + "%u were available", __PRETTY_FUNCTION__, num_bytes, data.length()); + return -ERANGE; + } + + return 0; +} + +int EntryReader::read(std::uint64_t num_bytes, ceph::buffer::list* pbl) +{ + int r = fetch(num_bytes); + if (r < 0) { + return r; + } + data.splice(0, num_bytes, pbl); + + ofs += num_bytes; + + return 0; +} + +int EntryReader::peek(std::uint64_t num_bytes, char* dest) +{ + int r = fetch(num_bytes); + if (r < 0) { + return r; + } + + data.begin().copy(num_bytes, dest); + + return 0; +} + +int EntryReader::seek(std::uint64_t num_bytes) +{ + ceph::buffer::list bl; + + CLS_LOG(5, "%s:%d: num_bytes=%" PRIu64, __PRETTY_FUNCTION__, __LINE__, num_bytes); + return read(num_bytes, &bl); +} + +int EntryReader::peek_pre_header(entry_header_pre* pre_header) +{ + if (end()) { + return -ENOENT; + } + + int r = peek(sizeof(*pre_header), + reinterpret_cast<char*>(pre_header)); + if (r < 0) { + CLS_ERR("ERROR: %s: peek() size=%zu failed: r=%d", __PRETTY_FUNCTION__, + sizeof(pre_header), r); + return r; + } + + if (pre_header->magic != part_header.magic) { + CLS_ERR("ERROR: %s: unexpected pre_header magic", __PRETTY_FUNCTION__); + return -ERANGE; + } + + return 0; +} + + +int EntryReader::get_next_entry(ceph::buffer::list* pbl, + std::uint64_t* pofs, + ceph::real_time* pmtime) +{ + entry_header_pre pre_header; + int r = peek_pre_header(&pre_header); + if (r < 0) { + CLS_ERR("ERROR: %s: peek_pre_header() failed: r=%d", __PRETTY_FUNCTION__, r); + return r; + } + + if (pofs) { + *pofs = ofs; + } + + CLS_LOG(5, "%s:%d: pre_header.pre_size=%" PRIu64, __PRETTY_FUNCTION__, __LINE__, + uint64_t(pre_header.pre_size)); + r = seek(pre_header.pre_size); + if (r < 0) { + CLS_ERR("ERROR: %s: failed to seek: r=%d", __PRETTY_FUNCTION__, r); + return r; + } + + ceph::buffer::list header; + CLS_LOG(5, "%s:%d: pre_header.header_size=%d", __PRETTY_FUNCTION__, __LINE__, (int)pre_header.header_size); + r = read(pre_header.header_size, &header); + if (r < 0) { + CLS_ERR("ERROR: %s: failed to read entry header: r=%d", __PRETTY_FUNCTION__, r); + return r; + } + + entry_header entry_header; + auto iter = header.cbegin(); + try { + decode(entry_header, iter); + } catch (ceph::buffer::error& err) { + CLS_ERR("%s: failed decoding entry header", __PRETTY_FUNCTION__); + return -EIO; + } + + if (pmtime) { + *pmtime = entry_header.mtime; + } + + if (pbl) { + r = read(pre_header.data_size, pbl); + if (r < 0) { + CLS_ERR("%s: failed reading data: r=%d", __PRETTY_FUNCTION__, r); + return r; + } + } else { + r = 
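/*
Illustrative sketch (not part of this change) of the per-entry framing that
EntryReader walks:

  [entry_header_pre][encoded entry_header][data]
   pre_size bytes    header_size bytes     data_size bytes

A simplified walk over a contiguous buffer using the same idea. `PreHeader`
mirrors entry_header_pre, but the exact field widths here are illustrative.

#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

struct PreHeader {
  std::uint64_t magic;        // must match part_header.magic
  std::uint16_t pre_size;     // size of the pre-header as written
  std::uint16_t header_size;  // encoded entry_header length
  std::uint32_t data_size;    // payload length
  std::uint64_t index;        // absolute entry index
};

// returns (index, data_size) pairs until the magic stops matching, which
// is how a torn or partial write is detected (the -ERANGE case above)
std::vector<std::pair<std::uint64_t, std::uint32_t>>
walk_entries(const char* buf, std::size_t len, std::uint64_t magic) {
  std::vector<std::pair<std::uint64_t, std::uint32_t>> out;
  std::size_t ofs = 0;
  while (ofs + sizeof(PreHeader) <= len) {
    PreHeader pre;
    std::memcpy(&pre, buf + ofs, sizeof(pre));
    if (pre.magic != magic)
      break;
    out.emplace_back(pre.index, pre.data_size);
    ofs += pre.pre_size + pre.header_size + pre.data_size;
  }
  return out;
}
*/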
seek(pre_header.data_size); + if (r < 0) { + CLS_ERR("ERROR: %s: failed to seek: r=%d", __PRETTY_FUNCTION__, r); + return r; + } + } + + return 0; +} + +int trim_part(cls_method_context_t hctx, + ceph::buffer::list *in, ceph::buffer::list *out) +{ + CLS_LOG(5, "%s", __PRETTY_FUNCTION__); + + op::trim_part op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("ERROR: %s: failed to decode request", __PRETTY_FUNCTION__); + return -EINVAL; + } + + part_header part_header; + int r = read_part_header(hctx, &part_header); + if (r < 0) { + CLS_ERR("%s: failed to read part header", __PRETTY_FUNCTION__); + return r; + } + + if (op.ofs < part_header.min_ofs) { + return 0; + } + if (op.exclusive && op.ofs == part_header.min_ofs) { + return 0; + } + + if (op.ofs >= part_header.next_ofs) { + if (full_part(part_header)) { + /* + * trim full part completely: remove object + */ + + r = cls_cxx_remove(hctx); + if (r < 0) { + CLS_ERR("%s: ERROR: cls_cxx_remove() returned r=%d", __PRETTY_FUNCTION__, r); + return r; + } + + return 0; + } + + part_header.min_ofs = part_header.next_ofs; + part_header.min_index = part_header.max_index; + } else { + EntryReader reader(hctx, part_header, op.ofs); + + entry_header_pre pre_header; + int r = reader.peek_pre_header(&pre_header); + if (r < 0) { + return r; + } + + if (op.exclusive) { + part_header.min_index = pre_header.index; + } else { + r = reader.get_next_entry(nullptr, nullptr, nullptr); + if (r < 0) { + CLS_ERR("ERROR: %s: unexpected failure at get_next_entry: r=%d", + __PRETTY_FUNCTION__, r); + return r; + } + part_header.min_index = pre_header.index + 1; + } + + part_header.min_ofs = reader.get_ofs(); + } + + r = write_part_header(hctx, part_header); + if (r < 0) { + CLS_ERR("%s: failed to write header: r=%d", __PRETTY_FUNCTION__, r); + return r; + } + + return 0; +} + +int list_part(cls_method_context_t hctx, ceph::buffer::list* in, + ceph::buffer::list* out) +{ + CLS_LOG(5, "%s", __PRETTY_FUNCTION__); + + op::list_part op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const buffer::error &err) { + CLS_ERR("ERROR: %s: failed to decode request", __PRETTY_FUNCTION__); + return -EINVAL; + } + + part_header part_header; + int r = read_part_header(hctx, &part_header); + if (r < 0) { + CLS_ERR("%s: failed to read part header", __PRETTY_FUNCTION__); + return r; + } + + EntryReader reader(hctx, part_header, op.ofs); + + if (op.ofs >= part_header.min_ofs && + !reader.end()) { + r = reader.get_next_entry(nullptr, nullptr, nullptr); + if (r < 0) { + CLS_ERR("ERROR: %s: unexpected failure at get_next_entry: r=%d", __PRETTY_FUNCTION__, r); + return r; + } + } + + op::list_part_reply reply; + + auto max_entries = std::min(op.max_entries, op::MAX_LIST_ENTRIES); + + for (int i = 0; i < max_entries && !reader.end(); ++i) { + ceph::buffer::list data; + ceph::real_time mtime; + std::uint64_t ofs; + + r = reader.get_next_entry(&data, &ofs, &mtime); + if (r < 0) { + CLS_ERR("ERROR: %s: unexpected failure at get_next_entry: r=%d", + __PRETTY_FUNCTION__, r); + return r; + } + + reply.entries.emplace_back(std::move(data), ofs, mtime); + } + + reply.more = !reader.end(); + reply.full_part = full_part(part_header); + + encode(reply, *out); + + return 0; +} + +int get_part_info(cls_method_context_t hctx, ceph::buffer::list *in, + ceph::buffer::list *out) +{ + CLS_LOG(5, "%s", __PRETTY_FUNCTION__); + + op::get_part_info op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const 
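/*
Client-side sketch (not part of this change): pairing list_part with
trim_part the way a consumer of a single part object might. It assumes a
connected librados::IoCtx and the op structs from cls_fifo_ops.h; error
handling is abbreviated and the function name is hypothetical.

#include "include/rados/librados.hpp"
#include "cls/fifo/cls_fifo_ops.h"

int consume_and_trim(librados::IoCtx& ioctx, const std::string& part_oid) {
  using namespace rados::cls::fifo;

  op::list_part list_op;
  list_op.ofs = 0;
  list_op.max_entries = 100;
  ceph::buffer::list in, out;
  encode(list_op, in);
  int r = ioctx.exec(part_oid, op::CLASS, op::LIST_PART, in, out);
  if (r < 0)
    return r;

  op::list_part_reply reply;
  auto iter = out.cbegin();
  decode(reply, iter);
  if (reply.entries.empty())
    return 0;

  // trim through the last processed entry; exclusive=false means the
  // entry at that ofs is trimmed as well
  op::trim_part trim_op;
  trim_op.ofs = reply.entries.back().ofs;
  trim_op.exclusive = false;
  ceph::buffer::list trim_in;
  encode(trim_op, trim_in);
  librados::ObjectWriteOperation wop;     // trim_part is a write method
  wop.exec(op::CLASS, op::TRIM_PART, trim_in);
  return ioctx.operate(part_oid, &wop);
}
*/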
ceph::buffer::error &err) { + CLS_ERR("ERROR: %s: failed to decode request", __PRETTY_FUNCTION__); + return -EINVAL; + } + + op::get_part_info_reply reply; + + int r = read_part_header(hctx, &reply.header); + if (r < 0) { + CLS_ERR("%s: failed to read part header", __PRETTY_FUNCTION__); + return r; + } + + encode(reply, *out); + + return 0; +} +} +} // namespace rados::cls::fifo + +CLS_INIT(fifo) +{ + using namespace rados::cls::fifo; + CLS_LOG(10, "Loaded fifo class!"); + + cls_handle_t h_class; + cls_method_handle_t h_create_meta; + cls_method_handle_t h_get_meta; + cls_method_handle_t h_update_meta; + cls_method_handle_t h_init_part; + cls_method_handle_t h_push_part; + cls_method_handle_t h_trim_part; + cls_method_handle_t h_list_part; + cls_method_handle_t h_get_part_info; + + cls_register(op::CLASS, &h_class); + cls_register_cxx_method(h_class, op::CREATE_META, + CLS_METHOD_RD | CLS_METHOD_WR, + create_meta, &h_create_meta); + + cls_register_cxx_method(h_class, op::GET_META, + CLS_METHOD_RD, + get_meta, &h_get_meta); + + cls_register_cxx_method(h_class, op::UPDATE_META, + CLS_METHOD_RD | CLS_METHOD_WR, + update_meta, &h_update_meta); + + cls_register_cxx_method(h_class, op::INIT_PART, + CLS_METHOD_RD | CLS_METHOD_WR, + init_part, &h_init_part); + + cls_register_cxx_method(h_class, op::PUSH_PART, + CLS_METHOD_RD | CLS_METHOD_WR, + push_part, &h_push_part); + + cls_register_cxx_method(h_class, op::TRIM_PART, + CLS_METHOD_RD | CLS_METHOD_WR, + trim_part, &h_trim_part); + + cls_register_cxx_method(h_class, op::LIST_PART, + CLS_METHOD_RD, + list_part, &h_list_part); + + cls_register_cxx_method(h_class, op::GET_PART_INFO, + CLS_METHOD_RD, + get_part_info, &h_get_part_info); + + /* calculate entry overhead */ + struct entry_header entry_header; + ceph::buffer::list entry_header_bl; + encode(entry_header, entry_header_bl); + + part_entry_overhead = sizeof(entry_header_pre) + entry_header_bl.length(); + + return; +} diff --git a/src/cls/fifo/cls_fifo_ops.h b/src/cls/fifo/cls_fifo_ops.h new file mode 100644 index 000000000..e850c635c --- /dev/null +++ b/src/cls/fifo/cls_fifo_ops.h @@ -0,0 +1,311 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2019 Red Hat, Inc. + * Copyright (C) 2019 SUSE LLC + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ + +#pragma once + +#include <cstdint> +#include <optional> +#include <string> +#include <vector> + +#include "include/buffer.h" +#include "include/encoding.h" +#include "include/types.h" + +#include "cls/fifo/cls_fifo_types.h" + +namespace rados::cls::fifo::op { +struct create_meta +{ + std::string id; + std::optional<objv> version; + struct { + std::string name; + std::string ns; + } pool; + std::optional<std::string> oid_prefix; + + std::uint64_t max_part_size{0}; + std::uint64_t max_entry_size{0}; + + bool exclusive{false}; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(id, bl); + encode(version, bl); + encode(pool.name, bl); + encode(pool.ns, bl); + encode(oid_prefix, bl); + encode(max_part_size, bl); + encode(max_entry_size, bl); + encode(exclusive, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(id, bl); + decode(version, bl); + decode(pool.name, bl); + decode(pool.ns, bl); + decode(oid_prefix, bl); + decode(max_part_size, bl); + decode(max_entry_size, bl); + decode(exclusive, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(create_meta) + +struct get_meta +{ + std::optional<objv> version; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(version, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(version, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(get_meta) + +struct get_meta_reply +{ + fifo::info info; + std::uint32_t part_header_size{0}; + /* per entry extra data that is stored */ + std::uint32_t part_entry_overhead{0}; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(info, bl); + encode(part_header_size, bl); + encode(part_entry_overhead, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(info, bl); + decode(part_header_size, bl); + decode(part_entry_overhead, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(get_meta_reply) + +struct update_meta +{ + objv version; + + std::optional<std::uint64_t> tail_part_num; + std::optional<std::uint64_t> head_part_num; + std::optional<std::uint64_t> min_push_part_num; + std::optional<std::uint64_t> max_push_part_num; + std::vector<journal_entry> journal_entries_add; + std::vector<journal_entry> journal_entries_rm; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(version, bl); + encode(tail_part_num, bl); + encode(head_part_num, bl); + encode(min_push_part_num, bl); + encode(max_push_part_num, bl); + encode(journal_entries_add, bl); + encode(journal_entries_rm, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(version, bl); + decode(tail_part_num, bl); + decode(head_part_num, bl); + decode(min_push_part_num, bl); + decode(max_push_part_num, bl); + decode(journal_entries_add, bl); + decode(journal_entries_rm, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(update_meta) + +struct init_part +{ + data_params params; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + std::string tag; + encode(tag, bl); + encode(params, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + std::string tag; + decode(tag, bl); + decode(params, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(init_part) + +struct push_part +{ + 
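/*
Aside (not part of this change): two conventions run through all of these
op structs. First, ENCODE_START(version, compat, bl) / DECODE_START let the
wire format grow: DECODE_START records the sender's version in a local
`struct_v` and skips trailing fields it does not know about. Second,
several ops encode an empty placeholder string where older formats carried
a per-part tag, preserving wire compatibility. A hypothetical v2 of
get_meta that adds a field while staying decodable by v1 peers:

struct get_meta_v2 {
  std::optional<objv> version;
  bool want_stats = false;   // hypothetical new field

  void encode(ceph::buffer::list& bl) const {
    ENCODE_START(2, 1, bl);  // version 2, still compatible with 1
    encode(version, bl);
    encode(want_stats, bl);
    ENCODE_FINISH(bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    DECODE_START(2, bl);
    decode(version, bl);
    if (struct_v >= 2)       // struct_v is introduced by DECODE_START
      decode(want_stats, bl);
    DECODE_FINISH(bl);
  }
};
*/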
std::deque<ceph::buffer::list> data_bufs; + std::uint64_t total_len{0}; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + std::string tag; + encode(tag, bl); + encode(data_bufs, bl); + encode(total_len, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + std::string tag; + decode(tag, bl); + decode(data_bufs, bl); + decode(total_len, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(push_part) + +struct trim_part +{ + std::uint64_t ofs{0}; + bool exclusive = false; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + std::optional<std::string> tag; + encode(tag, bl); + encode(ofs, bl); + encode(exclusive, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + std::optional<std::string> tag; + decode(tag, bl); + decode(ofs, bl); + decode(exclusive, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(trim_part) + +struct list_part +{ + std::uint64_t ofs{0}; + int max_entries{100}; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + std::optional<std::string> tag; + encode(tag, bl); + encode(ofs, bl); + encode(max_entries, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + std::optional<std::string> tag; + decode(tag, bl); + decode(ofs, bl); + decode(max_entries, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(list_part) +inline constexpr int MAX_LIST_ENTRIES = 512; + +struct list_part_reply +{ + std::vector<part_list_entry> entries; + bool more{false}; + bool full_part{false}; /* whether part is full or still can be written to. + A non full part is by definition head part */ + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + std::string tag; + encode(tag, bl); + encode(entries, bl); + encode(more, bl); + encode(full_part, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + std::string tag; + decode(tag, bl); + decode(entries, bl); + decode(more, bl); + decode(full_part, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(list_part_reply) + +struct get_part_info +{ + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(get_part_info) + +struct get_part_info_reply +{ + part_header header; + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(header, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(header, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(get_part_info_reply) + +inline constexpr auto CLASS = "fifo"; +inline constexpr auto CREATE_META = "create_meta"; +inline constexpr auto GET_META = "get_meta"; +inline constexpr auto UPDATE_META = "update_meta"; +inline constexpr auto INIT_PART = "init_part"; +inline constexpr auto PUSH_PART = "push_part"; +inline constexpr auto TRIM_PART = "trim_part"; +inline constexpr auto LIST_PART = "part_list"; +inline constexpr auto GET_PART_INFO = "get_part_info"; +} // namespace rados::cls::fifo::op diff --git a/src/cls/fifo/cls_fifo_types.h b/src/cls/fifo/cls_fifo_types.h new file mode 100644 index 000000000..1c69c1f08 --- /dev/null +++ b/src/cls/fifo/cls_fifo_types.h @@ -0,0 +1,559 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; 
indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2019 Red Hat, Inc. + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ + +#pragma once + +#include <algorithm> +#include <cstdint> +#include <map> +#include <optional> +#include <ostream> +#include <string> +#include <vector> + +#include <boost/container/flat_set.hpp> + +#include <fmt/format.h> +#if FMT_VERSION >= 90000 +#include <fmt/ostream.h> +#endif +#include "include/buffer.h" +#include "include/encoding.h" +#include "include/types.h" + +#include "common/ceph_time.h" + +class JSONObj; + +namespace rados::cls::fifo { +struct objv { + std::string instance; + std::uint64_t ver{0}; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(instance, bl); + encode(ver, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(instance, bl); + decode(ver, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter* f) const; + void decode_json(JSONObj* obj); + + bool operator ==(const objv& rhs) const { + return (instance == rhs.instance && + ver == rhs.ver); + } + bool operator !=(const objv& rhs) const { + return (instance != rhs.instance || + ver != rhs.ver); + } + bool same_or_later(const objv& rhs) const { + return (instance == rhs.instance && + ver >= rhs.ver); + } + + bool empty() const { + return instance.empty(); + } + + std::string to_str() const { + return fmt::format("{}{{{}}}", instance, ver); + } +}; +WRITE_CLASS_ENCODER(objv) +inline std::ostream& operator <<(std::ostream& os, const objv& objv) +{ + return os << objv.to_str(); +} + +struct data_params { + std::uint64_t max_part_size{0}; + std::uint64_t max_entry_size{0}; + std::uint64_t full_size_threshold{0}; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(max_part_size, bl); + encode(max_entry_size, bl); + encode(full_size_threshold, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(max_part_size, bl); + decode(max_entry_size, bl); + decode(full_size_threshold, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter* f) const; + void decode_json(JSONObj* obj); + + auto operator <=>(const data_params&) const = default; +}; +WRITE_CLASS_ENCODER(data_params) +inline std::ostream& operator <<(std::ostream& m, const data_params& d) { + return m << "max_part_size: " << d.max_part_size << ", " + << "max_entry_size: " << d.max_entry_size << ", " + << "full_size_threshold: " << d.full_size_threshold; +} + +struct journal_entry { + enum class Op { + unknown = -1, + create = 1, + set_head = 2, + remove = 3, + } op{Op::unknown}; + + std::int64_t part_num{-1}; + + bool valid() const { + using enum Op; + switch (op) { + case create: [[fallthrough]]; + case set_head: [[fallthrough]]; + case remove: + return part_num >= 0; + + default: + return false; + } + } + + journal_entry() = default; + journal_entry(Op op, std::int64_t part_num) + : op(op), part_num(part_num) {} + + void encode(ceph::buffer::list& bl) const { + ceph_assert(valid()); + ENCODE_START(1, 1, bl); + encode((int)op, bl); + encode(part_num, bl); + std::string part_tag; + encode(part_tag, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + 
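/*
Aside (not part of this change): the empty `part_tag` string carried in the
journal_entry encoding is a compatibility placeholder, not live data. A
minimal round-trip, relying on the (Op, part_num) constructor and the
defaulted comparison operators; the function name is hypothetical.

void journal_entry_roundtrip() {
  using rados::cls::fifo::journal_entry;
  journal_entry in{journal_entry::Op::create, 5};
  ceph::buffer::list bl;
  encode(in, bl);            // encode() asserts in.valid()
  journal_entry out;
  auto p = bl.cbegin();
  decode(out, p);
  ceph_assert(in == out);
}
*/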
int i; + decode(i, bl); + op = static_cast<Op>(i); + decode(part_num, bl); + std::string part_tag; + decode(part_tag, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter* f) const; + + auto operator <=>(const journal_entry&) const = default; +}; +WRITE_CLASS_ENCODER(journal_entry) +inline std::ostream& operator <<(std::ostream& m, const journal_entry::Op& o) { + switch (o) { + case journal_entry::Op::unknown: + return m << "Op::unknown"; + case journal_entry::Op::create: + return m << "Op::create"; + case journal_entry::Op::set_head: + return m << "Op::set_head"; + case journal_entry::Op::remove: + return m << "Op::remove"; + } + return m << "Bad value: " << static_cast<int>(o); +} +inline std::ostream& operator <<(std::ostream& m, const journal_entry& j) { + return m << "op: " << j.op << ", " + << "part_num: " << j.part_num; +} + +// This is actually a useful builder, since otherwise we end up with +// four uint64_ts in a row and only care about a subset at a time. +class update { + std::optional<std::int64_t> tail_part_num_; + std::optional<std::int64_t> head_part_num_; + std::optional<std::int64_t> min_push_part_num_; + std::optional<std::int64_t> max_push_part_num_; + std::vector<fifo::journal_entry> journal_entries_add_; + std::vector<fifo::journal_entry> journal_entries_rm_; + +public: + + update&& tail_part_num(std::optional<std::int64_t> num) noexcept { + tail_part_num_ = num; + return std::move(*this); + } + auto tail_part_num() const noexcept { + return tail_part_num_; + } + + update&& head_part_num(std::optional<std::int64_t> num) noexcept { + head_part_num_ = num; + return std::move(*this); + } + auto head_part_num() const noexcept { + return head_part_num_; + } + + update&& min_push_part_num(std::optional<std::int64_t> num) + noexcept { + min_push_part_num_ = num; + return std::move(*this); + } + auto min_push_part_num() const noexcept { + return min_push_part_num_; + } + + update&& max_push_part_num(std::optional<std::int64_t> num) noexcept { + max_push_part_num_ = num; + return std::move(*this); + } + auto max_push_part_num() const noexcept { + return max_push_part_num_; + } + + update&& journal_entry_add(fifo::journal_entry entry) { + journal_entries_add_.push_back(std::move(entry)); + return std::move(*this); + } + update&& journal_entries_add( + std::optional<std::vector<fifo::journal_entry>>&& entries) { + if (entries) { + journal_entries_add_ = std::move(*entries); + } else { + journal_entries_add_.clear(); + } + return std::move(*this); + } + const auto& journal_entries_add() const & noexcept { + return journal_entries_add_; + } + auto&& journal_entries_add() && noexcept { + return std::move(journal_entries_add_); + } + + update&& journal_entry_rm(fifo::journal_entry entry) { + journal_entries_rm_.push_back(std::move(entry)); + return std::move(*this); + } + update&& journal_entries_rm( + std::optional<std::vector<fifo::journal_entry>>&& entries) { + if (entries) { + journal_entries_rm_ = std::move(*entries); + } else { + journal_entries_rm_.clear(); + } + return std::move(*this); + } + const auto& journal_entries_rm() const & noexcept { + return journal_entries_rm_; + } + auto&& journal_entries_rm() && noexcept { + return std::move(journal_entries_rm_); + } + friend std::ostream& operator <<(std::ostream& m, const update& u); +}; +inline std::ostream& operator <<(std::ostream& m, const update& u) { + bool prev = false; + if (u.tail_part_num_) { + m << "tail_part_num: " << *u.tail_part_num_; + prev = true; + } + if (u.head_part_num_) { + if (prev) + m << ", "; + 
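/*
Usage sketch (not part of this change): the &&-qualified setters of the
update builder return std::move(*this), so an update is meant to be built
fluently from a temporary and handed off in one expression:

void build_update_example() {
  using rados::cls::fifo::journal_entry;
  using rados::cls::fifo::update;
  auto u = update{}
               .tail_part_num(2)
               .head_part_num(7)
               .journal_entry_add({journal_entry::Op::set_head, 7});
  // only the fields that were set are populated; the rest stay
  // std::nullopt / empty and are ignored by apply_update()
}
*/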
m << "head_part_num: " << *u.head_part_num_; + prev = true; + } + if (u.min_push_part_num_) { + if (prev) + m << ", "; + m << "min_push_part_num: " << *u.min_push_part_num_; + prev = true; + } + if (u.max_push_part_num_) { + if (prev) + m << ", "; + m << "max_push_part_num: " << *u.max_push_part_num_; + prev = true; + } + if (!u.journal_entries_add_.empty()) { + if (prev) + m << ", "; + m << "journal_entries_add: {" << u.journal_entries_add_ << "}"; + prev = true; + } + if (!u.journal_entries_rm_.empty()) { + if (prev) + m << ", "; + m << "journal_entries_rm: {" << u.journal_entries_rm_ << "}"; + prev = true; + } + if (!prev) + m << "(none)"; + return m; +} + +struct info { + std::string id; + objv version; + std::string oid_prefix; + data_params params; + + std::int64_t tail_part_num{0}; + std::int64_t head_part_num{-1}; + std::int64_t min_push_part_num{0}; + std::int64_t max_push_part_num{-1}; + + boost::container::flat_set<journal_entry> journal; + static_assert(journal_entry::Op::create < journal_entry::Op::set_head); + + // So we can get rid of the multimap without breaking compatibility + void encode_journal(bufferlist& bl) const { + using ceph::encode; + assert(journal.size() <= std::numeric_limits<uint32_t>::max()); + uint32_t n = static_cast<uint32_t>(journal.size()); + encode(n, bl); + for (const auto& entry : journal) { + encode(entry.part_num, bl); + encode(entry, bl); + } + } + + void decode_journal( bufferlist::const_iterator& p) { + using enum journal_entry::Op; + using ceph::decode; + uint32_t n; + decode(n, p); + journal.clear(); + while (n--) { + decltype(journal_entry::part_num) dummy; + decode(dummy, p); + journal_entry e; + decode(e, p); + if (!e.valid()) { + throw ceph::buffer::malformed_input(); + } else { + journal.insert(std::move(e)); + } + } + } + bool need_new_head() const { + return (head_part_num < min_push_part_num); + } + + bool need_new_part() const { + return (max_push_part_num < min_push_part_num); + } + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(id, bl); + encode(version, bl); + encode(oid_prefix, bl); + encode(params, bl); + encode(tail_part_num, bl); + encode(head_part_num, bl); + encode(min_push_part_num, bl); + encode(max_push_part_num, bl); + std::string head_tag; + std::map<int64_t, std::string> tags; + encode(tags, bl); + encode(head_tag, bl); + encode_journal(bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(id, bl); + decode(version, bl); + decode(oid_prefix, bl); + decode(params, bl); + decode(tail_part_num, bl); + decode(head_part_num, bl); + decode(min_push_part_num, bl); + decode(max_push_part_num, bl); + std::string head_tag; + std::map<int64_t, std::string> tags; + decode(tags, bl); + decode(head_tag, bl); + decode_journal(bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter* f) const; + void decode_json(JSONObj* obj); + + std::string part_oid(std::int64_t part_num) const { + return fmt::format("{}.{}", oid_prefix, part_num); + } + + bool apply_update(const update& update) { + bool changed = false; + if (update.tail_part_num() && (tail_part_num != *update.tail_part_num())) { + tail_part_num = *update.tail_part_num(); + changed = true; + } + + if (update.min_push_part_num() && + (min_push_part_num != *update.min_push_part_num())) { + min_push_part_num = *update.min_push_part_num(); + changed = true; + } + + if (update.max_push_part_num() && + (max_push_part_num != *update.max_push_part_num())) { + max_push_part_num = 
*update.max_push_part_num(); + changed = true; + } + + for (const auto& entry : update.journal_entries_add()) { + auto [iter, inserted] = journal.insert(entry); + if (inserted) { + changed = true; + } + } + + for (const auto& entry : update.journal_entries_rm()) { + auto count = journal.erase(entry); + if (count > 0) { + changed = true; + } + } + + if (update.head_part_num() && (head_part_num != *update.head_part_num())) { + head_part_num = *update.head_part_num(); + changed = true; + } + if (changed) { + ++version.ver; + } + return changed; + } +}; +WRITE_CLASS_ENCODER(info) +inline std::ostream& operator <<(std::ostream& m, const info& i) { + return m << "id: " << i.id << ", " + << "version: " << i.version << ", " + << "oid_prefix: " << i.oid_prefix << ", " + << "params: {" << i.params << "}, " + << "tail_part_num: " << i.tail_part_num << ", " + << "head_part_num: " << i.head_part_num << ", " + << "min_push_part_num: " << i.min_push_part_num << ", " + << "max_push_part_num: " << i.max_push_part_num << ", " + << "journal: {" << i.journal; +} + +struct part_list_entry { + ceph::buffer::list data; + std::uint64_t ofs = 0; + ceph::real_time mtime; + + part_list_entry() {} + part_list_entry(ceph::buffer::list&& data, + uint64_t ofs, + ceph::real_time mtime) + : data(std::move(data)), ofs(ofs), mtime(mtime) {} + + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(data, bl); + encode(ofs, bl); + encode(mtime, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(data, bl); + decode(ofs, bl); + decode(mtime, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(part_list_entry) +inline std::ostream& operator <<(std::ostream& m, + const part_list_entry& p) { + using ceph::operator <<; + return m << "data: " << p.data << ", " + << "ofs: " << p.ofs << ", " + << "mtime: " << p.mtime; +} + +struct part_header { + data_params params; + + std::uint64_t magic{0}; + + std::uint64_t min_ofs{0}; + std::uint64_t last_ofs{0}; + std::uint64_t next_ofs{0}; + std::uint64_t min_index{0}; + std::uint64_t max_index{0}; + ceph::real_time max_time; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + std::string tag; + encode(tag, bl); + encode(params, bl); + encode(magic, bl); + encode(min_ofs, bl); + encode(last_ofs, bl); + encode(next_ofs, bl); + encode(min_index, bl); + encode(max_index, bl); + encode(max_time, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + std::string tag; + decode(tag, bl); + decode(params, bl); + decode(magic, bl); + decode(min_ofs, bl); + decode(last_ofs, bl); + decode(next_ofs, bl); + decode(min_index, bl); + decode(max_index, bl); + decode(max_time, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(part_header) +inline std::ostream& operator <<(std::ostream& m, const part_header& p) { + using ceph::operator <<; + return m << "params: {" << p.params << "}, " + << "magic: " << p.magic << ", " + << "min_ofs: " << p.min_ofs << ", " + << "last_ofs: " << p.last_ofs << ", " + << "next_ofs: " << p.next_ofs << ", " + << "min_index: " << p.min_index << ", " + << "max_index: " << p.max_index << ", " + << "max_time: " << p.max_time; +} +} // namespace rados::cls::fifo + +#if FMT_VERSION >= 90000 +template<> +struct fmt::formatter<rados::cls::fifo::info> : fmt::ostream_formatter {}; +template<> +struct fmt::formatter<rados::cls::fifo::part_header> : fmt::ostream_formatter {}; +#endif diff --git 
a/src/cls/hello/cls_hello.cc b/src/cls/hello/cls_hello.cc new file mode 100644 index 000000000..d7263b431 --- /dev/null +++ b/src/cls/hello/cls_hello.cc @@ -0,0 +1,373 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +/* + * This is a simple example RADOS class, designed to be usable as a + * template for implementing new methods. + * + * Our goal here is to illustrate the interface between the OSD and + * the class and demonstrate what kinds of things a class can do. + * + * Note that any *real* class will probably have a much more + * sophisticated protocol dealing with the in and out data buffers. + * For an example of the model that we've settled on for handling that + * in a clean way, please refer to cls_lock or cls_version for + * relatively simple examples of how the parameter encoding can be + * encoded in a way that allows for forward and backward compatibility + * between client vs class revisions. + */ + +/* + * A quick note about bufferlists: + * + * The bufferlist class allows memory buffers to be concatenated, + * truncated, spliced, "copied," encoded/embedded, and decoded. For + * most operations no actual data is ever copied, making bufferlists + * very convenient for efficiently passing data around. + * + * bufferlist is actually a typedef of buffer::list, and is defined in + * include/buffer.h (and implemented in common/buffer.cc). + */ + +#include <algorithm> +#include <string> +#include <sstream> +#include <cerrno> + +#include "objclass/objclass.h" +#include "osd/osd_types.h" + +using std::string; +using std::ostringstream; + +using ceph::bufferlist; +using ceph::decode; +using ceph::encode; + +CLS_VER(1,0) +CLS_NAME(hello) + +/** + * say hello - a "read" method that does not depend on the object + * + * This is an example of a method that does some computation and + * returns data to the caller, without depending on the local object + * content. + */ +static int say_hello(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + // see if the input data from the client matches what this method + // expects to receive. your class can fill this buffer with what it + // wants. + if (in->length() > 100) + return -EINVAL; + + // we generate our reply + out->append("Hello, "); + if (in->length() == 0) + out->append("world"); + else + out->append(*in); + out->append("!"); + + // this return value will be returned back to the librados caller + return 0; +} + +/** + * record hello - a "write" method that creates an object + * + * This method modifies a local object (in this case, by creating it + * if it doesn't exist). We make multiple write calls (write, + * setxattr) which are accumulated and applied as an atomic + * transaction. + */ +static int record_hello(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + // we can write arbitrary stuff to the ceph-osd debug log. each log + // message is accompanied by an integer log level. smaller is + // "louder". how much of this makes it into the log is controlled + // by the debug_cls option on the ceph-osd, similar to how other log + // levels are controlled. this message, at level 20, will generally + // not be seen by anyone unless debug_cls is set at 20 or higher. + CLS_LOG(20, "in record_hello"); + + // see if the input data from the client matches what this method + // expects to receive. your class can fill this buffer with what it + // wants. 
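/*
Client-side sketch (not part of this change): invoking say_hello through
librados. It assumes a configured cluster connection, an open IoCtx and an
existing object; the function name is hypothetical.

#include <iostream>
#include <string>
#include "include/rados/librados.hpp"

int call_say_hello(librados::IoCtx& ioctx, const std::string& oid) {
  ceph::buffer::list in, out;
  in.append("ceph");                        // reply becomes "Hello, ceph!"
  int r = ioctx.exec(oid, "hello", "say_hello", in, out);
  if (r < 0)
    return r;
  std::cout << std::string(out.c_str(), out.length()) << std::endl;
  return 0;
}
*/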
+ if (in->length() > 100) + return -EINVAL; + + // only say hello to non-existent objects + if (cls_cxx_stat(hctx, NULL, NULL) == 0) + return -EEXIST; + + bufferlist content; + content.append("Hello, "); + if (in->length() == 0) + content.append("world"); + else + content.append(*in); + content.append("!"); + + // create/write the object + int r = cls_cxx_write_full(hctx, &content); + if (r < 0) + return r; + + // also make note of who said it + entity_inst_t origin; + cls_get_request_origin(hctx, &origin); + ostringstream ss; + ss << origin; + bufferlist attrbl; + attrbl.append(ss.str()); + r = cls_cxx_setxattr(hctx, "said_by", &attrbl); + if (r < 0) + return r; + + // For write operations, there are two possible outcomes: + // + // * For a failure, we return a negative error code. The out + // buffer can contain any data that we want, and that data will + // be returned to the caller. No change is made to the object. + // + // * For a success, we must return 0 and *no* data in the out + // buffer. This is because the OSD does not log write result + // codes or output buffers and we need a replayed/resent + // operation (e.g., after a TCP disconnect) to be idempotent. + // + // If a class returns a positive value or puts data in the out + // buffer, the OSD code will ignore it and return 0 to the + // client. + return 0; +} + +static int write_return_data(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + // make some change to the object + bufferlist attrbl; + attrbl.append("bar"); + int r = cls_cxx_setxattr(hctx, "foo", &attrbl); + if (r < 0) + return r; + + if (in->length() > 0) { + // note that if we return anything < 0 (an error), this + // operation/transaction will abort, and the setattr above will + // never happen. however, we *can* return data on error. + out->append("too much input data!"); + return -EINVAL; + } + + // try to return some data. note that this will only reach the client + // if the client has set the CEPH_OSD_FLAG_RETURNVEC flag on the op. + out->append("you might see this"); + + // the client will only see a >0 return value if the RETURNVEC flag is + // set on the op; otherwise they will see 0. + return 42; +} + +static int write_too_much_return_data(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + // make some change to the object + bufferlist attrbl; + attrbl.append("bar"); + int r = cls_cxx_setxattr(hctx, "foo", &attrbl); + if (r < 0) + return r; + + // try to return too much data. this should be enough to exceed + // osd_max_write_op_reply_len, which defaults to a pretty small number. + for (unsigned i=0; i < 10; ++i) { + out->append("you should not see this because it is toooooo long. "); + } + + return 42; +} + +/** + * replay - a "read" method to get a previously recorded hello + * + * This is a read method that will retrieve a previously recorded + * hello statement. + */ +static int replay(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + // read contents out of the on-disk object. our behavior can be a + // function of either the request alone, or the request and the + // on-disk state, depending on whether the RD flag is specified when + // registering the method (see the __cls_init function below). + int r = cls_cxx_read(hctx, 0, 1100, out); + if (r < 0) + return r; + + // note that our return value need not be the length of the returned + // data; it can be whatever value we want: positive, zero or + // negative (this is a read). 
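/*
Client-side sketch (not part of this change) of how the RETURNVEC
behaviour described above can be observed. Per-op return values and output
buffers of write ops only reach the client when the operation is submitted
with librados::OPERATION_RETURNVEC; exact flag and overload availability
depends on the librados version, so treat this as an assumption.

#include "include/rados/librados.hpp"

int call_write_return_data(librados::IoCtx& ioctx, const std::string& oid) {
  ceph::buffer::list in, op_out;
  int op_rval = 0;
  librados::ObjectWriteOperation op;
  op.exec("hello", "write_return_data", in, &op_out, &op_rval);
  int r = ioctx.operate(oid, &op, librados::OPERATION_RETURNVEC);
  // with the flag: op_rval == 42 and op_out holds "you might see this";
  // without it the client simply sees 0 and an empty buffer
  return r < 0 ? r : op_rval;
}
*/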
+ return 0; +} + +/** + * turn_it_to_11 - a "write" method that mutates existing object data + * + * A write method can depend on previous object content (i.e., perform + * a read/modify/write operation). This atomically transitions the + * object state from the old content to the new content. + */ +static int turn_it_to_11(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + // see if the input data from the client matches what this method + // expects to receive. your class can fill this buffer with what it + // wants. + if (in->length() != 0) + return -EINVAL; + + bufferlist previous; + int r = cls_cxx_read(hctx, 0, 1100, &previous); + if (r < 0) + return r; + + std::string str(previous.c_str(), previous.length()); + std::transform(str.begin(), str.end(), str.begin(), ::toupper); + previous.clear(); + previous.append(str); + + // replace previous byte data content (write_full == truncate(0) + write) + r = cls_cxx_write_full(hctx, &previous); + if (r < 0) + return r; + + // record who did it + entity_inst_t origin; + cls_get_request_origin(hctx, &origin); + ostringstream ss; + ss << origin; + bufferlist attrbl; + attrbl.append(ss.str()); + r = cls_cxx_setxattr(hctx, "amplified_by", &attrbl); + if (r < 0) + return r; + + // return value is 0 for success; out buffer is empty. + return 0; +} + +/** + * example method that does not behave + * + * This method is registered as WR but tries to read + */ +static int bad_reader(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + return cls_cxx_read(hctx, 0, 100, out); +} + +/** + * example method that does not behave + * + * This method is registered as RD but tries to write + */ +static int bad_writer(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + return cls_cxx_write_full(hctx, in); +} + + +class PGLSHelloFilter : public PGLSFilter { + string val; +public: + int init(bufferlist::const_iterator& params) override { + try { + decode(xattr, params); + decode(val, params); + } catch (ceph::buffer::error &e) { + return -EINVAL; + } + return 0; + } + + ~PGLSHelloFilter() override {} + bool filter(const hobject_t& obj, + const bufferlist& xattr_data) const override + { + return xattr_data.contents_equal(val.c_str(), val.size()); + } +}; + + +PGLSFilter *hello_filter() +{ + return new PGLSHelloFilter(); +} + + +/** + * initialize class + * + * We do two things here: we register the new class, and then register + * all of the class's methods. + */ +CLS_INIT(hello) +{ + // this log message, at level 0, will always appear in the ceph-osd + // log file. + CLS_LOG(0, "loading cls_hello"); + + cls_handle_t h_class; + cls_method_handle_t h_say_hello; + cls_method_handle_t h_record_hello; + cls_method_handle_t h_replay; + cls_method_handle_t h_write_return_data; + cls_method_handle_t h_writes_dont_return_data; + cls_method_handle_t h_write_too_much_return_data; + cls_method_handle_t h_turn_it_to_11; + cls_method_handle_t h_bad_reader; + cls_method_handle_t h_bad_writer; + + cls_register("hello", &h_class); + + // There are two flags we specify for methods: + // + // RD : whether this method (may) read prior object state + // WR : whether this method (may) write or update the object + // + // A method can be RD, WR, neither, or both. If a method does + // neither, the data it returns to the caller is a function of the + // request and not the object contents. 
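/*
Client-side sketch (not part of this change): the intended call sequence
for the methods registered below, using a write op for WR methods and
exec() for RD methods. Assumes an open IoCtx; error handling abbreviated.

#include "include/rados/librados.hpp"

int hello_demo(librados::IoCtx& ioctx, const std::string& oid) {
  ceph::buffer::list in, out;

  librados::ObjectWriteOperation record;     // WR: creates the object
  record.exec("hello", "record_hello", in);
  int r = ioctx.operate(oid, &record);
  if (r < 0)
    return r;

  librados::ObjectWriteOperation upcase;     // RD | WR: read-modify-write
  upcase.exec("hello", "turn_it_to_11", in);
  r = ioctx.operate(oid, &upcase);
  if (r < 0)
    return r;

  // RD: out ends up holding "HELLO, WORLD!"
  return ioctx.exec(oid, "hello", "replay", in, out);
}
*/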
+ + cls_register_cxx_method(h_class, "say_hello", + CLS_METHOD_RD, + say_hello, &h_say_hello); + cls_register_cxx_method(h_class, "record_hello", + CLS_METHOD_WR | CLS_METHOD_PROMOTE, + record_hello, &h_record_hello); + cls_register_cxx_method(h_class, "write_return_data", + CLS_METHOD_WR, + write_return_data, &h_write_return_data); + // legacy alias for this method for pre-octopus clients + cls_register_cxx_method(h_class, "writes_dont_return_data", + CLS_METHOD_WR, + write_return_data, &h_writes_dont_return_data); + cls_register_cxx_method(h_class, "write_too_much_return_data", + CLS_METHOD_WR, + write_too_much_return_data, &h_write_too_much_return_data); + cls_register_cxx_method(h_class, "replay", + CLS_METHOD_RD, + replay, &h_replay); + + // RD | WR is a read-modify-write method. + cls_register_cxx_method(h_class, "turn_it_to_11", + CLS_METHOD_RD | CLS_METHOD_WR | CLS_METHOD_PROMOTE, + turn_it_to_11, &h_turn_it_to_11); + + // counter-examples + cls_register_cxx_method(h_class, "bad_reader", CLS_METHOD_WR, + bad_reader, &h_bad_reader); + cls_register_cxx_method(h_class, "bad_writer", CLS_METHOD_RD, + bad_writer, &h_bad_writer); + + // A PGLS filter + cls_register_cxx_filter(h_class, "hello", hello_filter); +} diff --git a/src/cls/journal/cls_journal.cc b/src/cls/journal/cls_journal.cc new file mode 100644 index 000000000..1479e1de6 --- /dev/null +++ b/src/cls/journal/cls_journal.cc @@ -0,0 +1,1314 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "include/int_types.h" +#include "include/buffer.h" +#include "include/encoding.h" +#include "common/errno.h" +#include "objclass/objclass.h" +#include "cls/journal/cls_journal_types.h" +#include <errno.h> +#include <map> +#include <string> +#include <sstream> + +CLS_VER(1, 0) +CLS_NAME(journal) + +using std::string; + +using ceph::bufferlist; +using ceph::decode; +using ceph::encode; + +namespace { + +static const uint64_t MAX_KEYS_READ = 64; + +static const std::string HEADER_KEY_ORDER = "order"; +static const std::string HEADER_KEY_SPLAY_WIDTH = "splay_width"; +static const std::string HEADER_KEY_POOL_ID = "pool_id"; +static const std::string HEADER_KEY_MINIMUM_SET = "minimum_set"; +static const std::string HEADER_KEY_ACTIVE_SET = "active_set"; +static const std::string HEADER_KEY_NEXT_TAG_TID = "next_tag_tid"; +static const std::string HEADER_KEY_NEXT_TAG_CLASS = "next_tag_class"; +static const std::string HEADER_KEY_CLIENT_PREFIX = "client_"; +static const std::string HEADER_KEY_TAG_PREFIX = "tag_"; + +std::string to_hex(uint64_t value) { + std::ostringstream oss; + oss << std::setw(16) << std::setfill('0') << std::hex << value; + return oss.str(); +} + +std::string key_from_client_id(const std::string &client_id) { + return HEADER_KEY_CLIENT_PREFIX + client_id; +} + +std::string key_from_tag_tid(uint64_t tag_tid) { + return HEADER_KEY_TAG_PREFIX + to_hex(tag_tid); +} + +uint64_t tag_tid_from_key(const std::string &key) { + std::istringstream iss(key); + uint64_t id; + iss.ignore(HEADER_KEY_TAG_PREFIX.size()) >> std::hex >> id; + return id; +} + +template <typename T> +int read_key(cls_method_context_t hctx, const string &key, T *t, + bool ignore_enoent = false) { + bufferlist bl; + int r = cls_cxx_map_get_val(hctx, key, &bl); + if (r == -ENOENT) { + if (ignore_enoent) { + r = 0; + } + return r; + } else if (r < 0) { + CLS_ERR("failed to get omap key: %s", key.c_str()); + return r; + } + + try { + auto iter = bl.cbegin(); + decode(*t, iter); + } catch (const 
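/*
Aside (not part of this change): all journal header state lives in omap
keys on the header object. Tag keys embed the tid as zero-padded hex so
that lexicographic omap order equals numeric order:

  order                  -> uint8_t
  splay_width            -> uint8_t
  pool_id                -> int64_t
  minimum_set/active_set -> uint64_t
  client_<id>            -> cls::journal::Client
  tag_000000000000002a   -> cls::journal::Tag (tid 42)

A quick round-trip through the helpers defined above:

void tag_key_roundtrip() {
  uint64_t tid = 42;
  std::string key = key_from_tag_tid(tid);  // "tag_000000000000002a"
  ceph_assert(tag_tid_from_key(key) == tid);
}
*/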
ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + return 0; +} + +template <typename T> +int write_key(cls_method_context_t hctx, const string &key, const T &t) { + bufferlist bl; + encode(t, bl); + + int r = cls_cxx_map_set_val(hctx, key, &bl); + if (r < 0) { + CLS_ERR("failed to set omap key: %s", key.c_str()); + return r; + } + return 0; +} + +int remove_key(cls_method_context_t hctx, const string &key) { + int r = cls_cxx_map_remove_key(hctx, key); + if (r < 0 && r != -ENOENT) { + CLS_ERR("failed to remove key: %s", key.c_str()); + return r; + } + return 0; +} + +int expire_tags(cls_method_context_t hctx, const std::string *skip_client_id) { + + std::string skip_client_key; + if (skip_client_id != nullptr) { + skip_client_key = key_from_client_id(*skip_client_id); + } + + uint64_t minimum_tag_tid = std::numeric_limits<uint64_t>::max(); + std::string last_read = ""; + bool more; + do { + std::map<std::string, bufferlist> vals; + int r = cls_cxx_map_get_vals(hctx, last_read, HEADER_KEY_CLIENT_PREFIX, + MAX_KEYS_READ, &vals, &more); + if (r < 0 && r != -ENOENT) { + CLS_ERR("failed to retrieve registered clients: %s", + cpp_strerror(r).c_str()); + return r; + } + + for (auto &val : vals) { + // if we are removing a client, skip its commit positions + if (val.first == skip_client_key) { + continue; + } + + cls::journal::Client client; + auto iter = val.second.cbegin(); + try { + decode(client, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("error decoding registered client: %s", + val.first.c_str()); + return -EIO; + } + + if (client.state == cls::journal::CLIENT_STATE_DISCONNECTED) { + // don't allow a disconnected client to prevent pruning + continue; + } else if (client.commit_position.object_positions.empty()) { + // cannot prune if one or more clients has an empty commit history + return 0; + } + + for (auto object_position : client.commit_position.object_positions) { + minimum_tag_tid = std::min(minimum_tag_tid, object_position.tag_tid); + } + } + if (!vals.empty()) { + last_read = vals.rbegin()->first; + } + } while (more); + + // cannot expire tags if a client hasn't committed yet + if (minimum_tag_tid == std::numeric_limits<uint64_t>::max()) { + return 0; + } + + // compute the minimum in-use tag for each class + std::map<uint64_t, uint64_t> minimum_tag_class_to_tids; + typedef enum { TAG_PASS_CALCULATE_MINIMUMS, + TAG_PASS_SCRUB, + TAG_PASS_DONE } TagPass; + int tag_pass = TAG_PASS_CALCULATE_MINIMUMS; + last_read = HEADER_KEY_TAG_PREFIX; + do { + std::map<std::string, bufferlist> vals; + int r = cls_cxx_map_get_vals(hctx, last_read, HEADER_KEY_TAG_PREFIX, + MAX_KEYS_READ, &vals, &more); + if (r < 0 && r != -ENOENT) { + CLS_ERR("failed to retrieve tags: %s", cpp_strerror(r).c_str()); + return r; + } + + for (auto &val : vals) { + cls::journal::Tag tag; + auto iter = val.second.cbegin(); + try { + decode(tag, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("error decoding tag: %s", val.first.c_str()); + return -EIO; + } + + if (tag.tid != tag_tid_from_key(val.first)) { + CLS_ERR("tag tid mismatched: %s", val.first.c_str()); + return -EINVAL; + } + + if (tag_pass == TAG_PASS_CALCULATE_MINIMUMS) { + minimum_tag_class_to_tids[tag.tag_class] = tag.tid; + } else if (tag_pass == TAG_PASS_SCRUB && + tag.tid < minimum_tag_class_to_tids[tag.tag_class]) { + r = remove_key(hctx, val.first); + if (r < 0) { + return r; + } + } + + if (tag.tid >= minimum_tag_tid) { + // no need to check for tag classes 
beyond this point + vals.clear(); + more = false; + break; + } + } + + if (tag_pass != TAG_PASS_DONE && !more) { + last_read = HEADER_KEY_TAG_PREFIX; + ++tag_pass; + } else if (!vals.empty()) { + last_read = vals.rbegin()->first; + } + } while (tag_pass != TAG_PASS_DONE); + return 0; +} + +int get_client_list_range(cls_method_context_t hctx, + std::set<cls::journal::Client> *clients, + std::string start_after, uint64_t max_return) { + std::string last_read; + if (!start_after.empty()) { + last_read = key_from_client_id(start_after); + } + + std::map<std::string, bufferlist> vals; + bool more; + int r = cls_cxx_map_get_vals(hctx, last_read, HEADER_KEY_CLIENT_PREFIX, + max_return, &vals, &more); + if (r < 0) { + CLS_ERR("failed to retrieve omap values: %s", cpp_strerror(r).c_str()); + return r; + } + + for (std::map<std::string, bufferlist>::iterator it = vals.begin(); + it != vals.end(); ++it) { + try { + auto iter = it->second.cbegin(); + + cls::journal::Client client; + decode(client, iter); + clients->insert(client); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode client '%s': %s", it->first.c_str(), + err.what()); + return -EIO; + } + } + + return 0; +} + +int find_min_commit_position(cls_method_context_t hctx, + cls::journal::ObjectSetPosition *minset) { + int r; + bool valid = false; + std::string start_after = ""; + uint64_t tag_tid = 0, entry_tid = 0; + + while (true) { + std::set<cls::journal::Client> batch; + + r = get_client_list_range(hctx, &batch, start_after, cls::journal::JOURNAL_MAX_RETURN); + if ((r < 0) || batch.empty()) { + break; + } + + start_after = batch.rbegin()->id; + // update the (minimum) commit position from this batch of clients + for (const auto &client : batch) { + if (client.state == cls::journal::CLIENT_STATE_DISCONNECTED) { + continue; + } + const auto &object_set_position = client.commit_position; + if (object_set_position.object_positions.empty()) { + *minset = cls::journal::ObjectSetPosition(); + break; + } + cls::journal::ObjectPosition first = object_set_position.object_positions.front(); + + // least tag_tid (or least entry_tid for matching tag_tid) + if (!valid || (tag_tid > first.tag_tid) || ((tag_tid == first.tag_tid) && (entry_tid > first.entry_tid))) { + tag_tid = first.tag_tid; + entry_tid = first.entry_tid; + *minset = cls::journal::ObjectSetPosition(object_set_position); + valid = true; + } + } + + // got the last batch, we're done + if (batch.size() < cls::journal::JOURNAL_MAX_RETURN) { + break; + } + } + + return r; +} + +} // anonymous namespace + +/** + * Input: + * @param order (uint8_t) - bits to shift to compute the object max size + * @param splay width (uint8_t) - number of active journal objects + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int journal_create(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + uint8_t order; + uint8_t splay_width; + int64_t pool_id; + try { + auto iter = in->cbegin(); + decode(order, iter); + decode(splay_width, iter); + decode(pool_id, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + bufferlist stored_orderbl; + int r = cls_cxx_map_get_val(hctx, HEADER_KEY_ORDER, &stored_orderbl); + if (r >= 0) { + CLS_ERR("journal already exists"); + return -EEXIST; + } else if (r != -ENOENT) { + return r; + } + + r = write_key(hctx, HEADER_KEY_ORDER, order); + if (r < 0) { + return r; + } + + r = write_key(hctx, HEADER_KEY_SPLAY_WIDTH, splay_width); + 
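/*
Client-side sketch (not part of this change): creating a journal header.
The input encodes order, splay_width and pool_id in that sequence, matching
the decode in journal_create above. The registered method name "create" is
assumed from the handler name and may differ; the pool_id convention is
also an assumption.

#include "include/rados/librados.hpp"

int create_journal(librados::IoCtx& ioctx, const std::string& header_oid) {
  using ceph::encode;
  uint8_t order = 24;        // object size = 2^24 bytes
  uint8_t splay_width = 4;   // four concurrently active journal objects
  int64_t pool_id = -1;      // assumption: -1 selects the header's own pool
  ceph::buffer::list in;
  encode(order, in);
  encode(splay_width, in);
  encode(pool_id, in);
  librados::ObjectWriteOperation op;
  op.exec("journal", "create", in);
  return ioctx.operate(header_oid, &op);
}
*/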
if (r < 0) { + return r; + } + + r = write_key(hctx, HEADER_KEY_POOL_ID, pool_id); + if (r < 0) { + return r; + } + + uint64_t object_set = 0; + r = write_key(hctx, HEADER_KEY_ACTIVE_SET, object_set); + if (r < 0) { + return r; + } + + r = write_key(hctx, HEADER_KEY_MINIMUM_SET, object_set); + if (r < 0) { + return r; + } + + uint64_t tag_id = 0; + r = write_key(hctx, HEADER_KEY_NEXT_TAG_TID, tag_id); + if (r < 0) { + return r; + } + + r = write_key(hctx, HEADER_KEY_NEXT_TAG_CLASS, tag_id); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * none + * + * Output: + * order (uint8_t) + * @returns 0 on success, negative error code on failure + */ +int journal_get_order(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint8_t order; + int r = read_key(hctx, HEADER_KEY_ORDER, &order); + if (r < 0) { + return r; + } + + encode(order, *out); + return 0; +} + +/** + * Input: + * none + * + * Output: + * splay_width (uint8_t) + * @returns 0 on success, negative error code on failure + */ +int journal_get_splay_width(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint8_t splay_width; + int r = read_key(hctx, HEADER_KEY_SPLAY_WIDTH, &splay_width); + if (r < 0) { + return r; + } + + encode(splay_width, *out); + return 0; +} + +/** + * Input: + * none + * + * Output: + * pool_id (int64_t) + * @returns 0 on success, negative error code on failure + */ +int journal_get_pool_id(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + int64_t pool_id = 0; + int r = read_key(hctx, HEADER_KEY_POOL_ID, &pool_id); + if (r < 0) { + return r; + } + + encode(pool_id, *out); + return 0; +} + +/** + * Input: + * none + * + * Output: + * object set (uint64_t) + * @returns 0 on success, negative error code on failure + */ +int journal_get_minimum_set(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint64_t minimum_set; + int r = read_key(hctx, HEADER_KEY_MINIMUM_SET, &minimum_set); + if (r < 0) { + return r; + } + + encode(minimum_set, *out); + return 0; +} + +/** + * Input: + * @param object set (uint64_t) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int journal_set_minimum_set(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint64_t object_set; + try { + auto iter = in->cbegin(); + decode(object_set, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + uint64_t current_active_set; + int r = read_key(hctx, HEADER_KEY_ACTIVE_SET, &current_active_set); + if (r < 0) { + return r; + } + + if (current_active_set < object_set) { + CLS_LOG(10, "active object set earlier than minimum: %" PRIu64 + " < %" PRIu64, current_active_set, object_set); + return -EINVAL; + } + + uint64_t current_minimum_set; + r = read_key(hctx, HEADER_KEY_MINIMUM_SET, &current_minimum_set); + if (r < 0) { + return r; + } + + if (object_set == current_minimum_set) { + return 0; + } else if (object_set < current_minimum_set) { + CLS_ERR("object number earlier than current object: %" PRIu64 " < %" PRIu64, + object_set, current_minimum_set); + return -ESTALE; + } + + r = write_key(hctx, HEADER_KEY_MINIMUM_SET, object_set); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * none + * + * Output: + * object set (uint64_t) + * @returns 0 on success, negative error code on failure + */ +int journal_get_active_set(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint64_t active_set; + int r = read_key(hctx, 
HEADER_KEY_ACTIVE_SET, &active_set); + if (r < 0) { + return r; + } + + encode(active_set, *out); + return 0; +} + +/** + * Input: + * @param object set (uint64_t) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int journal_set_active_set(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint64_t object_set; + try { + auto iter = in->cbegin(); + decode(object_set, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + uint64_t current_minimum_set; + int r = read_key(hctx, HEADER_KEY_MINIMUM_SET, &current_minimum_set); + if (r < 0) { + return r; + } + + if (current_minimum_set > object_set) { + CLS_ERR("minimum object set later than active: %" PRIu64 + " > %" PRIu64, current_minimum_set, object_set); + return -EINVAL; + } + + uint64_t current_active_set; + r = read_key(hctx, HEADER_KEY_ACTIVE_SET, &current_active_set); + if (r < 0) { + return r; + } + + if (object_set == current_active_set) { + return 0; + } else if (object_set < current_active_set) { + CLS_ERR("object number earlier than current object: %" PRIu64 " < %" PRIu64, + object_set, current_active_set); + return -ESTALE; + } + + r = write_key(hctx, HEADER_KEY_ACTIVE_SET, object_set); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param id (string) - unique client id + * + * Output: + * cls::journal::Client + * @returns 0 on success, negative error code on failure + */ +int journal_get_client(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string id; + try { + auto iter = in->cbegin(); + decode(id, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + std::string key(key_from_client_id(id)); + cls::journal::Client client; + int r = read_key(hctx, key, &client); + if (r < 0) { + return r; + } + + encode(client, *out); + return 0; +} + +/** + * Input: + * @param id (string) - unique client id + * @param data (bufferlist) - opaque data associated to client + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int journal_client_register(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string id; + bufferlist data; + try { + auto iter = in->cbegin(); + decode(id, iter); + decode(data, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + uint8_t order; + int r = read_key(hctx, HEADER_KEY_ORDER, &order); + if (r < 0) { + return r; + } + + std::string key(key_from_client_id(id)); + bufferlist stored_clientbl; + r = cls_cxx_map_get_val(hctx, key, &stored_clientbl); + if (r >= 0) { + CLS_ERR("duplicate client id: %s", id.c_str()); + return -EEXIST; + } else if (r != -ENOENT) { + return r; + } + + cls::journal::ObjectSetPosition minset; + r = find_min_commit_position(hctx, &minset); + if (r < 0) + return r; + + cls::journal::Client client(id, data, minset); + r = write_key(hctx, key, client); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param id (string) - unique client id + * @param data (bufferlist) - opaque data associated to client + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int journal_client_update_data(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string id; + bufferlist data; + try { + auto iter = in->cbegin(); + decode(id, iter); + decode(data, iter); + } catch 
(const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + std::string key(key_from_client_id(id)); + cls::journal::Client client; + int r = read_key(hctx, key, &client); + if (r < 0) { + return r; + } + + client.data = data; + r = write_key(hctx, key, client); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param id (string) - unique client id + * @param state (uint8_t) - client state + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int journal_client_update_state(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string id; + cls::journal::ClientState state; + bufferlist data; + try { + auto iter = in->cbegin(); + decode(id, iter); + uint8_t state_raw; + decode(state_raw, iter); + state = static_cast<cls::journal::ClientState>(state_raw); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + std::string key(key_from_client_id(id)); + cls::journal::Client client; + int r = read_key(hctx, key, &client); + if (r < 0) { + return r; + } + + client.state = state; + r = write_key(hctx, key, client); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param id (string) - unique client id + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int journal_client_unregister(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string id; + try { + auto iter = in->cbegin(); + decode(id, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + std::string key(key_from_client_id(id)); + bufferlist bl; + int r = cls_cxx_map_get_val(hctx, key, &bl); + if (r < 0) { + CLS_ERR("client is not registered: %s", id.c_str()); + return r; + } + + r = cls_cxx_map_remove_key(hctx, key); + if (r < 0) { + CLS_ERR("failed to remove omap key: %s", key.c_str()); + return r; + } + + // prune expired tags + r = expire_tags(hctx, &id); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param client_id (uint64_t) - unique client id + * @param commit_position (ObjectSetPosition) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int journal_client_commit(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string id; + cls::journal::ObjectSetPosition commit_position; + try { + auto iter = in->cbegin(); + decode(id, iter); + decode(commit_position, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + uint8_t splay_width; + int r = read_key(hctx, HEADER_KEY_SPLAY_WIDTH, &splay_width); + if (r < 0) { + return r; + } + if (commit_position.object_positions.size() > splay_width) { + CLS_ERR("too many object positions"); + return -EINVAL; + } + + std::string key(key_from_client_id(id)); + cls::journal::Client client; + r = read_key(hctx, key, &client); + if (r < 0) { + return r; + } + + if (client.commit_position == commit_position) { + return 0; + } + + client.commit_position = commit_position; + r = write_key(hctx, key, client); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param start_after (string) + * @param max_return (uint64_t) + * + * Output: + * clients (set<cls::journal::Client>) - collection of registered clients + * @returns 0 on success, negative error code on failure + */ +int 
journal_client_list(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string start_after; + uint64_t max_return; + try { + auto iter = in->cbegin(); + decode(start_after, iter); + decode(max_return, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + std::set<cls::journal::Client> clients; + int r = get_client_list_range(hctx, &clients, start_after, max_return); + if (r < 0) + return r; + + encode(clients, *out); + return 0; +} + +/** + * Input: + * none + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int journal_get_next_tag_tid(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint64_t tag_tid; + int r = read_key(hctx, HEADER_KEY_NEXT_TAG_TID, &tag_tid); + if (r < 0) { + return r; + } + + encode(tag_tid, *out); + return 0; +} + +/** + * Input: + * @param tag_tid (uint64_t) + * + * Output: + * cls::journal::Tag + * @returns 0 on success, negative error code on failure + */ +int journal_get_tag(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint64_t tag_tid; + try { + auto iter = in->cbegin(); + decode(tag_tid, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + std::string key(key_from_tag_tid(tag_tid)); + cls::journal::Tag tag; + int r = read_key(hctx, key, &tag); + if (r < 0) { + return r; + } + + encode(tag, *out); + return 0; +} + +/** + * Input: + * @param tag_tid (uint64_t) + * @param tag_class (uint64_t) + * @param data (bufferlist) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int journal_tag_create(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint64_t tag_tid; + uint64_t tag_class; + bufferlist data; + try { + auto iter = in->cbegin(); + decode(tag_tid, iter); + decode(tag_class, iter); + decode(data, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + std::string key(key_from_tag_tid(tag_tid)); + bufferlist stored_tag_bl; + int r = cls_cxx_map_get_val(hctx, key, &stored_tag_bl); + if (r >= 0) { + CLS_ERR("duplicate tag id: %" PRIu64, tag_tid); + return -EEXIST; + } else if (r != -ENOENT) { + return r; + } + + // verify tag tid ordering + uint64_t next_tag_tid; + r = read_key(hctx, HEADER_KEY_NEXT_TAG_TID, &next_tag_tid); + if (r < 0) { + return r; + } + if (tag_tid != next_tag_tid) { + CLS_LOG(5, "out-of-order tag sequence: %" PRIu64, tag_tid); + return -ESTALE; + } + + uint64_t next_tag_class; + r = read_key(hctx, HEADER_KEY_NEXT_TAG_CLASS, &next_tag_class); + if (r < 0) { + return r; + } + + if (tag_class == cls::journal::Tag::TAG_CLASS_NEW) { + // allocate a new tag class + tag_class = next_tag_class; + r = write_key(hctx, HEADER_KEY_NEXT_TAG_CLASS, tag_class + 1); + if (r < 0) { + return r; + } + } else { + // verify tag class range + if (tag_class >= next_tag_class) { + CLS_ERR("out-of-sequence tag class: %" PRIu64, tag_class); + return -EINVAL; + } + } + + // prune expired tags + r = expire_tags(hctx, nullptr); + if (r < 0) { + return r; + } + + // update tag tid sequence + r = write_key(hctx, HEADER_KEY_NEXT_TAG_TID, tag_tid + 1); + if (r < 0) { + return r; + } + + // write tag structure + cls::journal::Tag tag(tag_tid, tag_class, data); + key = key_from_tag_tid(tag_tid); + r = write_key(hctx, key, tag); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: 
+ * @param start_after_tag_tid (uint64_t) - first tag tid + * @param max_return (uint64_t) - max tags to return + * @param client_id (std::string) - client id filter + * @param tag_class (boost::optional<uint64_t>) - optional tag class filter + * + * Output: + * std::set<cls::journal::Tag> - collection of tags + * @returns 0 on success, negative error code on failure + */ +int journal_tag_list(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint64_t start_after_tag_tid; + uint64_t max_return; + std::string client_id; + boost::optional<uint64_t> tag_class(0); + + // handle compiler false positive about use-before-init + tag_class = boost::none; + try { + auto iter = in->cbegin(); + decode(start_after_tag_tid, iter); + decode(max_return, iter); + decode(client_id, iter); + decode(tag_class, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + // calculate the minimum tag within client's commit position + uint64_t minimum_tag_tid = std::numeric_limits<uint64_t>::max(); + cls::journal::Client client; + int r = read_key(hctx, key_from_client_id(client_id), &client); + if (r < 0) { + return r; + } + + for (auto object_position : client.commit_position.object_positions) { + minimum_tag_tid = std::min(minimum_tag_tid, object_position.tag_tid); + } + + // compute minimum tags in use per-class + std::set<cls::journal::Tag> tags; + std::map<uint64_t, uint64_t> minimum_tag_class_to_tids; + typedef enum { TAG_PASS_CALCULATE_MINIMUMS, + TAG_PASS_LIST, + TAG_PASS_DONE } TagPass; + int tag_pass = (minimum_tag_tid == std::numeric_limits<uint64_t>::max() ? + TAG_PASS_LIST : TAG_PASS_CALCULATE_MINIMUMS); + std::string last_read = HEADER_KEY_TAG_PREFIX; + do { + std::map<std::string, bufferlist> vals; + bool more; + r = cls_cxx_map_get_vals(hctx, last_read, HEADER_KEY_TAG_PREFIX, + MAX_KEYS_READ, &vals, &more); + if (r < 0 && r != -ENOENT) { + CLS_ERR("failed to retrieve tags: %s", cpp_strerror(r).c_str()); + return r; + } + + for (auto &val : vals) { + cls::journal::Tag tag; + auto iter = val.second.cbegin(); + try { + decode(tag, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("error decoding tag: %s", val.first.c_str()); + return -EIO; + } + + if (tag_pass == TAG_PASS_CALCULATE_MINIMUMS) { + minimum_tag_class_to_tids[tag.tag_class] = tag.tid; + + // completed calculation of tag class minimums + if (tag.tid >= minimum_tag_tid) { + vals.clear(); + more = false; + break; + } + } else if (tag_pass == TAG_PASS_LIST) { + if (start_after_tag_tid != 0 && tag.tid <= start_after_tag_tid) { + continue; + } + + if (tag.tid >= minimum_tag_class_to_tids[tag.tag_class] && + (!tag_class || *tag_class == tag.tag_class)) { + tags.insert(tag); + } + if (tags.size() >= max_return) { + tag_pass = TAG_PASS_DONE; + } + } + } + + if (tag_pass != TAG_PASS_DONE && !more) { + last_read = HEADER_KEY_TAG_PREFIX; + ++tag_pass; + } else if (!vals.empty()) { + last_read = vals.rbegin()->first; + } + } while (tag_pass != TAG_PASS_DONE); + + encode(tags, *out); + return 0; +} + +/** + * Input: + * @param soft_max_size (uint64_t) + * + * Output: + * @returns 0 if object size less than max, negative error code otherwise + */ +int journal_object_guard_append(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint64_t soft_max_size; + try { + auto iter = in->cbegin(); + decode(soft_max_size, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + 
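// [editor's note -- illustrative usage sketch, not part of this patch]
// guard_append is meant to be stacked ahead of the payload write in a single
// compound operation, so the whole op fails fast with -EOVERFLOW once the
// object reaches its soft size limit and the writer can roll over to the
// next journal data object. Hypothetical client-side usage:
//
//   librados::ObjectWriteOperation op;
//   cls::journal::client::guard_append(&op, soft_max_size);
//   op.append(entry_bl);              // payload lands only if the guard passes
//   int r = ioctx.operate(oid, &op);  // r == -EOVERFLOW => object is full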
return -EINVAL; + } + + uint64_t size; + time_t mtime; + int r = cls_cxx_stat(hctx, &size, &mtime); + if (r == -ENOENT) { + return 0; + } else if (r < 0) { + CLS_ERR("failed to stat object: %s", cpp_strerror(r).c_str()); + return r; + } + + if (size >= soft_max_size) { + CLS_LOG(5, "journal object full: %" PRIu64 " >= %" PRIu64, + size, soft_max_size); + return -EOVERFLOW; + } + return 0; +} + +/** + * Input: + * @param soft_max_size (uint64_t) + * @param data (bufferlist) data to append + * + * Output: + * @returns 0 on success, negative error code on failure + * @returns -EOVERFLOW if object size is equal or more than soft_max_size + */ +int journal_object_append(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint64_t soft_max_size; + bufferlist data; + try { + auto iter = in->cbegin(); + decode(soft_max_size, iter); + decode(data, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("failed to decode input parameters: %s", err.what()); + return -EINVAL; + } + + uint64_t size = 0; + int r = cls_cxx_stat(hctx, &size, nullptr); + if (r < 0 && r != -ENOENT) { + CLS_ERR("append: failed to stat object: %s", cpp_strerror(r).c_str()); + return r; + } + + if (size >= soft_max_size) { + CLS_LOG(5, "journal object full: %" PRIu64 " >= %" PRIu64, + size, soft_max_size); + return -EOVERFLOW; + } + + auto offset = size; + r = cls_cxx_write2(hctx, offset, data.length(), &data, + CEPH_OSD_OP_FLAG_FADVISE_DONTNEED); + if (r < 0) { + CLS_ERR("append: error when writing: %s", cpp_strerror(r).c_str()); + return r; + } + + if (cls_get_min_compatible_client(hctx) < ceph_release_t::octopus) { + return 0; + } + + auto min_alloc_size = cls_get_osd_min_alloc_size(hctx); + if (min_alloc_size == 0) { + min_alloc_size = 8; + } + + auto stripe_width = cls_get_pool_stripe_width(hctx); + if (stripe_width > 0) { + min_alloc_size = round_up_to(min_alloc_size, stripe_width); + } + + CLS_LOG(20, "pad to %" PRIu64, min_alloc_size); + + auto end = offset + data.length(); + auto new_end = round_up_to(end, min_alloc_size); + if (new_end == end) { + return 0; + } + + r = cls_cxx_truncate(hctx, new_end); + if (r < 0) { + CLS_ERR("append: error when truncating: %s", cpp_strerror(r).c_str()); + return r; + } + + return 0; +} + +CLS_INIT(journal) +{ + CLS_LOG(20, "Loaded journal class!"); + + cls_handle_t h_class; + cls_method_handle_t h_journal_create; + cls_method_handle_t h_journal_get_order; + cls_method_handle_t h_journal_get_splay_width; + cls_method_handle_t h_journal_get_pool_id; + cls_method_handle_t h_journal_get_minimum_set; + cls_method_handle_t h_journal_set_minimum_set; + cls_method_handle_t h_journal_get_active_set; + cls_method_handle_t h_journal_set_active_set; + cls_method_handle_t h_journal_get_client; + cls_method_handle_t h_journal_client_register; + cls_method_handle_t h_journal_client_update_data; + cls_method_handle_t h_journal_client_update_state; + cls_method_handle_t h_journal_client_unregister; + cls_method_handle_t h_journal_client_commit; + cls_method_handle_t h_journal_client_list; + cls_method_handle_t h_journal_get_next_tag_tid; + cls_method_handle_t h_journal_get_tag; + cls_method_handle_t h_journal_tag_create; + cls_method_handle_t h_journal_tag_list; + cls_method_handle_t h_journal_object_guard_append; + cls_method_handle_t h_journal_object_append; + + cls_register("journal", &h_class); + + /// methods for journal.$journal_id objects + cls_register_cxx_method(h_class, "create", + CLS_METHOD_RD | CLS_METHOD_WR, + journal_create, &h_journal_create); + 
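// [editor's note -- illustrative sketch, not part of this patch]
// each cls_register_cxx_method() call below exposes its handler as
// "journal.<method>" and declares whether it reads (CLS_METHOD_RD) and/or
// writes (CLS_METHOD_WR) object state. A client reaches a method through
// ObjectOperation::exec() with the arguments encoded into a bufferlist,
// e.g. with hypothetical values:
//
//   bufferlist inbl;
//   encode(uint8_t(24), inbl);   // order: 2^24 = 16MB data objects
//   encode(uint8_t(4), inbl);    // splay width
//   encode(int64_t(-1), inbl);   // data pool id
//   librados::ObjectWriteOperation op;
//   op.exec("journal", "create", inbl);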
cls_register_cxx_method(h_class, "get_order", + CLS_METHOD_RD, + journal_get_order, &h_journal_get_order); + cls_register_cxx_method(h_class, "get_splay_width", + CLS_METHOD_RD, + journal_get_splay_width, &h_journal_get_splay_width); + cls_register_cxx_method(h_class, "get_pool_id", + CLS_METHOD_RD, + journal_get_pool_id, &h_journal_get_pool_id); + cls_register_cxx_method(h_class, "get_minimum_set", + CLS_METHOD_RD, + journal_get_minimum_set, + &h_journal_get_minimum_set); + cls_register_cxx_method(h_class, "set_minimum_set", + CLS_METHOD_RD | CLS_METHOD_WR, + journal_set_minimum_set, + &h_journal_set_minimum_set); + cls_register_cxx_method(h_class, "get_active_set", + CLS_METHOD_RD, + journal_get_active_set, + &h_journal_get_active_set); + cls_register_cxx_method(h_class, "set_active_set", + CLS_METHOD_RD | CLS_METHOD_WR, + journal_set_active_set, + &h_journal_set_active_set); + + cls_register_cxx_method(h_class, "get_client", + CLS_METHOD_RD, + journal_get_client, &h_journal_get_client); + cls_register_cxx_method(h_class, "client_register", + CLS_METHOD_RD | CLS_METHOD_WR, + journal_client_register, &h_journal_client_register); + cls_register_cxx_method(h_class, "client_update_data", + CLS_METHOD_RD | CLS_METHOD_WR, + journal_client_update_data, + &h_journal_client_update_data); + cls_register_cxx_method(h_class, "client_update_state", + CLS_METHOD_RD | CLS_METHOD_WR, + journal_client_update_state, + &h_journal_client_update_state); + cls_register_cxx_method(h_class, "client_unregister", + CLS_METHOD_RD | CLS_METHOD_WR, + journal_client_unregister, + &h_journal_client_unregister); + cls_register_cxx_method(h_class, "client_commit", + CLS_METHOD_RD | CLS_METHOD_WR, + journal_client_commit, &h_journal_client_commit); + cls_register_cxx_method(h_class, "client_list", + CLS_METHOD_RD, + journal_client_list, &h_journal_client_list); + + cls_register_cxx_method(h_class, "get_next_tag_tid", + CLS_METHOD_RD, + journal_get_next_tag_tid, + &h_journal_get_next_tag_tid); + cls_register_cxx_method(h_class, "get_tag", + CLS_METHOD_RD, + journal_get_tag, &h_journal_get_tag); + cls_register_cxx_method(h_class, "tag_create", + CLS_METHOD_RD | CLS_METHOD_WR, + journal_tag_create, &h_journal_tag_create); + cls_register_cxx_method(h_class, "tag_list", + CLS_METHOD_RD, + journal_tag_list, &h_journal_tag_list); + + /// methods for journal_data.$journal_id.$object_id objects + cls_register_cxx_method(h_class, "guard_append", + CLS_METHOD_RD | CLS_METHOD_WR, + journal_object_guard_append, + &h_journal_object_guard_append); + cls_register_cxx_method(h_class, "append", CLS_METHOD_RD | CLS_METHOD_WR, + journal_object_append, &h_journal_object_append); +} diff --git a/src/cls/journal/cls_journal_client.cc b/src/cls/journal/cls_journal_client.cc new file mode 100644 index 000000000..88f7ddb1f --- /dev/null +++ b/src/cls/journal/cls_journal_client.cc @@ -0,0 +1,507 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "cls/journal/cls_journal_client.h" +#include "include/rados/librados.hpp" +#include "include/buffer.h" +#include "include/Context.h" +#include "common/Cond.h" +#include <errno.h> + +namespace cls { +namespace journal { +namespace client { +using ceph::encode; +using ceph::decode; + +namespace { + +struct C_AioExec : public Context { + librados::IoCtx &ioctx; + std::string oid; + + C_AioExec(librados::IoCtx &_ioctx, const std::string &_oid) + : ioctx(_ioctx), oid(_oid) { + } + + static void rados_callback(rados_completion_t c, void *arg) { + 
Context *ctx = reinterpret_cast<Context *>(arg); + ctx->complete(rados_aio_get_return_value(c)); + } +}; + +struct C_ClientList : public C_AioExec { + std::set<cls::journal::Client> *clients; + Context *on_finish; + bufferlist outbl; + + C_ClientList(librados::IoCtx &_ioctx, const std::string &_oid, + std::set<cls::journal::Client> *_clients, + Context *_on_finish) + : C_AioExec(_ioctx, _oid), clients(_clients), on_finish(_on_finish) {} + + void send(const std::string &start_after) { + bufferlist inbl; + encode(start_after, inbl); + encode(JOURNAL_MAX_RETURN, inbl); + + librados::ObjectReadOperation op; + op.exec("journal", "client_list", inbl); + + outbl.clear(); + librados::AioCompletion *rados_completion = + librados::Rados::aio_create_completion(this, rados_callback); + int r = ioctx.aio_operate(oid, rados_completion, &op, &outbl); + ceph_assert(r == 0); + rados_completion->release(); + } + + void complete(int r) override { + if (r < 0) { + finish(r); + return; + } + + try { + auto iter = outbl.cbegin(); + std::set<cls::journal::Client> partial_clients; + decode(partial_clients, iter); + + std::string start_after; + if (!partial_clients.empty()) { + start_after = partial_clients.rbegin()->id; + clients->insert(partial_clients.begin(), partial_clients.end()); + } + + if (partial_clients.size() < JOURNAL_MAX_RETURN) { + finish(0); + } else { + send(start_after); + } + } catch (const buffer::error &err) { + finish(-EBADMSG); + } + } + + void finish(int r) override { + on_finish->complete(r); + delete this; + } +}; + +struct C_ImmutableMetadata : public C_AioExec { + uint8_t *order; + uint8_t *splay_width; + int64_t *pool_id; + Context *on_finish; + bufferlist outbl; + + C_ImmutableMetadata(librados::IoCtx &_ioctx, const std::string &_oid, + uint8_t *_order, uint8_t *_splay_width, + int64_t *_pool_id, Context *_on_finish) + : C_AioExec(_ioctx, _oid), order(_order), splay_width(_splay_width), + pool_id(_pool_id), on_finish(_on_finish) { + } + + void send() { + librados::ObjectReadOperation op; + bufferlist inbl; + op.exec("journal", "get_order", inbl); + op.exec("journal", "get_splay_width", inbl); + op.exec("journal", "get_pool_id", inbl); + + librados::AioCompletion *rados_completion = + librados::Rados::aio_create_completion(this, rados_callback); + int r = ioctx.aio_operate(oid, rados_completion, &op, &outbl); + ceph_assert(r == 0); + rados_completion->release(); + } + + void finish(int r) override { + if (r == 0) { + try { + auto iter = outbl.cbegin(); + decode(*order, iter); + decode(*splay_width, iter); + decode(*pool_id, iter); + } catch (const buffer::error &err) { + r = -EBADMSG; + } + } + on_finish->complete(r); + } +}; + +struct C_MutableMetadata : public C_AioExec { + uint64_t *minimum_set; + uint64_t *active_set; + C_ClientList *client_list; + bufferlist outbl; + + C_MutableMetadata(librados::IoCtx &_ioctx, const std::string &_oid, + uint64_t *_minimum_set, uint64_t *_active_set, + C_ClientList *_client_list) + : C_AioExec(_ioctx, _oid), minimum_set(_minimum_set), + active_set(_active_set), client_list(_client_list) {} + + void send() { + librados::ObjectReadOperation op; + bufferlist inbl; + op.exec("journal", "get_minimum_set", inbl); + op.exec("journal", "get_active_set", inbl); + + librados::AioCompletion *rados_completion = + librados::Rados::aio_create_completion(this, rados_callback); + int r = ioctx.aio_operate(oid, rados_completion, &op, &outbl); + ceph_assert(r == 0); + rados_completion->release(); + } + + void finish(int r) override { + if (r == 0) { + try { + auto 
iter = outbl.cbegin(); + decode(*minimum_set, iter); + decode(*active_set, iter); + client_list->send(""); + } catch (const buffer::error &err) { + r = -EBADMSG; + } + } + if (r < 0) { + client_list->complete(r); + } + } +}; + + +} // anonymous namespace + +void create(librados::ObjectWriteOperation *op, + uint8_t order, uint8_t splay, int64_t pool_id) { + bufferlist bl; + encode(order, bl); + encode(splay, bl); + encode(pool_id, bl); + + op->exec("journal", "create", bl); +} + +int create(librados::IoCtx &ioctx, const std::string &oid, uint8_t order, + uint8_t splay, int64_t pool_id) { + librados::ObjectWriteOperation op; + create(&op, order, splay, pool_id); + + int r = ioctx.operate(oid, &op); + if (r < 0) { + return r; + } + return 0; +} + +void get_immutable_metadata(librados::IoCtx &ioctx, const std::string &oid, + uint8_t *order, uint8_t *splay_width, + int64_t *pool_id, Context *on_finish) { + C_ImmutableMetadata *metadata = new C_ImmutableMetadata(ioctx, oid, order, + splay_width, pool_id, + on_finish); + metadata->send(); +} + +void get_mutable_metadata(librados::IoCtx &ioctx, const std::string &oid, + uint64_t *minimum_set, uint64_t *active_set, + std::set<cls::journal::Client> *clients, + Context *on_finish) { + C_ClientList *client_list = new C_ClientList(ioctx, oid, clients, on_finish); + C_MutableMetadata *metadata = new C_MutableMetadata( + ioctx, oid, minimum_set, active_set, client_list); + metadata->send(); +} + +void set_minimum_set(librados::ObjectWriteOperation *op, uint64_t object_set) { + bufferlist bl; + encode(object_set, bl); + op->exec("journal", "set_minimum_set", bl); +} + +void set_active_set(librados::ObjectWriteOperation *op, uint64_t object_set) { + bufferlist bl; + encode(object_set, bl); + op->exec("journal", "set_active_set", bl); +} + +int get_client(librados::IoCtx &ioctx, const std::string &oid, + const std::string &id, cls::journal::Client *client) { + librados::ObjectReadOperation op; + get_client_start(&op, id); + + bufferlist out_bl; + int r = ioctx.operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + r = get_client_finish(&iter, client); + if (r < 0) { + return r; + } + return 0; +} + +void get_client_start(librados::ObjectReadOperation *op, + const std::string &id) { + bufferlist bl; + encode(id, bl); + op->exec("journal", "get_client", bl); +} + +int get_client_finish(bufferlist::const_iterator *iter, + cls::journal::Client *client) { + try { + decode(*client, *iter); + } catch (const buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int client_register(librados::IoCtx &ioctx, const std::string &oid, + const std::string &id, const bufferlist &data) { + librados::ObjectWriteOperation op; + client_register(&op, id, data); + return ioctx.operate(oid, &op); +} + +void client_register(librados::ObjectWriteOperation *op, + const std::string &id, const bufferlist &data) { + bufferlist bl; + encode(id, bl); + encode(data, bl); + op->exec("journal", "client_register", bl); +} + +int client_update_data(librados::IoCtx &ioctx, const std::string &oid, + const std::string &id, const bufferlist &data) { + librados::ObjectWriteOperation op; + client_update_data(&op, id, data); + return ioctx.operate(oid, &op); +} + +void client_update_data(librados::ObjectWriteOperation *op, + const std::string &id, const bufferlist &data) { + bufferlist bl; + encode(id, bl); + encode(data, bl); + op->exec("journal", "client_update_data", bl); +} + +int client_update_state(librados::IoCtx &ioctx, const std::string &oid, + 
const std::string &id, cls::journal::ClientState state) { + librados::ObjectWriteOperation op; + client_update_state(&op, id, state); + return ioctx.operate(oid, &op); +} + +void client_update_state(librados::ObjectWriteOperation *op, + const std::string &id, + cls::journal::ClientState state) { + bufferlist bl; + encode(id, bl); + encode(static_cast<uint8_t>(state), bl); + op->exec("journal", "client_update_state", bl); +} + +int client_unregister(librados::IoCtx &ioctx, const std::string &oid, + const std::string &id) { + librados::ObjectWriteOperation op; + client_unregister(&op, id); + return ioctx.operate(oid, &op); +} + +void client_unregister(librados::ObjectWriteOperation *op, + const std::string &id) { + + bufferlist bl; + encode(id, bl); + op->exec("journal", "client_unregister", bl); +} + +void client_commit(librados::ObjectWriteOperation *op, const std::string &id, + const cls::journal::ObjectSetPosition &commit_position) { + bufferlist bl; + encode(id, bl); + encode(commit_position, bl); + op->exec("journal", "client_commit", bl); +} + +int client_list(librados::IoCtx &ioctx, const std::string &oid, + std::set<cls::journal::Client> *clients) { + C_SaferCond cond; + client_list(ioctx, oid, clients, &cond); + return cond.wait(); +} + +void client_list(librados::IoCtx &ioctx, const std::string &oid, + std::set<cls::journal::Client> *clients, Context *on_finish) { + C_ClientList *client_list = new C_ClientList(ioctx, oid, clients, on_finish); + client_list->send(""); +} + +int get_next_tag_tid(librados::IoCtx &ioctx, const std::string &oid, + uint64_t *tag_tid) { + librados::ObjectReadOperation op; + get_next_tag_tid_start(&op); + + bufferlist out_bl; + int r = ioctx.operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + r = get_next_tag_tid_finish(&iter, tag_tid); + if (r < 0) { + return r; + } + return 0; +} + +void get_next_tag_tid_start(librados::ObjectReadOperation *op) { + bufferlist bl; + op->exec("journal", "get_next_tag_tid", bl); +} + +int get_next_tag_tid_finish(bufferlist::const_iterator *iter, + uint64_t *tag_tid) { + try { + decode(*tag_tid, *iter); + } catch (const buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_tag(librados::IoCtx &ioctx, const std::string &oid, + uint64_t tag_tid, cls::journal::Tag *tag) { + librados::ObjectReadOperation op; + get_tag_start(&op, tag_tid); + + bufferlist out_bl; + int r = ioctx.operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + r = get_tag_finish(&iter, tag); + if (r < 0) { + return r; + } + return 0; +} + +void get_tag_start(librados::ObjectReadOperation *op, + uint64_t tag_tid) { + bufferlist bl; + encode(tag_tid, bl); + op->exec("journal", "get_tag", bl); +} + +int get_tag_finish(bufferlist::const_iterator *iter, cls::journal::Tag *tag) { + try { + decode(*tag, *iter); + } catch (const buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int tag_create(librados::IoCtx &ioctx, const std::string &oid, + uint64_t tag_tid, uint64_t tag_class, + const bufferlist &data) { + librados::ObjectWriteOperation op; + tag_create(&op, tag_tid, tag_class, data); + return ioctx.operate(oid, &op); +} + +void tag_create(librados::ObjectWriteOperation *op, uint64_t tag_tid, + uint64_t tag_class, const bufferlist &data) { + bufferlist bl; + encode(tag_tid, bl); + encode(tag_class, bl); + encode(data, bl); + op->exec("journal", "tag_create", bl); +} + +int tag_list(librados::IoCtx &ioctx, const std::string &oid, + const std::string 
&client_id, boost::optional<uint64_t> tag_class, + std::set<cls::journal::Tag> *tags) { + tags->clear(); + uint64_t start_after_tag_tid = 0; + while (true) { + librados::ObjectReadOperation op; + tag_list_start(&op, start_after_tag_tid, JOURNAL_MAX_RETURN, client_id, + tag_class); + + bufferlist out_bl; + int r = ioctx.operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + std::set<cls::journal::Tag> decode_tags; + r = tag_list_finish(&iter, &decode_tags); + if (r < 0) { + return r; + } + + tags->insert(decode_tags.begin(), decode_tags.end()); + if (decode_tags.size() < JOURNAL_MAX_RETURN) { + break; + } + // advance the cursor past the last tag seen; without this, a journal + // holding JOURNAL_MAX_RETURN or more matching tags would re-request the + // same page forever + start_after_tag_tid = decode_tags.rbegin()->tid; + } + return 0; +} + +void tag_list_start(librados::ObjectReadOperation *op, + uint64_t start_after_tag_tid, uint64_t max_return, + const std::string &client_id, + boost::optional<uint64_t> tag_class) { + bufferlist bl; + encode(start_after_tag_tid, bl); + encode(max_return, bl); + encode(client_id, bl); + encode(tag_class, bl); + op->exec("journal", "tag_list", bl); +} + +int tag_list_finish(bufferlist::const_iterator *iter, + std::set<cls::journal::Tag> *tags) { + try { + decode(*tags, *iter); + } catch (const buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +void guard_append(librados::ObjectWriteOperation *op, uint64_t soft_max_size) { + bufferlist bl; + encode(soft_max_size, bl); + op->exec("journal", "guard_append", bl); +} + +void append(librados::ObjectWriteOperation *op, uint64_t soft_max_size, + bufferlist &data) { + bufferlist bl; + encode(soft_max_size, bl); + encode(data, bl); + + op->exec("journal", "append", bl); +} + +} // namespace client +} // namespace journal +} // namespace cls diff --git a/src/cls/journal/cls_journal_client.h b/src/cls/journal/cls_journal_client.h new file mode 100644 index 000000000..f8ad9db51 --- /dev/null +++ b/src/cls/journal/cls_journal_client.h @@ -0,0 +1,109 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_JOURNAL_CLIENT_H +#define CEPH_CLS_JOURNAL_CLIENT_H + +#include "include/rados/librados_fwd.hpp" +#include "cls/journal/cls_journal_types.h" +#include <set> +#include <boost/optional.hpp> + +class Context; + +namespace cls { +namespace journal { +namespace client { + +void create(librados::ObjectWriteOperation *op, + uint8_t order, uint8_t splay, int64_t pool_id); +int create(librados::IoCtx &ioctx, const std::string &oid, uint8_t order, + uint8_t splay, int64_t pool_id); + +void get_immutable_metadata(librados::IoCtx &ioctx, const std::string &oid, + uint8_t *order, uint8_t *splay_width, + int64_t *pool_id, Context *on_finish); +void get_mutable_metadata(librados::IoCtx &ioctx, const std::string &oid, + uint64_t *minimum_set, uint64_t *active_set, + std::set<cls::journal::Client> *clients, + Context *on_finish); + +void set_minimum_set(librados::ObjectWriteOperation *op, uint64_t object_set); +void set_active_set(librados::ObjectWriteOperation *op, uint64_t object_set); + +// journal client helpers +int get_client(librados::IoCtx &ioctx, const std::string &oid, + const std::string &id, cls::journal::Client *client); +void get_client_start(librados::ObjectReadOperation *op, + const std::string &id); +int get_client_finish(bufferlist::const_iterator *iter, + cls::journal::Client *client); + +int client_register(librados::IoCtx &ioctx, const std::string &oid, + const std::string &id, const bufferlist &data); +void client_register(librados::ObjectWriteOperation *op, + const std::string &id, const bufferlist &data); + +int 
client_update_data(librados::IoCtx &ioctx, const std::string &oid, + const std::string &id, const bufferlist &data); +void client_update_data(librados::ObjectWriteOperation *op, + const std::string &id, const bufferlist &data); +int client_update_state(librados::IoCtx &ioctx, const std::string &oid, + const std::string &id, cls::journal::ClientState state); +void client_update_state(librados::ObjectWriteOperation *op, + const std::string &id, + cls::journal::ClientState state); + +int client_unregister(librados::IoCtx &ioctx, const std::string &oid, + const std::string &id); +void client_unregister(librados::ObjectWriteOperation *op, + const std::string &id); + +void client_commit(librados::ObjectWriteOperation *op, const std::string &id, + const cls::journal::ObjectSetPosition &commit_position); + +int client_list(librados::IoCtx &ioctx, const std::string &oid, + std::set<cls::journal::Client> *clients); +void client_list(librados::IoCtx &ioctx, const std::string &oid, + std::set<cls::journal::Client> *clients, Context *on_finish); + +// journal tag helpers +int get_next_tag_tid(librados::IoCtx &ioctx, const std::string &oid, + uint64_t *tag_tid); +void get_next_tag_tid_start(librados::ObjectReadOperation *op); +int get_next_tag_tid_finish(bufferlist::const_iterator *iter, + uint64_t *tag_tid); + +int get_tag(librados::IoCtx &ioctx, const std::string &oid, + uint64_t tag_tid, cls::journal::Tag *tag); +void get_tag_start(librados::ObjectReadOperation *op, + uint64_t tag_tid); +int get_tag_finish(bufferlist::const_iterator *iter, cls::journal::Tag *tag); + +int tag_create(librados::IoCtx &ioctx, const std::string &oid, + uint64_t tag_tid, uint64_t tag_class, + const bufferlist &data); +void tag_create(librados::ObjectWriteOperation *op, + uint64_t tag_tid, uint64_t tag_class, + const bufferlist &data); + +int tag_list(librados::IoCtx &ioctx, const std::string &oid, + const std::string &client_id, boost::optional<uint64_t> tag_class, + std::set<cls::journal::Tag> *tags); +void tag_list_start(librados::ObjectReadOperation *op, + uint64_t start_after_tag_tid, uint64_t max_return, + const std::string &client_id, + boost::optional<uint64_t> tag_class); +int tag_list_finish(bufferlist::const_iterator *iter, + std::set<cls::journal::Tag> *tags); + +// journal entry helpers +void guard_append(librados::ObjectWriteOperation *op, uint64_t soft_max_size); +void append(librados::ObjectWriteOperation *op, uint64_t soft_max_size, + bufferlist &data); + +} // namespace client +} // namespace journal +} // namespace cls + +#endif // CEPH_CLS_JOURNAL_CLIENT_H diff --git a/src/cls/journal/cls_journal_types.cc b/src/cls/journal/cls_journal_types.cc new file mode 100644 index 000000000..6e9dfde87 --- /dev/null +++ b/src/cls/journal/cls_journal_types.cc @@ -0,0 +1,199 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "cls/journal/cls_journal_types.h" +#include "include/stringify.h" +#include "common/Formatter.h" + +using ceph::bufferlist; +using ceph::Formatter; + +namespace cls { +namespace journal { + +void ObjectPosition::encode(bufferlist& bl) const { + ENCODE_START(1, 1, bl); + encode(object_number, bl); + encode(tag_tid, bl); + encode(entry_tid, bl); + ENCODE_FINISH(bl); +} + +void ObjectPosition::decode(bufferlist::const_iterator& iter) { + DECODE_START(1, iter); + decode(object_number, iter); + decode(tag_tid, iter); + decode(entry_tid, iter); + DECODE_FINISH(iter); +} + +void ObjectPosition::dump(Formatter *f) const { + 
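// [editor's note, not part of this patch] the ENCODE_START(1, 1, bl) /
// DECODE_START(1, iter) pairs used throughout these types wrap each payload
// in ceph's versioned envelope (struct version, oldest compatible version,
// byte length). That envelope is what lets a later revision of, say,
// ObjectPosition append fields while older daemons skip what they do not
// understand; a hypothetical v2 would become ENCODE_START(2, 1, bl) plus the
// new field encoded at the end.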
f->dump_unsigned("object_number", object_number); + f->dump_unsigned("tag_tid", tag_tid); + f->dump_unsigned("entry_tid", entry_tid); +} + +void ObjectPosition::generate_test_instances(std::list<ObjectPosition *> &o) { + o.push_back(new ObjectPosition()); + o.push_back(new ObjectPosition(1, 2, 3)); +} + +void ObjectSetPosition::encode(bufferlist& bl) const { + ENCODE_START(1, 1, bl); + encode(object_positions, bl); + ENCODE_FINISH(bl); +} + +void ObjectSetPosition::decode(bufferlist::const_iterator& iter) { + DECODE_START(1, iter); + decode(object_positions, iter); + DECODE_FINISH(iter); +} + +void ObjectSetPosition::dump(Formatter *f) const { + f->open_array_section("object_positions"); + for (auto &pos : object_positions) { + f->open_object_section("object_position"); + pos.dump(f); + f->close_section(); + } + f->close_section(); +} + +void ObjectSetPosition::generate_test_instances( + std::list<ObjectSetPosition *> &o) { + o.push_back(new ObjectSetPosition()); + o.push_back(new ObjectSetPosition({{0, 1, 120}, {121, 2, 121}})); +} + +void Client::encode(bufferlist& bl) const { + ENCODE_START(1, 1, bl); + encode(id, bl); + encode(data, bl); + encode(commit_position, bl); + encode(static_cast<uint8_t>(state), bl); + ENCODE_FINISH(bl); +} + +void Client::decode(bufferlist::const_iterator& iter) { + DECODE_START(1, iter); + decode(id, iter); + decode(data, iter); + decode(commit_position, iter); + + uint8_t state_raw; + decode(state_raw, iter); + state = static_cast<ClientState>(state_raw); + DECODE_FINISH(iter); +} + +void Client::dump(Formatter *f) const { + f->dump_string("id", id); + + std::stringstream data_ss; + data.hexdump(data_ss); + f->dump_string("data", data_ss.str()); + + f->open_object_section("commit_position"); + commit_position.dump(f); + f->close_section(); + + f->dump_string("state", stringify(state)); +} + +void Client::generate_test_instances(std::list<Client *> &o) { + bufferlist data; + data.append(std::string(128, '1')); + + o.push_back(new Client()); + o.push_back(new Client("id", data)); + o.push_back(new Client("id", data, {{{1, 2, 120}, {2, 3, 121}}})); +} + +void Tag::encode(bufferlist& bl) const { + ENCODE_START(1, 1, bl); + encode(tid, bl); + encode(tag_class, bl); + encode(data, bl); + ENCODE_FINISH(bl); +} + +void Tag::decode(bufferlist::const_iterator& iter) { + DECODE_START(1, iter); + decode(tid, iter); + decode(tag_class, iter); + decode(data, iter); + DECODE_FINISH(iter); +} + +void Tag::dump(Formatter *f) const { + f->dump_unsigned("tid", tid); + f->dump_unsigned("tag_class", tag_class); + + std::stringstream data_ss; + data.hexdump(data_ss); + f->dump_string("data", data_ss.str()); +} + +void Tag::generate_test_instances(std::list<Tag *> &o) { + o.push_back(new Tag()); + + bufferlist data; + data.append(std::string(128, '1')); + o.push_back(new Tag(123, 234, data)); +} + +std::ostream &operator<<(std::ostream &os, const ClientState &state) { + switch (state) { + case CLIENT_STATE_CONNECTED: + os << "connected"; + break; + case CLIENT_STATE_DISCONNECTED: + os << "disconnected"; + break; + default: + os << "unknown (" << static_cast<uint32_t>(state) << ")"; + break; + } + return os; +} + +std::ostream &operator<<(std::ostream &os, + const ObjectPosition &object_position) { + os << "[" + << "object_number=" << object_position.object_number << ", " + << "tag_tid=" << object_position.tag_tid << ", " + << "entry_tid=" << object_position.entry_tid << "]"; + return os; +} + +std::ostream &operator<<(std::ostream &os, + const ObjectSetPosition 
&object_set_position) { + os << "[positions=["; + std::string delim; + for (auto &object_position : object_set_position.object_positions) { + os << delim << object_position; + delim = ", "; + } + os << "]]"; + return os; +} + +std::ostream &operator<<(std::ostream &os, const Client &client) { + os << "[id=" << client.id << ", " + << "commit_position=" << client.commit_position << ", " + << "state=" << client.state << "]"; + return os; +} + +std::ostream &operator<<(std::ostream &os, const Tag &tag) { + os << "[tid=" << tag.tid << ", " + << "tag_class=" << tag.tag_class << ", " + << "data="; + tag.data.hexdump(os); + os << "]"; + return os; +} + +} // namespace journal +} // namespace cls diff --git a/src/cls/journal/cls_journal_types.h b/src/cls/journal/cls_journal_types.h new file mode 100644 index 000000000..f82d30c7e --- /dev/null +++ b/src/cls/journal/cls_journal_types.h @@ -0,0 +1,157 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_JOURNAL_TYPES_H +#define CEPH_CLS_JOURNAL_TYPES_H + +#include "include/int_types.h" +#include "include/buffer_fwd.h" +#include "include/encoding.h" +#include <iosfwd> +#include <list> +#include <string> + +namespace ceph { +class Formatter; +} + +namespace cls { +namespace journal { + +static const uint64_t JOURNAL_MAX_RETURN = 256; + +struct ObjectPosition { + uint64_t object_number; + uint64_t tag_tid; + uint64_t entry_tid; + + ObjectPosition() : object_number(0), tag_tid(0), entry_tid(0) {} + ObjectPosition(uint64_t _object_number, uint64_t _tag_tid, + uint64_t _entry_tid) + : object_number(_object_number), tag_tid(_tag_tid), entry_tid(_entry_tid) {} + + inline bool operator==(const ObjectPosition& rhs) const { + return (object_number == rhs.object_number && + tag_tid == rhs.tag_tid && + entry_tid == rhs.entry_tid); + } + inline bool operator!=(const ObjectPosition& rhs) const { + return !(*this == rhs); + } + + void encode(ceph::buffer::list& bl) const; + void decode(ceph::buffer::list::const_iterator& iter); + void dump(ceph::Formatter *f) const; + + inline bool operator<(const ObjectPosition &rhs) const { + if (object_number != rhs.object_number) { + return object_number < rhs.object_number; + } else if (tag_tid != rhs.tag_tid) { + return tag_tid < rhs.tag_tid; + } + return entry_tid < rhs.entry_tid; + } + + static void generate_test_instances(std::list<ObjectPosition *> &o); +}; + +typedef std::list<ObjectPosition> ObjectPositions; + +struct ObjectSetPosition { + // stored in most-recent -> least recent committed entry order + ObjectPositions object_positions; + + ObjectSetPosition() {} + ObjectSetPosition(const ObjectPositions &_object_positions) + : object_positions(_object_positions) {} + + void encode(ceph::buffer::list& bl) const; + void decode(ceph::buffer::list::const_iterator& iter); + void dump(ceph::Formatter *f) const; + + inline bool operator==(const ObjectSetPosition &rhs) const { + return (object_positions == rhs.object_positions); + } + + static void generate_test_instances(std::list<ObjectSetPosition *> &o); +}; + +enum ClientState { + CLIENT_STATE_CONNECTED = 0, + CLIENT_STATE_DISCONNECTED = 1 +}; + +struct Client { + std::string id; + ceph::buffer::list data; + ObjectSetPosition commit_position; + ClientState state; + + Client() : state(CLIENT_STATE_CONNECTED) {} + Client(const std::string& _id, const ceph::buffer::list &_data, + const ObjectSetPosition &_commit_position = ObjectSetPosition(), + ClientState _state = CLIENT_STATE_CONNECTED) + : id(_id), 
data(_data), commit_position(_commit_position), state(_state) {} + + inline bool operator==(const Client &rhs) const { + return (id == rhs.id && + data.contents_equal(rhs.data) && + commit_position == rhs.commit_position && + state == rhs.state); + } + inline bool operator<(const Client &rhs) const { + return (id < rhs.id); + } + + void encode(ceph::buffer::list& bl) const; + void decode(ceph::buffer::list::const_iterator& iter); + void dump(ceph::Formatter *f) const; + + static void generate_test_instances(std::list<Client *> &o); +}; + +struct Tag { + static const uint64_t TAG_CLASS_NEW = static_cast<uint64_t>(-1); + + uint64_t tid; + uint64_t tag_class; + ceph::buffer::list data; + + Tag() : tid(0), tag_class(0) {} + Tag(uint64_t tid, uint64_t tag_class, const ceph::buffer::list &data) + : tid(tid), tag_class(tag_class), data(data) {} + + inline bool operator==(const Tag &rhs) const { + return (tid == rhs.tid && + tag_class == rhs.tag_class && + data.contents_equal(rhs.data)); + } + inline bool operator<(const Tag &rhs) const { + return (tid < rhs.tid); + } + + void encode(ceph::buffer::list& bl) const; + void decode(ceph::buffer::list::const_iterator& iter); + void dump(ceph::Formatter *f) const; + + static void generate_test_instances(std::list<Tag *> &o); +}; + +WRITE_CLASS_ENCODER(ObjectPosition); +WRITE_CLASS_ENCODER(ObjectSetPosition); +WRITE_CLASS_ENCODER(Client); +WRITE_CLASS_ENCODER(Tag); + +std::ostream &operator<<(std::ostream &os, const ClientState &state); +std::ostream &operator<<(std::ostream &os, + const ObjectPosition &object_position); +std::ostream &operator<<(std::ostream &os, + const ObjectSetPosition &object_set_position); +std::ostream &operator<<(std::ostream &os, + const Client &client); +std::ostream &operator<<(std::ostream &os, const Tag &tag); + +} // namespace journal +} // namespace cls + +#endif // CEPH_CLS_JOURNAL_TYPES_H diff --git a/src/cls/lock/cls_lock.cc b/src/cls/lock/cls_lock.cc new file mode 100644 index 000000000..96d28461d --- /dev/null +++ b/src/cls/lock/cls_lock.cc @@ -0,0 +1,648 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +/** \file + * + * This is an OSD class that implements methods for object + * advisory locking. + * + */ + +#include <errno.h> +#include <map> +#include <sstream> + +#include "include/types.h" +#include "include/utime.h" +#include "objclass/objclass.h" + +#include "common/errno.h" +#include "common/Clock.h" + +#include "cls/lock/cls_lock_types.h" +#include "cls/lock/cls_lock_ops.h" + +#include "global/global_context.h" + +#include "include/compat.h" + +using std::map; +using std::string; + +using ceph::bufferlist; +using namespace rados::cls::lock; + +CLS_VER(1,0) +CLS_NAME(lock) + +#define LOCK_PREFIX "lock." 
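// [editor's note -- illustrative, not part of this patch] every lock managed
// by this class is stored as a single object xattr named
// LOCK_PREFIX + <lock name>, holding an encoded lock_info_t (lock type, tag,
// and the map of lockers). A lock named "striper" therefore lives in the
// xattr "lock.striper", and list_locks() below recovers the lock names by
// scanning all xattrs and stripping this prefix.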
+ +static int clean_lock(cls_method_context_t hctx) +{ + int r = cls_cxx_remove(hctx); + if (r < 0) + return r; + + return 0; +} + +static int read_lock(cls_method_context_t hctx, + const string& name, + lock_info_t *lock) +{ + bufferlist bl; + string key = LOCK_PREFIX; + key.append(name); + + int r = cls_cxx_getxattr(hctx, key.c_str(), &bl); + if (r < 0) { + if (r == -ENODATA) { + *lock = lock_info_t(); + return 0; + } + if (r != -ENOENT) { + CLS_ERR("error reading xattr %s: %d", key.c_str(), r); + } + return r; + } + + try { + auto it = bl.cbegin(); + decode(*lock, it); + } catch (const ceph::buffer::error &err) { + CLS_ERR("error decoding %s", key.c_str()); + return -EIO; + } + + /* now trim expired locks */ + + utime_t now = ceph_clock_now(); + + auto iter = lock->lockers.begin(); + + while (iter != lock->lockers.end()) { + struct locker_info_t& info = iter->second; + if (!info.expiration.is_zero() && info.expiration < now) { + CLS_LOG(20, "expiring locker"); + iter = lock->lockers.erase(iter); + } else { + ++iter; + } + } + + if (lock->lockers.empty() && cls_lock_is_ephemeral(lock->lock_type)) { + r = clean_lock(hctx); + if (r < 0) { + CLS_ERR("error, on read, cleaning lock object %s", cpp_strerror(r).c_str()); + } + } + + return 0; +} + +static int write_lock(cls_method_context_t hctx, const string& name, const lock_info_t& lock) +{ + using ceph::encode; + string key = LOCK_PREFIX; + key.append(name); + + bufferlist lock_bl; + encode(lock, lock_bl, cls_get_client_features(hctx)); + + int r = cls_cxx_setxattr(hctx, key.c_str(), &lock_bl); + if (r < 0) + return r; + + return 0; +} + +/** + * helper function to add a lock and update disk state. + * + * Input: + * @param name Lock name + * @param lock_type Type of lock (exclusive / shared) + * @param duration Duration of lock (in seconds). Zero means it doesn't expire. 
+ * @param flags lock flags + * @param cookie The cookie to set in the lock + * @param tag The tag to match with the lock (can only lock with matching tags) + * @param lock_description The lock description to set (if not empty) + * @param locker_description The locker description + * + * @return 0 on success, or -errno on failure + */ +static int lock_obj(cls_method_context_t hctx, + const string& name, + ClsLockType lock_type, + utime_t duration, + const string& description, + uint8_t flags, + const string& cookie, + const string& tag) +{ + bool exclusive = cls_lock_is_exclusive(lock_type); + lock_info_t linfo; + bool fail_if_exists = (flags & LOCK_FLAG_MAY_RENEW) == 0; + bool fail_if_does_not_exist = flags & LOCK_FLAG_MUST_RENEW; + + CLS_LOG(20, + "requested lock_type=%s fail_if_exists=%d fail_if_does_not_exist=%d", + cls_lock_type_str(lock_type), fail_if_exists, fail_if_does_not_exist); + if (!cls_lock_is_valid(lock_type)) { + return -EINVAL; + } + + if (name.empty()) + return -EINVAL; + + if (!fail_if_exists && fail_if_does_not_exist) { + // at most one of LOCK_FLAG_MAY_RENEW and LOCK_FLAG_MUST_RENEW may + // be set since they have different implications if the lock does + // not already exist + return -EINVAL; + } + + // see if there's already a locker + int r = read_lock(hctx, name, &linfo); + if (r < 0 && r != -ENOENT) { + CLS_ERR("Could not read lock info: %s", cpp_strerror(r).c_str()); + return r; + } + + auto& lockers = linfo.lockers; + + locker_id_t id; + id.cookie = cookie; + entity_inst_t inst; + r = cls_get_request_origin(hctx, &inst); + id.locker = inst.name; + ceph_assert(r == 0); + + /* check this early, before we check fail_if_exists, otherwise we might + * remove the locker entry and not check it later */ + if (lockers.size() && tag != linfo.tag) { + CLS_LOG(20, "cannot take lock on object, conflicting tag"); + return -EBUSY; + } + + ClsLockType existing_lock_type = linfo.lock_type; + CLS_LOG(20, "existing_lock_type=%s", cls_lock_type_str(existing_lock_type)); + auto iter = lockers.find(id); + if (iter != lockers.end()) { + if (fail_if_exists && !fail_if_does_not_exist) { + return -EEXIST; + } else { + lockers.erase(iter); // remove old entry + } + } else if (fail_if_does_not_exist) { + return -ENOENT; + } + + if (!lockers.empty()) { + if (exclusive) { + auto locker_lister = + [&lockers]() -> std::string { + std::stringstream locker_list; + locker_list << lockers; + return locker_list.str(); + }; + CLS_LOG(20, "could not exclusive-lock object, already locked by %s", + locker_lister().c_str()); + return -EBUSY; + } + + if (existing_lock_type != lock_type) { + CLS_LOG(20, "cannot take lock on object, conflicting lock type"); + return -EBUSY; + } + } + + linfo.lock_type = lock_type; + linfo.tag = tag; + utime_t expiration; + if (!duration.is_zero()) { + expiration = ceph_clock_now(); + expiration += duration; + + } + // make all addrs of type legacy, because v2 clients speak v2 or v1, + // even depending on which OSD they are talking to, and the type + // isn't what uniquely identifies them. also, storing a v1 addr + // here means that old clients who get this locker_info won't see an + // old "msgr2:" prefix. + inst.addr.set_type(entity_addr_t::TYPE_LEGACY); + + struct locker_info_t info(expiration, inst.addr, description); + + linfo.lockers[id] = info; + + r = write_lock(hctx, name, linfo); + if (r < 0) + return r; + + return 0; +} + +/** + * Set an exclusive lock on an object for the activating client, if possible. 
+ * + * Input: + * @param cls_lock_lock_op request input + * + * @returns 0 on success, -EINVAL if it can't decode the lock_cookie, + * -EBUSY if the object is already locked, or -errno on (unexpected) failure. + */ +static int lock_op(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "lock_op"); + cls_lock_lock_op op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + return lock_obj(hctx, + op.name, op.type, op.duration, op.description, + op.flags, op.cookie, op.tag); +} + +/** + * helper function to remove a lock from on disk and clean up state. + * + * @param name The lock name + * @param locker The locker entity name + * @param cookie The user-defined cookie associated with the lock. + * + * @return 0 on success, -ENOENT if there is no such lock (either + * entity or cookie is wrong), or -errno on other error. + */ +static int remove_lock(cls_method_context_t hctx, + const string& name, + entity_name_t& locker, + const string& cookie) +{ + // get current lockers + lock_info_t linfo; + int r = read_lock(hctx, name, &linfo); + if (r < 0) { + CLS_ERR("Could not read list of current lockers off disk: %s", cpp_strerror(r).c_str()); + return r; + } + + auto& lockers = linfo.lockers; + struct locker_id_t id(locker, cookie); + + // remove named locker from set + auto iter = lockers.find(id); + if (iter == lockers.end()) { // no such key + CLS_LOG(10, "locker %s [name: %s.%ld, cookie: %s] does not exist", name.c_str(), + locker.type_str(), locker.num(), cookie.c_str()); + return -ENOENT; + } + lockers.erase(iter); + + if (cls_lock_is_ephemeral(linfo.lock_type)) { + ceph_assert(lockers.empty()); + r = clean_lock(hctx); + } else { + r = write_lock(hctx, name, linfo); + } + + return r; +} + +/** + * Unlock an object which the activating client currently has locked. + * + * Input: + * @param cls_lock_unlock_op request input + * + * @return 0 on success, -EINVAL if it can't decode the cookie, -ENOENT + * if there is no such lock (either entity or cookie is wrong), or + * -errno on other (unexpected) error. + */ +static int unlock_op(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "unlock_op"); + cls_lock_unlock_op op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error& err) { + return -EINVAL; + } + + entity_inst_t inst; + int r = cls_get_request_origin(hctx, &inst); + ceph_assert(r == 0); + return remove_lock(hctx, op.name, inst.name, op.cookie); +} + +/** + * Break the lock on an object held by any client. + * + * Input: + * @param cls_lock_break_op request input + * + * @return 0 on success, -EINVAL if it can't decode the locker and + * cookie, -ENOENT if there is no such lock (either entity or cookie + * is wrong), or -errno on other (unexpected) error. + */ +static int break_lock(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "break_lock"); + cls_lock_break_op op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error& err) { + return -EINVAL; + } + + return remove_lock(hctx, op.name, op.locker, op.cookie); +} + + +/** + * Retrieve lock info: lockers, tag, exclusive + * + * Input: + * @param cls_lock_list_lockers_op request input + * + * Output: + * @param cls_lock_list_lockers_reply result + * + * @return 0 on success, -errno on failure. 
+ */
+static int get_info(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  CLS_LOG(20, "get_info");
+  cls_lock_get_info_op op;
+  try {
+    auto iter = in->cbegin();
+    decode(op, iter);
+  } catch (const ceph::buffer::error& err) {
+    return -EINVAL;
+  }
+
+  // get current lockers
+  lock_info_t linfo;
+  int r = read_lock(hctx, op.name, &linfo);
+  if (r < 0) {
+    CLS_ERR("Could not read lock info: %s", cpp_strerror(r).c_str());
+    return r;
+  }
+
+  struct cls_lock_get_info_reply ret;
+
+  for (auto iter = linfo.lockers.begin(); iter != linfo.lockers.end(); ++iter) {
+    ret.lockers[iter->first] = iter->second;
+  }
+  ret.lock_type = linfo.lock_type;
+  ret.tag = linfo.tag;
+
+  encode(ret, *out, cls_get_client_features(hctx));
+
+  return 0;
+}
+
+
+/**
+ * Retrieve a list of locks for this object
+ *
+ * Input:
+ * @param in is ignored.
+ *
+ * Output:
+ * @param out contains encoded cls_lock_list_locks_reply
+ *
+ * @return 0 on success, -errno on failure.
+ */
+static int list_locks(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  CLS_LOG(20, "list_locks");
+
+  map<string, bufferlist> attrs;
+
+  int r = cls_cxx_getxattrs(hctx, &attrs);
+  if (r < 0)
+    return r;
+
+  cls_lock_list_locks_reply ret;
+
+  size_t pos = sizeof(LOCK_PREFIX) - 1;
+  for (auto iter = attrs.begin(); iter != attrs.end(); ++iter) {
+    const string& attr = iter->first;
+    if (attr.substr(0, pos).compare(LOCK_PREFIX) == 0) {
+      ret.locks.push_back(attr.substr(pos));
+    }
+  }
+
+  encode(ret, *out);
+
+  return 0;
+}
+
+/**
+ * Assert that the object is currently locked
+ *
+ * Input:
+ * @param cls_lock_assert_op request input
+ *
+ * Output:
+ * @param none
+ *
+ * @return 0 on success, -errno on failure.
+ */
+int assert_locked(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  CLS_LOG(20, "assert_locked");
+
+  cls_lock_assert_op op;
+  try {
+    auto iter = in->cbegin();
+    decode(op, iter);
+  } catch (const ceph::buffer::error& err) {
+    return -EINVAL;
+  }
+
+  if (!cls_lock_is_valid(op.type)) {
+    return -EINVAL;
+  }
+
+  if (op.name.empty()) {
+    return -EINVAL;
+  }
+
+  // see if there's already a locker
+  lock_info_t linfo;
+  int r = read_lock(hctx, op.name, &linfo);
+  if (r < 0) {
+    CLS_ERR("Could not read lock info: %s", cpp_strerror(r).c_str());
+    return r;
+  }
+
+  if (linfo.lockers.empty()) {
+    CLS_LOG(20, "object not locked");
+    return -EBUSY;
+  }
+
+  if (linfo.lock_type != op.type) {
+    CLS_LOG(20, "lock type mismatch: current=%s, assert=%s",
+            cls_lock_type_str(linfo.lock_type), cls_lock_type_str(op.type));
+    return -EBUSY;
+  }
+
+  if (linfo.tag != op.tag) {
+    CLS_LOG(20, "lock tag mismatch: current=%s, assert=%s", linfo.tag.c_str(),
+            op.tag.c_str());
+    return -EBUSY;
+  }
+
+  entity_inst_t inst;
+  r = cls_get_request_origin(hctx, &inst);
+  ceph_assert(r == 0);
+
+  locker_id_t id;
+  id.cookie = op.cookie;
+  id.locker = inst.name;
+
+  auto iter = linfo.lockers.find(id);
+  if (iter == linfo.lockers.end()) {
+    CLS_LOG(20, "not locked by assert client");
+    return -EBUSY;
+  }
+  return 0;
+}
+
+/**
+ * Update the cookie associated with an object lock
+ *
+ * Input:
+ * @param cls_lock_set_cookie_op request input
+ *
+ * Output:
+ * @param none
+ *
+ * @return 0 on success, -errno on failure.
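+ *
+ * Fails with -EBUSY when the object is not locked, when the lock type or
+ * tag does not match, when the caller does not hold the lock under the
+ * old cookie, or when the new cookie is already in use.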
+ */ +int set_cookie(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "set_cookie"); + + cls_lock_set_cookie_op op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error& err) { + return -EINVAL; + } + + if (!cls_lock_is_valid(op.type)) { + return -EINVAL; + } + + if (op.name.empty()) { + return -EINVAL; + } + + // see if there's already a locker + lock_info_t linfo; + int r = read_lock(hctx, op.name, &linfo); + if (r < 0) { + CLS_ERR("Could not read lock info: %s", cpp_strerror(r).c_str()); + return r; + } + + if (linfo.lockers.empty()) { + CLS_LOG(20, "object not locked"); + return -EBUSY; + } + + if (linfo.lock_type != op.type) { + CLS_LOG(20, "lock type mismatch: current=%s, assert=%s", + cls_lock_type_str(linfo.lock_type), cls_lock_type_str(op.type)); + return -EBUSY; + } + + if (linfo.tag != op.tag) { + CLS_LOG(20, "lock tag mismatch: current=%s, assert=%s", linfo.tag.c_str(), + op.tag.c_str()); + return -EBUSY; + } + + entity_inst_t inst; + r = cls_get_request_origin(hctx, &inst); + ceph_assert(r == 0); + + locker_id_t id; + id.cookie = op.cookie; + id.locker = inst.name; + + map<locker_id_t, locker_info_t>::iterator iter = linfo.lockers.find(id); + if (iter == linfo.lockers.end()) { + CLS_LOG(20, "not locked by client"); + return -EBUSY; + } + + id.cookie = op.new_cookie; + if (linfo.lockers.count(id) != 0) { + CLS_LOG(20, "lock cookie in-use"); + return -EBUSY; + } + + locker_info_t locker_info(iter->second); + linfo.lockers.erase(iter); + + linfo.lockers[id] = locker_info; + r = write_lock(hctx, op.name, linfo); + if (r < 0) { + CLS_ERR("Could not update lock info: %s", cpp_strerror(r).c_str()); + return r; + } + return 0; +} + +CLS_INIT(lock) +{ + CLS_LOG(20, "Loaded lock class!"); + + cls_handle_t h_class; + cls_method_handle_t h_lock_op; + cls_method_handle_t h_unlock_op; + cls_method_handle_t h_break_lock; + cls_method_handle_t h_get_info; + cls_method_handle_t h_list_locks; + cls_method_handle_t h_assert_locked; + cls_method_handle_t h_set_cookie; + + cls_register("lock", &h_class); + cls_register_cxx_method(h_class, "lock", + CLS_METHOD_RD | CLS_METHOD_WR | CLS_METHOD_PROMOTE, + lock_op, &h_lock_op); + cls_register_cxx_method(h_class, "unlock", + CLS_METHOD_RD | CLS_METHOD_WR | CLS_METHOD_PROMOTE, + unlock_op, &h_unlock_op); + cls_register_cxx_method(h_class, "break_lock", + CLS_METHOD_RD | CLS_METHOD_WR, + break_lock, &h_break_lock); + cls_register_cxx_method(h_class, "get_info", + CLS_METHOD_RD, + get_info, &h_get_info); + cls_register_cxx_method(h_class, "list_locks", + CLS_METHOD_RD, + list_locks, &h_list_locks); + cls_register_cxx_method(h_class, "assert_locked", + CLS_METHOD_RD | CLS_METHOD_PROMOTE, + assert_locked, &h_assert_locked); + cls_register_cxx_method(h_class, "set_cookie", + CLS_METHOD_RD | CLS_METHOD_WR | CLS_METHOD_PROMOTE, + set_cookie, &h_set_cookie); + + return; +} diff --git a/src/cls/lock/cls_lock_client.cc b/src/cls/lock/cls_lock_client.cc new file mode 100644 index 000000000..305659867 --- /dev/null +++ b/src/cls/lock/cls_lock_client.cc @@ -0,0 +1,286 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ + +#include "include/types.h" +#include "msg/msg_types.h" +#include "include/rados/librados.hpp" +#include "include/utime.h" + +#include "cls/lock/cls_lock_ops.h" +#include "cls/lock/cls_lock_client.h" + +using std::map; + +using namespace librados; + + +namespace rados { + namespace cls { + namespace lock { + + void lock(ObjectWriteOperation *rados_op, + const std::string& name, ClsLockType type, + const std::string& cookie, const std::string& tag, + const std::string& description, + const utime_t& duration, uint8_t flags) + { + cls_lock_lock_op op; + op.name = name; + op.type = type; + op.cookie = cookie; + op.tag = tag; + op.description = description; + op.duration = duration; + op.flags = flags; + bufferlist in; + encode(op, in); + rados_op->exec("lock", "lock", in); + } + + int lock(IoCtx *ioctx, + const std::string& oid, + const std::string& name, ClsLockType type, + const std::string& cookie, const std::string& tag, + const std::string& description, const utime_t& duration, + uint8_t flags) + { + ObjectWriteOperation op; + lock(&op, name, type, cookie, tag, description, duration, flags); + return ioctx->operate(oid, &op); + } + + void unlock(ObjectWriteOperation *rados_op, + const std::string& name, const std::string& cookie) + { + cls_lock_unlock_op op; + op.name = name; + op.cookie = cookie; + bufferlist in; + encode(op, in); + + rados_op->exec("lock", "unlock", in); + } + + int unlock(IoCtx *ioctx, const std::string& oid, + const std::string& name, const std::string& cookie) + { + ObjectWriteOperation op; + unlock(&op, name, cookie); + return ioctx->operate(oid, &op); + } + + int aio_unlock(IoCtx *ioctx, const std::string& oid, + const std::string& name, const std::string& cookie, + librados::AioCompletion *completion) + { + ObjectWriteOperation op; + unlock(&op, name, cookie); + return ioctx->aio_operate(oid, completion, &op); + } + + void break_lock(ObjectWriteOperation *rados_op, + const std::string& name, const std::string& cookie, + const entity_name_t& locker) + { + cls_lock_break_op op; + op.name = name; + op.cookie = cookie; + op.locker = locker; + bufferlist in; + encode(op, in); + rados_op->exec("lock", "break_lock", in); + } + + int break_lock(IoCtx *ioctx, const std::string& oid, + const std::string& name, const std::string& cookie, + const entity_name_t& locker) + { + ObjectWriteOperation op; + break_lock(&op, name, cookie, locker); + return ioctx->operate(oid, &op); + } + + int list_locks(IoCtx *ioctx, const std::string& oid, std::list<std::string> *locks) + { + bufferlist in, out; + int r = ioctx->exec(oid, "lock", "list_locks", in, out); + if (r < 0) + return r; + + cls_lock_list_locks_reply ret; + auto iter = std::cbegin(out); + try { + decode(ret, iter); + } catch (ceph::buffer::error& err) { + return -EBADMSG; + } + + *locks = ret.locks; + + return 0; + } + + void get_lock_info_start(ObjectReadOperation *rados_op, + const std::string& name) + { + bufferlist in; + cls_lock_get_info_op op; + op.name = name; + encode(op, in); + rados_op->exec("lock", "get_info", in); + } + + int get_lock_info_finish(bufferlist::const_iterator *iter, + map<locker_id_t, locker_info_t> *lockers, + ClsLockType *type, std::string *tag) + { + cls_lock_get_info_reply ret; + try { + decode(ret, *iter); + } catch (ceph::buffer::error& err) { + return -EBADMSG; + } + + if (lockers) { + *lockers = ret.lockers; + } + + if (type) { + *type = ret.lock_type; + } + + if (tag) { + *tag = ret.tag; + } + + return 0; + } + + int get_lock_info(IoCtx *ioctx, const std::string& oid, const 
std::string& name, + map<locker_id_t, locker_info_t> *lockers, + ClsLockType *type, std::string *tag) + { + ObjectReadOperation op; + get_lock_info_start(&op, name); + bufferlist out; + int r = ioctx->operate(oid, &op, &out); + if (r < 0) + return r; + auto it = std::cbegin(out); + return get_lock_info_finish(&it, lockers, type, tag); + } + + void assert_locked(librados::ObjectOperation *rados_op, + const std::string& name, ClsLockType type, + const std::string& cookie, const std::string& tag) + { + cls_lock_assert_op op; + op.name = name; + op.type = type; + op.cookie = cookie; + op.tag = tag; + bufferlist in; + encode(op, in); + rados_op->exec("lock", "assert_locked", in); + } + + void set_cookie(librados::ObjectWriteOperation *rados_op, + const std::string& name, ClsLockType type, + const std::string& cookie, const std::string& tag, + const std::string& new_cookie) + { + cls_lock_set_cookie_op op; + op.name = name; + op.type = type; + op.cookie = cookie; + op.tag = tag; + op.new_cookie = new_cookie; + bufferlist in; + encode(op, in); + rados_op->exec("lock", "set_cookie", in); + } + + void Lock::assert_locked_shared(ObjectOperation *op) + { + assert_locked(op, name, ClsLockType::SHARED, cookie, tag); + } + + void Lock::assert_locked_exclusive(ObjectOperation *op) + { + assert_locked(op, name, ClsLockType::EXCLUSIVE, cookie, tag); + } + + void Lock::assert_locked_exclusive_ephemeral(ObjectOperation *op) + { + assert_locked(op, name, ClsLockType::EXCLUSIVE_EPHEMERAL, cookie, tag); + } + + void Lock::lock_shared(ObjectWriteOperation *op) + { + lock(op, name, ClsLockType::SHARED, + cookie, tag, description, duration, flags); + } + + int Lock::lock_shared(IoCtx *ioctx, const std::string& oid) + { + return lock(ioctx, oid, name, ClsLockType::SHARED, + cookie, tag, description, duration, flags); + } + + void Lock::lock_exclusive(ObjectWriteOperation *op) + { + lock(op, name, ClsLockType::EXCLUSIVE, + cookie, tag, description, duration, flags); + } + + int Lock::lock_exclusive(IoCtx *ioctx, const std::string& oid) + { + return lock(ioctx, oid, name, ClsLockType::EXCLUSIVE, + cookie, tag, description, duration, flags); + } + + void Lock::lock_exclusive_ephemeral(ObjectWriteOperation *op) + { + lock(op, name, ClsLockType::EXCLUSIVE_EPHEMERAL, + cookie, tag, description, duration, flags); + } + + int Lock::lock_exclusive_ephemeral(IoCtx *ioctx, const std::string& oid) + { + return lock(ioctx, oid, name, ClsLockType::EXCLUSIVE_EPHEMERAL, + cookie, tag, description, duration, flags); + } + + void Lock::unlock(ObjectWriteOperation *op) + { + rados::cls::lock::unlock(op, name, cookie); + } + + int Lock::unlock(IoCtx *ioctx, const std::string& oid) + { + return rados::cls::lock::unlock(ioctx, oid, name, cookie); + } + + void Lock::break_lock(ObjectWriteOperation *op, const entity_name_t& locker) + { + rados::cls::lock::break_lock(op, name, cookie, locker); + } + + int Lock::break_lock(IoCtx *ioctx, const std::string& oid, const entity_name_t& locker) + { + return rados::cls::lock::break_lock(ioctx, oid, name, cookie, locker); + } + } // namespace lock + } // namespace cls +} // namespace rados diff --git a/src/cls/lock/cls_lock_client.h b/src/cls/lock/cls_lock_client.h new file mode 100644 index 000000000..92e1396bf --- /dev/null +++ b/src/cls/lock/cls_lock_client.h @@ -0,0 +1,141 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_LOCK_CLIENT_H +#define CEPH_CLS_LOCK_CLIENT_H + +#include <chrono> + +#include 
"include/rados/librados_fwd.hpp" +#include "cls/lock/cls_lock_types.h" + +namespace rados { + namespace cls { + namespace lock { + extern void lock(librados::ObjectWriteOperation *rados_op, + const std::string& name, ClsLockType type, + const std::string& cookie, const std::string& tag, + const std::string& description, const utime_t& duration, + uint8_t flags); + + extern int lock(librados::IoCtx *ioctx, + const std::string& oid, + const std::string& name, ClsLockType type, + const std::string& cookie, const std::string& tag, + const std::string& description, const utime_t& duration, + uint8_t flags); + + extern void unlock(librados::ObjectWriteOperation *rados_op, + const std::string& name, const std::string& cookie); + + extern int unlock(librados::IoCtx *ioctx, const std::string& oid, + const std::string& name, const std::string& cookie); + + extern int aio_unlock(librados::IoCtx *ioctx, const std::string& oid, + const std::string& name, const std::string& cookie, + librados::AioCompletion *completion); + + extern void break_lock(librados::ObjectWriteOperation *op, + const std::string& name, const std::string& cookie, + const entity_name_t& locker); + + extern int break_lock(librados::IoCtx *ioctx, const std::string& oid, + const std::string& name, const std::string& cookie, + const entity_name_t& locker); + + extern int list_locks(librados::IoCtx *ioctx, const std::string& oid, + std::list<std::string> *locks); + extern void get_lock_info_start(librados::ObjectReadOperation *rados_op, + const std::string& name); + extern int get_lock_info_finish(ceph::bufferlist::const_iterator *out, + std::map<locker_id_t, locker_info_t> *lockers, + ClsLockType *type, std::string *tag); + + extern int get_lock_info(librados::IoCtx *ioctx, const std::string& oid, + const std::string& name, + std::map<locker_id_t, locker_info_t> *lockers, + ClsLockType *type, std::string *tag); + + extern void assert_locked(librados::ObjectOperation *rados_op, + const std::string& name, ClsLockType type, + const std::string& cookie, + const std::string& tag); + + extern void set_cookie(librados::ObjectWriteOperation *rados_op, + const std::string& name, ClsLockType type, + const std::string& cookie, const std::string& tag, + const std::string& new_cookie); + + class Lock { + std::string name; + std::string cookie; + std::string tag; + std::string description; + utime_t duration; + uint8_t flags; + + public: + + Lock(const std::string& _n) : name(_n), flags(0) {} + + void set_cookie(const std::string& c) { cookie = c; } + void set_tag(const std::string& t) { tag = t; } + void set_description(const std::string& desc) { description = desc; } + void set_duration(const utime_t& e) { duration = e; } + void set_duration(const ceph::timespan& d) { + duration = utime_t(ceph::real_clock::zero() + d); + } + + void set_may_renew(bool renew) { + if (renew) { + flags |= LOCK_FLAG_MAY_RENEW; + flags &= ~LOCK_FLAG_MUST_RENEW; // if may then not must + } else { + flags &= ~LOCK_FLAG_MAY_RENEW; + } + } + + void set_must_renew(bool renew) { + if (renew) { + flags |= LOCK_FLAG_MUST_RENEW; + flags &= ~LOCK_FLAG_MAY_RENEW; // if must then not may + } else { + flags &= ~LOCK_FLAG_MUST_RENEW; + } + } + + void assert_locked_shared(librados::ObjectOperation *rados_op); + void assert_locked_exclusive(librados::ObjectOperation *rados_op); + void assert_locked_exclusive_ephemeral(librados::ObjectOperation *rados_op); + + /* ObjectWriteOperation */ + void lock_shared(librados::ObjectWriteOperation *ioctx); + void 
lock_exclusive(librados::ObjectWriteOperation *ioctx); + + // Be careful when using an exclusive ephemeral lock; it is + // intended strictly for cases when a lock object exists + // solely for a lock in a given process and the object is no + // longer needed when the lock is unlocked or expired, as the + // cls back-end will make an effort to delete it. + void lock_exclusive_ephemeral(librados::ObjectWriteOperation *ioctx); + void unlock(librados::ObjectWriteOperation *ioctx); + void break_lock(librados::ObjectWriteOperation *ioctx, + const entity_name_t& locker); + + /* IoCtx */ + int lock_shared(librados::IoCtx *ioctx, const std::string& oid); + int lock_exclusive(librados::IoCtx *ioctx, const std::string& oid); + + // NB: see above comment on exclusive ephemeral locks + int lock_exclusive_ephemeral(librados::IoCtx *ioctx, + const std::string& oid); + int unlock(librados::IoCtx *ioctx, const std::string& oid); + int break_lock(librados::IoCtx *ioctx, const std::string& oid, + const entity_name_t& locker); + }; + + } // namespace lock + } // namespace cls +} // namespace rados + +#endif diff --git a/src/cls/lock/cls_lock_ops.cc b/src/cls/lock/cls_lock_ops.cc new file mode 100644 index 000000000..1f878be48 --- /dev/null +++ b/src/cls/lock/cls_lock_ops.cc @@ -0,0 +1,213 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ + +#include "msg/msg_types.h" +#include "common/Formatter.h" + +#include "cls/lock/cls_lock_ops.h" + +using namespace rados::cls::lock; +using std::list; +using std::map; +using std::string; + +static void generate_lock_id(locker_id_t& i, int n, const string& cookie) +{ + i.locker = entity_name_t::CLIENT(n); + i.cookie = cookie; +} + +void cls_lock_lock_op::dump(Formatter *f) const +{ + f->dump_string("name", name); + f->dump_string("type", cls_lock_type_str(type)); + f->dump_string("cookie", cookie); + f->dump_string("tag", tag); + f->dump_string("description", description); + f->dump_stream("duration") << duration; + f->dump_int("flags", (int)flags); +} + +void cls_lock_lock_op::generate_test_instances(list<cls_lock_lock_op*>& o) +{ + cls_lock_lock_op *i = new cls_lock_lock_op; + i->name = "name"; + i->type = ClsLockType::SHARED; + i->cookie = "cookie"; + i->tag = "tag"; + i->description = "description"; + i->duration = utime_t(5, 0); + i->flags = LOCK_FLAG_MAY_RENEW; + o.push_back(i); + o.push_back(new cls_lock_lock_op); +} + +void cls_lock_unlock_op::dump(Formatter *f) const +{ + f->dump_string("name", name); + f->dump_string("cookie", cookie); +} + +void cls_lock_unlock_op::generate_test_instances(list<cls_lock_unlock_op*>& o) +{ + cls_lock_unlock_op *i = new cls_lock_unlock_op; + i->name = "name"; + i->cookie = "cookie"; + o.push_back(i); + o.push_back(new cls_lock_unlock_op); +} + +void cls_lock_break_op::dump(Formatter *f) const +{ + f->dump_string("name", name); + f->dump_string("cookie", cookie); + f->dump_stream("locker") << locker; +} + +void cls_lock_break_op::generate_test_instances(list<cls_lock_break_op*>& o) +{ + cls_lock_break_op *i = new cls_lock_break_op; + i->name = "name"; + i->cookie = "cookie"; + i->locker = entity_name_t::CLIENT(1); + o.push_back(i); + o.push_back(new 
cls_lock_break_op); +} + +void cls_lock_get_info_op::dump(Formatter *f) const +{ + f->dump_string("name", name); +} + +void cls_lock_get_info_op::generate_test_instances(list<cls_lock_get_info_op*>& o) +{ + cls_lock_get_info_op *i = new cls_lock_get_info_op; + i->name = "name"; + o.push_back(i); + o.push_back(new cls_lock_get_info_op); +} + +static void generate_test_addr(entity_addr_t& a, int nonce, int port) +{ + a.set_type(entity_addr_t::TYPE_LEGACY); + a.set_nonce(nonce); + a.set_family(AF_INET); + a.set_in4_quad(0, 127); + a.set_in4_quad(1, 0); + a.set_in4_quad(2, 1); + a.set_in4_quad(3, 2); + a.set_port(port); +} + +void cls_lock_get_info_reply::dump(Formatter *f) const +{ + f->dump_string("lock_type", cls_lock_type_str(lock_type)); + f->dump_string("tag", tag); + f->open_array_section("lockers"); + map<locker_id_t, locker_info_t>::const_iterator iter; + for (iter = lockers.begin(); iter != lockers.end(); ++iter) { + const locker_id_t& id = iter->first; + const locker_info_t& info = iter->second; + f->open_object_section("object"); + f->dump_stream("locker") << id.locker; + f->dump_string("description", info.description); + f->dump_string("cookie", id.cookie); + f->dump_stream("expiration") << info.expiration; + f->dump_string("addr", info.addr.get_legacy_str()); + f->close_section(); + } + f->close_section(); +} + +void cls_lock_get_info_reply::generate_test_instances(list<cls_lock_get_info_reply*>& o) +{ + cls_lock_get_info_reply *i = new cls_lock_get_info_reply; + i->lock_type = ClsLockType::SHARED; + i->tag = "tag"; + locker_id_t id1, id2; + entity_addr_t addr1, addr2; + generate_lock_id(id1, 1, "cookie1"); + generate_test_addr(addr1, 10, 20); + i->lockers[id1] = locker_info_t(utime_t(10, 0), addr1, "description1"); + generate_lock_id(id2, 2, "cookie2"); + generate_test_addr(addr2, 30, 40); + i->lockers[id2] = locker_info_t(utime_t(20, 0), addr2, "description2"); + + o.push_back(i); + o.push_back(new cls_lock_get_info_reply); +} + +void cls_lock_list_locks_reply::dump(Formatter *f) const +{ + list<string>::const_iterator iter; + f->open_array_section("locks"); + for (iter = locks.begin(); iter != locks.end(); ++iter) { + f->open_array_section("object"); + f->dump_string("lock", *iter); + f->close_section(); + } + f->close_section(); +} + +void cls_lock_list_locks_reply::generate_test_instances(list<cls_lock_list_locks_reply*>& o) +{ + cls_lock_list_locks_reply *i = new cls_lock_list_locks_reply; + i->locks.push_back("lock1"); + i->locks.push_back("lock2"); + i->locks.push_back("lock3"); + + o.push_back(i); + o.push_back(new cls_lock_list_locks_reply); +} + +void cls_lock_assert_op::dump(Formatter *f) const +{ + f->dump_string("name", name); + f->dump_string("type", cls_lock_type_str(type)); + f->dump_string("cookie", cookie); + f->dump_string("tag", tag); +} + +void cls_lock_assert_op::generate_test_instances(list<cls_lock_assert_op*>& o) +{ + cls_lock_assert_op *i = new cls_lock_assert_op; + i->name = "name"; + i->type = ClsLockType::SHARED; + i->cookie = "cookie"; + i->tag = "tag"; + o.push_back(i); + o.push_back(new cls_lock_assert_op); +} + +void cls_lock_set_cookie_op::dump(Formatter *f) const +{ + f->dump_string("name", name); + f->dump_string("type", cls_lock_type_str(type)); + f->dump_string("cookie", cookie); + f->dump_string("tag", tag); + f->dump_string("new_cookie", new_cookie); +} + +void cls_lock_set_cookie_op::generate_test_instances(list<cls_lock_set_cookie_op*>& o) +{ + cls_lock_set_cookie_op *i = new cls_lock_set_cookie_op; + i->name = "name"; + i->type = 
ClsLockType::SHARED; + i->cookie = "cookie"; + i->tag = "tag"; + i->new_cookie = "new cookie"; + o.push_back(i); + o.push_back(new cls_lock_set_cookie_op); +} + diff --git a/src/cls/lock/cls_lock_ops.h b/src/cls/lock/cls_lock_ops.h new file mode 100644 index 000000000..4de050f18 --- /dev/null +++ b/src/cls/lock/cls_lock_ops.h @@ -0,0 +1,245 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_LOCK_OPS_H +#define CEPH_CLS_LOCK_OPS_H + +#include "include/types.h" +#include "include/utime.h" +#include "cls/lock/cls_lock_types.h" + +struct cls_lock_lock_op +{ + std::string name; + ClsLockType type; + std::string cookie; + std::string tag; + std::string description; + utime_t duration; + uint8_t flags; + + cls_lock_lock_op() : type(ClsLockType::NONE), flags(0) {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(name, bl); + uint8_t t = (uint8_t)type; + encode(t, bl); + encode(cookie, bl); + encode(tag, bl); + encode(description, bl); + encode(duration, bl); + encode(flags, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl); + decode(name, bl); + uint8_t t; + decode(t, bl); + type = (ClsLockType)t; + decode(cookie, bl); + decode(tag, bl); + decode(description, bl); + decode(duration, bl); + decode(flags, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_lock_lock_op*>& o); +}; +WRITE_CLASS_ENCODER(cls_lock_lock_op) + +struct cls_lock_unlock_op +{ + std::string name; + std::string cookie; + + cls_lock_unlock_op() {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(name, bl); + encode(cookie, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl); + decode(name, bl); + decode(cookie, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_lock_unlock_op*>& o); +}; +WRITE_CLASS_ENCODER(cls_lock_unlock_op) + +struct cls_lock_break_op +{ + std::string name; + entity_name_t locker; + std::string cookie; + + cls_lock_break_op() {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(name, bl); + encode(locker, bl); + encode(cookie, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl); + decode(name, bl); + decode(locker, bl); + decode(cookie, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_lock_break_op*>& o); +}; +WRITE_CLASS_ENCODER(cls_lock_break_op) + +struct cls_lock_get_info_op +{ + std::string name; + + cls_lock_get_info_op() {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(name, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl); + decode(name, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_lock_get_info_op*>& o); +}; +WRITE_CLASS_ENCODER(cls_lock_get_info_op) + +struct cls_lock_get_info_reply +{ + std::map<rados::cls::lock::locker_id_t, rados::cls::lock::locker_info_t> lockers; + ClsLockType lock_type; + std::string tag; + + cls_lock_get_info_reply() : lock_type(ClsLockType::NONE) {} + + void 
encode(ceph::buffer::list &bl, uint64_t features) const { + ENCODE_START(1, 1, bl); + encode(lockers, bl, features); + uint8_t t = (uint8_t)lock_type; + encode(t, bl); + encode(tag, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl); + decode(lockers, bl); + uint8_t t; + decode(t, bl); + lock_type = (ClsLockType)t; + decode(tag, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_lock_get_info_reply*>& o); +}; +WRITE_CLASS_ENCODER_FEATURES(cls_lock_get_info_reply) + +struct cls_lock_list_locks_reply +{ + std::list<std::string> locks; + + cls_lock_list_locks_reply() {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(locks, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl); + decode(locks, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_lock_list_locks_reply*>& o); +}; +WRITE_CLASS_ENCODER(cls_lock_list_locks_reply) + +struct cls_lock_assert_op +{ + std::string name; + ClsLockType type; + std::string cookie; + std::string tag; + + cls_lock_assert_op() : type(ClsLockType::NONE) {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(name, bl); + uint8_t t = (uint8_t)type; + encode(t, bl); + encode(cookie, bl); + encode(tag, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl); + decode(name, bl); + uint8_t t; + decode(t, bl); + type = (ClsLockType)t; + decode(cookie, bl); + decode(tag, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_lock_assert_op*>& o); +}; +WRITE_CLASS_ENCODER(cls_lock_assert_op) + +struct cls_lock_set_cookie_op +{ + std::string name; + ClsLockType type; + std::string cookie; + std::string tag; + std::string new_cookie; + + cls_lock_set_cookie_op() : type(ClsLockType::NONE) {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(name, bl); + uint8_t t = (uint8_t)type; + encode(t, bl); + encode(cookie, bl); + encode(tag, bl); + encode(new_cookie, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl); + decode(name, bl); + uint8_t t; + decode(t, bl); + type = (ClsLockType)t; + decode(cookie, bl); + decode(tag, bl); + decode(new_cookie, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_lock_set_cookie_op*>& o); +}; +WRITE_CLASS_ENCODER(cls_lock_set_cookie_op) + +#endif diff --git a/src/cls/lock/cls_lock_types.cc b/src/cls/lock/cls_lock_types.cc new file mode 100644 index 000000000..904ed268e --- /dev/null +++ b/src/cls/lock/cls_lock_types.cc @@ -0,0 +1,98 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ + +#include "common/Formatter.h" + +#include "cls/lock/cls_lock_types.h" + +using namespace rados::cls::lock; + +static void generate_lock_id(locker_id_t& i, int n, const std::string& cookie) +{ + i.locker = entity_name_t::CLIENT(n); + i.cookie = cookie; +} + +void locker_id_t::dump(ceph::Formatter *f) const +{ + f->dump_stream("locker") << locker; + f->dump_string("cookie", cookie); +} + +void locker_id_t::generate_test_instances(std::list<locker_id_t*>& o) +{ + locker_id_t *i = new locker_id_t; + generate_lock_id(*i, 1, "cookie"); + o.push_back(i); + o.push_back(new locker_id_t); +} + +void locker_info_t::dump(ceph::Formatter *f) const +{ + f->dump_stream("expiration") << expiration; + f->dump_string("addr", addr.get_legacy_str()); + f->dump_string("description", description); +} + +static void generate_test_addr(entity_addr_t& a, int nonce, int port) +{ + a.set_type(entity_addr_t::TYPE_LEGACY); + a.set_nonce(nonce); + a.set_family(AF_INET); + a.set_in4_quad(0, 127); + a.set_in4_quad(1, 0); + a.set_in4_quad(2, 1); + a.set_in4_quad(3, 2); + a.set_port(port); +} + +void locker_info_t::generate_test_instances(std::list<locker_info_t*>& o) +{ + locker_info_t *i = new locker_info_t; + i->expiration = utime_t(5, 0); + generate_test_addr(i->addr, 1, 2); + i->description = "description"; + o.push_back(i); + o.push_back(new locker_info_t); +} + +void lock_info_t::dump(ceph::Formatter *f) const +{ + f->dump_int("lock_type", static_cast<int>(lock_type)); + f->dump_string("tag", tag); + f->open_array_section("lockers"); + for (auto &i : lockers) { + f->open_object_section("locker"); + f->dump_object("id", i.first); + f->dump_object("info", i.second); + f->close_section(); + } + f->close_section(); +} + +void lock_info_t::generate_test_instances(std::list<lock_info_t *>& o) +{ + lock_info_t *i = new lock_info_t; + locker_id_t id; + locker_info_t info; + generate_lock_id(id, 1, "cookie"); + info.expiration = utime_t(5, 0); + generate_test_addr(info.addr, 1, 2); + info.description = "description"; + i->lockers[id] = info; + i->lock_type = ClsLockType::EXCLUSIVE; + i->tag = "tag"; + o.push_back(i); + o.push_back(new lock_info_t); +} diff --git a/src/cls/lock/cls_lock_types.h b/src/cls/lock/cls_lock_types.h new file mode 100644 index 000000000..13f3e1478 --- /dev/null +++ b/src/cls/lock/cls_lock_types.h @@ -0,0 +1,174 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_LOCK_TYPES_H +#define CEPH_CLS_LOCK_TYPES_H + +#include "include/encoding.h" +#include "include/types.h" +#include "include/utime.h" +#include "msg/msg_types.h" + +/* lock flags */ +#define LOCK_FLAG_MAY_RENEW 0x1 /* idempotent lock acquire */ +#define LOCK_FLAG_MUST_RENEW 0x2 /* lock must already be acquired */ + +enum class ClsLockType { + NONE = 0, + EXCLUSIVE = 1, + SHARED = 2, + EXCLUSIVE_EPHEMERAL = 3, /* lock object is removed @ unlock */ +}; + +inline const char *cls_lock_type_str(ClsLockType type) +{ + switch (type) { + case ClsLockType::NONE: + return "none"; + case ClsLockType::EXCLUSIVE: + return "exclusive"; + case ClsLockType::SHARED: + return "shared"; + case ClsLockType::EXCLUSIVE_EPHEMERAL: + return "exclusive-ephemeral"; + default: + return "<unknown>"; + } +} + +inline bool cls_lock_is_exclusive(ClsLockType type) { + return ClsLockType::EXCLUSIVE == type || ClsLockType::EXCLUSIVE_EPHEMERAL == type; +} + +inline bool cls_lock_is_ephemeral(ClsLockType type) { + return ClsLockType::EXCLUSIVE_EPHEMERAL == type; +} + +inline bool 
cls_lock_is_valid(ClsLockType type) {
+  return ClsLockType::SHARED == type ||
+    ClsLockType::EXCLUSIVE == type ||
+    ClsLockType::EXCLUSIVE_EPHEMERAL == type;
+}
+
+namespace rados {
+  namespace cls {
+    namespace lock {
+
+      /*
+       * locker_id_t: the locker id, needs to be unique in a single lock
+       */
+      struct locker_id_t {
+        entity_name_t locker;   // locker's client name
+        std::string cookie;     // locker's cookie.
+
+        locker_id_t() {}
+        locker_id_t(entity_name_t& _n, const std::string& _c) : locker(_n), cookie(_c) {}
+
+        void encode(ceph::buffer::list &bl) const {
+          ENCODE_START(1, 1, bl);
+          encode(locker, bl);
+          encode(cookie, bl);
+          ENCODE_FINISH(bl);
+        }
+        void decode(ceph::buffer::list::const_iterator &bl) {
+          DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+          decode(locker, bl);
+          decode(cookie, bl);
+          DECODE_FINISH(bl);
+        }
+
+        bool operator<(const locker_id_t& rhs) const {
+          if (locker == rhs.locker)
+            return cookie.compare(rhs.cookie) < 0;
+          if (locker < rhs.locker)
+            return true;
+          return false;
+        }
+        void dump(ceph::Formatter *f) const;
+        friend std::ostream& operator<<(std::ostream& out,
+                                        const locker_id_t& data) {
+          out << data.locker;
+          return out;
+        }
+        static void generate_test_instances(std::list<locker_id_t*>& o);
+      };
+      WRITE_CLASS_ENCODER(locker_id_t)
+
+      struct locker_info_t
+      {
+        utime_t expiration;       // expiration: non-zero means epoch of locker expiration
+        entity_addr_t addr;       // addr: locker address
+        std::string description;  // description: locker description, may be empty
+
+        locker_info_t() {}
+        locker_info_t(const utime_t& _e, const entity_addr_t& _a,
+                      const std::string& _d) : expiration(_e), addr(_a), description(_d) {}
+
+        void encode(ceph::buffer::list &bl, uint64_t features) const {
+          ENCODE_START(1, 1, bl);
+          encode(expiration, bl);
+          encode(addr, bl, features);
+          encode(description, bl);
+          ENCODE_FINISH(bl);
+        }
+        void decode(ceph::buffer::list::const_iterator &bl) {
+          DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
+          decode(expiration, bl);
+          decode(addr, bl);
+          decode(description, bl);
+          DECODE_FINISH(bl);
+        }
+        void dump(ceph::Formatter *f) const;
+        friend std::ostream& operator<<(std::ostream& out,
+                                        const locker_info_t& data) {
+          using ceph::operator <<;
+          out << "{addr:" << data.addr << ", exp:";
+
+          const auto& exp = data.expiration;
+          if (exp.is_zero()) {
+            out << "never}";
+          } else {
+            out << exp.to_real_time() << "}";
+          }
+
+          return out;
+        }
+        static void generate_test_instances(std::list<locker_info_t *>& o);
+      };
+      WRITE_CLASS_ENCODER_FEATURES(locker_info_t)
+
+      struct lock_info_t {
+        std::map<locker_id_t, locker_info_t> lockers; // map of lockers
+        ClsLockType lock_type;                        // lock type (exclusive / shared)
+        std::string tag;                              // tag: while the set of non-expired
+                                                      // lockers is non-empty, operations
+                                                      // on the lock succeed only with a
+                                                      // matching tag.
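+
+        // Illustrative client-side sketch (hypothetical lock/cookie/oid
+        // names): this structure is normally manipulated via the cls
+        // methods, driven by the Lock helper in cls_lock_client.h, e.g.
+        //
+        //   rados::cls::lock::Lock l("my_lock");       // lock name
+        //   l.set_cookie("worker-1");                  // distinguishes holders
+        //   l.set_duration(utime_t(30, 0));            // auto-expire after 30s
+        //   int r = l.lock_exclusive(&ioctx, "oid");   // -EBUSY if already held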
+ + void encode(ceph::buffer::list &bl, uint64_t features) const { + ENCODE_START(1, 1, bl); + encode(lockers, bl, features); + uint8_t t = (uint8_t)lock_type; + encode(t, bl); + encode(tag, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl); + decode(lockers, bl); + uint8_t t; + decode(t, bl); + lock_type = (ClsLockType)t; + decode(tag, bl); + DECODE_FINISH(bl); + } + + lock_info_t() : lock_type(ClsLockType::NONE) {} + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<lock_info_t *>& o); + }; + WRITE_CLASS_ENCODER_FEATURES(lock_info_t); + } + } +} + +#endif diff --git a/src/cls/log/cls_log.cc b/src/cls/log/cls_log.cc new file mode 100644 index 000000000..58a8524da --- /dev/null +++ b/src/cls/log/cls_log.cc @@ -0,0 +1,323 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "include/types.h" +#include "include/utime.h" +#include "objclass/objclass.h" + +#include "cls_log_types.h" +#include "cls_log_ops.h" + +#include "global/global_context.h" +#include "include/compat.h" + +using std::map; +using std::string; + +using ceph::bufferlist; + +CLS_VER(1,0) +CLS_NAME(log) + +static string log_index_prefix = "1_"; + + +static int write_log_entry(cls_method_context_t hctx, string& index, cls_log_entry& entry) +{ + bufferlist bl; + encode(entry, bl); + + int ret = cls_cxx_map_set_val(hctx, index, &bl); + if (ret < 0) + return ret; + + return 0; +} + +static void get_index_time_prefix(utime_t& ts, string& index) +{ + char buf[32]; + snprintf(buf, sizeof(buf), "%010ld.%06ld_", (long)ts.sec(), (long)ts.usec()); + + index = log_index_prefix + buf; +} + +static int read_header(cls_method_context_t hctx, cls_log_header& header) +{ + bufferlist header_bl; + + int ret = cls_cxx_map_read_header(hctx, &header_bl); + if (ret < 0) + return ret; + + if (header_bl.length() == 0) { + header = cls_log_header(); + return 0; + } + + auto iter = header_bl.cbegin(); + try { + decode(header, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: read_header(): failed to decode header"); + } + + return 0; +} + +static int write_header(cls_method_context_t hctx, cls_log_header& header) +{ + bufferlist header_bl; + encode(header, header_bl); + + int ret = cls_cxx_map_write_header(hctx, &header_bl); + if (ret < 0) + return ret; + + return 0; +} + +static void get_index(cls_method_context_t hctx, utime_t& ts, string& index) +{ + get_index_time_prefix(ts, index); + + string unique_id; + + cls_cxx_subop_version(hctx, &unique_id); + + index.append(unique_id); +} + +static int cls_log_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + + cls_log_add_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_log_add_op(): failed to decode op"); + return -EINVAL; + } + + cls_log_header header; + + int ret = read_header(hctx, header); + if (ret < 0) + return ret; + + for (auto iter = op.entries.begin(); iter != op.entries.end(); ++iter) { + cls_log_entry& entry = *iter; + + string index; + + utime_t timestamp = entry.timestamp; + if (op.monotonic_inc && timestamp < header.max_time) + timestamp = header.max_time; + else if (timestamp > header.max_time) + header.max_time = timestamp; + + if (entry.id.empty()) { + get_index(hctx, timestamp, index); + entry.id = index; + } else { + index = entry.id; + } + + CLS_LOG(20, "storing entry at %s", index.c_str()); + 
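+
+    // index keys are "1_<sec>.<usec>_<unique subop id>" (see get_index()
+    // above), so lexical key order matches timestamp order and
+    // header.max_marker below always tracks the greatest key written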
+ + if (index > header.max_marker) + header.max_marker = index; + + ret = write_log_entry(hctx, index, entry); + if (ret < 0) + return ret; + } + + ret = write_header(hctx, header); + if (ret < 0) + return ret; + + return 0; +} + +static int cls_log_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + + cls_log_list_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_log_list_op(): failed to decode op"); + return -EINVAL; + } + + map<string, bufferlist> keys; + + string from_index; + string to_index; + + if (op.marker.empty()) { + get_index_time_prefix(op.from_time, from_index); + } else { + from_index = op.marker; + } + bool use_time_boundary = (!op.from_time.is_zero() && (op.to_time >= op.from_time)); + + if (use_time_boundary) + get_index_time_prefix(op.to_time, to_index); + +#define MAX_ENTRIES 1000 + size_t max_entries = op.max_entries; + if (!max_entries || max_entries > MAX_ENTRIES) + max_entries = MAX_ENTRIES; + + cls_log_list_ret ret; + + int rc = cls_cxx_map_get_vals(hctx, from_index, log_index_prefix, max_entries, &keys, &ret.truncated); + if (rc < 0) + return rc; + + auto& entries = ret.entries; + auto iter = keys.begin(); + + string marker; + + for (; iter != keys.end(); ++iter) { + const string& index = iter->first; + marker = index; + if (use_time_boundary && index.compare(0, to_index.size(), to_index) >= 0) { + ret.truncated = false; + break; + } + + bufferlist& bl = iter->second; + auto biter = bl.cbegin(); + try { + cls_log_entry e; + decode(e, biter); + entries.push_back(e); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: cls_log_list: could not decode entry, index=%s", index.c_str()); + } + } + + ret.marker = marker; + + encode(ret, *out); + + return 0; +} + + +static int cls_log_trim(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + + cls_log_trim_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: cls_log_trim(): failed to decode entry"); + return -EINVAL; + } + + string from_index; + string to_index; + + if (op.from_marker.empty()) { + get_index_time_prefix(op.from_time, from_index); + } else { + from_index = op.from_marker; + } + + // cls_cxx_map_remove_range() expects one-past-end + if (op.to_marker.empty()) { + auto t = op.to_time; + t.nsec_ref() += 1000; // equivalent to usec() += 1 + t.normalize(); + get_index_time_prefix(t, to_index); + } else { + to_index = op.to_marker; + to_index.append(1, '\0'); + } + + // list a single key to detect whether the range is empty + const size_t max_entries = 1; + std::set<std::string> keys; + bool more = false; + + int rc = cls_cxx_map_get_keys(hctx, from_index, max_entries, &keys, &more); + if (rc < 0) { + CLS_LOG(1, "ERROR: cls_cxx_map_get_keys failed rc=%d", rc); + return rc; + } + + if (keys.empty()) { + CLS_LOG(20, "range is empty from_index=%s", from_index.c_str()); + return -ENODATA; + } + + const std::string& first_key = *keys.begin(); + if (to_index < first_key) { + CLS_LOG(20, "listed key %s past to_index=%s", first_key.c_str(), to_index.c_str()); + return -ENODATA; + } + + CLS_LOG(20, "listed key %s, removing through %s", first_key.c_str(), to_index.c_str()); + + rc = cls_cxx_map_remove_range(hctx, first_key, to_index); + if (rc < 0) { + CLS_LOG(1, "ERROR: cls_cxx_map_remove_range failed rc=%d", rc); + return rc; + } + + return 0; +} + +static int cls_log_info(cls_method_context_t hctx, bufferlist *in, bufferlist 
*out)
+{
+  auto in_iter = in->cbegin();
+
+  cls_log_info_op op;
+  try {
+    decode(op, in_iter);
+  } catch (ceph::buffer::error& err) {
+    CLS_LOG(1, "ERROR: cls_log_info(): failed to decode op");
+    return -EINVAL;
+  }
+
+  cls_log_info_ret ret;
+
+  int rc = read_header(hctx, ret.header);
+  if (rc < 0)
+    return rc;
+
+  encode(ret, *out);
+
+  return 0;
+}
+
+CLS_INIT(log)
+{
+  CLS_LOG(1, "Loaded log class!");
+
+  cls_handle_t h_class;
+  cls_method_handle_t h_log_add;
+  cls_method_handle_t h_log_list;
+  cls_method_handle_t h_log_trim;
+  cls_method_handle_t h_log_info;
+
+  cls_register("log", &h_class);
+
+  /* log */
+  cls_register_cxx_method(h_class, "add", CLS_METHOD_RD | CLS_METHOD_WR, cls_log_add, &h_log_add);
+  cls_register_cxx_method(h_class, "list", CLS_METHOD_RD, cls_log_list, &h_log_list);
+  cls_register_cxx_method(h_class, "trim", CLS_METHOD_RD | CLS_METHOD_WR, cls_log_trim, &h_log_trim);
+  cls_register_cxx_method(h_class, "info", CLS_METHOD_RD, cls_log_info, &h_log_info);
+
+  return;
+}
+
diff --git a/src/cls/log/cls_log_client.cc b/src/cls/log/cls_log_client.cc
new file mode 100644
index 000000000..182bb9fec
--- /dev/null
+++ b/src/cls/log/cls_log_client.cc
@@ -0,0 +1,160 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+#include <errno.h>
+
+#include "cls/log/cls_log_ops.h"
+#include "include/rados/librados.hpp"
+#include "include/compat.h"
+
+
+using std::list;
+using std::string;
+
+using ceph::bufferlist;
+
+using namespace librados;
+
+
+
+void cls_log_add(librados::ObjectWriteOperation& op, list<cls_log_entry>& entries, bool monotonic_inc)
+{
+  bufferlist in;
+  cls_log_add_op call;
+  call.entries = entries;
+  call.monotonic_inc = monotonic_inc;
+  encode(call, in);
+  op.exec("log", "add", in);
+}
+
+void cls_log_add(librados::ObjectWriteOperation& op, cls_log_entry& entry)
+{
+  bufferlist in;
+  cls_log_add_op call;
+  call.entries.push_back(entry);
+  encode(call, in);
+  op.exec("log", "add", in);
+}
+
+void cls_log_add_prepare_entry(cls_log_entry& entry, const utime_t& timestamp,
+                               const string& section, const string& name, bufferlist& bl)
+{
+  entry.timestamp = timestamp;
+  entry.section = section;
+  entry.name = name;
+  entry.data = bl;
+}
+
+void cls_log_add(librados::ObjectWriteOperation& op, const utime_t& timestamp,
+                 const string& section, const string& name, bufferlist& bl)
+{
+  cls_log_entry entry;
+
+  cls_log_add_prepare_entry(entry, timestamp, section, name, bl);
+  cls_log_add(op, entry);
+}
+
+void cls_log_trim(librados::ObjectWriteOperation& op, const utime_t& from_time, const utime_t& to_time,
+                  const string& from_marker, const string& to_marker)
+{
+  bufferlist in;
+  cls_log_trim_op call;
+  call.from_time = from_time;
+  call.to_time = to_time;
+  call.from_marker = from_marker;
+  call.to_marker = to_marker;
+  encode(call, in);
+  op.exec("log", "trim", in);
+}
+
+int cls_log_trim(librados::IoCtx& io_ctx, const string& oid, const utime_t& from_time, const utime_t& to_time,
+                 const string& from_marker, const string& to_marker)
+{
+  bool done = false;
+
+  do {
+    ObjectWriteOperation op;
+
+    cls_log_trim(op, from_time, to_time, from_marker, to_marker);
+
+    int r = io_ctx.operate(oid, &op);
+    if (r == -ENODATA)
+      done = true;
+    else if (r < 0)
+      return r;
+
+  } while (!done);
+
+
+  return 0;
+}
+
+class LogListCtx : public ObjectOperationCompletion {
+  list<cls_log_entry> *entries;
+  string *marker;
+  bool *truncated;
+public:
+  LogListCtx(list<cls_log_entry> *_entries, string *_marker, bool *_truncated) :
+    entries(_entries), marker(_marker),
truncated(_truncated) {} + void handle_completion(int r, bufferlist& outbl) override { + if (r >= 0) { + cls_log_list_ret ret; + try { + auto iter = outbl.cbegin(); + decode(ret, iter); + if (entries) + *entries = std::move(ret.entries); + if (truncated) + *truncated = ret.truncated; + if (marker) + *marker = std::move(ret.marker); + } catch (ceph::buffer::error& err) { + // nothing we can do about it atm + } + } + } +}; + +void cls_log_list(librados::ObjectReadOperation& op, const utime_t& from, + const utime_t& to, const string& in_marker, int max_entries, + list<cls_log_entry>& entries, + string *out_marker, bool *truncated) +{ + bufferlist inbl; + cls_log_list_op call; + call.from_time = from; + call.to_time = to; + call.marker = in_marker; + call.max_entries = max_entries; + + encode(call, inbl); + + op.exec("log", "list", inbl, new LogListCtx(&entries, out_marker, truncated)); +} + +class LogInfoCtx : public ObjectOperationCompletion { + cls_log_header *header; +public: + explicit LogInfoCtx(cls_log_header *_header) : header(_header) {} + void handle_completion(int r, bufferlist& outbl) override { + if (r >= 0) { + cls_log_info_ret ret; + try { + auto iter = outbl.cbegin(); + decode(ret, iter); + if (header) + *header = ret.header; + } catch (ceph::buffer::error& err) { + // nothing we can do about it atm + } + } + } +}; + +void cls_log_info(librados::ObjectReadOperation& op, cls_log_header *header) +{ + bufferlist inbl; + cls_log_info_op call; + + encode(call, inbl); + + op.exec("log", "info", inbl, new LogInfoCtx(header)); +} diff --git a/src/cls/log/cls_log_client.h b/src/cls/log/cls_log_client.h new file mode 100644 index 000000000..2afdabeb3 --- /dev/null +++ b/src/cls/log/cls_log_client.h @@ -0,0 +1,39 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_LOG_CLIENT_H +#define CEPH_CLS_LOG_CLIENT_H + +#include "include/rados/librados_fwd.hpp" +#include "cls_log_types.h" + +/* + * log objclass + */ + +void cls_log_add_prepare_entry(cls_log_entry& entry, const utime_t& timestamp, + const std::string& section, const std::string& name, ceph::buffer::list& bl); + +void cls_log_add(librados::ObjectWriteOperation& op, std::list<cls_log_entry>& entries, bool monotonic_inc); +void cls_log_add(librados::ObjectWriteOperation& op, cls_log_entry& entry); +void cls_log_add(librados::ObjectWriteOperation& op, const utime_t& timestamp, + const std::string& section, const std::string& name, ceph::buffer::list& bl); + +void cls_log_list(librados::ObjectReadOperation& op, const utime_t& from, + const utime_t& to, const std::string& in_marker, + int max_entries, std::list<cls_log_entry>& entries, + std::string *out_marker, bool *truncated); + +void cls_log_trim(librados::ObjectWriteOperation& op, const utime_t& from_time, const utime_t& to_time, + const std::string& from_marker, const std::string& to_marker); + +// these overloads which call io_ctx.operate() should not be called in the rgw. 
+// inside rgw, build the operation with the overloads above and submit it
+// through rgw_rados_operate() instead
+#ifndef CLS_CLIENT_HIDE_IOCTX
+int cls_log_trim(librados::IoCtx& io_ctx, const std::string& oid, const utime_t& from_time, const utime_t& to_time,
+                 const std::string& from_marker, const std::string& to_marker);
+#endif
+
+void cls_log_info(librados::ObjectReadOperation& op, cls_log_header *header);
+
+#endif
diff --git a/src/cls/log/cls_log_ops.h b/src/cls/log/cls_log_ops.h
new file mode 100644
index 000000000..0cedc8802
--- /dev/null
+++ b/src/cls/log/cls_log_ops.h
@@ -0,0 +1,156 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_LOG_OPS_H
+#define CEPH_CLS_LOG_OPS_H
+
+#include "cls_log_types.h"
+
+struct cls_log_add_op {
+  std::list<cls_log_entry> entries;
+  bool monotonic_inc;
+
+  cls_log_add_op() : monotonic_inc(true) {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(2, 1, bl);
+    encode(entries, bl);
+    encode(monotonic_inc, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(2, bl);
+    decode(entries, bl);
+    if (struct_v >= 2) {
+      decode(monotonic_inc, bl);
+    }
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(cls_log_add_op)
+
+struct cls_log_list_op {
+  utime_t from_time;
+  std::string marker; /* if not empty, overrides from_time */
+  utime_t to_time; /* not inclusive */
+  int max_entries; /* upper bound on the number of returned entries;
+                      fewer may be returned and the result can still
+                      be truncated */
+
+  cls_log_list_op() : max_entries(0) {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(from_time, bl);
+    encode(marker, bl);
+    encode(to_time, bl);
+    encode(max_entries, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(from_time, bl);
+    decode(marker, bl);
+    decode(to_time, bl);
+    decode(max_entries, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(cls_log_list_op)
+
+struct cls_log_list_ret {
+  std::list<cls_log_entry> entries;
+  std::string marker;
+  bool truncated;
+
+  cls_log_list_ret() : truncated(false) {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(entries, bl);
+    encode(marker, bl);
+    encode(truncated, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(entries, bl);
+    decode(marker, bl);
+    decode(truncated, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(cls_log_list_ret)
+
+
+/*
+ * The trim operation returns 0 when entries were removed but more remain,
+ * and -ENODATA once the requested range is empty, so the caller should
+ * resend the request until it gets -ENODATA.
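+ *
+ * Minimal caller-side sketch (hypothetical io_ctx/oid), mirroring the
+ * retry loop in cls_log_client.cc:
+ *
+ *   int r;
+ *   do {
+ *     librados::ObjectWriteOperation op;
+ *     cls_log_trim(op, from_time, to_time, "", "");
+ *     r = io_ctx.operate(oid, &op);
+ *   } while (r == 0);
+ *   // r == -ENODATA means trimming finished; other negatives are errors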
+ */ +struct cls_log_trim_op { + utime_t from_time; + utime_t to_time; /* inclusive */ + std::string from_marker; + std::string to_marker; + + cls_log_trim_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 1, bl); + encode(from_time, bl); + encode(to_time, bl); + encode(from_marker, bl); + encode(to_marker, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + decode(from_time, bl); + decode(to_time, bl); + if (struct_v >= 2) { + decode(from_marker, bl); + decode(to_marker, bl); + } + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_log_trim_op) + +struct cls_log_info_op { + cls_log_info_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + // currently empty request + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + // currently empty request + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_log_info_op) + +struct cls_log_info_ret { + cls_log_header header; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(header, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(header, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_log_info_ret) + +#endif diff --git a/src/cls/log/cls_log_types.h b/src/cls/log/cls_log_types.h new file mode 100644 index 000000000..1746d243e --- /dev/null +++ b/src/cls/log/cls_log_types.h @@ -0,0 +1,74 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +#ifndef CEPH_CLS_LOG_TYPES_H +#define CEPH_CLS_LOG_TYPES_H + +#include "include/encoding.h" +#include "include/types.h" + +#include "include/utime.h" + +class JSONObj; + + +struct cls_log_entry { + std::string id; + std::string section; + std::string name; + utime_t timestamp; + ceph::buffer::list data; + + cls_log_entry() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 1, bl); + encode(section, bl); + encode(name, bl); + encode(timestamp, bl); + encode(data, bl); + encode(id, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + decode(section, bl); + decode(name, bl); + decode(timestamp, bl); + decode(data, bl); + if (struct_v >= 2) + decode(id, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_log_entry) + +struct cls_log_header { + std::string max_marker; + utime_t max_time; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(max_marker, bl); + encode(max_time, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(max_marker, bl); + decode(max_time, bl); + DECODE_FINISH(bl); + } +}; +inline bool operator ==(const cls_log_header& lhs, const cls_log_header& rhs) { + return (lhs.max_marker == rhs.max_marker && + lhs.max_time == rhs.max_time); +} +inline bool operator !=(const cls_log_header& lhs, const cls_log_header& rhs) { + return !(lhs == rhs); +} +WRITE_CLASS_ENCODER(cls_log_header) + + +#endif diff --git a/src/cls/lua/cls_lua.cc b/src/cls/lua/cls_lua.cc new file mode 100644 index 000000000..dea5fe1a8 --- /dev/null +++ b/src/cls/lua/cls_lua.cc @@ -0,0 +1,1054 @@ +/* + * Lua Bindings for RADOS Object Class + */ +#include <errno.h> +#include <setjmp.h> +#include <string> +#include <sstream> +#include <lua.hpp> +#include "include/types.h" +#include "objclass/objclass.h" +#include 
"json_spirit/json_spirit.h" +#include "cls_lua.h" +#include "cls_lua_ops.h" + +using namespace std; + +CLS_VER(1,0) +CLS_NAME(lua) + +/* + * Jump point for recovering from Lua panic. + */ +static jmp_buf cls_lua_panic_jump; + +/* + * Handle Lua panic. + */ +static int cls_lua_atpanic(lua_State *lua) +{ + CLS_ERR("error: Lua panic: %s", lua_tostring(lua, -1)); + longjmp(cls_lua_panic_jump, 1); + return 0; +} + +struct clslua_err { + bool error; + int ret; +}; + +/* + * Input parameter encoding. + */ +enum InputEncoding { + JSON_ENC, + BUFFERLIST_ENC, +}; + +struct clslua_hctx { + struct clslua_err error; + InputEncoding in_enc; + int ret; + + cls_method_context_t *hctx; + bufferlist *inbl; // raw cls input + bufferlist *outbl; // raw cls output + + string script; // lua script + string handler; // lua handler + bufferlist input; // lua handler input +}; + +/* Lua registry key for method context */ +static char clslua_hctx_reg_key; + +/* + * Grabs the full method handler context + */ +static clslua_hctx *__clslua_get_hctx(lua_State *L) +{ + /* lookup registry value */ + lua_pushlightuserdata(L, &clslua_hctx_reg_key); + lua_gettable(L, LUA_REGISTRYINDEX); + + /* check cls_lua assumptions */ + ceph_assert(!lua_isnil(L, -1)); + ceph_assert(lua_type(L, -1) == LUA_TLIGHTUSERDATA); + + /* cast and cleanup stack */ + clslua_hctx *hctx = (struct clslua_hctx *)lua_touserdata(L, -1); + lua_pop(L, 1); + + return hctx; +} + +/* + * Get the method context out of the registry. This is called at the beginning + * of each clx_cxx_* wrapper, and must be set before there is any chance a Lua + * script calling a 'cls' module function that requires it. + */ +static cls_method_context_t clslua_get_hctx(lua_State *L) +{ + struct clslua_hctx *hctx = __clslua_get_hctx(L); + return *hctx->hctx; +} + +/* + * Returns a reference to cls_lua error state from registry. + */ +struct clslua_err *clslua_checkerr(lua_State *L) +{ + struct clslua_hctx *hctx = __clslua_get_hctx(L); + struct clslua_err *err = &hctx->error; + return err; +} + + +/* Registry key for real `pcall` function */ +static char clslua_pcall_reg_key; + +/* + * Wrap Lua pcall to check for errors thrown by cls_lua (e.g. I/O errors or + * bufferlist decoding errors). The global error is cleared before returning + * to the caller. + */ +static int clslua_pcall(lua_State *L) +{ + int nargs = lua_gettop(L); + lua_pushlightuserdata(L, &clslua_pcall_reg_key); + lua_gettable(L, LUA_REGISTRYINDEX); + lua_insert(L, 1); + lua_call(L, nargs, LUA_MULTRET); + struct clslua_err *err = clslua_checkerr(L); + ceph_assert(err); + if (err->error) { + err->error = false; + lua_pushinteger(L, err->ret); + lua_insert(L, -2); + } + return lua_gettop(L); +} + + +/* + * cls_log + */ +static int clslua_log(lua_State *L) +{ + int nargs = lua_gettop(L); + + if (!nargs) + return 0; + + int loglevel = LOG_LEVEL_DEFAULT; + bool custom_ll = false; + + /* check if first arg can be a log level */ + if (nargs > 1 && lua_isnumber(L, 1)) { + int ll = (int)lua_tonumber(L, 1); + if (ll >= 0) { + loglevel = ll; + custom_ll = true; + } + } + + /* check space for args and seperators (" ") */ + int nelems = ((nargs - (custom_ll ? 1 : 0)) * 2) - 1; + luaL_checkstack(L, nelems, "rados.log(..)"); + + for (int i = custom_ll ? 2 : 1; i <= nargs; i++) { + const char *part = lua_tostring(L, i); + if (!part) { + if (lua_type(L, i) == LUA_TBOOLEAN) + part = lua_toboolean(L, i) ? 
"true" : "false"; + else + part = luaL_typename(L, i); + } + lua_pushstring(L, part); + if ((i+1) <= nargs) + lua_pushstring(L, " "); + } + + /* join string parts and send to Ceph/reply log */ + lua_concat(L, nelems); + CLS_LOG(loglevel, "%s", lua_tostring(L, -1)); + + /* concat leaves result at top of stack */ + return 1; +} + +static char clslua_registered_handle_reg_key; + +/* + * Register a function to be used as a handler target + */ +static int clslua_register(lua_State *L) +{ + luaL_checktype(L, 1, LUA_TFUNCTION); + + /* get table of registered handlers */ + lua_pushlightuserdata(L, &clslua_registered_handle_reg_key); + lua_gettable(L, LUA_REGISTRYINDEX); + ceph_assert(lua_type(L, -1) == LUA_TTABLE); + + /* lookup function argument */ + lua_pushvalue(L, 1); + lua_gettable(L, -2); + + if (lua_isnil(L, -1)) { + lua_pushvalue(L, 1); + lua_pushvalue(L, 1); + lua_settable(L, -4); + } else { + lua_pushstring(L, "Cannot register handler more than once"); + return lua_error(L); + } + + return 0; +} + +/* + * Check if a function is registered as a handler + */ +static void clslua_check_registered_handler(lua_State *L) +{ + luaL_checktype(L, -1, LUA_TFUNCTION); + + /* get table of registered handlers */ + lua_pushlightuserdata(L, &clslua_registered_handle_reg_key); + lua_gettable(L, LUA_REGISTRYINDEX); + ceph_assert(lua_type(L, -1) == LUA_TTABLE); + + /* lookup function argument */ + lua_pushvalue(L, -2); + lua_gettable(L, -2); + + if (!lua_rawequal(L, -1, -3)) { + lua_pushstring(L, "Handler is not registered"); + lua_error(L); + } + + lua_pop(L, 2); +} + +/* + * Handle result of a cls_cxx_* call. If @ok is non-zero then we return with + * the number of Lua return arguments on the stack. Otherwise we save error + * information in the registry and throw a Lua error. 
+ */ +static int clslua_opresult(lua_State *L, int ok, int ret, int nargs, + bool error_on_stack = false) +{ + struct clslua_err *err = clslua_checkerr(L); + + ceph_assert(err); + if (err->error) { + CLS_ERR("error: cls_lua state machine: unexpected error"); + ceph_abort(); + } + + /* everything is cherry */ + if (ok) + return nargs; + + /* set error in registry */ + err->error = true; + err->ret = ret; + + /* push error message */ + if (!error_on_stack) + lua_pushfstring(L, "%s", strerror(-ret)); + + return lua_error(L); +} + +/* + * cls_cxx_create + */ +static int clslua_create(lua_State *lua) +{ + cls_method_context_t hctx = clslua_get_hctx(lua); + int exclusive = lua_toboolean(lua, 1); + + int ret = cls_cxx_create(hctx, exclusive); + return clslua_opresult(lua, (ret == 0), ret, 0); +} + +/* + * cls_cxx_remove + */ +static int clslua_remove(lua_State *lua) +{ + cls_method_context_t hctx = clslua_get_hctx(lua); + + int ret = cls_cxx_remove(hctx); + return clslua_opresult(lua, (ret == 0), ret, 0); +} + +/* + * cls_cxx_stat + */ +static int clslua_stat(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + + uint64_t size; + time_t mtime; + int ret = cls_cxx_stat(hctx, &size, &mtime); + if (!ret) { + lua_pushinteger(L, size); + lua_pushinteger(L, mtime); + } + return clslua_opresult(L, (ret == 0), ret, 2); +} + +/* + * cls_cxx_read + */ +static int clslua_read(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + int offset = luaL_checkinteger(L, 1); + int length = luaL_checkinteger(L, 2); + bufferlist *bl = clslua_pushbufferlist(L, NULL); + int ret = cls_cxx_read(hctx, offset, length, bl); + return clslua_opresult(L, (ret >= 0), ret, 1); +} + +/* + * cls_cxx_write + */ +static int clslua_write(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + int offset = luaL_checkinteger(L, 1); + int length = luaL_checkinteger(L, 2); + bufferlist *bl = clslua_checkbufferlist(L, 3); + int ret = cls_cxx_write(hctx, offset, length, bl); + return clslua_opresult(L, (ret == 0), ret, 0); +} + +/* + * cls_cxx_write_full + */ +static int clslua_write_full(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + bufferlist *bl = clslua_checkbufferlist(L, 1); + int ret = cls_cxx_write_full(hctx, bl); + return clslua_opresult(L, (ret == 0), ret, 0); +} + +/* + * cls_cxx_getxattr + */ +static int clslua_getxattr(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + const char *name = luaL_checkstring(L, 1); + bufferlist *bl = clslua_pushbufferlist(L, NULL); + int ret = cls_cxx_getxattr(hctx, name, bl); + return clslua_opresult(L, (ret >= 0), ret, 1); +} + +/* + * cls_cxx_getxattrs + */ +static int clslua_getxattrs(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + + map<string, bufferlist> attrs; + int ret = cls_cxx_getxattrs(hctx, &attrs); + if (ret < 0) + return clslua_opresult(L, 0, ret, 0); + + lua_createtable(L, 0, attrs.size()); + + for (auto it = attrs.cbegin(); it != attrs.cend(); it++) { + lua_pushstring(L, it->first.c_str()); + bufferlist *bl = clslua_pushbufferlist(L, NULL); + *bl = it->second; // xfer ownership... 
will be GC'd + lua_settable(L, -3); + } + + return clslua_opresult(L, 1, ret, 1); +} + +/* + * cls_cxx_setxattr + */ +static int clslua_setxattr(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + const char *name = luaL_checkstring(L, 1); + bufferlist *bl = clslua_checkbufferlist(L, 2); + int ret = cls_cxx_setxattr(hctx, name, bl); + return clslua_opresult(L, (ret == 0), ret, 1); +} + +/* + * cls_cxx_map_get_val + */ +static int clslua_map_get_val(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + const char *key = luaL_checkstring(L, 1); + bufferlist *bl = clslua_pushbufferlist(L, NULL); + int ret = cls_cxx_map_get_val(hctx, key, bl); + return clslua_opresult(L, (ret == 0), ret, 1); +} + +/* + * cls_cxx_map_set_val + */ +static int clslua_map_set_val(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + const char *key = luaL_checkstring(L, 1); + bufferlist *val = clslua_checkbufferlist(L, 2); + int ret = cls_cxx_map_set_val(hctx, key, val); + return clslua_opresult(L, (ret == 0), ret, 0); +} + +/* + * cls_cxx_map_clear + */ +static int clslua_map_clear(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + int ret = cls_cxx_map_clear(hctx); + return clslua_opresult(L, (ret == 0), ret, 0); +} + +/* + * cls_cxx_map_get_keys + */ +static int clslua_map_get_keys(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + const char *start_after = luaL_checkstring(L, 1); + int max_to_get = luaL_checkinteger(L, 2); + + std::set<string> keys; + bool more; + int ret = cls_cxx_map_get_keys(hctx, start_after, max_to_get, &keys, &more); + if (ret < 0) + return clslua_opresult(L, 0, ret, 0); + + lua_createtable(L, 0, keys.size()); + + for (auto it = keys.cbegin(); it != keys.cend(); it++) { + const std::string& key = *it; + lua_pushstring(L, key.c_str()); + lua_pushboolean(L, 1); + lua_settable(L, -3); + } + + return clslua_opresult(L, 1, ret, 1); +} + +/* + * cls_cxx_map_get_vals + */ +static int clslua_map_get_vals(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + const char *start_after = luaL_checkstring(L, 1); + const char *filter_prefix= luaL_checkstring(L, 2); + int max_to_get = luaL_checkinteger(L, 3); + + map<string, bufferlist> kvpairs; + bool more; + int ret = cls_cxx_map_get_vals(hctx, start_after, filter_prefix, + max_to_get, &kvpairs, &more); + if (ret < 0) + return clslua_opresult(L, 0, ret, 0); + + lua_createtable(L, 0, kvpairs.size()); + + for (auto it = kvpairs.cbegin(); it != kvpairs.cend(); it++) { + lua_pushstring(L, it->first.c_str()); + bufferlist *bl = clslua_pushbufferlist(L, NULL); + *bl = it->second; // xfer ownership... 
will be GC'd + lua_settable(L, -3); + } + + return clslua_opresult(L, 1, ret, 1); +} + +/* + * cls_cxx_map_read_header + */ +static int clslua_map_read_header(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + bufferlist *bl = clslua_pushbufferlist(L, NULL); + int ret = cls_cxx_map_read_header(hctx, bl); + return clslua_opresult(L, (ret >= 0), ret, 1); +} + +/* + * cls_cxx_map_write_header + */ +static int clslua_map_write_header(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + bufferlist *bl = clslua_checkbufferlist(L, 1); + int ret = cls_cxx_map_write_header(hctx, bl); + return clslua_opresult(L, (ret == 0), ret, 0); +} + +/* + * cls_cxx_map_set_vals + */ +static int clslua_map_set_vals(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + luaL_checktype(L, 1, LUA_TTABLE); + + map<string, bufferlist> kvpairs; + + for (lua_pushnil(L); lua_next(L, 1); lua_pop(L, 1)) { + /* + * In the case of a numeric key a copy is made on the stack because + * converting to a string would otherwise manipulate the original key and + * cause problems for iteration. + */ + string key; + int type_code = lua_type(L, -2); + switch (type_code) { + case LUA_TSTRING: + key.assign(lua_tolstring(L, -2, NULL)); + break; + + case LUA_TNUMBER: + lua_pushvalue(L, -2); + key.assign(lua_tolstring(L, -1, NULL)); + lua_pop(L, 1); + break; + + default: + lua_pushfstring(L, "map_set_vals: invalid key type (%s)", + lua_typename(L, type_code)); + return clslua_opresult(L, 0, -EINVAL, 0, true); + } + + bufferlist val; + type_code = lua_type(L, -1); + switch (type_code) { + case LUA_TSTRING: + { + size_t len; + const char *data = lua_tolstring(L, -1, &len); + val.append(data, len); + } + break; + + default: + lua_pushfstring(L, "map_set_vals: invalid val type (%s) for key (%s)", + lua_typename(L, type_code), key.c_str()); + return clslua_opresult(L, 0, -EINVAL, 0, true); + } + + kvpairs[key] = val; + } + + int ret = cls_cxx_map_set_vals(hctx, &kvpairs); + + return clslua_opresult(L, (ret == 0), ret, 0); +} + +/* + * cls_cxx_map_remove_key + */ +static int clslua_map_remove_key(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + const char *key = luaL_checkstring(L, 1); + int ret = cls_cxx_map_remove_key(hctx, key); + return clslua_opresult(L, (ret == 0), ret, 0); +} + +/* + * cls_current_version + */ +static int clslua_current_version(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + uint64_t version = cls_current_version(hctx); + lua_pushinteger(L, version); + return clslua_opresult(L, 1, 0, 1); +} + +/* + * cls_current_subop_num + */ +static int clslua_current_subop_num(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + int num = cls_current_subop_num(hctx); + lua_pushinteger(L, num); + return clslua_opresult(L, 1, 0, 1); +} + +/* + * cls_current_subop_version + */ +static int clslua_current_subop_version(lua_State *L) +{ + cls_method_context_t hctx = clslua_get_hctx(L); + string s; + cls_cxx_subop_version(hctx, &s); + lua_pushstring(L, s.c_str()); + return clslua_opresult(L, 1, 0, 1); +} + +/* + * Functions registered in the 'cls' module. 
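+ * (The table built from this list is exposed to scripts under the module
+ * name "objclass"; see luaopen_objclass() and clslua_setup_env() below.)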
+ */
+static const luaL_Reg clslua_lib[] = {
+  // mgmt
+  {"register", clslua_register},
+  {"log", clslua_log},
+
+  // data
+  {"create", clslua_create},
+  {"remove", clslua_remove},
+  {"stat", clslua_stat},
+  {"read", clslua_read},
+  {"write", clslua_write},
+  {"write_full", clslua_write_full},
+
+  // xattr
+  {"getxattr", clslua_getxattr},
+  {"getxattrs", clslua_getxattrs},
+  {"setxattr", clslua_setxattr},
+
+  // omap
+  {"map_clear", clslua_map_clear},
+  {"map_get_keys", clslua_map_get_keys},
+  {"map_get_vals", clslua_map_get_vals},
+  {"map_read_header", clslua_map_read_header},
+  {"map_write_header", clslua_map_write_header},
+  {"map_get_val", clslua_map_get_val},
+  {"map_set_val", clslua_map_set_val},
+  {"map_set_vals", clslua_map_set_vals},
+  {"map_remove_key", clslua_map_remove_key},
+
+  // env
+  {"current_version", clslua_current_version},
+  {"current_subop_num", clslua_current_subop_num},
+  {"current_subop_version", clslua_current_subop_version},
+
+  {NULL, NULL}
+};
+
+/*
+ * Set const int in table at top of stack
+ */
+#define SET_INT_CONST(var) do { \
+  lua_pushinteger(L, var); \
+  lua_setfield(L, -2, #var); \
+} while (0)
+
+/*
+ * Open the 'objclass' module: register handler functions and errno constants.
+ */
+static int luaopen_objclass(lua_State *L)
+{
+  lua_newtable(L);
+
+  /*
+   * Register cls functions (cls.log, etc...)
+   */
+  luaL_setfuncs(L, clslua_lib, 0);
+
+  /*
+   * Register generic errno values under 'cls'
+   */
+  SET_INT_CONST(EPERM);
+  SET_INT_CONST(ENOENT);
+  SET_INT_CONST(ESRCH);
+  SET_INT_CONST(EINTR);
+  SET_INT_CONST(EIO);
+  SET_INT_CONST(ENXIO);
+  SET_INT_CONST(E2BIG);
+  SET_INT_CONST(ENOEXEC);
+  SET_INT_CONST(EBADF);
+  SET_INT_CONST(ECHILD);
+  SET_INT_CONST(EAGAIN);
+  SET_INT_CONST(ENOMEM);
+  SET_INT_CONST(EACCES);
+  SET_INT_CONST(EFAULT);
+  SET_INT_CONST(EBUSY);
+  SET_INT_CONST(EEXIST);
+  SET_INT_CONST(EXDEV);
+  SET_INT_CONST(ENODEV);
+  SET_INT_CONST(ENOTDIR);
+  SET_INT_CONST(EISDIR);
+  SET_INT_CONST(EINVAL);
+  SET_INT_CONST(ENFILE);
+  SET_INT_CONST(EMFILE);
+  SET_INT_CONST(ENOTTY);
+  SET_INT_CONST(EFBIG);
+  SET_INT_CONST(ENOSPC);
+  SET_INT_CONST(ESPIPE);
+  SET_INT_CONST(EROFS);
+  SET_INT_CONST(EMLINK);
+  SET_INT_CONST(EPIPE);
+  SET_INT_CONST(EDOM);
+  SET_INT_CONST(ERANGE);
+
+  return 1;
+}
+
+/*
+ * Setup the execution environment. Our sandbox currently is not
+ * sophisticated. With a new Lua state per-request we don't need to worry
+ * about users stepping on each other, but we do rip out access to the local
+ * file system. All this will change when/if we decide to use some shared Lua
+ * states, most likely for performance reasons.
+ */
+static void clslua_setup_env(lua_State *L)
+{
+  luaL_requiref(L, "_G", luaopen_base, 1);
+  lua_pop(L, 1);
+
+  /*
+   * Wrap `pcall` to intercept errors. First save a reference to the default
+   * Lua `pcall` function, and then replace `pcall` with our version.
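+   * After the swap, a failing wrapped cls call surfaces to scripts as
+   * pcall() returning (false, errno, errmsg) rather than the stock
+   * (false, errmsg); the errno is the value recorded by clslua_opresult().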
+   */
+  lua_pushlightuserdata(L, &clslua_pcall_reg_key);
+  lua_getglobal(L, "pcall");
+  lua_settable(L, LUA_REGISTRYINDEX);
+
+  lua_pushcfunction(L, clslua_pcall);
+  lua_setglobal(L, "pcall");
+
+  /* mask unsafe */
+  lua_pushnil(L);
+  lua_setglobal(L, "loadfile");
+
+  /* mask unsafe */
+  lua_pushnil(L);
+  lua_setglobal(L, "dofile");
+
+  /* not integrated into our error handling */
+  lua_pushnil(L);
+  lua_setglobal(L, "xpcall");
+
+  luaL_requiref(L, LUA_TABLIBNAME, luaopen_table, 1);
+  lua_pop(L, 1);
+
+  luaL_requiref(L, LUA_STRLIBNAME, luaopen_string, 1);
+  lua_pop(L, 1);
+
+  luaL_requiref(L, LUA_MATHLIBNAME, luaopen_math, 1);
+  lua_pop(L, 1);
+
+  luaL_requiref(L, "objclass", luaopen_objclass, 1);
+  lua_pop(L, 1);
+
+  luaL_requiref(L, "bufferlist", luaopen_bufferlist, 1);
+  lua_pop(L, 1);
+}
+
+/*
+ * Schema:
+ * {
+ *   "script": "...",
+ *   "handler": "...",
+ *   "input": "..." # optional
+ * }
+ */
+static int unpack_json_command(lua_State *L, struct clslua_hctx *ctx,
+    std::string& script, std::string& handler, std::string& input,
+    size_t *input_len)
+{
+  std::string json_input(ctx->inbl->c_str());
+  json_spirit::mValue value;
+
+  if (!json_spirit::read(json_input, value)) {
+    CLS_ERR("error: unparseable JSON");
+    ctx->ret = -EINVAL;
+    return 1;
+  }
+
+  if (value.type() != json_spirit::obj_type) {
+    CLS_ERR("error: input not a JSON object");
+    ctx->ret = -EINVAL;
+    return 1;
+  }
+  json_spirit::mObject obj = value.get_obj();
+
+  // grab the script
+  std::map<std::string, json_spirit::mValue>::const_iterator it = obj.find("script");
+  if (it == obj.end()) {
+    CLS_ERR("error: no 'script' field found in JSON object");
+    ctx->ret = -EINVAL;
+    return 1;
+  }
+
+  if (it->second.type() != json_spirit::str_type) {
+    CLS_ERR("error: script is not a string");
+    ctx->ret = -EINVAL;
+    return 1;
+  }
+  script = it->second.get_str();
+
+  // grab the target function/handler name
+  it = obj.find("handler");
+  if (it == obj.end()) {
+    CLS_ERR("error: no target handler found in JSON object");
+    ctx->ret = -EINVAL;
+    return 1;
+  }
+
+  if (it->second.type() != json_spirit::str_type) {
+    CLS_ERR("error: target handler is not a string");
+    ctx->ret = -EINVAL;
+    return 1;
+  }
+  handler = it->second.get_str();
+
+  // grab the input (optional)
+  it = obj.find("input");
+  if (it != obj.end()) {
+    if (it->second.type() != json_spirit::str_type) {
+      CLS_ERR("error: handler input is not a string");
+      ctx->ret = -EINVAL;
+      return 1;
+    }
+    input = it->second.get_str();
+    *input_len = input.size();
+  }
+
+  return 0;
+}
+
+/*
+ * Runs the script and calls the handler.
+ */
+static int clslua_eval(lua_State *L)
+{
+  struct clslua_hctx *ctx = __clslua_get_hctx(L);
+  ctx->ret = -EIO; /* assume failure */
+
+  /*
+   * Load modules, errno value constants, and other environment goodies. Must
+   * be done before loading/compiling the chunk.
+   */
+  clslua_setup_env(L);
+
+  /*
+   * Deserialize the input that contains the script, the name of the handler
+   * to call, and the handler input.
+   */
+  switch (ctx->in_enc) {
+    case JSON_ENC:
+      {
+        std::string input_str;
+        size_t input_str_len = 0;
+
+        // if there is an error decoding JSON then ctx->ret will be set and we
+        // return normally from this function.
+ if (unpack_json_command(L, ctx, ctx->script, ctx->handler, input_str, + &input_str_len)) + return 0; + + bufferptr bp(input_str.c_str(), input_str_len); + ctx->input.push_back(bp); + } + break; + + case BUFFERLIST_ENC: + { + cls_lua_eval_op op; + + try { + auto it = ctx->inbl->cbegin(); + decode(op, it); + } catch (const buffer::error &err) { + CLS_ERR("error: could not decode ceph encoded input"); + ctx->ret = -EINVAL; + return 0; + } + + ctx->script.swap(op.script); + ctx->handler.swap(op.handler); + ctx->input = op.input; + } + break; + + default: + CLS_ERR("error: unknown encoding type"); + ctx->ret = -EFAULT; + ceph_abort(); + return 0; + } + + /* + * Create table to hold registered (valid) handlers. + * + * Must be done before running the script for the first time because the + * script will immediately try to register one or more handlers using + * cls.register(function), which depends on this table. + */ + lua_pushlightuserdata(L, &clslua_registered_handle_reg_key); + lua_newtable(L); + lua_settable(L, LUA_REGISTRYINDEX); + + /* load and compile chunk */ + if (luaL_loadstring(L, ctx->script.c_str())) + return lua_error(L); + + /* execute chunk */ + lua_call(L, 0, 0); + + /* no error, but nothing left to do */ + if (!ctx->handler.size()) { + CLS_LOG(10, "no handler name provided"); + ctx->ret = 0; /* success */ + return 0; + } + + lua_getglobal(L, ctx->handler.c_str()); + if (lua_type(L, -1) != LUA_TFUNCTION) { + CLS_ERR("error: unknown handler or not function: %s", ctx->handler.c_str()); + ctx->ret = -EOPNOTSUPP; + return 0; + } + + /* throw error if function is not registered */ + clslua_check_registered_handler(L); + + /* setup the input/output bufferlists */ + clslua_pushbufferlist(L, &ctx->input); + clslua_pushbufferlist(L, ctx->outbl); + + /* + * Call the target Lua object class handler. If the call is successful then + * we will examine the return value here and store it in the context. Errors + * that occur are handled in the top-level eval() function. + */ + int top = lua_gettop(L); + lua_call(L, 2, LUA_MULTRET); + + /* store return value in context */ + if (!(lua_gettop(L) + 3 - top)) + lua_pushinteger(L, 0); + ctx->ret = luaL_checkinteger(L, -1); + + return 0; +} + +/* + * Main handler. Proxies the Lua VM and the Lua-defined handler. + */ +static int eval_generic(cls_method_context_t hctx, bufferlist *in, bufferlist *out, + InputEncoding in_enc) +{ + struct clslua_hctx ctx; + lua_State *L = NULL; + int ret = -EIO; + + /* stash context for use in Lua VM */ + ctx.hctx = &hctx; + ctx.inbl = in; + ctx.in_enc = in_enc; + ctx.outbl = out; + ctx.error.error = false; + + /* build lua vm state */ + L = luaL_newstate(); + if (!L) { + CLS_ERR("error creating new Lua state"); + goto out; + } + + /* panic handler for unhandled errors */ + lua_atpanic(L, &cls_lua_atpanic); + + if (setjmp(cls_lua_panic_jump) == 0) { + + /* + * Stash the handler context in the register. It contains the objclass + * method context, global error state, and the command and reply structs. + */ + lua_pushlightuserdata(L, &clslua_hctx_reg_key); + lua_pushlightuserdata(L, &ctx); + lua_settable(L, LUA_REGISTRYINDEX); + + /* Process the input and run the script */ + lua_pushcfunction(L, clslua_eval); + ret = lua_pcall(L, 0, 0, 0); + + /* Encountered an error? */ + if (ret) { + struct clslua_err *err = clslua_checkerr(L); + if (!err) { + CLS_ERR("error: cls_lua state machine: unexpected error"); + ceph_abort(); + } + + /* Error origin a cls_cxx_* method? 
*/ + if (err->error) { + ret = err->ret; /* cls_cxx_* return value */ + + /* Errors always abort. Fix up ret and log error */ + if (ret >= 0) { + CLS_ERR("error: unexpected handler return value"); + ret = -EFAULT; + } + + } else + ret = -EIO; /* Generic error code */ + + CLS_ERR("error: %s", lua_tostring(L, -1)); + + } else { + /* + * No Lua error encountered while running the script, but the handler + * may still have returned an error code (e.g. an errno value). + */ + ret = ctx.ret; + } + + } else { + CLS_ERR("error: recovering from Lua panic"); + ret = -EFAULT; + } + +out: + if (L) + lua_close(L); + return ret; +} + +static int eval_json(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + return eval_generic(hctx, in, out, JSON_ENC); +} + +static int eval_bufferlist(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + return eval_generic(hctx, in, out, BUFFERLIST_ENC); +} + +CLS_INIT(lua) +{ + CLS_LOG(20, "Loaded lua class!"); + + cls_handle_t h_class; + cls_method_handle_t h_eval_json; + cls_method_handle_t h_eval_bufferlist; + + cls_register("lua", &h_class); + + cls_register_cxx_method(h_class, "eval_json", + CLS_METHOD_RD | CLS_METHOD_WR, eval_json, &h_eval_json); + + cls_register_cxx_method(h_class, "eval_bufferlist", + CLS_METHOD_RD | CLS_METHOD_WR, eval_bufferlist, &h_eval_bufferlist); +} diff --git a/src/cls/lua/cls_lua.h b/src/cls/lua/cls_lua.h new file mode 100644 index 000000000..70ce9a927 --- /dev/null +++ b/src/cls/lua/cls_lua.h @@ -0,0 +1,14 @@ +#ifndef CEPH_CLS_LUA_H +#define CEPH_CLS_LUA_H + +#include <lua.hpp> +#include "include/types.h" + +#define LOG_LEVEL_DEFAULT 10 + +int luaopen_bufferlist(lua_State *L); + +bufferlist *clslua_checkbufferlist(lua_State *L, int pos = 1); +bufferlist *clslua_pushbufferlist(lua_State *L, bufferlist *set); + +#endif diff --git a/src/cls/lua/cls_lua_client.cc b/src/cls/lua/cls_lua_client.cc new file mode 100644 index 000000000..0e6544a26 --- /dev/null +++ b/src/cls/lua/cls_lua_client.cc @@ -0,0 +1,34 @@ +#include <string> +#include <vector> +#include "include/encoding.h" +#include "include/rados/librados.hpp" // for IoCtx +#include "cls_lua_client.h" +#include "cls_lua_ops.h" + +using std::string; +using std::vector; +using librados::IoCtx; +using librados::bufferlist; + +namespace cls_lua_client { + /* + * Currently the return code and return bufferlist are not wrapped in a + * protocol that allows object class vs Lua to be distinguished. For + * instance, -EOPNOTSUPP might refer to cls_lua not being found, but would + * also be returned when cls_lua is found, but a Lua handler is not found. 
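+   *
+   * A minimal caller sketch (the oid/script/handler names here are
+   * placeholders):
+   *
+   *   librados::bufferlist in, out;
+   *   in.append("handler input");
+   *   int r = cls_lua_client::exec(ioctx, "oid", script, "my_handler", in, out);
+   *   // r < 0 may originate in RADOS, in cls_lua itself, or in the handler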
+ */ + int exec(IoCtx& ioctx, const string& oid, const string& script, + const string& handler, bufferlist& input, bufferlist& output) + { + cls_lua_eval_op op; + + op.script = script; + op.handler = handler; + op.input = input; + + bufferlist inbl; + encode(op, inbl); + + return ioctx.exec(oid, "lua", "eval_bufferlist", inbl, output); + } +} diff --git a/src/cls/lua/cls_lua_client.h b/src/cls/lua/cls_lua_client.h new file mode 100644 index 000000000..7e7e164bf --- /dev/null +++ b/src/cls/lua/cls_lua_client.h @@ -0,0 +1,13 @@ +#ifndef CLS_LUA_CLIENT_HPP +#define CLS_LUA_CLIENT_HPP +#include <string> + +#include "include/rados/librados.hpp" + +namespace cls_lua_client { + int exec(librados::IoCtx& ioctx, const std::string& oid, + const std::string& script, const std::string& handler, + librados::bufferlist& inbl, librados::bufferlist& outbl); +} + +#endif diff --git a/src/cls/lua/cls_lua_ops.h b/src/cls/lua/cls_lua_ops.h new file mode 100644 index 000000000..c4afbd8a2 --- /dev/null +++ b/src/cls/lua/cls_lua_ops.h @@ -0,0 +1,31 @@ +#ifndef CEPH_CLS_LUA_OPS_H +#define CEPH_CLS_LUA_OPS_H + +#include <string> + +#include "include/encoding.h" + +struct cls_lua_eval_op { + std::string script; + std::string handler; + bufferlist input; + + void encode(bufferlist &bl) const { + ENCODE_START(1, 1, bl); + encode(script, bl); + encode(handler, bl); + encode(input, bl); + ENCODE_FINISH(bl); + } + + void decode(bufferlist::const_iterator &bl) { + DECODE_START(1, bl); + decode(script, bl); + decode(handler, bl); + decode(input, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_lua_eval_op) + +#endif diff --git a/src/cls/lua/lua_bufferlist.cc b/src/cls/lua/lua_bufferlist.cc new file mode 100644 index 000000000..5d44d0aef --- /dev/null +++ b/src/cls/lua/lua_bufferlist.cc @@ -0,0 +1,180 @@ +/* + * Lua module wrapping librados::bufferlist + */ +#include <errno.h> +#include <string> +#include <sstream> +#include <math.h> +#include <lua.hpp> +#include "include/types.h" +#include "include/buffer.h" +#include "objclass/objclass.h" +#include "cls/lua/cls_lua.h" + +#define LUA_BUFFERLIST "ClsLua.Bufferlist" + +struct bufferlist_wrap { + bufferlist *bl; + int gc; /* do garbage collect? */ +}; + +static inline struct bufferlist_wrap *to_blwrap(lua_State *L, int pos = 1) +{ + return (bufferlist_wrap *)luaL_checkudata(L, pos, LUA_BUFFERLIST); +} + +bufferlist *clslua_checkbufferlist(lua_State *L, int pos) +{ + struct bufferlist_wrap *blw = to_blwrap(L, pos); + return blw->bl; +} + +/* + * Pushes a new bufferlist userdata object onto the stack. If @set is non-null + * it is assumed to be a bufferlist that should not be garbage collected. + */ +bufferlist *clslua_pushbufferlist(lua_State *L, bufferlist *set) +{ + bufferlist_wrap *blw = static_cast<bufferlist_wrap *>(lua_newuserdata(L, sizeof(*blw))); + blw->bl = set ? set : new bufferlist(); + blw->gc = set ? 
0 : 1; + luaL_getmetatable(L, LUA_BUFFERLIST); + lua_setmetatable(L, -2); + return blw->bl; +} + +/* + * Create a new bufferlist + */ +static int bl_new(lua_State *L) +{ + clslua_pushbufferlist(L, NULL); + return 1; +} + +/* + * Convert bufferlist to Lua string + */ +static int bl_str(lua_State *L) +{ + bufferlist *bl = clslua_checkbufferlist(L); + lua_pushlstring(L, bl->c_str(), bl->length()); + return 1; +} + +/* + * Append a Lua string to bufferlist + */ +static int bl_append(lua_State *L) +{ + bufferlist *bl = clslua_checkbufferlist(L); + luaL_checktype(L, 2, LUA_TSTRING); + + size_t len; + const char *data = lua_tolstring(L, 2, &len); + bl->append(data, len); + + return 0; +} + +/* + * Return the length in bytes of bufferlist + */ +static int bl_len(lua_State *L) +{ + bufferlist *bl = clslua_checkbufferlist(L); + lua_pushinteger(L, bl->length()); + return 1; +} + +/* + * Perform byte-for-byte bufferlist equality test + */ +static int bl_eq(lua_State *L) +{ + bufferlist *bl1 = clslua_checkbufferlist(L, 1); + bufferlist *bl2 = clslua_checkbufferlist(L, 2); + lua_pushboolean(L, *bl1 == *bl2 ? 1 : 0); + return 1; +} + +/* + * Bufferlist < operator + */ +static int bl_lt(lua_State *L) +{ + bufferlist *bl1 = clslua_checkbufferlist(L, 1); + bufferlist *bl2 = clslua_checkbufferlist(L, 2); + lua_pushboolean(L, *bl1 < *bl2 ? 1 : 0); + return 1; +} + +/* + * Bufferlist <= operator + */ +static int bl_le(lua_State *L) +{ + bufferlist *bl1 = clslua_checkbufferlist(L, 1); + bufferlist *bl2 = clslua_checkbufferlist(L, 2); + lua_pushboolean(L, *bl1 <= *bl2 ? 1 : 0); + return 1; +} + +/* + * Bufferlist concatentation + */ +static int bl_concat(lua_State *L) +{ + bufferlist *bl1 = clslua_checkbufferlist(L, 1); + bufferlist *bl2 = clslua_checkbufferlist(L, 2); + bufferlist *ret = clslua_pushbufferlist(L, NULL); + ret->append(bl1->c_str(), bl1->length()); + ret->append(bl2->c_str(), bl2->length()); + return 1; +} + +/* + * Garbage collect bufferlist + */ +static int bl_gc(lua_State *L) +{ + struct bufferlist_wrap *blw = to_blwrap(L); + ceph_assert(blw); + ceph_assert(blw->bl); + if (blw->gc) + delete blw->bl; + return 0; +} + +static const struct luaL_Reg bufferlist_methods[] = { + {"str", bl_str}, + {"append", bl_append}, + {"__concat", bl_concat}, + {"__len", bl_len}, + {"__lt", bl_lt}, + {"__le", bl_le}, + {"__gc", bl_gc}, + {"__eq", bl_eq}, + {NULL, NULL} +}; + +static const struct luaL_Reg bllib_f[] = { + {"new", bl_new}, + {NULL, NULL} +}; + +int luaopen_bufferlist(lua_State *L) +{ + /* Setup bufferlist user-data type */ + luaL_newmetatable(L, LUA_BUFFERLIST); + lua_pushvalue(L, -1); + lua_setfield(L, -2, "__index"); + + luaL_setfuncs(L, bufferlist_methods, 0); + lua_pop(L, 1); + + lua_newtable(L); + luaL_setfuncs(L, bllib_f, 0); + + return 1; +} diff --git a/src/cls/numops/cls_numops.cc b/src/cls/numops/cls_numops.cc new file mode 100644 index 000000000..331eb7dca --- /dev/null +++ b/src/cls/numops/cls_numops.cc @@ -0,0 +1,168 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2015 CERN + * + * Author: Joaquim Rocha <joaquim.rocha@cern.ch> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + */ + +/** \file + * + * This is an OSD class that implements methods for object numeric options on + * its omap values. + * + */ + +#include "objclass/objclass.h" +#include <errno.h> +#include <string> +#include <sstream> +#include <cstdio> +#include <include/compat.h> + +#define DECIMAL_PRECISION 10 + +using ceph::bufferlist; +using std::string; +using ceph::decode; +using ceph::encode; + +CLS_VER(1,0) +CLS_NAME(numops) + +static int add(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + string key, diff_str; + + auto iter = in->cbegin(); + try { + decode(key, iter); + decode(diff_str, iter); + } catch (const ceph::buffer::error &err) { + CLS_LOG(20, "add: invalid decode of input"); + return -EINVAL; + } + + char *end_ptr = 0; + double difference = strtod(diff_str.c_str(), &end_ptr); + + if (end_ptr && *end_ptr != '\0') { + CLS_ERR("add: invalid input value: %s", diff_str.c_str()); + return -EINVAL; + } + + bufferlist bl; + int ret = cls_cxx_map_get_val(hctx, key, &bl); + + double value; + + if (ret == -ENODATA || bl.length() == 0) { + value = 0; + } else if (ret < 0) { + if (ret != -ENOENT) { + CLS_ERR("add: error reading omap key %s: %d", key.c_str(), ret); + } + return ret; + } else { + std::string stored_value(bl.c_str(), bl.length()); + end_ptr = 0; + value = strtod(stored_value.c_str(), &end_ptr); + + if (end_ptr && *end_ptr != '\0') { + CLS_ERR("add: invalid stored value: %s", stored_value.c_str()); + return -EBADMSG; + } + } + + value += difference; + + std::stringstream stream; + stream << std::setprecision(DECIMAL_PRECISION) << value; + + bufferlist new_value; + new_value.append(stream.str()); + + return cls_cxx_map_set_val(hctx, key, &new_value); +} + +static int mul(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + string key, diff_str; + + auto iter = in->cbegin(); + try { + decode(key, iter); + decode(diff_str, iter); + } catch (const ceph::buffer::error &err) { + CLS_LOG(20, "mul: invalid decode of input"); + return -EINVAL; + } + + char *end_ptr = 0; + double difference = strtod(diff_str.c_str(), &end_ptr); + + if (end_ptr && *end_ptr != '\0') { + CLS_ERR("mul: invalid input value: %s", diff_str.c_str()); + return -EINVAL; + } + + bufferlist bl; + int ret = cls_cxx_map_get_val(hctx, key, &bl); + + double value; + + if (ret == -ENODATA || bl.length() == 0) { + value = 0; + } else if (ret < 0) { + if (ret != -ENOENT) { + CLS_ERR("mul: error reading omap key %s: %d", key.c_str(), ret); + } + return ret; + } else { + std::string stored_value(bl.c_str(), bl.length()); + end_ptr = 0; + value = strtod(stored_value.c_str(), &end_ptr); + + if (end_ptr && *end_ptr != '\0') { + CLS_ERR("mul: invalid stored value: %s", stored_value.c_str()); + return -EBADMSG; + } + } + + value *= difference; + + std::stringstream stream; + stream << std::setprecision(DECIMAL_PRECISION) << value; + + bufferlist new_value; + new_value.append(stream.str()); + + return cls_cxx_map_set_val(hctx, key, &new_value); +} + +CLS_INIT(numops) +{ + CLS_LOG(20, "loading cls_numops"); + + cls_handle_t h_class; + cls_method_handle_t h_add; + cls_method_handle_t h_mul; + + cls_register("numops", &h_class); + + cls_register_cxx_method(h_class, "add", + CLS_METHOD_RD | CLS_METHOD_WR, + add, &h_add); + + cls_register_cxx_method(h_class, "mul", + CLS_METHOD_RD | CLS_METHOD_WR, + mul, &h_mul); +} diff --git a/src/cls/numops/cls_numops_client.cc b/src/cls/numops/cls_numops_client.cc new file mode 100644 index 000000000..fa1a69f2e --- /dev/null +++ b/src/cls/numops/cls_numops_client.cc @@ 
-0,0 +1,79 @@ +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2015 CERN + * + * Author: Joaquim Rocha <joaquim.rocha@cern.ch> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + */ + +#include "cls/numops/cls_numops_client.h" +#include "include/encoding.h" +#include "include/rados/librados.hpp" + +#include <errno.h> +#include <sstream> + +namespace rados { + namespace cls { + namespace numops { + + int add(librados::IoCtx *ioctx, + const std::string& oid, + const std::string& key, + double value_to_add) + { + bufferlist in, out; + encode(key, in); + + std::stringstream stream; + stream << value_to_add; + + encode(stream.str(), in); + + return ioctx->exec(oid, "numops", "add", in, out); + } + + int sub(librados::IoCtx *ioctx, + const std::string& oid, + const std::string& key, + double value_to_subtract) + { + return add(ioctx, oid, key, -value_to_subtract); + } + + int mul(librados::IoCtx *ioctx, + const std::string& oid, + const std::string& key, + double value_to_multiply) + { + bufferlist in, out; + encode(key, in); + + std::stringstream stream; + stream << value_to_multiply; + + encode(stream.str(), in); + + return ioctx->exec(oid, "numops", "mul", in, out); + } + + int div(librados::IoCtx *ioctx, + const std::string& oid, + const std::string& key, + double value_to_divide) + { + if (value_to_divide == 0) + return -EINVAL; + + return mul(ioctx, oid, key, 1 / value_to_divide); + } + + } // namespace numops + } // namespace cls +} // namespace rados diff --git a/src/cls/numops/cls_numops_client.h b/src/cls/numops/cls_numops_client.h new file mode 100644 index 000000000..0b0ccbe5b --- /dev/null +++ b/src/cls/numops/cls_numops_client.h @@ -0,0 +1,50 @@ +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2015 CERN + * + * Author: Joaquim Rocha <joaquim.rocha@cern.ch> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + */ + +#ifndef CEPH_LIBRBD_CLS_NUMOPS_CLIENT_H +#define CEPH_LIBRBD_CLS_NUMOPS_CLIENT_H + +#include "include/rados/librados_fwd.hpp" +#include <string> + +namespace rados { + namespace cls { + namespace numops { + + extern int add(librados::IoCtx *ioctx, + const std::string& oid, + const std::string& key, + double value_to_add); + + extern int sub(librados::IoCtx *ioctx, + const std::string& oid, + const std::string& key, + double value_to_subtract); + + extern int mul(librados::IoCtx *ioctx, + const std::string& oid, + const std::string& key, + double value_to_multiply); + + extern int div(librados::IoCtx *ioctx, + const std::string& oid, + const std::string& key, + double value_to_divide); + + } // namespace numops + } // namespace cls +} // namespace rados + +#endif // CEPH_LIBRBD_CLS_NUMOPS_CLIENT_H + diff --git a/src/cls/otp/cls_otp.cc b/src/cls/otp/cls_otp.cc new file mode 100644 index 000000000..9143f6241 --- /dev/null +++ b/src/cls/otp/cls_otp.cc @@ -0,0 +1,578 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +/** \file + * + * This is an OSD class that implements methods for management + * and use of otp (one time password). 
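+ *
+ * Only TOTP is currently implemented (HOTP is declared but unsupported in
+ * cls_otp_types.h); token verification is delegated to liboath's
+ * oath_totp_validate2().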
+ * + */ + +#include <errno.h> +#include <map> +#include <list> + +#include <boost/range/adaptor/reversed.hpp> + +#include <liboath/oath.h> + +#include "include/types.h" +#include "include/utime.h" +#include "objclass/objclass.h" + +#include "common/errno.h" +#include "common/Clock.h" + +#include "cls/otp/cls_otp_ops.h" +#include "cls/otp/cls_otp_types.h" + +using std::list; +using std::string; +using std::set; + +using ceph::bufferlist; +using ceph::encode; +using ceph::real_clock; + +using namespace rados::cls::otp; + + +CLS_VER(1,0) +CLS_NAME(otp) + +#define ATTEMPTS_PER_WINDOW 5 + +static string otp_header_key = "header"; +static string otp_key_prefix = "otp/"; + +struct otp_header { + set<string> ids; + + otp_header() {} + + void encode(bufferlist &bl) const { + ENCODE_START(1, 1, bl); + encode(ids, bl); + ENCODE_FINISH(bl); + } + void decode(bufferlist::const_iterator &bl) { + DECODE_START(1, bl); + decode(ids, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(otp_header) + +struct otp_instance { + otp_info_t otp; + + list<otp_check_t> last_checks; + uint64_t last_success{0}; /* otp counter/step of last successful check */ + + otp_instance() {} + + void encode(bufferlist &bl) const { + ENCODE_START(1, 1, bl); + encode(otp, bl); + encode(last_checks, bl); + encode(last_success, bl); + ENCODE_FINISH(bl); + } + void decode(bufferlist::const_iterator &bl) { + DECODE_START(1, bl); + decode(otp, bl); + decode(last_checks, bl); + decode(last_success, bl); + DECODE_FINISH(bl); + } + + void trim_expired(const ceph::real_time& now); + void check(const string& token, const string& val, bool *update); + bool verify(const ceph::real_time& timestamp, const string& val); + + void find(const string& token, otp_check_t *result); +}; +WRITE_CLASS_ENCODER(otp_instance) + + +void otp_instance::trim_expired(const ceph::real_time& now) +{ + ceph::real_time window_start = now - std::chrono::seconds(otp.step_size); + + while (!last_checks.empty() && + last_checks.front().timestamp < window_start) { + last_checks.pop_front(); + } +} + +void otp_instance::check(const string& token, const string& val, bool *update) +{ + ceph::real_time now = ceph::real_clock::now(); + trim_expired(now); + + if (last_checks.size() >= ATTEMPTS_PER_WINDOW) { + /* too many attempts */ + *update = false; + return; + } + + otp_check_t check; + check.token = token; + check.timestamp = now; + check.result = (verify(now, val) ? 
OTP_CHECK_SUCCESS : OTP_CHECK_FAIL); + + last_checks.push_back(check); + + *update = true; +} + +bool otp_instance::verify(const ceph::real_time& timestamp, const string& val) +{ + uint64_t index; + uint32_t secs = (uint32_t)ceph::real_clock::to_time_t(timestamp); + int result = oath_totp_validate2(otp.seed_bin.c_str(), otp.seed_bin.length(), + secs, otp.step_size, otp.time_ofs, otp.window, + nullptr /* otp pos */, + val.c_str()); + if (result == OATH_INVALID_OTP || + result < 0) { + CLS_LOG(20, "otp check failed, result=%d", result); + return false; + } + + index = result + (secs - otp.time_ofs) / otp.step_size; + + if (index <= last_success) { /* already used value */ + CLS_LOG(20, "otp, use of old token: index=%lld last_success=%lld", (long long)index, (long long)last_success); + return false; + } + + last_success = index; + + return true; +} + +void otp_instance::find(const string& token, otp_check_t *result) +{ + auto now = real_clock::now(); + trim_expired(now); + + for (auto& entry : boost::adaptors::reverse(last_checks)) { + if (entry.token == token) { + *result = entry; + return; + } + } + result->token = token; + result->result = OTP_CHECK_UNKNOWN; + result->timestamp = now; +} + +static int get_otp_instance(cls_method_context_t hctx, const string& id, otp_instance *instance) +{ + bufferlist bl; + string key = otp_key_prefix + id; + + int r = cls_cxx_map_get_val(hctx, key, &bl); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading key %s: %d", key.c_str(), r); + } + return r; + } + + try { + auto it = bl.cbegin(); + decode(*instance, it); + } catch (const ceph::buffer::error &err) { + CLS_ERR("ERROR: failed to decode %s", key.c_str()); + return -EIO; + } + + return 0; +} + +static int write_otp_instance(cls_method_context_t hctx, const otp_instance& instance) +{ + string key = otp_key_prefix + instance.otp.id; + + bufferlist bl; + encode(instance, bl); + + int r = cls_cxx_map_set_val(hctx, key, &bl); + if (r < 0) { + CLS_ERR("ERROR: %s(): failed to store key (otp id=%s, r=%d)", __func__, instance.otp.id.c_str(), r); + return r; + } + + return 0; +} + +static int remove_otp_instance(cls_method_context_t hctx, const string& id) +{ + string key = otp_key_prefix + id; + + int r = cls_cxx_map_remove_key(hctx, key); + if (r < 0) { + CLS_ERR("ERROR: %s(): failed to remove key (otp id=%s, r=%d)", __func__, id.c_str(), r); + return r; + } + + return 0; +} + +static int read_header(cls_method_context_t hctx, otp_header *h) +{ + bufferlist bl; + encode(h, bl); + int r = cls_cxx_map_get_val(hctx, otp_header_key, &bl); + if (r == -ENOENT || r == -ENODATA) { + *h = otp_header(); + return 0; + } + if (r < 0) { + CLS_ERR("ERROR: %s(): failed to read header (r=%d)", __func__, r); + return r; + } + + if (bl.length() == 0) { + *h = otp_header(); + return 0; + } + + auto iter = bl.cbegin(); + try { + decode(*h, iter); + } catch (ceph::buffer::error& err) { + CLS_ERR("failed to decode otp_header"); + return -EIO; + } + + return 0; +} + +static int write_header(cls_method_context_t hctx, const otp_header& h) +{ + bufferlist bl; + encode(h, bl); + + int r = cls_cxx_map_set_val(hctx, otp_header_key, &bl); + if (r < 0) { + CLS_ERR("failed to store header (r=%d)", r); + return r; + } + + return 0; +} + +static int parse_seed(const string& seed, SeedType seed_type, bufferlist *seed_bin) +{ + size_t slen = seed.length(); + char secret[seed.length()]; + char *psecret = secret; + int result; + bool need_free = false; + + seed_bin->clear(); + + switch (seed_type) { + case OTP_SEED_BASE32: + need_free 
= true; /* oath_base32_decode allocates dest buffer */ + result = oath_base32_decode(seed.c_str(), seed.length(), + &psecret, &slen); + break; + default: /* just assume hex is the default */ + result = oath_hex2bin(seed.c_str(), psecret, &slen); + } + if (result != OATH_OK) { + CLS_LOG(20, "failed to parse seed"); + return -EINVAL; + } + + seed_bin->append(psecret, slen); + + if (need_free) { + free(psecret); + } + + return 0; +} + +static int otp_set_op(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "%s", __func__); + cls_otp_set_otp_op op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("ERROR: %s(): failed to decode request", __func__); + return -EINVAL; + } + + otp_header h; + int r = read_header(hctx, &h); + if (r < 0) { + return r; + } + + for (auto entry : op.entries) { + otp_instance instance; + r = get_otp_instance(hctx, entry.id, &instance); + if (r < 0 && + r != -ENOENT) { + return r; + } + instance.otp = entry; + + r = parse_seed(instance.otp.seed, instance.otp.seed_type, &instance.otp.seed_bin); + if (r < 0) { + return r; + } + + r = write_otp_instance(hctx, instance); + if (r < 0) { + return r; + } + + h.ids.insert(entry.id); + } + + r = write_header(hctx, h); + if (r < 0) { + return r; + } + + return 0; +} + +static int otp_remove_op(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "%s", __func__); + cls_otp_remove_otp_op op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("ERROR: %s(): failed to decode request", __func__); + return -EINVAL; + } + + otp_header h; + bool removed_existing = false; + int r = read_header(hctx, &h); + if (r < 0) { + return r; + } + + for (auto id : op.ids) { + bool existed = (h.ids.find(id) != h.ids.end()); + removed_existing = (removed_existing || existed); + + if (!existed) { + continue; + } + + r = remove_otp_instance(hctx, id); + if (r < 0) { + return r; + } + + h.ids.erase(id); + } + + if (removed_existing) { + r = write_header(hctx, h); + if (r < 0) { + return r; + } + } + + return 0; +} + +static int otp_get_op(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "%s", __func__); + cls_otp_get_otp_op op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("ERROR: %s(): failed to decode request", __func__); + return -EINVAL; + } + + cls_otp_get_otp_reply result; + + otp_header h; + int r; + + r = read_header(hctx, &h); + if (r < 0) { + return r; + } + + if (op.get_all) { + op.ids.clear(); + for (auto id : h.ids) { + op.ids.push_back(id); + } + } + + for (auto id : op.ids) { + bool exists = (h.ids.find(id) != h.ids.end()); + + if (!exists) { + continue; + } + + otp_instance instance; + r = get_otp_instance(hctx, id, &instance); + if (r < 0) { + return r; + } + + result.found_entries.push_back(instance.otp); + } + + encode(result, *out); + + return 0; +} + +static int otp_check_op(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "%s", __func__); + cls_otp_check_otp_op op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("ERROR: %s(): failed to decode request", __func__); + return -EINVAL; + } + + otp_header h; + int r; + + otp_instance instance; + + r = get_otp_instance(hctx, op.id, &instance); + if (r < 0) { + return r; + } + + bool update{false}; + instance.check(op.token, op.val, 
&update); + + if (update) { + r = write_otp_instance(hctx, instance); + if (r < 0) { + return r; + } + } + + return 0; +} + +static int otp_get_result(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "%s", __func__); + cls_otp_check_otp_op op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("ERROR: %s(): failed to decode request", __func__); + return -EINVAL; + } + + otp_header h; + int r; + + otp_instance instance; + + r = get_otp_instance(hctx, op.id, &instance); + if (r < 0) { + return r; + } + + cls_otp_get_result_reply reply; + instance.find(op.token, &reply.result); + encode(reply, *out); + + return 0; +} + +static int otp_get_current_time_op(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "%s", __func__); + cls_otp_get_current_time_op op; + try { + auto iter = in->cbegin(); + decode(op, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("ERROR: %s(): failed to decode request", __func__); + return -EINVAL; + } + + cls_otp_get_current_time_reply reply; + reply.time = real_clock::now(); + encode(reply, *out); + + return 0; +} + +CLS_INIT(otp) +{ + CLS_LOG(20, "Loaded otp class!"); + + oath_init(); + + cls_handle_t h_class; + cls_method_handle_t h_set_otp_op; + cls_method_handle_t h_get_otp_op; + cls_method_handle_t h_check_otp_op; + cls_method_handle_t h_get_result_op; /* + * need to check and get check result in two phases. The + * reason is that we need to update failure internally, + * however, there's no way to both return a failure and + * update, because a failure will cancel the operation, + * and write operations will not return a value. So + * we're returning a success, potentially updating the + * status internally, then a subsequent request can try + * to fetch the status. If it fails it means that failed + * to authenticate. + */ + cls_method_handle_t h_remove_otp_op; + cls_method_handle_t h_get_current_time_op; + + cls_register("otp", &h_class); + cls_register_cxx_method(h_class, "otp_set", + CLS_METHOD_RD | CLS_METHOD_WR, + otp_set_op, &h_set_otp_op); + cls_register_cxx_method(h_class, "otp_get", + CLS_METHOD_RD, + otp_get_op, &h_get_otp_op); + cls_register_cxx_method(h_class, "otp_check", + CLS_METHOD_RD | CLS_METHOD_WR, + otp_check_op, &h_check_otp_op); + cls_register_cxx_method(h_class, "otp_get_result", + CLS_METHOD_RD, + otp_get_result, &h_get_result_op); + cls_register_cxx_method(h_class, "otp_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + otp_remove_op, &h_remove_otp_op); + cls_register_cxx_method(h_class, "get_current_time", + CLS_METHOD_RD, + otp_get_current_time_op, &h_get_current_time_op); + + return; +} diff --git a/src/cls/otp/cls_otp_client.cc b/src/cls/otp/cls_otp_client.cc new file mode 100644 index 000000000..0ba55571f --- /dev/null +++ b/src/cls/otp/cls_otp_client.cc @@ -0,0 +1,191 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ *
+ */
+
+#include "include/types.h"
+#include "msg/msg_types.h"
+#include "include/rados/librados.hpp"
+#include "include/utime.h"
+
+using std::list;
+using std::string;
+using namespace librados;
+
+#include "cls/otp/cls_otp_ops.h"
+#include "cls/otp/cls_otp_client.h"
+
+#include "common/random_string.h" /* for gen_rand_alphanumeric */
+
+namespace rados {
+  namespace cls {
+    namespace otp {
+
+      void OTP::create(librados::ObjectWriteOperation *rados_op,
+                       const otp_info_t& config) {
+        cls_otp_set_otp_op op;
+        op.entries.push_back(config);
+        bufferlist in;
+        encode(op, in);
+        rados_op->exec("otp", "otp_set", in);
+      }
+
+      void OTP::set(librados::ObjectWriteOperation *rados_op,
+                    const list<otp_info_t>& entries) {
+        cls_otp_set_otp_op op;
+        op.entries = entries;
+        bufferlist in;
+        encode(op, in);
+        rados_op->exec("otp", "otp_set", in);
+      }
+
+      void OTP::remove(librados::ObjectWriteOperation *rados_op,
+                       const string& id) {
+        cls_otp_remove_otp_op op;
+        op.ids.push_back(id);
+        bufferlist in;
+        encode(op, in);
+        rados_op->exec("otp", "otp_remove", in);
+      }
+
+      int OTP::check(CephContext *cct, librados::IoCtx& ioctx, const string& oid,
+                     const string& id, const string& val, otp_check_t *result) {
+        cls_otp_check_otp_op op;
+        op.id = id;
+        op.val = val;
+#define TOKEN_LEN 16
+        op.token = gen_rand_alphanumeric(cct, TOKEN_LEN);
+
+        bufferlist in;
+        bufferlist out;
+        encode(op, in);
+        int r = ioctx.exec(oid, "otp", "otp_check", in, out);
+        if (r < 0) {
+          return r;
+        }
+
+        cls_otp_get_result_op op2;
+        op2.token = op.token;
+        bufferlist in2;
+        bufferlist out2;
+        encode(op2, in2);
+        r = ioctx.exec(oid, "otp", "otp_get_result", in2, out2);
+        if (r < 0) {
+          return r;
+        }
+
+        auto iter = out2.cbegin();
+        cls_otp_get_result_reply ret;
+        try {
+          decode(ret, iter);
+        } catch (ceph::buffer::error& err) {
+          return -EBADMSG;
+        }
+
+        *result = ret.result;
+
+        return 0;
+      }
+
+      int OTP::get(librados::ObjectReadOperation *rop,
+                   librados::IoCtx& ioctx, const string& oid,
+                   const list<string> *ids, bool get_all, list<otp_info_t> *result) {
+        librados::ObjectReadOperation _rop;
+        if (!rop) {
+          rop = &_rop;
+        }
+        cls_otp_get_otp_op op;
+        if (ids) {
+          op.ids = *ids;
+        }
+        op.get_all = get_all;
+        bufferlist in;
+        bufferlist out;
+        int op_ret;
+        encode(op, in);
+        rop->exec("otp", "otp_get", in, &out, &op_ret);
+        int r = ioctx.operate(oid, rop, nullptr);
+        if (r < 0) {
+          return r;
+        }
+        if (op_ret < 0) {
+          return op_ret;
+        }
+
+        cls_otp_get_otp_reply ret;
+        auto iter = out.cbegin();
+        try {
+          decode(ret, iter);
+        } catch (ceph::buffer::error& err) {
+          return -EBADMSG;
+        }
+
+        *result = ret.found_entries;
+
+        return 0;
+      }
+
+      int OTP::get(librados::ObjectReadOperation *op,
+                   librados::IoCtx& ioctx, const string& oid,
+                   const string& id, otp_info_t *result) {
+        list<string> ids{ id };
+        list<otp_info_t> ret;
+
+        int r = get(op, ioctx, oid, &ids, false, &ret);
+        if (r < 0) {
+          return r;
+        }
+        if (ret.empty()) {
+          return -ENOENT;
+        }
+        *result = ret.front();
+
+        return 0;
+      }
+
+      int OTP::get_all(librados::ObjectReadOperation *op, librados::IoCtx& ioctx, const string& oid,
+                       list<otp_info_t> *result) {
+        return get(op, ioctx, oid, nullptr, true, result);
+      }
+
+      int OTP::get_current_time(librados::IoCtx& ioctx, const string& oid,
+                                ceph::real_time *result) {
+        cls_otp_get_current_time_op op;
+        bufferlist in;
+        bufferlist out;
+        int op_ret;
+        encode(op, in);
+        ObjectReadOperation rop;
+        rop.exec("otp", "get_current_time", in, &out, &op_ret);
+        int r = ioctx.operate(oid, &rop, nullptr);
+        if (r < 0) {
+          return r;
+        }
+        if
(op_ret < 0) { + return op_ret; + } + + cls_otp_get_current_time_reply ret; + auto iter = out.cbegin(); + try { + decode(ret, iter); + } catch (ceph::buffer::error& err) { + return -EBADMSG; + } + + *result = ret.time; + + return 0; + } + } // namespace otp + } // namespace cls +} // namespace rados diff --git a/src/cls/otp/cls_otp_client.h b/src/cls/otp/cls_otp_client.h new file mode 100644 index 000000000..a921e4e38 --- /dev/null +++ b/src/cls/otp/cls_otp_client.h @@ -0,0 +1,60 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_OTP_CLIENT_H +#define CEPH_CLS_OTP_CLIENT_H + +#include "include/rados/librados_fwd.hpp" +#include "cls/otp/cls_otp_types.h" + +namespace rados { + namespace cls { + namespace otp { + + class OTP { + public: + static void create(librados::ObjectWriteOperation *op, const otp_info_t& config); + static void set(librados::ObjectWriteOperation *op, const std::list<otp_info_t>& entries); + static void remove(librados::ObjectWriteOperation *op, const std::string& id); + static int get(librados::ObjectReadOperation *op, + librados::IoCtx& ioctx, const std::string& oid, + const std::string& id, otp_info_t *result); + static int get_all(librados::ObjectReadOperation *op, + librados::IoCtx& ioctx, const std::string& oid, + std::list<otp_info_t> *result); +// these overloads which call io_ctx.operate() or io_ctx.exec() should not be called in the rgw. +// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()/exec() +#ifndef CLS_CLIENT_HIDE_IOCTX + static int get(librados::ObjectReadOperation *op, + librados::IoCtx& ioctx, const std::string& oid, + const std::list<std::string> *ids, bool get_all, std::list<otp_info_t> *result); + static int check(CephContext *cct, librados::IoCtx& ioctx, const std::string& oid, + const std::string& id, const std::string& val, otp_check_t *result); + static int get_current_time(librados::IoCtx& ioctx, const std::string& oid, + ceph::real_time *result); +#endif + }; + + class TOTPConfig { + otp_info_t config; + public: + TOTPConfig(const std::string& id, const std::string& seed) { + config.type = OTP_TOTP; + config.id = id; + config.seed = seed; + } + void set_step_size(int step_size) { + config.step_size = step_size; + } + void set_window(int window) { + config.window = window; + } + void get_config(otp_info_t *conf) { + *conf = config; + } + }; + } // namespace otp + } // namespace cls +} // namespace rados + +#endif diff --git a/src/cls/otp/cls_otp_ops.h b/src/cls/otp/cls_otp_ops.h new file mode 100644 index 000000000..970bd3a78 --- /dev/null +++ b/src/cls/otp/cls_otp_ops.h @@ -0,0 +1,169 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_OTP_OPS_H +#define CEPH_CLS_OTP_OPS_H + +#include "include/types.h" +#include "include/utime.h" +#include "cls/otp/cls_otp_types.h" + +struct cls_otp_set_otp_op +{ + std::list<rados::cls::otp::otp_info_t> entries; + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(entries, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(entries, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_otp_set_otp_op) + +struct cls_otp_check_otp_op +{ + std::string id; + std::string val; + std::string token; + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(id, bl); + encode(val, bl); + encode(token, bl); + 
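+    // 'token' is a caller-generated nonce (see OTP::check() in the client);
+    // the otp_get_result method later uses it to look up this check's outcome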
ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(id, bl); + decode(val, bl); + decode(token, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_otp_check_otp_op) + +struct cls_otp_get_result_op +{ + std::string token; + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(token, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(token, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_otp_get_result_op) + +struct cls_otp_get_result_reply +{ + rados::cls::otp::otp_check_t result; + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(result, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(result, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_otp_get_result_reply) + +struct cls_otp_remove_otp_op +{ + std::list<std::string> ids; + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(ids, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(ids, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_otp_remove_otp_op) + +struct cls_otp_get_otp_op +{ + bool get_all{false}; + std::list<std::string> ids; + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(get_all, bl); + encode(ids, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(get_all, bl); + decode(ids, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_otp_get_otp_op) + +struct cls_otp_get_otp_reply +{ + std::list<rados::cls::otp::otp_info_t> found_entries; + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(found_entries, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(found_entries, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_otp_get_otp_reply) + +struct cls_otp_get_current_time_op +{ + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_otp_get_current_time_op) + +struct cls_otp_get_current_time_reply +{ + ceph::real_time time; + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(time, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(time, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_otp_get_current_time_reply) + +#endif diff --git a/src/cls/otp/cls_otp_types.cc b/src/cls/otp/cls_otp_types.cc new file mode 100644 index 000000000..1f95749ed --- /dev/null +++ b/src/cls/otp/cls_otp_types.cc @@ -0,0 +1,71 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ *
+ */
+
+#include "objclass/objclass.h"
+#include "common/Formatter.h"
+#include "common/Clock.h"
+#include "common/ceph_json.h"
+
+#include "include/utime.h"
+
+#include "cls/otp/cls_otp_types.h"
+
+using std::string;
+
+using ceph::Formatter;
+
+using namespace rados::cls::otp;
+
+void otp_info_t::dump(Formatter *f) const
+{
+  encode_json("type", (int)type, f);
+  encode_json("id", id, f);
+  encode_json("seed", seed, f);
+  string st;
+  switch (seed_type) {
+    case rados::cls::otp::OTP_SEED_HEX:
+      st = "hex";
+      break;
+    case rados::cls::otp::OTP_SEED_BASE32:
+      st = "base32";
+      break;
+    default:
+      st = "unknown";
+  }
+  encode_json("seed_type", st, f);
+  encode_json("time_ofs", time_ofs, f);
+  encode_json("step_size", step_size, f);
+  encode_json("window", window, f);
+}
+
+void otp_info_t::decode_json(JSONObj *obj)
+{
+  int t{-1};
+  JSONDecoder::decode_json("type", t, obj);
+  type = (OTPType)t;
+  JSONDecoder::decode_json("id", id, obj);
+  JSONDecoder::decode_json("seed", seed, obj);
+  string st;
+  JSONDecoder::decode_json("seed_type", st, obj);
+  if (st == "hex") {
+    seed_type = OTP_SEED_HEX;
+  } else if (st == "base32") {
+    seed_type = OTP_SEED_BASE32;
+  } else {
+    seed_type = OTP_SEED_UNKNOWN;
+  }
+  JSONDecoder::decode_json("time_ofs", time_ofs, obj);
+  JSONDecoder::decode_json("step_size", step_size, obj);
+  JSONDecoder::decode_json("window", window, obj);
+}
diff --git a/src/cls/otp/cls_otp_types.h b/src/cls/otp/cls_otp_types.h
new file mode 100644
index 000000000..6e431b330
--- /dev/null
+++ b/src/cls/otp/cls_otp_types.h
@@ -0,0 +1,135 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_OTP_TYPES_H
+#define CEPH_CLS_OTP_TYPES_H
+
+#include "include/encoding.h"
+#include "include/types.h"
+
+
+#define CLS_OTP_MAX_REPO_SIZE 100
+
+class JSONObj;
+
+namespace rados {
+  namespace cls {
+    namespace otp {
+
+      enum OTPType {
+        OTP_UNKNOWN = 0,
+        OTP_HOTP = 1,  /* unsupported */
+        OTP_TOTP = 2,
+      };
+
+      enum SeedType {
+        OTP_SEED_UNKNOWN = 0,
+        OTP_SEED_HEX = 1,
+        OTP_SEED_BASE32 = 2,
+      };
+
+      struct otp_info_t {
+        OTPType type{OTP_TOTP};
+        std::string id;
+        std::string seed;
+        SeedType seed_type{OTP_SEED_UNKNOWN};
+        ceph::buffer::list seed_bin; /* parsed seed, built automatically by otp_set_op,
+                                      * not being json encoded/decoded on purpose
+                                      */
+        int32_t time_ofs{0};
+        uint32_t step_size{30}; /* num of seconds for each otp to test */
+        uint32_t window{2}; /* num of otps to test before/after the start otp */
+
+        otp_info_t() {}
+
+        void encode(ceph::buffer::list &bl) const {
+          ENCODE_START(1, 1, bl);
+          encode((uint8_t)type, bl);
+          /* if we ever implement anything other than TOTP
+           * then we'll need to branch here */
+          encode(id, bl);
+          encode(seed, bl);
+          encode((uint8_t)seed_type, bl);
+          encode(seed_bin, bl);
+          encode(time_ofs, bl);
+          encode(step_size, bl);
+          encode(window, bl);
+          ENCODE_FINISH(bl);
+        }
+        void decode(ceph::buffer::list::const_iterator &bl) {
+          DECODE_START(1, bl);
+          uint8_t t;
+          decode(t, bl);
+          type = (OTPType)t;
+          decode(id, bl);
+          decode(seed, bl);
+          uint8_t st;
+          decode(st, bl);
+          seed_type = (SeedType)st;
+          decode(seed_bin, bl);
+          decode(time_ofs, bl);
+          decode(step_size, bl);
+          decode(window, bl);
+          DECODE_FINISH(bl);
+        }
+        void dump(ceph::Formatter *f) const;
+        void decode_json(JSONObj *obj);
+      };
+      WRITE_CLASS_ENCODER(rados::cls::otp::otp_info_t)
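For reference, the dump() implementation earlier in cls_otp_types.cc renders an entry in the following JSON shape (field values here are hypothetical). Note that seed_type travels as a string in JSON but as a uint8_t on the wire, and seed_bin is deliberately left out of the JSON form:

  {
    "type": 2,
    "id": "mfa1",
    "seed": "GEZDGNBVGY3TQOJQ",
    "seed_type": "base32",
    "time_ofs": 0,
    "step_size": 30,
    "window": 2
  }

+
+      enum OTPCheckResult {
+        OTP_CHECK_UNKNOWN = 0,
+        OTP_CHECK_SUCCESS = 1,
+        OTP_CHECK_FAIL = 2,
+      };
+
+      struct otp_check_t {
+        std::string token;
+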
ceph::real_time timestamp; + OTPCheckResult result{OTP_CHECK_UNKNOWN}; + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(token, bl); + encode(timestamp, bl); + encode((char)result, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(token, bl); + decode(timestamp, bl); + uint8_t t; + decode(t, bl); + result = (OTPCheckResult)t; + DECODE_FINISH(bl); + } + }; + WRITE_CLASS_ENCODER(rados::cls::otp::otp_check_t) + + struct otp_repo_t { + std::map<std::string, otp_info_t> entries; + + otp_repo_t() {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(entries, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(entries, bl); + DECODE_FINISH(bl); + } + }; + WRITE_CLASS_ENCODER(rados::cls::otp::otp_repo_t) + } + } +} + +WRITE_CLASS_ENCODER(rados::cls::otp::otp_info_t) +WRITE_CLASS_ENCODER(rados::cls::otp::otp_check_t) +WRITE_CLASS_ENCODER(rados::cls::otp::otp_repo_t) + +#endif diff --git a/src/cls/queue/cls_queue.cc b/src/cls/queue/cls_queue.cc new file mode 100644 index 000000000..cf4daaac8 --- /dev/null +++ b/src/cls/queue/cls_queue.cc @@ -0,0 +1,145 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "include/types.h" + +#include <errno.h> + +#include "objclass/objclass.h" +#include "cls/queue/cls_queue_types.h" +#include "cls/queue/cls_queue_ops.h" +#include "cls/queue/cls_queue_const.h" +#include "cls/queue/cls_queue_src.h" + +using ceph::bufferlist; +using ceph::decode; +using ceph::encode; + +CLS_VER(1,0) +CLS_NAME(queue) + +static int cls_queue_init(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + cls_queue_init_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_queue_init_op(): failed to decode entry\n"); + return -EINVAL; + } + + return queue_init(hctx, op); +} + +static int cls_queue_get_capacity(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + cls_queue_get_capacity_ret op_ret; + auto ret = queue_get_capacity(hctx, op_ret); + if (ret < 0) { + return ret; + } + + encode(op_ret, *out); + return 0; +} + +static int cls_queue_enqueue(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto iter = in->cbegin(); + cls_queue_enqueue_op op; + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_queue_enqueue: failed to decode input data \n"); + return -EINVAL; + } + + cls_queue_head head; + auto ret = queue_read_head(hctx, head); + if (ret < 0) { + return ret; + } + + ret = queue_enqueue(hctx, op, head); + if (ret < 0) { + return ret; + } + + //Write back head + return queue_write_head(hctx, head); +} + +static int cls_queue_list_entries(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + cls_queue_list_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(5, "ERROR: cls_queue_list_entries(): failed to decode input data\n"); + return -EINVAL; + } + + cls_queue_head head; + auto ret = queue_read_head(hctx, head); + if (ret < 0) { + return ret; + } + + cls_queue_list_ret op_ret; + ret = queue_list_entries(hctx, op, op_ret, head); + if (ret < 0) { + return ret; + } + + encode(op_ret, *out); + return 0; +} + +static int cls_queue_remove_entries(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ 
+ auto in_iter = in->cbegin(); + cls_queue_remove_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(5, "ERROR: cls_queue_remove_entries: failed to decode input data\n"); + return -EINVAL; + } + + cls_queue_head head; + auto ret = queue_read_head(hctx, head); + if (ret < 0) { + return ret; + } + ret = queue_remove_entries(hctx, op, head); + if (ret < 0) { + return ret; + } + return queue_write_head(hctx, head); +} + +CLS_INIT(queue) +{ + CLS_LOG(1, "Loaded queue class!"); + + cls_handle_t h_class; + cls_method_handle_t h_queue_init; + cls_method_handle_t h_queue_get_capacity; + cls_method_handle_t h_queue_enqueue; + cls_method_handle_t h_queue_list_entries; + cls_method_handle_t h_queue_remove_entries; + + cls_register(QUEUE_CLASS, &h_class); + + /* queue*/ + cls_register_cxx_method(h_class, QUEUE_INIT, CLS_METHOD_RD | CLS_METHOD_WR, cls_queue_init, &h_queue_init); + cls_register_cxx_method(h_class, QUEUE_GET_CAPACITY, CLS_METHOD_RD, cls_queue_get_capacity, &h_queue_get_capacity); + cls_register_cxx_method(h_class, QUEUE_ENQUEUE, CLS_METHOD_RD | CLS_METHOD_WR, cls_queue_enqueue, &h_queue_enqueue); + cls_register_cxx_method(h_class, QUEUE_LIST_ENTRIES, CLS_METHOD_RD, cls_queue_list_entries, &h_queue_list_entries); + cls_register_cxx_method(h_class, QUEUE_REMOVE_ENTRIES, CLS_METHOD_RD | CLS_METHOD_WR, cls_queue_remove_entries, &h_queue_remove_entries); + + return; +} + diff --git a/src/cls/queue/cls_queue_client.cc b/src/cls/queue/cls_queue_client.cc new file mode 100644 index 000000000..87d17bb9e --- /dev/null +++ b/src/cls/queue/cls_queue_client.cc @@ -0,0 +1,88 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +#include <errno.h> + +#include "cls/queue/cls_queue_ops.h" +#include "cls/queue/cls_queue_const.h" +#include "cls/queue/cls_queue_client.h" + +using namespace std; +using namespace librados; + +void cls_queue_init(ObjectWriteOperation& op, const string& queue_name, uint64_t size) +{ + bufferlist in; + cls_queue_init_op call; + call.max_urgent_data_size = 0; + call.queue_size = size; + encode(call, in); + op.exec(QUEUE_CLASS, QUEUE_INIT, in); +} + +int cls_queue_get_capacity(IoCtx& io_ctx, const string& oid, uint64_t& size) +{ + bufferlist in, out; + int r = io_ctx.exec(oid, QUEUE_CLASS, QUEUE_GET_CAPACITY, in, out); + if (r < 0) + return r; + + cls_queue_get_capacity_ret op_ret; + auto iter = out.cbegin(); + try { + decode(op_ret, iter); + } catch (buffer::error& err) { + return -EIO; + } + + size = op_ret.queue_capacity; + + return 0; +} + +void cls_queue_enqueue(ObjectWriteOperation& op, uint32_t expiration_secs, vector<bufferlist> bl_data_vec) +{ + bufferlist in; + cls_queue_enqueue_op call; + call.bl_data_vec = std::move(bl_data_vec); + encode(call, in); + op.exec(QUEUE_CLASS, QUEUE_ENQUEUE, in); +} + +int cls_queue_list_entries(IoCtx& io_ctx, const string& oid, const string& marker, uint32_t max, + vector<cls_queue_entry>& entries, + bool *truncated, string& next_marker) +{ + bufferlist in, out; + cls_queue_list_op op; + op.start_marker = marker; + op.max = max; + encode(op, in); + + int r = io_ctx.exec(oid, QUEUE_CLASS, QUEUE_LIST_ENTRIES, in, out); + if (r < 0) + return r; + + cls_queue_list_ret ret; + auto iter = out.cbegin(); + try { + decode(ret, iter); + } catch (buffer::error& err) { + return -EIO; + } + + entries = std::move(ret.entries); + *truncated = ret.is_truncated; + + next_marker = std::move(ret.next_marker); + + return 0; +} + +void 
cls_queue_remove_entries(ObjectWriteOperation& op, const string& end_marker)
+{
+  bufferlist in, out;
+  cls_queue_remove_op rem_op;
+  rem_op.end_marker = end_marker;
+  encode(rem_op, in);
+  op.exec(QUEUE_CLASS, QUEUE_REMOVE_ENTRIES, in);
+}
diff --git a/src/cls/queue/cls_queue_client.h b/src/cls/queue/cls_queue_client.h
new file mode 100644
index 000000000..895a51c11
--- /dev/null
+++ b/src/cls/queue/cls_queue_client.h
@@ -0,0 +1,16 @@
+#ifndef CEPH_CLS_QUEUE_CLIENT_H
+#define CEPH_CLS_QUEUE_CLIENT_H
+
+#include "include/rados/librados.hpp"
+#include "cls/queue/cls_queue_types.h"
+#include "cls_queue_ops.h"
+#include "common/ceph_time.h"
+
+void cls_queue_init(librados::ObjectWriteOperation& op, const std::string& queue_name, uint64_t size);
+int cls_queue_get_capacity(librados::IoCtx& io_ctx, const std::string& oid, uint64_t& size);
+void cls_queue_enqueue(librados::ObjectWriteOperation& op, uint32_t expiration_secs, std::vector<bufferlist> bl_data_vec);
+int cls_queue_list_entries(librados::IoCtx& io_ctx, const std::string& oid, const std::string& marker, uint32_t max,
+                           std::vector<cls_queue_entry>& entries, bool *truncated, std::string& next_marker);
+void cls_queue_remove_entries(librados::ObjectWriteOperation& op, const std::string& end_marker);
+
+#endif
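The listing API declared above is marker-paginated: each call returns up to max entries together with an opaque next_marker (a "gen/offset" pair, see cls_queue_types.h below) and a truncated flag to resume from; an empty start marker begins at the queue front. A minimal consumer sketch, with a hypothetical object name "my-queue" and abbreviated error handling (illustration only, not part of the patch):

  // Drain "my-queue" in pages of up to 100 entries, then trim what was read.
  int drain_queue(librados::IoCtx& ioctx) {
    std::string marker;  // empty marker -> start at the queue front
    bool truncated = true;
    while (truncated) {
      std::vector<cls_queue_entry> entries;
      std::string next_marker;
      int r = cls_queue_list_entries(ioctx, "my-queue", marker, 100, entries,
                                     &truncated, next_marker);
      if (r < 0) {
        return r;
      }
      for (const auto& entry : entries) {
        // consume entry.data here; entry.marker identifies it for removal
      }
      marker = next_marker;
    }
    librados::ObjectWriteOperation op;
    cls_queue_remove_entries(op, marker);  // removes entries before 'marker'
    return ioctx.operate("my-queue", &op);
  }

diff --git a/src/cls/queue/cls_queue_const.h b/src/cls/queue/cls_queue_const.h
new file mode 100644
index 000000000..3f289abb0
--- /dev/null
+++ b/src/cls/queue/cls_queue_const.h
@@ -0,0 +1,12 @@
+#ifndef CEPH_CLS_QUEUE_CONSTS_H
+#define CEPH_CLS_QUEUE_CONSTS_H
+
+#define QUEUE_CLASS "queue"
+
+#define QUEUE_INIT "queue_init"
+#define QUEUE_GET_CAPACITY "queue_get_capacity"
+#define QUEUE_ENQUEUE "queue_enqueue"
+#define QUEUE_LIST_ENTRIES "queue_list_entries"
+#define QUEUE_REMOVE_ENTRIES "queue_remove_entries"
+
+#endif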
\ No newline at end of file
diff --git a/src/cls/queue/cls_queue_ops.h b/src/cls/queue/cls_queue_ops.h
new file mode 100644
index 000000000..64891cffb
--- /dev/null
+++ b/src/cls/queue/cls_queue_ops.h
@@ -0,0 +1,139 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_QUEUE_OPS_H
+#define CEPH_CLS_QUEUE_OPS_H
+
+#include "cls/queue/cls_queue_types.h"
+
+struct cls_queue_init_op {
+  uint64_t queue_size{0};
+  uint64_t max_urgent_data_size{0};
+  ceph::buffer::list bl_urgent_data;
+
+  cls_queue_init_op() {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(queue_size, bl);
+    encode(max_urgent_data_size, bl);
+    encode(bl_urgent_data, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(queue_size, bl);
+    decode(max_urgent_data_size, bl);
+    decode(bl_urgent_data, bl);
+    DECODE_FINISH(bl);
+  }
+
+};
+WRITE_CLASS_ENCODER(cls_queue_init_op)
+
+struct cls_queue_enqueue_op {
+  std::vector<ceph::buffer::list> bl_data_vec;
+
+  cls_queue_enqueue_op() {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(bl_data_vec, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(bl_data_vec, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(cls_queue_enqueue_op)
+
+struct cls_queue_list_op {
+  uint64_t max{0}; // default-initialized so an unset value is never encoded
+  std::string start_marker;
+
+  cls_queue_list_op() {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(max, bl);
+    encode(start_marker, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(max, bl);
+    decode(start_marker, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(cls_queue_list_op)
+
+struct cls_queue_list_ret {
+  bool is_truncated{false};
+  std::string next_marker;
+  std::vector<cls_queue_entry> entries;
+
+  cls_queue_list_ret() {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(is_truncated, bl);
+    encode(next_marker, bl);
+    encode(entries, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(is_truncated, bl);
+    decode(next_marker, bl);
+    decode(entries, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(cls_queue_list_ret)
+
+struct cls_queue_remove_op {
+  std::string end_marker;
+
+  cls_queue_remove_op() {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(end_marker, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(end_marker, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(cls_queue_remove_op)
+
+struct cls_queue_get_capacity_ret {
+  uint64_t queue_capacity{0};
+
+  cls_queue_get_capacity_ret() {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(queue_capacity, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(queue_capacity, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(cls_queue_get_capacity_ret)
+
+#endif /* CEPH_CLS_QUEUE_OPS_H */
diff --git a/src/cls/queue/cls_queue_src.cc b/src/cls/queue/cls_queue_src.cc
new file mode 100644
index 000000000..b34d9929b
--- /dev/null
+++ b/src/cls/queue/cls_queue_src.cc
@@ -0,0 +1,519 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8
sw=2 smarttab + +#include "include/types.h" + +#include "objclass/objclass.h" +#include "cls/queue/cls_queue_types.h" +#include "cls/queue/cls_queue_ops.h" +#include "cls/queue/cls_queue_const.h" +#include "cls/queue/cls_queue_src.h" + +using std::string; +using ceph::bufferlist; +using ceph::decode; +using ceph::encode; + +const uint64_t page_size = 4096; +const uint64_t large_chunk_size = 1ul << 22; + +int queue_write_head(cls_method_context_t hctx, cls_queue_head& head) +{ + bufferlist bl; + uint16_t entry_start = QUEUE_HEAD_START; + encode(entry_start, bl); + + bufferlist bl_head; + encode(head, bl_head); + + uint64_t encoded_len = bl_head.length(); + encode(encoded_len, bl); + + bl.claim_append(bl_head); + + if (bl.length() > head.max_head_size) { + CLS_LOG(0, "ERROR: queue_write_head: invalid head size = %u and urgent data size = %u \n", bl.length(), head.bl_urgent_data.length()); + return -EINVAL; + } + + int ret = cls_cxx_write2(hctx, 0, bl.length(), &bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); + if (ret < 0) { + CLS_LOG(5, "ERROR: queue_write_head: failed to write head"); + return ret; + } + return 0; +} + +int queue_read_head(cls_method_context_t hctx, cls_queue_head& head) +{ + uint64_t chunk_size = page_size, start_offset = 0; + + bufferlist bl_head; + const auto ret = cls_cxx_read(hctx, start_offset, chunk_size, &bl_head); + if (ret < 0) { + CLS_LOG(5, "ERROR: queue_read_head: failed to read head"); + return ret; + } + if (ret == 0) { + CLS_LOG(20, "INFO: queue_read_head: empty head, not initialized yet"); + return -EINVAL; + } + + //Process the chunk of data read + auto it = bl_head.cbegin(); + // Queue head start + uint16_t queue_head_start; + try { + decode(queue_head_start, it); + } catch (const ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: queue_read_head: failed to decode queue start: %s", err.what()); + return -EINVAL; + } + if (queue_head_start != QUEUE_HEAD_START) { + CLS_LOG(0, "ERROR: queue_read_head: invalid queue start"); + return -EINVAL; + } + + uint64_t encoded_len; + try { + decode(encoded_len, it); + } catch (const ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: queue_read_head: failed to decode encoded head size: %s", err.what()); + return -EINVAL; + } + + if (encoded_len > (chunk_size - QUEUE_ENTRY_OVERHEAD)) { + start_offset = chunk_size; + chunk_size = (encoded_len - (chunk_size - QUEUE_ENTRY_OVERHEAD)); + bufferlist bl_remaining_head; + const auto ret = cls_cxx_read2(hctx, start_offset, chunk_size, &bl_remaining_head, CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL); + if (ret < 0) { + CLS_LOG(5, "ERROR: queue_read_head: failed to read remaining part of head"); + return ret; + } + bl_head.claim_append(bl_remaining_head); + } + + try { + decode(head, it); + } catch (const ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: queue_read_head: failed to decode head: %s", err.what()); + return -EINVAL; + } + + return 0; +} + +int queue_init(cls_method_context_t hctx, const cls_queue_init_op& op) +{ + //get head and its size + cls_queue_head head; + int ret = queue_read_head(hctx, head); + + //head is already initialized + if (ret == 0) { + return -EEXIST; + } + + if (ret < 0 && ret != -EINVAL) { + return ret; + } + + if (op.bl_urgent_data.length() > 0) { + head.bl_urgent_data = op.bl_urgent_data; + } + + head.max_head_size = QUEUE_HEAD_SIZE_1K + op.max_urgent_data_size; + head.queue_size = op.queue_size + head.max_head_size; + head.max_urgent_data_size = op.max_urgent_data_size; + head.tail.gen = head.front.gen = 0; + head.tail.offset = head.front.offset = 
head.max_head_size;
+
+  CLS_LOG(20, "INFO: init_queue_op queue actual size %lu", head.queue_size);
+  CLS_LOG(20, "INFO: init_queue_op head size %lu", head.max_head_size);
+  CLS_LOG(20, "INFO: init_queue_op queue front offset %s", head.front.to_str().c_str());
+  CLS_LOG(20, "INFO: init_queue_op queue max urgent data size %lu", head.max_urgent_data_size);
+
+  return queue_write_head(hctx, head);
+}
+
+int queue_get_capacity(cls_method_context_t hctx, cls_queue_get_capacity_ret& op_ret)
+{
+  //get head
+  cls_queue_head head;
+  int ret = queue_read_head(hctx, head);
+  if (ret < 0) {
+    return ret;
+  }
+
+  op_ret.queue_capacity = head.queue_size - head.max_head_size;
+
+  CLS_LOG(20, "INFO: queue_get_capacity: size of queue is %lu", op_ret.queue_capacity);
+
+  return 0;
+}
+
+
+/*
+enqueue of a new bufferlist happens in the free space of the queue, which can
+be in one of two states:
+
+(1) split free space: the used region [front, tail) is contiguous, so the
+    free space is split between the end of the object [tail, queue_size)
+    and the region just after the head [max_head_size, front).
+
+(2) continuous free space: the used data has wrapped past the end of the
+    object, so the free space is the single contiguous region [tail, front).
+*/
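To make the arithmetic concrete, a small worked example with hypothetical sizes (max_head_size = 1024, queue_size = 5120): in state (1) with front = 2048 and tail = 4096 the free space is (5120 - 4096) + (2048 - 1024) = 2048 bytes split across the wrap point; in state (2) with front = 4096 and tail = 2048 it is the single range 4096 - 2048 = 2048 bytes. A sketch of the same computation, mirroring the expressions used below (illustration only, not part of the patch):

  uint64_t free_bytes(const cls_queue_head& head) {
    if (head.tail.offset >= head.front.offset) {
      // state (1): free space wraps around the end of the object
      return (head.queue_size - head.tail.offset) +
             (head.front.offset - head.max_head_size);
    }
    // state (2): free space is the gap between tail and front
    return head.front.offset - head.tail.offset;
  }

+
+int queue_enqueue(cls_method_context_t hctx, cls_queue_enqueue_op& op, cls_queue_head& head)
+{
+  if ((head.front.offset == head.tail.offset) && (head.tail.gen == head.front.gen + 1)) {
+    CLS_LOG(0, "ERROR: No space left in queue");
+    return -ENOSPC;
+  }
+
+  for (auto& bl_data : op.bl_data_vec) {
+    bufferlist bl;
+    uint16_t entry_start = QUEUE_ENTRY_START;
+    encode(entry_start, bl);
+    uint64_t data_size = bl_data.length();
+    encode(data_size, bl);
+    bl.claim_append(bl_data);
+
+    CLS_LOG(10, "INFO: queue_enqueue(): Total size to be written is %u and data size is %lu", bl.length(), data_size);
+
+    if (head.tail.offset >= head.front.offset) {
+      // check if data can fit in the remaining space in queue
+      if ((head.tail.offset + bl.length()) <= head.queue_size) {
+        CLS_LOG(5, "INFO: queue_enqueue: Writing data size and data: offset: %s, size: %u", head.tail.to_str().c_str(), bl.length());
+        //write data size and data at tail offset
+        auto ret = cls_cxx_write2(hctx, head.tail.offset, bl.length(), &bl, CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL);
+        if (ret < 0) {
+          return ret;
+        }
+        head.tail.offset += bl.length();
+      } else {
+        uint64_t free_space_available = (head.queue_size - head.tail.offset) + (head.front.offset - head.max_head_size);
+        //Split data if there is free space available
+        if (bl.length() <= free_space_available) {
+          uint64_t size_before_wrap = head.queue_size - head.tail.offset;
+          bufferlist bl_data_before_wrap;
+          bl.splice(0, size_before_wrap, &bl_data_before_wrap);
+          //write spliced (data size and data) at tail offset
+          CLS_LOG(5, "INFO: queue_enqueue: Writing spliced data at offset: %s and data size: %u", head.tail.to_str().c_str(), bl_data_before_wrap.length());
+          auto ret = cls_cxx_write2(hctx, head.tail.offset, bl_data_before_wrap.length(), &bl_data_before_wrap, CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL);
+          if (ret < 0) {
+            return ret;
+          }
+          head.tail.offset =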
head.max_head_size; + head.tail.gen += 1; + //write remaining data at tail offset after wrapping around + CLS_LOG(5, "INFO: queue_enqueue: Writing remaining data at offset: %s and data size: %u", head.tail.to_str().c_str(), bl.length()); + ret = cls_cxx_write2(hctx, head.tail.offset, bl.length(), &bl, CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL); + if (ret < 0) { + return ret; + } + head.tail.offset += bl.length(); + } else { + CLS_LOG(0, "ERROR: No space left in queue\n"); + // return queue full error + return -ENOSPC; + } + } + } else if (head.front.offset > head.tail.offset) { + if ((head.tail.offset + bl.length()) <= head.front.offset) { + CLS_LOG(5, "INFO: queue_enqueue: Writing data size and data: offset: %s, size: %u", head.tail.to_str().c_str(), bl.length()); + //write data size and data at tail offset + auto ret = cls_cxx_write2(hctx, head.tail.offset, bl.length(), &bl, CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL); + if (ret < 0) { + return ret; + } + head.tail.offset += bl.length(); + } else { + CLS_LOG(0, "ERROR: No space left in queue"); + // return queue full error + return -ENOSPC; + } + } + + if (head.tail.offset == head.queue_size) { + head.tail.offset = head.max_head_size; + head.tail.gen += 1; + } + CLS_LOG(20, "INFO: queue_enqueue: New tail offset: %s", head.tail.to_str().c_str()); + } //end - for + + return 0; +} + +int queue_list_entries(cls_method_context_t hctx, const cls_queue_list_op& op, cls_queue_list_ret& op_ret, cls_queue_head& head) +{ + // If queue is empty, return from here + if ((head.front.offset == head.tail.offset) && (head.front.gen == head.tail.gen)) { + CLS_LOG(20, "INFO: queue_list_entries(): Next offset is %s", head.front.to_str().c_str()); + op_ret.next_marker = head.front.to_str(); + op_ret.is_truncated = false; + return 0; + } + + cls_queue_marker start_marker; + start_marker.from_str(op.start_marker.c_str()); + cls_queue_marker next_marker = {0, 0}; + + uint64_t start_offset = 0, gen = 0; + if (start_marker.offset == 0) { + start_offset = head.front.offset; + gen = head.front.gen; + } else { + start_offset = start_marker.offset; + gen = start_marker.gen; + } + + op_ret.is_truncated = true; + uint64_t contiguous_data_size = 0, size_to_read = 0; + bool wrap_around = false; + + //Calculate length of contiguous data to be read depending on front, tail and start offset + if (head.tail.offset > head.front.offset) { + contiguous_data_size = head.tail.offset - start_offset; + } else if (head.front.offset >= head.tail.offset) { + if (start_offset >= head.front.offset) { + contiguous_data_size = head.queue_size - start_offset; + wrap_around = true; + } else if (start_offset <= head.tail.offset) { + contiguous_data_size = head.tail.offset - start_offset; + } + } + + CLS_LOG(10, "INFO: queue_list_entries(): front is: %s, tail is %s", head.front.to_str().c_str(), head.tail.to_str().c_str()); + + bool offset_populated = false, entry_start_processed = false; + uint64_t data_size = 0, num_ops = 0; + uint16_t entry_start = 0; + bufferlist bl; + string last_marker; + do + { + CLS_LOG(10, "INFO: queue_list_entries(): start_offset is %lu", start_offset); + + bufferlist bl_chunk; + //Read chunk size at a time, if it is less than contiguous data size, else read contiguous data size + size_to_read = std::min(contiguous_data_size, large_chunk_size); + CLS_LOG(10, "INFO: queue_list_entries(): size_to_read is %lu", size_to_read); + if (size_to_read == 0) { + next_marker = head.tail; + op_ret.is_truncated = false; + CLS_LOG(20, "INFO: queue_list_entries(): size_to_read is 0, hence 
breaking out!\n"); + break; + } + + auto ret = cls_cxx_read(hctx, start_offset, size_to_read, &bl_chunk); + if (ret < 0) { + return ret; + } + + //If there is leftover data from previous iteration, append new data to leftover data + uint64_t entry_start_offset = start_offset - bl.length(); + CLS_LOG(20, "INFO: queue_list_entries(): Entry start offset accounting for leftover data is %lu", entry_start_offset); + bl.claim_append(bl_chunk); + bl_chunk = std::move(bl); + + CLS_LOG(20, "INFO: queue_list_entries(): size of chunk %u", bl_chunk.length()); + + //Process the chunk of data read + unsigned index = 0; + auto it = bl_chunk.cbegin(); + uint64_t size_to_process = bl_chunk.length(); + do { + CLS_LOG(10, "INFO: queue_list_entries(): index: %u, size_to_process: %lu", index, size_to_process); + cls_queue_entry entry; + ceph_assert(it.get_off() == index); + //Use the last marker saved in previous iteration as the marker for this entry + if (offset_populated) { + entry.marker = last_marker; + } + //Populate offset if not done in previous iteration + if (! offset_populated) { + cls_queue_marker marker = {entry_start_offset + index, gen}; + CLS_LOG(5, "INFO: queue_list_entries(): offset: %s\n", marker.to_str().c_str()); + entry.marker = marker.to_str(); + } + // Magic number + Data size - process if not done in previous iteration + if (! entry_start_processed ) { + if (size_to_process >= QUEUE_ENTRY_OVERHEAD) { + // Decode magic number at start + try { + decode(entry_start, it); + } catch (const ceph::buffer::error& err) { + CLS_LOG(10, "ERROR: queue_list_entries: failed to decode entry start: %s", err.what()); + return -EINVAL; + } + if (entry_start != QUEUE_ENTRY_START) { + CLS_LOG(5, "ERROR: queue_list_entries: invalid entry start %u", entry_start); + return -EINVAL; + } + index += sizeof(uint16_t); + size_to_process -= sizeof(uint16_t); + // Decode data size + try { + decode(data_size, it); + } catch (const ceph::buffer::error& err) { + CLS_LOG(10, "ERROR: queue_list_entries: failed to decode data size: %s", err.what()); + return -EINVAL; + } + } else { + // Copy unprocessed data to bl + bl_chunk.splice(index, size_to_process, &bl); + offset_populated = true; + last_marker = entry.marker; + CLS_LOG(10, "INFO: queue_list_entries: not enough data to read entry start and data size, breaking out!"); + break; + } + CLS_LOG(20, "INFO: queue_list_entries(): data size: %lu", data_size); + index += sizeof(uint64_t); + size_to_process -= sizeof(uint64_t); + } + // Data + if (data_size <= size_to_process) { + it.copy(data_size, entry.data); + index += entry.data.length(); + size_to_process -= entry.data.length(); + } else { + it.copy(size_to_process, bl); + offset_populated = true; + entry_start_processed = true; + last_marker = entry.marker; + CLS_LOG(10, "INFO: queue_list_entries(): not enough data to read data, breaking out!"); + break; + } + op_ret.entries.emplace_back(entry); + // Resetting some values + offset_populated = false; + entry_start_processed = false; + data_size = 0; + entry_start = 0; + num_ops++; + last_marker.clear(); + if (num_ops == op.max) { + CLS_LOG(10, "INFO: queue_list_entries(): num_ops is same as op.max, hence breaking out from inner loop!"); + break; + } + } while(index < bl_chunk.length()); + + CLS_LOG(10, "INFO: num_ops: %lu and op.max is %lu\n", num_ops, op.max); + + if (num_ops == op.max) { + next_marker = cls_queue_marker{(entry_start_offset + index), gen}; + CLS_LOG(10, "INFO: queue_list_entries(): num_ops is same as op.max, hence breaking out from outer loop with next 
offset: %lu", next_marker.offset); + break; + } + + //Calculate new start_offset and contiguous data size + start_offset += size_to_read; + contiguous_data_size -= size_to_read; + if (contiguous_data_size == 0) { + if (wrap_around) { + start_offset = head.max_head_size; + contiguous_data_size = head.tail.offset - head.max_head_size; + gen += 1; + wrap_around = false; + } else { + CLS_LOG(10, "INFO: queue_list_entries(): end of queue data is reached, hence breaking out from outer loop!"); + next_marker = head.tail; + op_ret.is_truncated = false; + break; + } + } + + } while(num_ops < op.max); + + //Wrap around next offset if it has reached end of queue + if (next_marker.offset == head.queue_size) { + next_marker.offset = head.max_head_size; + next_marker.gen += 1; + } + if ((next_marker.offset == head.tail.offset) && (next_marker.gen == head.tail.gen)) { + op_ret.is_truncated = false; + } + + CLS_LOG(5, "INFO: queue_list_entries(): next offset: %s", next_marker.to_str().c_str()); + op_ret.next_marker = next_marker.to_str(); + + return 0; +} + +int queue_remove_entries(cls_method_context_t hctx, const cls_queue_remove_op& op, cls_queue_head& head) +{ + //Queue is empty + if ((head.front.offset == head.tail.offset) && (head.front.gen == head.tail.gen)) { + return 0; + } + + cls_queue_marker end_marker; + end_marker.from_str(op.end_marker.c_str()); + + CLS_LOG(5, "INFO: queue_remove_entries: op.end_marker = %s", end_marker.to_str().c_str()); + + //Zero out the entries that have been removed, to reclaim storage space + if (end_marker.offset > head.front.offset && end_marker.gen == head.front.gen) { + uint64_t len = end_marker.offset - head.front.offset; + if (len > 0) { + auto ret = cls_cxx_write_zero(hctx, head.front.offset, len); + if (ret < 0) { + CLS_LOG(5, "INFO: queue_remove_entries: Failed to zero out entries"); + CLS_LOG(10, "INFO: queue_remove_entries: Start offset = %s", head.front.to_str().c_str()); + return ret; + } + } + } else if ((head.front.offset >= end_marker.offset) && (end_marker.gen == head.front.gen + 1)) { //start offset > end offset + uint64_t len = head.queue_size - head.front.offset; + if (len > 0) { + auto ret = cls_cxx_write_zero(hctx, head.front.offset, len); + if (ret < 0) { + CLS_LOG(5, "INFO: queue_remove_entries: Failed to zero out entries"); + CLS_LOG(10, "INFO: queue_remove_entries: Start offset = %s", head.front.to_str().c_str()); + return ret; + } + } + len = end_marker.offset - head.max_head_size; + if (len > 0) { + auto ret = cls_cxx_write_zero(hctx, head.max_head_size, len); + if (ret < 0) { + CLS_LOG(5, "INFO: queue_remove_entries: Failed to zero out entries"); + CLS_LOG(10, "INFO: queue_remove_entries: Start offset = %lu", head.max_head_size); + return ret; + } + } + } else if ((head.front.offset == end_marker.offset) && (head.front.gen == end_marker.gen)) { + //no-op + } else { + CLS_LOG(0, "INFO: queue_remove_entries: Invalid end marker: offset = %s, gen = %lu", end_marker.to_str().c_str(), end_marker.gen); + return -EINVAL; + } + + head.front = end_marker; + + // Check if it is the end, then wrap around + if (head.front.offset == head.queue_size) { + head.front.offset = head.max_head_size; + head.front.gen += 1; + } + + CLS_LOG(20, "INFO: queue_remove_entries: front offset is: %s and tail offset is %s", head.front.to_str().c_str(), head.tail.to_str().c_str()); + + return 0; +} diff --git a/src/cls/queue/cls_queue_src.h b/src/cls/queue/cls_queue_src.h new file mode 100644 index 000000000..9970b98ea --- /dev/null +++ b/src/cls/queue/cls_queue_src.h @@ 
-0,0 +1,16 @@ +#ifndef CEPH_CLS_QUEUE_SRC_H +#define CEPH_CLS_QUEUE_SRC_H + +#include "objclass/objclass.h" +#include "cls/queue/cls_queue_types.h" +#include "cls/queue/cls_queue_ops.h" + +int queue_write_head(cls_method_context_t hctx, cls_queue_head& head); +int queue_read_head(cls_method_context_t hctx, cls_queue_head& head); +int queue_init(cls_method_context_t hctx, const cls_queue_init_op& op); +int queue_get_capacity(cls_method_context_t hctx, cls_queue_get_capacity_ret& op_ret); +int queue_enqueue(cls_method_context_t hctx, cls_queue_enqueue_op& op, cls_queue_head& head); +int queue_list_entries(cls_method_context_t hctx, const cls_queue_list_op& op, cls_queue_list_ret& op_ret, cls_queue_head& head); +int queue_remove_entries(cls_method_context_t hctx, const cls_queue_remove_op& op, cls_queue_head& head); + +#endif /* CEPH_CLS_QUEUE_SRC_H */ diff --git a/src/cls/queue/cls_queue_types.h b/src/cls/queue/cls_queue_types.h new file mode 100644 index 000000000..cc46df405 --- /dev/null +++ b/src/cls/queue/cls_queue_types.h @@ -0,0 +1,120 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_QUEUE_TYPES_H +#define CEPH_CLS_QUEUE_TYPES_H + +#include <errno.h> +#include "include/types.h" + +//Size of head leaving out urgent data +#define QUEUE_HEAD_SIZE_1K 1024 + +#define QUEUE_START_OFFSET_1K QUEUE_HEAD_SIZE_1K + +constexpr unsigned int QUEUE_HEAD_START = 0xDEAD; +constexpr unsigned int QUEUE_ENTRY_START = 0xBEEF; +constexpr unsigned int QUEUE_ENTRY_OVERHEAD = sizeof(uint16_t) + sizeof(uint64_t); + +struct cls_queue_entry +{ + ceph::buffer::list data; + std::string marker; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(data, bl); + encode(marker, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(data, bl); + decode(marker, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_queue_entry) + +struct cls_queue_marker +{ + uint64_t offset{0}; + uint64_t gen{0}; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(gen, bl); + encode(offset, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(gen, bl); + decode(offset, bl); + DECODE_FINISH(bl); + } + + std::string to_str() { + return std::to_string(gen) + '/' + std::to_string(offset); + } + + int from_str(const char* str) { + errno = 0; + char* end = nullptr; + gen = ::strtoull(str, &end, 10); + if (errno) { + return errno; + } + if (str == end || *end != '/') { // expects delimiter + return -EINVAL; + } + str = end + 1; + offset = ::strtoull(str, &end, 10); + if (errno) { + return errno; + } + if (str == end || *end != 0) { // expects null terminator + return -EINVAL; + } + return 0; + } + +}; +WRITE_CLASS_ENCODER(cls_queue_marker) + +struct cls_queue_head +{ + uint64_t max_head_size = QUEUE_HEAD_SIZE_1K; + cls_queue_marker front{QUEUE_START_OFFSET_1K, 0}; + cls_queue_marker tail{QUEUE_START_OFFSET_1K, 0}; + uint64_t queue_size{0}; // size of queue requested by user, with head size added to it + uint64_t max_urgent_data_size{0}; + ceph::buffer::list bl_urgent_data; // special data known to application using queue + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(max_head_size, bl); + encode(front, bl); + encode(tail, bl); + encode(queue_size, bl); + encode(max_urgent_data_size, bl); + encode(bl_urgent_data, bl); + ENCODE_FINISH(bl); + } + 
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(max_head_size, bl);
+    decode(front, bl);
+    decode(tail, bl);
+    decode(queue_size, bl);
+    decode(max_urgent_data_size, bl);
+    decode(bl_urgent_data, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(cls_queue_head)
+
+#endif
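Markers serialize to the "gen/offset" text form via to_str() and parse back with from_str(), which is what the queue listing and removal ops exchange as opaque strings. A quick round-trip sketch (values hypothetical, illustration only):

  cls_queue_marker m;
  int r = m.from_str("2/4096");  // generation 2, byte offset 4096
  // on success: r == 0, m.gen == 2, m.offset == 4096,
  // and m.to_str() yields "2/4096" again

diff --git a/src/cls/rbd/cls_rbd.cc b/src/cls/rbd/cls_rbd.cc
new file mode 100644
index 000000000..229f85d49
--- /dev/null
+++ b/src/cls/rbd/cls_rbd.cc
@@ -0,0 +1,8627 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+/** \file
+ *
+ * This is an OSD class that implements methods for
+ * use with rbd.
+ *
+ * Most of these deal with the rbd header object. Methods prefixed
+ * with old_ deal with the original rbd design, in which clients read
+ * and interpreted the header object directly.
+ *
+ * The new format is meant to be opaque to clients - all their
+ * interactions with non-data objects should go through this
+ * class. The OSD class interface leaves the class to implement its
+ * own argument and payload serialization/deserialization, so for ease
+ * of implementation we use the existing ceph encoding/decoding
+ * methods. Something like json might be preferable, but the rbd
+ * kernel module has to be able to understand the format as well. The
+ * datatypes exposed to the clients are strings, unsigned integers,
+ * and vectors of those types. The on-wire format can be found in
+ * src/include/encoding.h.
+ *
+ * The methods for interacting with the new format document their
+ * parameters as the client sees them - it would be silly to mention
+ * in each one that they take an input and an output bufferlist.
+ */
+#include "include/types.h"
+
+#include <algorithm>
+#include <errno.h>
+#include <sstream>
+
+#include "include/uuid.h"
+#include "common/bit_vector.hpp"
+#include "common/errno.h"
+#include "objclass/objclass.h"
+#include "osd/osd_types.h"
+#include "include/rbd_types.h"
+#include "include/rbd/object_map_types.h"
+
+#include "cls/rbd/cls_rbd.h"
+#include "cls/rbd/cls_rbd_types.h"
+
+#include <boost/algorithm/string/predicate.hpp>
+
+using std::istringstream;
+using std::ostringstream;
+using std::map;
+using std::set;
+using std::string;
+using std::vector;
+
+using ceph::BitVector;
+using ceph::bufferlist;
+using ceph::bufferptr;
+using ceph::encode;
+using ceph::decode;
+
+/*
+ * Object keys:
+ *
+ * <partial list>
+ *
+ * stripe_unit: size in bytes of the stripe unit. if not present,
+ * the stripe unit is assumed to match the object size (1 << order).
+ *
+ * stripe_count: number of objects to stripe over before looping back.
+ * if not present or 1, striping is disabled. this is the default.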
+ *
+ */
+
+CLS_VER(2,0)
+CLS_NAME(rbd)
+
+#define RBD_MAX_KEYS_READ 64
+#define RBD_SNAP_KEY_PREFIX "snapshot_"
+#define RBD_SNAP_CHILDREN_KEY_PREFIX "snap_children_"
+#define RBD_DIR_ID_KEY_PREFIX "id_"
+#define RBD_DIR_NAME_KEY_PREFIX "name_"
+#define RBD_METADATA_KEY_PREFIX "metadata_"
+
+namespace {
+
+uint64_t get_encode_features(cls_method_context_t hctx) {
+  uint64_t features = 0;
+  ceph_release_t require_osd_release = cls_get_required_osd_release(hctx);
+  if (require_osd_release >= ceph_release_t::nautilus) {
+    features |= CEPH_FEATURE_SERVER_NAUTILUS;
+  }
+  return features;
+}
+
+bool calc_sparse_extent(const bufferptr &bp, size_t sparse_size,
+                        uint64_t length, size_t *write_offset,
+                        size_t *write_length, size_t *offset) {
+  size_t extent_size;
+  if (*offset + sparse_size > length) {
+    extent_size = length - *offset;
+  } else {
+    extent_size = sparse_size;
+  }
+
+  bufferptr extent(bp, *offset, extent_size);
+  *offset += extent_size;
+
+  bool extent_is_zero = extent.is_zero();
+  if (!extent_is_zero) {
+    *write_length += extent_size;
+  }
+  if (extent_is_zero && *write_length == 0) {
+    *write_offset += extent_size;
+  }
+
+  if ((extent_is_zero || *offset == length) && *write_length != 0) {
+    return true;
+  }
+  return false;
+}
+
+} // anonymous namespace
+
+static int snap_read_header(cls_method_context_t hctx, bufferlist& bl)
+{
+  unsigned snap_count = 0;
+  uint64_t snap_names_len = 0;
+  struct rbd_obj_header_ondisk *header;
+
+  CLS_LOG(20, "snapshots_list");
+
+  while (1) {
+    int len = sizeof(*header) +
+              snap_count * sizeof(struct rbd_obj_snap_ondisk) +
+              snap_names_len;
+
+    int rc = cls_cxx_read(hctx, 0, len, &bl);
+    if (rc < 0)
+      return rc;
+
+    if (bl.length() < sizeof(*header))
+      return -EINVAL;
+
+    header = (struct rbd_obj_header_ondisk *)bl.c_str();
+    ceph_assert(header);
+
+    if ((snap_count != header->snap_count) ||
+        (snap_names_len != header->snap_names_len)) {
+      snap_count = header->snap_count;
+      snap_names_len = header->snap_names_len;
+      bl.clear();
+      continue;
+    }
+    break;
+  }
+
+  return 0;
+}
+
+static void key_from_snap_id(snapid_t snap_id, string *out)
+{
+  ostringstream oss;
+  oss << RBD_SNAP_KEY_PREFIX
+      << std::setw(16) << std::setfill('0') << std::hex << snap_id;
+  *out = oss.str();
+}
+
+static snapid_t snap_id_from_key(const string &key) {
+  istringstream iss(key);
+  uint64_t id;
+  iss.ignore(strlen(RBD_SNAP_KEY_PREFIX)) >> std::hex >> id;
+  return id;
+}
+
+template<typename T>
+static int read_key(cls_method_context_t hctx, const string &key, T *out)
+{
+  bufferlist bl;
+  int r = cls_cxx_map_get_val(hctx, key, &bl);
+  if (r < 0) {
+    if (r != -ENOENT) {
+      CLS_ERR("error reading omap key %s: %s", key.c_str(), cpp_strerror(r).c_str());
+    }
+    return r;
+  }
+
+  try {
+    auto it = bl.cbegin();
+    decode(*out, it);
+  } catch (const ceph::buffer::error &err) {
+    CLS_ERR("error decoding %s", key.c_str());
+    return -EIO;
+  }
+
+  return 0;
+}
+
+template <typename T>
+static int write_key(cls_method_context_t hctx, const string &key, const T &t) {
+  bufferlist bl;
+  encode(t, bl);
+
+  int r = cls_cxx_map_set_val(hctx, key, &bl);
+  if (r < 0) {
+    CLS_ERR("failed to set omap key: %s", key.c_str());
+    return r;
+  }
+  return 0;
+}
+
+template <typename T>
+static int write_key(cls_method_context_t hctx, const string &key, const T &t,
+                     uint64_t features) {
+  bufferlist bl;
+  encode(t, bl, features);
+
+  int r = cls_cxx_map_set_val(hctx, key, &bl);
+  if (r < 0) {
+    CLS_ERR("failed to set omap key: %s", key.c_str());
+    return r;
+  }
+  return 0;
+}
+
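Together, read_key and write_key give typed omap access from inside a cls method: read_key pairs cls_cxx_map_get_val with decode, and write_key pairs encode with cls_cxx_map_set_val. A hypothetical mutator, not part of this patch, could round-trip the encoded "size" value like this (the "size" key itself is real and used later in this file):

  // Illustration only: bump the stored image size by 'delta' bytes.
  static int grow_size(cls_method_context_t hctx, uint64_t delta) {
    uint64_t size;
    int r = read_key(hctx, "size", &size);   // cls_cxx_map_get_val + decode
    if (r < 0) {
      return r;
    }
    return write_key(hctx, "size", size + delta);  // encode + set_val
  }

+static int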
remove_key(cls_method_context_t hctx, const string &key) { + int r = cls_cxx_map_remove_key(hctx, key); + if (r < 0 && r != -ENOENT) { + CLS_ERR("failed to remove key: %s", key.c_str()); + return r; + } + return 0; +} + +static bool is_valid_id(const string &id) { + if (!id.size()) + return false; + for (size_t i = 0; i < id.size(); ++i) { + if (!isalnum(id[i])) { + return false; + } + } + return true; +} + +/** + * verify that the header object exists + * + * @return 0 if the object exists, -ENOENT if it does not, or other error + */ +static int check_exists(cls_method_context_t hctx) +{ + uint64_t size; + time_t mtime; + return cls_cxx_stat(hctx, &size, &mtime); +} + +namespace image { + +/** + * check that given feature(s) are set + * + * @param hctx context + * @param need features needed + * @return 0 if features are set, negative error (like ENOEXEC) otherwise + */ +int require_feature(cls_method_context_t hctx, uint64_t need) +{ + uint64_t features; + int r = read_key(hctx, "features", &features); + if (r == -ENOENT) // this implies it's an old-style image with no features + return -ENOEXEC; + if (r < 0) + return r; + if ((features & need) != need) { + CLS_LOG(10, "require_feature missing feature %llx, have %llx", + (unsigned long long)need, (unsigned long long)features); + return -ENOEXEC; + } + return 0; +} + +std::string snap_children_key_from_snap_id(snapid_t snap_id) +{ + ostringstream oss; + oss << RBD_SNAP_CHILDREN_KEY_PREFIX + << std::setw(16) << std::setfill('0') << std::hex << snap_id; + return oss.str(); +} + +int set_op_features(cls_method_context_t hctx, uint64_t op_features, + uint64_t mask) { + uint64_t orig_features; + int r = read_key(hctx, "features", &orig_features); + if (r < 0) { + CLS_ERR("failed to read features off disk: %s", cpp_strerror(r).c_str()); + return r; + } + + uint64_t orig_op_features = 0; + r = read_key(hctx, "op_features", &orig_op_features); + if (r < 0 && r != -ENOENT) { + CLS_ERR("Could not read op features off disk: %s", cpp_strerror(r).c_str()); + return r; + } + + op_features = (orig_op_features & ~mask) | (op_features & mask); + CLS_LOG(10, "op_features=%" PRIu64 " orig_op_features=%" PRIu64, + op_features, orig_op_features); + if (op_features == orig_op_features) { + return 0; + } + + uint64_t features = orig_features; + if (op_features == 0ULL) { + features &= ~RBD_FEATURE_OPERATIONS; + + r = cls_cxx_map_remove_key(hctx, "op_features"); + if (r == -ENOENT) { + r = 0; + } + } else { + features |= RBD_FEATURE_OPERATIONS; + + bufferlist bl; + encode(op_features, bl); + r = cls_cxx_map_set_val(hctx, "op_features", &bl); + } + + if (r < 0) { + CLS_ERR("error updating op features: %s", cpp_strerror(r).c_str()); + return r; + } + + if (features != orig_features) { + bufferlist bl; + encode(features, bl); + r = cls_cxx_map_set_val(hctx, "features", &bl); + if (r < 0) { + CLS_ERR("error updating features: %s", cpp_strerror(r).c_str()); + return r; + } + } + + return 0; +} + +int set_migration(cls_method_context_t hctx, + const cls::rbd::MigrationSpec &migration_spec, bool init) { + if (init) { + bufferlist bl; + int r = cls_cxx_map_get_val(hctx, "migration", &bl); + if (r != -ENOENT) { + if (r == 0) { + CLS_LOG(10, "migration already set"); + return -EEXIST; + } + CLS_ERR("failed to read migration off disk: %s", cpp_strerror(r).c_str()); + return r; + } + + uint64_t features = 0; + r = read_key(hctx, "features", &features); + if (r == -ENOENT) { + CLS_LOG(20, "no features, assuming v1 format"); + bufferlist header; + r = cls_cxx_read(hctx, 0, 
sizeof(RBD_HEADER_TEXT), &header); + if (r < 0) { + CLS_ERR("failed to read v1 header: %s", cpp_strerror(r).c_str()); + return r; + } + if (header.length() != sizeof(RBD_HEADER_TEXT)) { + CLS_ERR("unrecognized v1 header format"); + return -ENXIO; + } + if (memcmp(RBD_HEADER_TEXT, header.c_str(), header.length()) != 0) { + if (memcmp(RBD_MIGRATE_HEADER_TEXT, header.c_str(), + header.length()) == 0) { + CLS_LOG(10, "migration already set"); + return -EEXIST; + } else { + CLS_ERR("unrecognized v1 header format"); + return -ENXIO; + } + } + if (migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_SRC) { + CLS_LOG(10, "v1 format image can only be migration source"); + return -EINVAL; + } + + header.clear(); + header.append(RBD_MIGRATE_HEADER_TEXT); + r = cls_cxx_write(hctx, 0, header.length(), &header); + if (r < 0) { + CLS_ERR("error updating v1 header: %s", cpp_strerror(r).c_str()); + return r; + } + } else if (r < 0) { + CLS_ERR("failed to read features off disk: %s", cpp_strerror(r).c_str()); + return r; + } else if ((features & RBD_FEATURE_MIGRATING) != 0ULL) { + if (migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_DST) { + CLS_LOG(10, "migrating feature already set"); + return -EEXIST; + } + } else { + features |= RBD_FEATURE_MIGRATING; + bl.clear(); + encode(features, bl); + r = cls_cxx_map_set_val(hctx, "features", &bl); + if (r < 0) { + CLS_ERR("error updating features: %s", cpp_strerror(r).c_str()); + return r; + } + } + } + + bufferlist bl; + encode(migration_spec, bl); + int r = cls_cxx_map_set_val(hctx, "migration", &bl); + if (r < 0) { + CLS_ERR("error setting migration: %s", cpp_strerror(r).c_str()); + return r; + } + + return 0; +} + +int read_migration(cls_method_context_t hctx, + cls::rbd::MigrationSpec *migration_spec) { + uint64_t features = 0; + int r = read_key(hctx, "features", &features); + if (r == -ENOENT) { + CLS_LOG(20, "no features, assuming v1 format"); + bufferlist header; + r = cls_cxx_read(hctx, 0, sizeof(RBD_HEADER_TEXT), &header); + if (r < 0) { + CLS_ERR("failed to read v1 header: %s", cpp_strerror(r).c_str()); + return r; + } + if (header.length() != sizeof(RBD_HEADER_TEXT)) { + CLS_ERR("unrecognized v1 header format"); + return -ENXIO; + } + if (memcmp(RBD_MIGRATE_HEADER_TEXT, header.c_str(), header.length()) != 0) { + if (memcmp(RBD_HEADER_TEXT, header.c_str(), header.length()) == 0) { + CLS_LOG(10, "migration feature not set"); + return -EINVAL; + } else { + CLS_ERR("unrecognized v1 header format"); + return -ENXIO; + } + } + if (migration_spec->header_type != cls::rbd::MIGRATION_HEADER_TYPE_SRC) { + CLS_LOG(10, "v1 format image can only be migration source"); + return -EINVAL; + } + } else if (r < 0) { + CLS_ERR("failed to read features off disk: %s", cpp_strerror(r).c_str()); + return r; + } else if ((features & RBD_FEATURE_MIGRATING) == 0ULL) { + CLS_LOG(10, "migration feature not set"); + return -EINVAL; + } + + r = read_key(hctx, "migration", migration_spec); + if (r < 0) { + CLS_ERR("failed to read migration off disk: %s", cpp_strerror(r).c_str()); + return r; + } + + return 0; +} + +int remove_migration(cls_method_context_t hctx) { + int r = remove_key(hctx, "migration"); + if (r < 0) { + return r; + } + + uint64_t features = 0; + r = read_key(hctx, "features", &features); + if (r == -ENOENT) { + CLS_LOG(20, "no features, assuming v1 format"); + bufferlist header; + r = cls_cxx_read(hctx, 0, sizeof(RBD_MIGRATE_HEADER_TEXT), &header); + if (header.length() != sizeof(RBD_MIGRATE_HEADER_TEXT)) { + CLS_ERR("unrecognized v1 header 
format"); + return -ENXIO; + } + if (memcmp(RBD_MIGRATE_HEADER_TEXT, header.c_str(), header.length()) != 0) { + if (memcmp(RBD_HEADER_TEXT, header.c_str(), header.length()) == 0) { + CLS_LOG(10, "migration feature not set"); + return -EINVAL; + } else { + CLS_ERR("unrecognized v1 header format"); + return -ENXIO; + } + } + header.clear(); + header.append(RBD_HEADER_TEXT); + r = cls_cxx_write(hctx, 0, header.length(), &header); + if (r < 0) { + CLS_ERR("error updating v1 header: %s", cpp_strerror(r).c_str()); + return r; + } + } else if (r < 0) { + CLS_ERR("failed to read features off disk: %s", cpp_strerror(r).c_str()); + return r; + } else if ((features & RBD_FEATURE_MIGRATING) == 0ULL) { + CLS_LOG(10, "migrating feature not set"); + } else { + features &= ~RBD_FEATURE_MIGRATING; + bufferlist bl; + encode(features, bl); + r = cls_cxx_map_set_val(hctx, "features", &bl); + if (r < 0) { + CLS_ERR("error updating features: %s", cpp_strerror(r).c_str()); + return r; + } + } + + return 0; +} + +namespace snapshot { + +template<typename L> +int iterate(cls_method_context_t hctx, L& lambda) { + int max_read = RBD_MAX_KEYS_READ; + string last_read = RBD_SNAP_KEY_PREFIX; + bool more = false; + do { + map<string, bufferlist> vals; + int r = cls_cxx_map_get_vals(hctx, last_read, RBD_SNAP_KEY_PREFIX, + max_read, &vals, &more); + if (r < 0) { + return r; + } + + cls_rbd_snap snap_meta; + for (auto& val : vals) { + auto iter = val.second.cbegin(); + try { + decode(snap_meta, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("error decoding snapshot metadata for snap : %s", + val.first.c_str()); + return -EIO; + } + + r = lambda(snap_meta); + if (r < 0) { + return r; + } + } + + if (!vals.empty()) { + last_read = vals.rbegin()->first; + } + } while (more); + + return 0; +} + +int write(cls_method_context_t hctx, const std::string& snap_key, + cls_rbd_snap&& snap) { + int r; + uint64_t encode_features = get_encode_features(hctx); + if (snap.migrate_parent_format(encode_features)) { + // ensure the normalized parent link exists before removing it from the + // snapshot record + cls_rbd_parent on_disk_parent; + r = read_key(hctx, "parent", &on_disk_parent); + if (r < 0 && r != -ENOENT) { + return r; + } + + if (!on_disk_parent.exists()) { + on_disk_parent = snap.parent; + on_disk_parent.head_overlap = std::nullopt; + + r = write_key(hctx, "parent", on_disk_parent, encode_features); + if (r < 0) { + return r; + } + } + + // only store the parent overlap in the snapshot + snap.parent_overlap = snap.parent.head_overlap; + snap.parent = {}; + } + + r = write_key(hctx, snap_key, snap, encode_features); + if (r < 0) { + return r; + } + return 0; +} + +} // namespace snapshot + +namespace parent { + +int attach(cls_method_context_t hctx, cls_rbd_parent parent, + bool reattach) { + int r = check_exists(hctx); + if (r < 0) { + CLS_LOG(20, "cls_rbd::image::parent::attach: child doesn't exist"); + return r; + } + + r = image::require_feature(hctx, RBD_FEATURE_LAYERING); + if (r < 0) { + CLS_LOG(20, "cls_rbd::image::parent::attach: child does not support " + "layering"); + return r; + } + + CLS_LOG(20, "cls_rbd::image::parent::attach: pool=%" PRIi64 ", ns=%s, id=%s, " + "snapid=%" PRIu64 ", size=%" PRIu64, + parent.pool_id, parent.pool_namespace.c_str(), + parent.image_id.c_str(), parent.snap_id.val, + parent.head_overlap.value_or(0ULL)); + if (!parent.exists() || parent.head_overlap.value_or(0ULL) == 0ULL) { + return -EINVAL; + } + + // make sure there isn't already a parent + cls_rbd_parent 
on_disk_parent; + r = read_key(hctx, "parent", &on_disk_parent); + if (r < 0 && r != -ENOENT) { + return r; + } + + auto on_disk_parent_without_overlap{on_disk_parent}; + on_disk_parent_without_overlap.head_overlap = parent.head_overlap; + + if (r == 0 && + (on_disk_parent.head_overlap || + on_disk_parent_without_overlap != parent) && + !reattach) { + CLS_LOG(20, "cls_rbd::parent::attach: existing legacy parent " + "pool=%" PRIi64 ", ns=%s, id=%s, snapid=%" PRIu64 ", " + "overlap=%" PRIu64, + on_disk_parent.pool_id, on_disk_parent.pool_namespace.c_str(), + on_disk_parent.image_id.c_str(), on_disk_parent.snap_id.val, + on_disk_parent.head_overlap.value_or(0ULL)); + return -EEXIST; + } + + // our overlap is the min of our size and the parent's size. + uint64_t our_size; + r = read_key(hctx, "size", &our_size); + if (r < 0) { + return r; + } + + parent.head_overlap = std::min(*parent.head_overlap, our_size); + + r = write_key(hctx, "parent", parent, get_encode_features(hctx)); + if (r < 0) { + return r; + } + + return 0; +} + +int detach(cls_method_context_t hctx, bool legacy_api) { + int r = check_exists(hctx); + if (r < 0) { + CLS_LOG(20, "cls_rbd::parent::detach: child doesn't exist"); + return r; + } + + uint64_t features; + r = read_key(hctx, "features", &features); + if (r == -ENOENT || ((features & RBD_FEATURE_LAYERING) == 0)) { + CLS_LOG(20, "cls_rbd::image::parent::detach: child does not support " + "layering"); + return -ENOEXEC; + } else if (r < 0) { + return r; + } + + cls_rbd_parent on_disk_parent; + r = read_key(hctx, "parent", &on_disk_parent); + if (r < 0) { + return r; + } else if (legacy_api && !on_disk_parent.pool_namespace.empty()) { + return -EXDEV; + } else if (!on_disk_parent.head_overlap) { + return -ENOENT; + } + + auto detach_lambda = [hctx, features](const cls_rbd_snap& snap_meta) { + if (snap_meta.parent.pool_id != -1 || snap_meta.parent_overlap) { + if ((features & RBD_FEATURE_DEEP_FLATTEN) != 0ULL) { + // remove parent reference from snapshot + cls_rbd_snap snap_meta_copy = snap_meta; + snap_meta_copy.parent = {}; + snap_meta_copy.parent_overlap = std::nullopt; + + std::string snap_key; + key_from_snap_id(snap_meta_copy.id, &snap_key); + int r = snapshot::write(hctx, snap_key, std::move(snap_meta_copy)); + if (r < 0) { + return r; + } + } else { + return -EEXIST; + } + } + return 0; + }; + + r = snapshot::iterate(hctx, detach_lambda); + bool has_child_snaps = (r == -EEXIST); + if (r < 0 && r != -EEXIST) { + return r; + } + + ceph_release_t require_osd_release = cls_get_required_osd_release(hctx); + if (has_child_snaps && require_osd_release >= ceph_release_t::nautilus) { + // remove overlap from HEAD revision but keep spec for snapshots + on_disk_parent.head_overlap = std::nullopt; + r = write_key(hctx, "parent", on_disk_parent, get_encode_features(hctx)); + if (r < 0) { + return r; + } + } else { + r = remove_key(hctx, "parent"); + if (r < 0 && r != -ENOENT) { + return r; + } + } + + if (!has_child_snaps) { + // disable clone child op feature if no longer associated + r = set_op_features(hctx, 0, RBD_OPERATION_FEATURE_CLONE_CHILD); + if (r < 0) { + return r; + } + } + return 0; +} + +} // namespace parent +} // namespace image + +/** + * Initialize the header with basic metadata. + * Extra features may initialize more fields in the future. + * Everything is stored as key/value pairs as omaps in the header object. + * + * If features the OSD does not understand are requested, -ENOSYS is + * returned. 
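+ *
+ * As a hedged, client-side illustration only (the pool wiring, oid and
+ * prefix below are placeholders, not values defined in this file), the
+ * method can be invoked through librados exec():
+ *
+ *   bufferlist in, out;
+ *   encode(uint64_t(1) << 30, in);               // size: 1 GiB
+ *   encode(uint8_t(22), in);                     // order: 4 MiB objects
+ *   encode(uint64_t(RBD_FEATURE_LAYERING), in);  // features
+ *   encode(std::string("rbd_data.<id>."), in);   // object_prefix
+ *   ioctx.exec("rbd_header.<id>", "rbd", "create", in, out);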
+ * + * Input: + * @param size number of bytes in the image (uint64_t) + * @param order bits to shift to determine the size of data objects (uint8_t) + * @param features what optional things this image will use (uint64_t) + * @param object_prefix a prefix for all the data objects + * @param data_pool_id pool id where data objects are stored (int64_t) + * + * Output: + * @return 0 on success, negative error code on failure + */ +int create(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + string object_prefix; + uint64_t features, size; + uint8_t order; + int64_t data_pool_id = -1; + + try { + auto iter = in->cbegin(); + decode(size, iter); + decode(order, iter); + decode(features, iter); + decode(object_prefix, iter); + if (!iter.end()) { + decode(data_pool_id, iter); + } + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "create object_prefix=%s size=%llu order=%u features=%llu", + object_prefix.c_str(), (unsigned long long)size, order, + (unsigned long long)features); + + if (features & ~RBD_FEATURES_ALL) { + return -ENOSYS; + } + + if (!object_prefix.size()) { + return -EINVAL; + } + + bufferlist stored_prefixbl; + int r = cls_cxx_map_get_val(hctx, "object_prefix", &stored_prefixbl); + if (r != -ENOENT) { + CLS_ERR("reading object_prefix returned %d", r); + return -EEXIST; + } + + bufferlist sizebl; + bufferlist orderbl; + bufferlist featuresbl; + bufferlist object_prefixbl; + bufferlist snap_seqbl; + bufferlist timestampbl; + uint64_t snap_seq = 0; + utime_t timestamp = ceph_clock_now(); + encode(size, sizebl); + encode(order, orderbl); + encode(features, featuresbl); + encode(object_prefix, object_prefixbl); + encode(snap_seq, snap_seqbl); + encode(timestamp, timestampbl); + + map<string, bufferlist> omap_vals; + omap_vals["size"] = sizebl; + omap_vals["order"] = orderbl; + omap_vals["features"] = featuresbl; + omap_vals["object_prefix"] = object_prefixbl; + omap_vals["snap_seq"] = snap_seqbl; + omap_vals["create_timestamp"] = timestampbl; + omap_vals["access_timestamp"] = timestampbl; + omap_vals["modify_timestamp"] = timestampbl; + + if ((features & RBD_FEATURE_OPERATIONS) != 0ULL) { + CLS_ERR("Attempting to set internal feature: operations"); + return -EINVAL; + } + + if (features & RBD_FEATURE_DATA_POOL) { + if (data_pool_id == -1) { + CLS_ERR("data pool not provided with feature enabled"); + return -EINVAL; + } + + bufferlist data_pool_id_bl; + encode(data_pool_id, data_pool_id_bl); + omap_vals["data_pool_id"] = data_pool_id_bl; + } else if (data_pool_id != -1) { + CLS_ERR("data pool provided with feature disabled"); + return -EINVAL; + } + + r = cls_cxx_map_set_vals(hctx, &omap_vals); + if (r < 0) + return r; + + return 0; +} + +/** + * Input: + * @param snap_id which snapshot to query, or CEPH_NOSNAP (uint64_t) (deprecated) + * @param read_only true if the image will be used read-only (bool) + * + * Output: + * @param features list of enabled features for the given snapshot (uint64_t) + * @param incompatible incompatible feature bits + * @returns 0 on success, negative error code on failure + */ +int get_features(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + bool read_only = false; + + auto iter = in->cbegin(); + try { + uint64_t snap_id; + decode(snap_id, iter); + if (!iter.end()) { + decode(read_only, iter); + } + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "get_features read_only=%d", read_only); + + uint64_t features; + int r = read_key(hctx, "features", &features); + if (r 
< 0) { + CLS_ERR("failed to read features off disk: %s", cpp_strerror(r).c_str()); + return r; + } + + uint64_t incompatible = (read_only ? features & RBD_FEATURES_INCOMPATIBLE : + features & RBD_FEATURES_RW_INCOMPATIBLE); + encode(features, *out); + encode(incompatible, *out); + return 0; +} + +/** + * set the image features + * + * Input: + * @param features image features + * @param mask image feature mask + * + * Output: + * none + * + * @returns 0 on success, negative error code upon failure + */ +int set_features(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t features; + uint64_t mask; + auto iter = in->cbegin(); + try { + decode(features, iter); + decode(mask, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + // check that features exists to make sure this is a header object + // that was created correctly + uint64_t orig_features = 0; + int r = read_key(hctx, "features", &orig_features); + if (r < 0 && r != -ENOENT) { + CLS_ERR("Could not read image's features off disk: %s", + cpp_strerror(r).c_str()); + return r; + } + + if ((mask & RBD_FEATURES_INTERNAL) != 0ULL) { + CLS_ERR("Attempting to set internal feature: %" PRIu64, + static_cast<uint64_t>(mask & RBD_FEATURES_INTERNAL)); + return -EINVAL; + } + + // newer clients might attempt to mask off features we don't support + mask &= RBD_FEATURES_ALL; + + uint64_t enabled_features = features & mask; + if ((enabled_features & RBD_FEATURES_MUTABLE) != enabled_features) { + CLS_ERR("Attempting to enable immutable feature: %" PRIu64, + static_cast<uint64_t>(enabled_features & ~RBD_FEATURES_MUTABLE)); + return -EINVAL; + } + + uint64_t disabled_features = ~features & mask; + uint64_t disable_mask = (RBD_FEATURES_MUTABLE | RBD_FEATURES_DISABLE_ONLY); + if ((disabled_features & disable_mask) != disabled_features) { + CLS_ERR("Attempting to disable immutable feature: %" PRIu64, + disabled_features & ~disable_mask); + return -EINVAL; + } + + features = (orig_features & ~mask) | (features & mask); + CLS_LOG(10, "set_features features=%" PRIu64 " orig_features=%" PRIu64, + features, orig_features); + + bufferlist bl; + encode(features, bl); + r = cls_cxx_map_set_val(hctx, "features", &bl); + if (r < 0) { + CLS_ERR("error updating features: %s", cpp_strerror(r).c_str()); + return r; + } + return 0; +} + +/** + * Input: + * @param snap_id which snapshot to query, or CEPH_NOSNAP (uint64_t) + * + * Output: + * @param order bits to shift to get the size of data objects (uint8_t) + * @param size size of the image in bytes for the given snapshot (uint64_t) + * @returns 0 on success, negative error code on failure + */ +int get_size(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t snap_id, size; + uint8_t order; + + auto iter = in->cbegin(); + try { + decode(snap_id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "get_size snap_id=%llu", (unsigned long long)snap_id); + + int r = read_key(hctx, "order", &order); + if (r < 0) { + CLS_ERR("failed to read the order off of disk: %s", cpp_strerror(r).c_str()); + return r; + } + + if (snap_id == CEPH_NOSNAP) { + r = read_key(hctx, "size", &size); + if (r < 0) { + CLS_ERR("failed to read the image's size off of disk: %s", cpp_strerror(r).c_str()); + return r; + } + } else { + cls_rbd_snap snap; + string snapshot_key; + key_from_snap_id(snap_id, &snapshot_key); + int r = read_key(hctx, snapshot_key, &snap); + if (r < 0) + return r; + + size = snap.image_size; + } + + encode(order, *out); + 
encode(size, *out); + + return 0; +} + +/** + * Input: + * @param size new capacity of the image in bytes (uint64_t) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int set_size(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t size; + + auto iter = in->cbegin(); + try { + decode(size, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + // check that size exists to make sure this is a header object + // that was created correctly + uint64_t orig_size; + int r = read_key(hctx, "size", &orig_size); + if (r < 0) { + CLS_ERR("Could not read image's size off disk: %s", cpp_strerror(r).c_str()); + return r; + } + + CLS_LOG(20, "set_size size=%llu orig_size=%llu", (unsigned long long)size, + (unsigned long long)orig_size); + + bufferlist sizebl; + encode(size, sizebl); + r = cls_cxx_map_set_val(hctx, "size", &sizebl); + if (r < 0) { + CLS_ERR("error writing snapshot metadata: %s", cpp_strerror(r).c_str()); + return r; + } + + // if we are shrinking, and have a parent, shrink our overlap with + // the parent, too. + if (size < orig_size) { + cls_rbd_parent parent; + r = read_key(hctx, "parent", &parent); + if (r == -ENOENT) + r = 0; + if (r < 0) + return r; + if (parent.exists() && parent.head_overlap.value_or(0ULL) > size) { + parent.head_overlap = size; + r = write_key(hctx, "parent", parent, get_encode_features(hctx)); + if (r < 0) { + return r; + } + } + } + + return 0; +} + +/** + * get the current protection status of the specified snapshot + * + * Input: + * @param snap_id (uint64_t) which snapshot to get the status of + * + * Output: + * @param status (uint8_t) one of: + * RBD_PROTECTION_STATUS_{PROTECTED, UNPROTECTED, UNPROTECTING} + * + * @returns 0 on success, negative error code on failure + * @returns -EINVAL if snapid is CEPH_NOSNAP + */ +int get_protection_status(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) +{ + snapid_t snap_id; + + auto iter = in->cbegin(); + try { + decode(snap_id, iter); + } catch (const ceph::buffer::error &err) { + CLS_LOG(20, "get_protection_status: invalid decode"); + return -EINVAL; + } + + int r = check_exists(hctx); + if (r < 0) + return r; + + CLS_LOG(20, "get_protection_status snap_id=%llu", + (unsigned long long)snap_id.val); + + if (snap_id == CEPH_NOSNAP) + return -EINVAL; + + cls_rbd_snap snap; + string snapshot_key; + key_from_snap_id(snap_id.val, &snapshot_key); + r = read_key(hctx, snapshot_key, &snap); + if (r < 0) { + CLS_ERR("could not read key for snapshot id %" PRIu64, snap_id.val); + return r; + } + + if (snap.protection_status >= RBD_PROTECTION_STATUS_LAST) { + CLS_ERR("invalid protection status for snap id %llu: %u", + (unsigned long long)snap_id.val, snap.protection_status); + return -EIO; + } + + encode(snap.protection_status, *out); + return 0; +} + +/** + * set the protection status of a snapshot + * + * Input: + * @param snapid (uint64_t) which snapshot to set the status of + * @param status (uint8_t) one of: + * RBD_PROTECTION_STATUS_{PROTECTED, UNPROTECTED, UNPROTECTING} + * + * @returns 0 on success, negative error code on failure + * @returns -EINVAL if snapid is CEPH_NOSNAP + */ +int set_protection_status(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) +{ + snapid_t snap_id; + uint8_t status; + + auto iter = in->cbegin(); + try { + decode(snap_id, iter); + decode(status, iter); + } catch (const ceph::buffer::error &err) { + CLS_LOG(20, "set_protection_status: invalid decode"); + return -EINVAL; + } + + int r = 
check_exists(hctx); + if (r < 0) + return r; + + r = image::require_feature(hctx, RBD_FEATURE_LAYERING); + if (r < 0) { + CLS_LOG(20, "image does not support layering"); + return r; + } + + CLS_LOG(20, "set_protection_status snapid=%llu status=%u", + (unsigned long long)snap_id.val, status); + + if (snap_id == CEPH_NOSNAP) + return -EINVAL; + + if (status >= RBD_PROTECTION_STATUS_LAST) { + CLS_LOG(10, "invalid protection status for snap id %llu: %u", + (unsigned long long)snap_id.val, status); + return -EINVAL; + } + + cls_rbd_snap snap; + string snapshot_key; + key_from_snap_id(snap_id.val, &snapshot_key); + r = read_key(hctx, snapshot_key, &snap); + if (r < 0) { + CLS_ERR("could not read key for snapshot id %" PRIu64, snap_id.val); + return r; + } + + snap.protection_status = status; + r = image::snapshot::write(hctx, snapshot_key, std::move(snap)); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * get striping parameters + * + * Input: + * none + * + * Output: + * @param stripe unit (bytes) + * @param stripe count (num objects) + * + * @returns 0 on success + */ +int get_stripe_unit_count(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + int r = check_exists(hctx); + if (r < 0) + return r; + + CLS_LOG(20, "get_stripe_unit_count"); + + r = image::require_feature(hctx, RBD_FEATURE_STRIPINGV2); + if (r < 0) + return r; + + uint64_t stripe_unit = 0, stripe_count = 0; + r = read_key(hctx, "stripe_unit", &stripe_unit); + if (r == -ENOENT) { + // default to object size + uint8_t order; + r = read_key(hctx, "order", &order); + if (r < 0) { + CLS_ERR("failed to read the order off of disk: %s", cpp_strerror(r).c_str()); + return -EIO; + } + stripe_unit = 1ull << order; + } + if (r < 0) + return r; + r = read_key(hctx, "stripe_count", &stripe_count); + if (r == -ENOENT) { + // default to 1 + stripe_count = 1; + r = 0; + } + if (r < 0) + return r; + + encode(stripe_unit, *out); + encode(stripe_count, *out); + return 0; +} + +/** + * set striping parameters + * + * Input: + * @param stripe unit (bytes) + * @param stripe count (num objects) + * + * @returns 0 on success + */ +int set_stripe_unit_count(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t stripe_unit, stripe_count; + + auto iter = in->cbegin(); + try { + decode(stripe_unit, iter); + decode(stripe_count, iter); + } catch (const ceph::buffer::error &err) { + CLS_LOG(20, "set_stripe_unit_count: invalid decode"); + return -EINVAL; + } + + if (!stripe_count || !stripe_unit) + return -EINVAL; + + int r = check_exists(hctx); + if (r < 0) + return r; + + CLS_LOG(20, "set_stripe_unit_count"); + + r = image::require_feature(hctx, RBD_FEATURE_STRIPINGV2); + if (r < 0) + return r; + + uint8_t order; + r = read_key(hctx, "order", &order); + if (r < 0) { + CLS_ERR("failed to read the order off of disk: %s", cpp_strerror(r).c_str()); + return r; + } + if ((1ull << order) % stripe_unit || stripe_unit > (1ull << order)) { + CLS_ERR("stripe unit %llu is not a factor of the object size %llu", + (unsigned long long)stripe_unit, 1ull << order); + return -EINVAL; + } + + bufferlist bl, bl2; + encode(stripe_unit, bl); + r = cls_cxx_map_set_val(hctx, "stripe_unit", &bl); + if (r < 0) { + CLS_ERR("error writing stripe_unit metadata: %s", cpp_strerror(r).c_str()); + return r; + } + + encode(stripe_count, bl2); + r = cls_cxx_map_set_val(hctx, "stripe_count", &bl2); + if (r < 0) { + CLS_ERR("error writing stripe_count metadata: %s", cpp_strerror(r).c_str()); + return r; + } + + return 0; +} + +int 
get_create_timestamp(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "get_create_timestamp"); + + utime_t timestamp; + bufferlist bl; + int r = cls_cxx_map_get_val(hctx, "create_timestamp", &bl); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading create_timestamp: %s", cpp_strerror(r).c_str()); + return r; + } + } else { + try { + auto it = bl.cbegin(); + decode(timestamp, it); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode create_timestamp"); + return -EIO; + } + } + + encode(timestamp, *out); + return 0; +} + +/** + * get the image access timestamp + * + * Input: + * @param none + * + * Output: + * @param timestamp the image access timestamp + * + * @returns 0 on success, negative error code upon failure + */ +int get_access_timestamp(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "get_access_timestamp"); + + utime_t timestamp; + bufferlist bl; + int r = cls_cxx_map_get_val(hctx, "access_timestamp", &bl); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading access_timestamp: %s", cpp_strerror(r).c_str()); + return r; + } + } else { + try { + auto it = bl.cbegin(); + decode(timestamp, it); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode access_timestamp"); + return -EIO; + } + } + + encode(timestamp, *out); + return 0; +} + +/** + * get the image modify timestamp + * + * Input: + * @param none + * + * Output: + * @param timestamp the image modify timestamp + * + * @returns 0 on success, negative error code upon failure + */ +int get_modify_timestamp(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "get_modify_timestamp"); + + utime_t timestamp; + bufferlist bl; + int r = cls_cxx_map_get_val(hctx, "modify_timestamp", &bl); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading modify_timestamp: %s", cpp_strerror(r).c_str()); + return r; + } + } else { + try { + auto it = bl.cbegin(); + decode(timestamp, it); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode modify_timestamp"); + return -EIO; + } + } + + encode(timestamp, *out); + return 0; +} + + +/** + * get the image flags + * + * Input: + * @param snap_id which snapshot to query, or CEPH_NOSNAP (uint64_t) + * + * Output: + * @param flags image flags + * + * @returns 0 on success, negative error code upon failure + */ +int get_flags(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t snap_id; + auto iter = in->cbegin(); + try { + decode(snap_id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "get_flags snap_id=%llu", (unsigned long long)snap_id); + + uint64_t flags = 0; + if (snap_id == CEPH_NOSNAP) { + int r = read_key(hctx, "flags", &flags); + if (r < 0 && r != -ENOENT) { + CLS_ERR("failed to read flags off disk: %s", cpp_strerror(r).c_str()); + return r; + } + } else { + cls_rbd_snap snap; + string snapshot_key; + key_from_snap_id(snap_id, &snapshot_key); + int r = read_key(hctx, snapshot_key, &snap); + if (r < 0) { + return r; + } + flags = snap.flags; + } + + encode(flags, *out); + return 0; +} + +/** + * set the image flags + * + * Input: + * @param flags image flags + * @param mask image flag mask + * @param snap_id which snapshot to update, or CEPH_NOSNAP (uint64_t) + * + * Output: + * none + * + * @returns 0 on success, negative error code upon failure + */ +int set_flags(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t flags; + uint64_t mask; + uint64_t 
snap_id = CEPH_NOSNAP; + auto iter = in->cbegin(); + try { + decode(flags, iter); + decode(mask, iter); + if (!iter.end()) { + decode(snap_id, iter); + } + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + // check that size exists to make sure this is a header object + // that was created correctly + int r; + uint64_t orig_flags = 0; + cls_rbd_snap snap_meta; + string snap_meta_key; + if (snap_id == CEPH_NOSNAP) { + r = read_key(hctx, "flags", &orig_flags); + if (r < 0 && r != -ENOENT) { + CLS_ERR("Could not read image's flags off disk: %s", + cpp_strerror(r).c_str()); + return r; + } + } else { + key_from_snap_id(snap_id, &snap_meta_key); + r = read_key(hctx, snap_meta_key, &snap_meta); + if (r < 0) { + CLS_ERR("Could not read snapshot: snap_id=%" PRIu64 ": %s", + snap_id, cpp_strerror(r).c_str()); + return r; + } + orig_flags = snap_meta.flags; + } + + flags = (orig_flags & ~mask) | (flags & mask); + CLS_LOG(20, "set_flags snap_id=%" PRIu64 ", orig_flags=%" PRIu64 ", " + "new_flags=%" PRIu64 ", mask=%" PRIu64, snap_id, orig_flags, + flags, mask); + + if (snap_id == CEPH_NOSNAP) { + r = write_key(hctx, "flags", flags); + } else { + snap_meta.flags = flags; + r = image::snapshot::write(hctx, snap_meta_key, std::move(snap_meta)); + } + + if (r < 0) { + return r; + } + return 0; +} + +/** + * Get the operation-based image features + * + * Input: + * + * Output: + * @param bitmask of enabled op features (uint64_t) + * @returns 0 on success, negative error code on failure + */ +int op_features_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "op_features_get"); + + uint64_t op_features = 0; + int r = read_key(hctx, "op_features", &op_features); + if (r < 0 && r != -ENOENT) { + CLS_ERR("failed to read op features off disk: %s", cpp_strerror(r).c_str()); + return r; + } + + encode(op_features, *out); + return 0; +} + +/** + * Set the operation-based image features + * + * Input: + * @param op_features image op features + * @param mask image op feature mask + * + * Output: + * none + * + * @returns 0 on success, negative error code upon failure + */ +int op_features_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t op_features; + uint64_t mask; + auto iter = in->cbegin(); + try { + decode(op_features, iter); + decode(mask, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + uint64_t unsupported_op_features = (mask & ~RBD_OPERATION_FEATURES_ALL); + if (unsupported_op_features != 0ULL) { + CLS_ERR("unsupported op features: %" PRIu64, unsupported_op_features); + return -EINVAL; + } + + return image::set_op_features(hctx, op_features, mask); +} + +/** + * get the current parent, if any + * + * Input: + * @param snap_id which snapshot to query, or CEPH_NOSNAP (uint64_t) + * + * Output: + * @param pool parent pool id (-1 if parent does not exist) + * @param image parent image id + * @param snapid parent snapid + * @param size portion of parent mapped under the child + * + * @returns 0 on success or parent does not exist, negative error code on failure + */ +int get_parent(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t snap_id; + + auto iter = in->cbegin(); + try { + decode(snap_id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int r = check_exists(hctx); + if (r < 0) { + return r; + } + + CLS_LOG(20, "get_parent snap_id=%" PRIu64, snap_id); + + cls_rbd_parent parent; + r = image::require_feature(hctx, RBD_FEATURE_LAYERING); + if (r == 0) { 
+ r = read_key(hctx, "parent", &parent); + if (r < 0 && r != -ENOENT) { + return r; + } else if (!parent.pool_namespace.empty()) { + return -EXDEV; + } + + if (snap_id != CEPH_NOSNAP) { + cls_rbd_snap snap; + std::string snapshot_key; + key_from_snap_id(snap_id, &snapshot_key); + r = read_key(hctx, snapshot_key, &snap); + if (r < 0 && r != -ENOENT) { + return r; + } + + if (snap.parent.exists()) { + // legacy format where full parent spec is written within + // each snapshot record + parent = snap.parent; + } else if (snap.parent_overlap) { + // normalized parent reference + if (!parent.exists()) { + CLS_ERR("get_parent: snap_id=%" PRIu64 ": invalid parent spec", + snap_id); + return -EINVAL; + } + parent.head_overlap = *snap.parent_overlap; + } else { + // snapshot doesn't have associated parent + parent = {}; + } + } + } + + encode(parent.pool_id, *out); + encode(parent.image_id, *out); + encode(parent.snap_id, *out); + encode(parent.head_overlap.value_or(0ULL), *out); + return 0; +} + +/** + * set the image parent + * + * Input: + * @param pool parent pool + * @param id parent image id + * @param snapid parent snapid + * @param size parent size + * + * @returns 0 on success, or negative error code + */ +int set_parent(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + cls_rbd_parent parent; + auto iter = in->cbegin(); + try { + decode(parent.pool_id, iter); + decode(parent.image_id, iter); + decode(parent.snap_id, iter); + + uint64_t overlap; + decode(overlap, iter); + parent.head_overlap = overlap; + } catch (const ceph::buffer::error &err) { + CLS_LOG(20, "cls_rbd::set_parent: invalid decode"); + return -EINVAL; + } + + int r = image::parent::attach(hctx, parent, false); + if (r < 0) { + return r; + } + + return 0; +} + + +/** + * remove the parent pointer + * + * This can only happen on the head, not on a snapshot. No arguments. + * + * @returns 0 on success, negative error code on failure. 
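+ *
+ * Note: this is the legacy detach entry point; it simply delegates to
+ * image::parent::detach() with legacy_api=true, so a child whose parent
+ * lives in a non-default pool namespace is rejected with -EXDEV.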
+ */ +int remove_parent(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + int r = image::parent::detach(hctx, true); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Input: + * none + * + * Output: + * @param parent spec (cls::rbd::ParentImageSpec) + * @returns 0 on success, negative error code on failure + */ +int parent_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + int r = check_exists(hctx); + if (r < 0) { + return r; + } + + CLS_LOG(20, "parent_get"); + + cls_rbd_parent parent; + r = image::require_feature(hctx, RBD_FEATURE_LAYERING); + if (r == 0) { + r = read_key(hctx, "parent", &parent); + if (r < 0 && r != -ENOENT) { + return r; + } else if (r == -ENOENT) { + // examine oldest snapshot to see if it has a denormalized parent + auto parent_lambda = [&parent](const cls_rbd_snap& snap_meta) { + if (snap_meta.parent.exists()) { + parent = snap_meta.parent; + } + return 0; + }; + + r = image::snapshot::iterate(hctx, parent_lambda); + if (r < 0) { + return r; + } + } + } + + cls::rbd::ParentImageSpec parent_image_spec{ + parent.pool_id, parent.pool_namespace, parent.image_id, + parent.snap_id}; + encode(parent_image_spec, *out); + return 0; +} + +/** + * Input: + * @param snap id (uint64_t) parent snapshot id + * + * Output: + * @param byte overlap of parent image (std::optional<uint64_t>) + * @returns 0 on success, negative error code on failure + */ +int parent_overlap_get(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint64_t snap_id; + auto iter = in->cbegin(); + try { + decode(snap_id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int r = check_exists(hctx); + CLS_LOG(20, "parent_overlap_get"); + + std::optional<uint64_t> parent_overlap = std::nullopt; + r = image::require_feature(hctx, RBD_FEATURE_LAYERING); + if (r == 0) { + if (snap_id == CEPH_NOSNAP) { + cls_rbd_parent parent; + r = read_key(hctx, "parent", &parent); + if (r < 0 && r != -ENOENT) { + return r; + } else if (r == 0) { + parent_overlap = parent.head_overlap; + } + } else { + cls_rbd_snap snap; + std::string snapshot_key; + key_from_snap_id(snap_id, &snapshot_key); + r = read_key(hctx, snapshot_key, &snap); + if (r < 0) { + return r; + } + + if (snap.parent_overlap) { + parent_overlap = snap.parent_overlap; + } else if (snap.parent.exists()) { + // legacy format where full parent spec is written within + // each snapshot record + parent_overlap = snap.parent.head_overlap; + } + } + }; + + encode(parent_overlap, *out); + return 0; +} + +/** + * Input: + * @param parent spec (cls::rbd::ParentImageSpec) + * @param size parent size (uint64_t) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int parent_attach(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + cls::rbd::ParentImageSpec parent_image_spec; + uint64_t parent_overlap; + bool reattach = false; + + auto iter = in->cbegin(); + try { + decode(parent_image_spec, iter); + decode(parent_overlap, iter); + if (!iter.end()) { + decode(reattach, iter); + } + } catch (const ceph::buffer::error &err) { + CLS_LOG(20, "cls_rbd::parent_attach: invalid decode"); + return -EINVAL; + } + + int r = image::parent::attach(hctx, {parent_image_spec, parent_overlap}, + reattach); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Input: + * none + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int parent_detach(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + int r = 
image::parent::detach(hctx, false); + if (r < 0) { + return r; + } + + return 0; +} + + +/** + * methods for dealing with rbd_children object + */ + +static int decode_parent_common(bufferlist::const_iterator& it, uint64_t *pool_id, + string *image_id, snapid_t *snap_id) +{ + try { + decode(*pool_id, it); + decode(*image_id, it); + decode(*snap_id, it); + } catch (const ceph::buffer::error &err) { + CLS_ERR("error decoding parent spec"); + return -EINVAL; + } + return 0; +} + +static int decode_parent(bufferlist *in, uint64_t *pool_id, + string *image_id, snapid_t *snap_id) +{ + auto it = in->cbegin(); + return decode_parent_common(it, pool_id, image_id, snap_id); +} + +static int decode_parent_and_child(bufferlist *in, uint64_t *pool_id, + string *image_id, snapid_t *snap_id, + string *c_image_id) +{ + auto it = in->cbegin(); + int r = decode_parent_common(it, pool_id, image_id, snap_id); + if (r < 0) + return r; + try { + decode(*c_image_id, it); + } catch (const ceph::buffer::error &err) { + CLS_ERR("error decoding child image id"); + return -EINVAL; + } + return 0; +} + +static string parent_key(uint64_t pool_id, string image_id, snapid_t snap_id) +{ + bufferlist key_bl; + encode(pool_id, key_bl); + encode(image_id, key_bl); + encode(snap_id, key_bl); + return string(key_bl.c_str(), key_bl.length()); +} + +/** + * add child to rbd_children directory object + * + * rbd_children is a map of (p_pool_id, p_image_id, p_snap_id) to + * [c_image_id, [c_image_id ... ]] + * + * Input: + * @param p_pool_id parent pool id + * @param p_image_id parent image oid + * @param p_snap_id parent snapshot id + * @param c_image_id new child image oid to add + * + * @returns 0 on success, negative error on failure + */ + +int add_child(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + int r; + + uint64_t p_pool_id; + snapid_t p_snap_id; + string p_image_id, c_image_id; + // Use set for ease of erase() for remove_child() + std::set<string> children; + + r = decode_parent_and_child(in, &p_pool_id, &p_image_id, &p_snap_id, + &c_image_id); + if (r < 0) + return r; + + CLS_LOG(20, "add_child %s to (%" PRIu64 ", %s, %" PRIu64 ")", c_image_id.c_str(), + p_pool_id, p_image_id.c_str(), p_snap_id.val); + + string key = parent_key(p_pool_id, p_image_id, p_snap_id); + + // get current child list for parent, if any + r = read_key(hctx, key, &children); + if ((r < 0) && (r != -ENOENT)) { + CLS_LOG(20, "add_child: omap read failed: %s", cpp_strerror(r).c_str()); + return r; + } + + if (children.find(c_image_id) != children.end()) { + CLS_LOG(20, "add_child: child already exists: %s", c_image_id.c_str()); + return -EEXIST; + } + // add new child + children.insert(c_image_id); + + // write back + bufferlist childbl; + encode(children, childbl); + r = cls_cxx_map_set_val(hctx, key, &childbl); + if (r < 0) + CLS_LOG(20, "add_child: omap write failed: %s", cpp_strerror(r).c_str()); + return r; +} + +/** + * remove child from rbd_children directory object + * + * Input: + * @param p_pool_id parent pool id + * @param p_image_id parent image oid + * @param p_snap_id parent snapshot id + * @param c_image_id child image oid to remove + * + * @returns 0 on success, negative error on failure + */ + +int remove_child(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + int r; + + uint64_t p_pool_id; + snapid_t p_snap_id; + string p_image_id, c_image_id; + std::set<string> children; + + r = decode_parent_and_child(in, &p_pool_id, &p_image_id, &p_snap_id, + &c_image_id); + if (r < 0) + return r; + + 
CLS_LOG(20, "remove_child %s from (%" PRIu64 ", %s, %" PRIu64 ")", + c_image_id.c_str(), p_pool_id, p_image_id.c_str(), + p_snap_id.val); + + string key = parent_key(p_pool_id, p_image_id, p_snap_id); + + // get current child list for parent. Unlike add_child(), an empty list + // is an error (how can we remove something that doesn't exist?) + r = read_key(hctx, key, &children); + if (r < 0) { + CLS_LOG(20, "remove_child: read omap failed: %s", cpp_strerror(r).c_str()); + return r; + } + + if (children.find(c_image_id) == children.end()) { + CLS_LOG(20, "remove_child: child not found: %s", c_image_id.c_str()); + return -ENOENT; + } + // find and remove child + children.erase(c_image_id); + + // now empty? remove key altogether + if (children.empty()) { + r = cls_cxx_map_remove_key(hctx, key); + if (r < 0) + CLS_LOG(20, "remove_child: remove key failed: %s", cpp_strerror(r).c_str()); + } else { + // write back shortened children list + bufferlist childbl; + encode(children, childbl); + r = cls_cxx_map_set_val(hctx, key, &childbl); + if (r < 0) + CLS_LOG(20, "remove_child: write omap failed: %s", cpp_strerror(r).c_str()); + } + return r; +} + +/** + * Input: + * @param p_pool_id parent pool id + * @param p_image_id parent image oid + * @param p_snap_id parent snapshot id + * @param c_image_id new child image oid to add + * + * Output: + * @param children set<string> of children + * + * @returns 0 on success, negative error on failure + */ +int get_children(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + int r; + uint64_t p_pool_id; + snapid_t p_snap_id; + string p_image_id; + std::set<string> children; + + r = decode_parent(in, &p_pool_id, &p_image_id, &p_snap_id); + if (r < 0) + return r; + + CLS_LOG(20, "get_children of (%" PRIu64 ", %s, %" PRIu64 ")", + p_pool_id, p_image_id.c_str(), p_snap_id.val); + + string key = parent_key(p_pool_id, p_image_id, p_snap_id); + + r = read_key(hctx, key, &children); + if (r < 0) { + if (r != -ENOENT) + CLS_LOG(20, "get_children: read omap failed: %s", cpp_strerror(r).c_str()); + return r; + } + encode(children, *out); + return 0; +} + + +/** + * Get the information needed to create a rados snap context for doing + * I/O to the data objects. This must include all snapshots. 
+ * + * Output: + * @param snap_seq the highest snapshot id ever associated with the image (uint64_t) + * @param snap_ids existing snapshot ids in descending order (vector<uint64_t>) + * @returns 0 on success, negative error code on failure + */ +int get_snapcontext(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "get_snapcontext"); + + int r; + int max_read = RBD_MAX_KEYS_READ; + vector<snapid_t> snap_ids; + string last_read = RBD_SNAP_KEY_PREFIX; + bool more; + + do { + set<string> keys; + r = cls_cxx_map_get_keys(hctx, last_read, max_read, &keys, &more); + if (r < 0) + return r; + + for (auto it = keys.begin(); it != keys.end(); ++it) { + if ((*it).find(RBD_SNAP_KEY_PREFIX) != 0) + break; + snapid_t snap_id = snap_id_from_key(*it); + snap_ids.push_back(snap_id); + } + if (!keys.empty()) + last_read = *(keys.rbegin()); + } while (more); + + uint64_t snap_seq; + r = read_key(hctx, "snap_seq", &snap_seq); + if (r < 0) { + CLS_ERR("could not read the image's snap_seq off disk: %s", cpp_strerror(r).c_str()); + return r; + } + + // snap_ids must be descending in a snap context + std::reverse(snap_ids.begin(), snap_ids.end()); + + encode(snap_seq, *out); + encode(snap_ids, *out); + + return 0; +} + +/** + * Output: + * @param object_prefix prefix for data object names (string) + * @returns 0 on success, negative error code on failure + */ +int get_object_prefix(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "get_object_prefix"); + + string object_prefix; + int r = read_key(hctx, "object_prefix", &object_prefix); + if (r < 0) { + CLS_ERR("failed to read the image's object prefix off of disk: %s", + cpp_strerror(r).c_str()); + return r; + } + + encode(object_prefix, *out); + + return 0; +} + +/** + * Input: + * none + * + * Output: + * @param pool_id (int64_t) of data pool or -1 if none + * @returns 0 on success, negative error code on failure + */ +int get_data_pool(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "get_data_pool"); + + int64_t data_pool_id = -1; + int r = read_key(hctx, "data_pool_id", &data_pool_id); + if (r == -ENOENT) { + data_pool_id = -1; + } else if (r < 0) { + CLS_ERR("error reading image data pool id: %s", cpp_strerror(r).c_str()); + return r; + } + + encode(data_pool_id, *out); + return 0; +} + +/** + * Input: + * @param snap_id which snapshot to query + * + * Output: + * @param name (string) of the snapshot + * @returns 0 on success, negative error code on failure + */ +int get_snapshot_name(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t snap_id; + + auto iter = in->cbegin(); + try { + decode(snap_id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "get_snapshot_name snap_id=%llu", (unsigned long long)snap_id); + + if (snap_id == CEPH_NOSNAP) + return -EINVAL; + + cls_rbd_snap snap; + string snapshot_key; + key_from_snap_id(snap_id, &snapshot_key); + int r = read_key(hctx, snapshot_key, &snap); + if (r < 0) + return r; + + encode(snap.name, *out); + + return 0; +} + +/** + * Input: + * @param snap_id which snapshot to query + * + * Output: + * @param timestamp (utime_t) of the snapshot + * @returns 0 on success, negative error code on failure + * + * NOTE: deprecated - remove this method after Luminous is unsupported + */ +int get_snapshot_timestamp(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t snap_id; + + auto iter = in->cbegin(); + try { + decode(snap_id, iter); + } catch (const 
ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "get_snapshot_timestamp snap_id=%llu", (unsigned long long)snap_id); + + if (snap_id == CEPH_NOSNAP) { + return -EINVAL; + } + + cls_rbd_snap snap; + string snapshot_key; + key_from_snap_id(snap_id, &snapshot_key); + int r = read_key(hctx, snapshot_key, &snap); + if (r < 0) { + return r; + } + + encode(snap.timestamp, *out); + return 0; +} + +/** + * Input: + * @param snap_id which snapshot to query + * + * Output: + * @param snapshot (cls::rbd::SnapshotInfo) + * @returns 0 on success, negative error code on failure + */ +int snapshot_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t snap_id; + + auto iter = in->cbegin(); + try { + decode(snap_id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "snapshot_get snap_id=%llu", (unsigned long long)snap_id); + if (snap_id == CEPH_NOSNAP) { + return -EINVAL; + } + + cls_rbd_snap snap; + string snapshot_key; + key_from_snap_id(snap_id, &snapshot_key); + int r = read_key(hctx, snapshot_key, &snap); + if (r < 0) { + return r; + } + + cls::rbd::SnapshotInfo snapshot_info{snap.id, snap.snapshot_namespace, + snap.name, snap.image_size, + snap.timestamp, snap.child_count}; + encode(snapshot_info, *out); + return 0; +} + +/** + * Adds a snapshot to an rbd header. Ensures the id and name are unique. + * + * Input: + * @param snap_name name of the snapshot (string) + * @param snap_id id of the snapshot (uint64_t) + * @param snap_namespace namespace of the snapshot (cls::rbd::SnapshotNamespace) + * + * Output: + * @returns 0 on success, negative error code on failure. + * @returns -ESTALE if the input snap_id is less than the image's snap_seq + * @returns -EEXIST if the id or name are already used by another snapshot + */ +int snapshot_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + bufferlist snap_namebl, snap_idbl; + cls_rbd_snap snap_meta; + uint64_t snap_limit; + + try { + auto iter = in->cbegin(); + decode(snap_meta.name, iter); + decode(snap_meta.id, iter); + if (!iter.end()) { + decode(snap_meta.snapshot_namespace, iter); + } + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + if (std::holds_alternative<cls::rbd::UnknownSnapshotNamespace>( + snap_meta.snapshot_namespace)) { + CLS_ERR("Unknown snapshot namespace provided"); + return -EINVAL; + } + + CLS_LOG(20, "snapshot_add name=%s id=%llu", snap_meta.name.c_str(), + (unsigned long long)snap_meta.id.val); + + if (snap_meta.id > CEPH_MAXSNAP) + return -EINVAL; + + uint64_t cur_snap_seq; + int r = read_key(hctx, "snap_seq", &cur_snap_seq); + if (r < 0) { + CLS_ERR("Could not read image's snap_seq off disk: %s", cpp_strerror(r).c_str()); + return r; + } + + // client lost a race with another snapshot creation. + // snap_seq must be monotonically increasing. 
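+ // (snap ids are allocated by the cluster before this method runs, so an
+ // id below the recorded snap_seq means another snapshot_add already
+ // committed with a newer id; -ESTALE tells the client to retry with a
+ // freshly allocated id)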
+ if (snap_meta.id < cur_snap_seq) + return -ESTALE; + + r = read_key(hctx, "size", &snap_meta.image_size); + if (r < 0) { + CLS_ERR("Could not read image's size off disk: %s", cpp_strerror(r).c_str()); + return r; + } + r = read_key(hctx, "flags", &snap_meta.flags); + if (r < 0 && r != -ENOENT) { + CLS_ERR("Could not read image's flags off disk: %s", cpp_strerror(r).c_str()); + return r; + } + + r = read_key(hctx, "snap_limit", &snap_limit); + if (r == -ENOENT) { + snap_limit = UINT64_MAX; + } else if (r < 0) { + CLS_ERR("Could not read snapshot limit off disk: %s", cpp_strerror(r).c_str()); + return r; + } + + snap_meta.timestamp = ceph_clock_now(); + + uint64_t total_read = 0; + auto pre_check_lambda = + [&snap_meta, &total_read, snap_limit](const cls_rbd_snap& old_meta) { + ++total_read; + if (total_read >= snap_limit) { + CLS_ERR("Attempt to create snapshot over limit of %" PRIu64, + snap_limit); + return -EDQUOT; + } + + if ((snap_meta.name == old_meta.name && + snap_meta.snapshot_namespace == old_meta.snapshot_namespace) || + snap_meta.id == old_meta.id) { + CLS_LOG(20, "snap_name %s or snap_id %" PRIu64 " matches existing snap " + "%s %" PRIu64, snap_meta.name.c_str(), snap_meta.id.val, + old_meta.name.c_str(), old_meta.id.val); + return -EEXIST; + } + return 0; + }; + + r = image::snapshot::iterate(hctx, pre_check_lambda); + if (r < 0) { + return r; + } + + // snapshot inherits parent, if any + cls_rbd_parent parent; + r = read_key(hctx, "parent", &parent); + if (r < 0 && r != -ENOENT) { + return r; + } + if (r == 0) { + // write helper method will convert to normalized format if required + snap_meta.parent = parent; + } + + if (cls::rbd::get_snap_namespace_type(snap_meta.snapshot_namespace) == + cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) { + // add snap_trash feature bit if not already enabled + r = image::set_op_features(hctx, RBD_OPERATION_FEATURE_SNAP_TRASH, + RBD_OPERATION_FEATURE_SNAP_TRASH); + if (r < 0) { + return r; + } + } + + r = write_key(hctx, "snap_seq", snap_meta.id); + if (r < 0) { + return r; + } + + std::string snapshot_key; + key_from_snap_id(snap_meta.id, &snapshot_key); + r = image::snapshot::write(hctx, snapshot_key, std::move(snap_meta)); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Rename a snapshot. + * + * Input: + * @param src_snap_id old snap id of the snapshot (snapid_t) + * @param dst_snap_name new name of the snapshot (string) + * + * Output: + * @returns 0 on success, negative error code on failure. 
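+ * @returns -EEXIST if dst_snap_name collides with an existing user snapshot
+ * @returns -EINVAL if the source snapshot is not in the user namespace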
+ */ +int snapshot_rename(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + bufferlist snap_namebl, snap_idbl; + snapid_t src_snap_id; + string dst_snap_name; + cls_rbd_snap snap_meta; + int r; + + try { + auto iter = in->cbegin(); + decode(src_snap_id, iter); + decode(dst_snap_name, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "snapshot_rename id=%" PRIu64 ", dst_name=%s", + src_snap_id.val, dst_snap_name.c_str()); + + auto duplicate_name_lambda = [&dst_snap_name](const cls_rbd_snap& snap_meta) { + if (cls::rbd::get_snap_namespace_type(snap_meta.snapshot_namespace) == + cls::rbd::SNAPSHOT_NAMESPACE_TYPE_USER && + snap_meta.name == dst_snap_name) { + CLS_LOG(20, "snap_name %s matches existing snap with snap id %" PRIu64, + dst_snap_name.c_str(), snap_meta.id.val); + return -EEXIST; + } + return 0; + }; + r = image::snapshot::iterate(hctx, duplicate_name_lambda); + if (r < 0) { + return r; + } + + std::string src_snap_key; + key_from_snap_id(src_snap_id, &src_snap_key); + r = read_key(hctx, src_snap_key, &snap_meta); + if (r == -ENOENT) { + CLS_LOG(20, "cannot find existing snap with snap id = %" PRIu64, + src_snap_id.val); + return r; + } + + if (cls::rbd::get_snap_namespace_type(snap_meta.snapshot_namespace) != + cls::rbd::SNAPSHOT_NAMESPACE_TYPE_USER) { + // can only rename user snapshots + return -EINVAL; + } + + snap_meta.name = dst_snap_name; + r = image::snapshot::write(hctx, src_snap_key, std::move(snap_meta)); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Removes a snapshot from an rbd header. + * + * Input: + * @param snap_id the id of the snapshot to remove (uint64_t) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int snapshot_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + snapid_t snap_id; + + try { + auto iter = in->cbegin(); + decode(snap_id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "snapshot_remove id=%llu", (unsigned long long)snap_id.val); + + // check if the key exists. we can't rely on remove_key doing this for + // us, since OMAPRMKEYS returns success if the key is not there. + // bug or feature? sounds like a bug, since tmap did not have this + // behavior, but cls_rgw may rely on it... 
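+ // so read the snapshot record first and surface -ENOENT ourselves before
+ // mutating any other state.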
+ cls_rbd_snap snap; + string snapshot_key; + key_from_snap_id(snap_id, &snapshot_key); + int r = read_key(hctx, snapshot_key, &snap); + if (r == -ENOENT) { + return -ENOENT; + } + + if (snap.protection_status != RBD_PROTECTION_STATUS_UNPROTECTED) { + return -EBUSY; + } + + // snapshot is in-use by clone v2 child + if (snap.child_count > 0) { + return -EBUSY; + } + + r = remove_key(hctx, snapshot_key); + if (r < 0) { + return r; + } + + bool has_child_snaps = false; + bool has_trash_snaps = false; + auto remove_lambda = [snap_id, &has_child_snaps, &has_trash_snaps]( + const cls_rbd_snap& snap_meta) { + if (snap_meta.id != snap_id) { + if (snap_meta.parent.pool_id != -1 || snap_meta.parent_overlap) { + has_child_snaps = true; + } + + if (cls::rbd::get_snap_namespace_type(snap_meta.snapshot_namespace) == + cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) { + has_trash_snaps = true; + } + } + return 0; + }; + + r = image::snapshot::iterate(hctx, remove_lambda); + if (r < 0) { + return r; + } + + cls_rbd_parent parent; + r = read_key(hctx, "parent", &parent); + if (r < 0 && r != -ENOENT) { + return r; + } + + bool has_parent = (r >= 0 && parent.exists()); + bool is_head_child = (has_parent && parent.head_overlap); + ceph_release_t require_osd_release = cls_get_required_osd_release(hctx); + if (has_parent && !is_head_child && !has_child_snaps && + require_osd_release >= ceph_release_t::nautilus) { + // remove the unused parent image spec + r = remove_key(hctx, "parent"); + if (r < 0 && r != -ENOENT) { + return r; + } + } + + uint64_t op_features_mask = 0ULL; + if (!has_child_snaps && !is_head_child) { + // disable clone child op feature if no longer associated + op_features_mask |= RBD_OPERATION_FEATURE_CLONE_CHILD; + } + if (!has_trash_snaps) { + // remove the snap_trash op feature if not in-use by any other snapshots + op_features_mask |= RBD_OPERATION_FEATURE_SNAP_TRASH; + } + + if (op_features_mask != 0ULL) { + r = image::set_op_features(hctx, 0, op_features_mask); + if (r < 0) { + return r; + } + } + + return 0; +} + +/** + * Moves a snapshot to the trash namespace. 
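+ * The snapshot keeps its id; the original namespace type and name are
+ * preserved inside the new TrashSnapshotNamespace, while the visible name
+ * is replaced with a randomly generated uuid.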
+ * + * Input: + * @param snap_id the id of the snapshot to move to the trash (uint64_t) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int snapshot_trash_add(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) +{ + snapid_t snap_id; + + try { + auto iter = in->cbegin(); + decode(snap_id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "snapshot_trash_add id=%" PRIu64, snap_id.val); + + cls_rbd_snap snap; + std::string snapshot_key; + key_from_snap_id(snap_id, &snapshot_key); + int r = read_key(hctx, snapshot_key, &snap); + if (r == -ENOENT) { + return r; + } + + if (snap.protection_status != RBD_PROTECTION_STATUS_UNPROTECTED) { + return -EBUSY; + } + + auto snap_type = cls::rbd::get_snap_namespace_type(snap.snapshot_namespace); + if (snap_type == cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) { + return -EEXIST; + } + + // add snap_trash feature bit if not already enabled + r = image::set_op_features(hctx, RBD_OPERATION_FEATURE_SNAP_TRASH, + RBD_OPERATION_FEATURE_SNAP_TRASH); + if (r < 0) { + return r; + } + + snap.snapshot_namespace = cls::rbd::TrashSnapshotNamespace{snap_type, + snap.name}; + uuid_d uuid_gen; + uuid_gen.generate_random(); + snap.name = uuid_gen.to_string(); + + r = image::snapshot::write(hctx, snapshot_key, std::move(snap)); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Returns a uint64_t of all the features supported by this class. + */ +int get_all_features(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t all_features = RBD_FEATURES_ALL; + encode(all_features, *out); + return 0; +} + +/** + * "Copy up" data from the parent of a clone to the clone's object(s). + * Used for implementing copy-on-write for a clone image. Client + * will pass down a chunk of data that fits completely within one + * clone block (one object), and is aligned (starts at beginning of block), + * but may be shorter (for non-full parent blocks). The class method + * can't know the object size to validate the requested length, + * so it just writes the data as given if the child object doesn't + * already exist, and returns success if it does. 
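+ *
+ * As a hedged client-side illustration (the object name and parent_chunk
+ * buffer are placeholders), the input is the raw data itself:
+ *
+ *   bufferlist data, out;
+ *   data.append(parent_chunk);  // block-aligned data read from the parent
+ *   ioctx.exec("rbd_data.<id>.<objno>", "rbd", "copyup", data, out);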
+ * + * Input: + * @param in bufferlist of data to write + * + * Output: + * @returns 0 on success, or if block already exists in child + * negative error code on other error + */ + +int copyup(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + // check for existence; if child object exists, just return success + if (cls_cxx_stat(hctx, NULL, NULL) == 0) + return 0; + CLS_LOG(20, "copyup: writing length %d\n", in->length()); + return cls_cxx_write(hctx, 0, in->length(), in); +} + +/** + * Input: + * @param extent_map map of extents to write + * @param data bufferlist of data to write + * + * Output: + * @returns 0 on success, or if block already exists in child + * negative error code on other error + */ + +int sparse_copyup(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + std::map<uint64_t, uint64_t> extent_map; + bufferlist data; + + try { + auto iter = in->cbegin(); + decode(extent_map, iter); + decode(data, iter); + } catch (const ceph::buffer::error &err) { + CLS_LOG(20, "sparse_copyup: invalid decode"); + return -EINVAL; + } + + int r = check_exists(hctx); + if (r == 0) { + return 0; + } + + if (extent_map.empty()) { + CLS_LOG(20, "sparse_copyup: create empty object"); + r = cls_cxx_create(hctx, true); + return r; + } + + uint64_t data_offset = 0; + for (auto &it: extent_map) { + auto off = it.first; + auto len = it.second; + + bufferlist tmpbl; + try { + tmpbl.substr_of(data, data_offset, len); + } catch (const ceph::buffer::error &err) { + CLS_LOG(20, "sparse_copyup: invalid data"); + return -EINVAL; + } + data_offset += len; + + CLS_LOG(20, "sparse_copyup: writing extent %" PRIu64 "~%" PRIu64 "\n", off, + len); + int r = cls_cxx_write(hctx, off, len, &tmpbl); + if (r < 0) { + CLS_ERR("sparse_copyup: error writing extent %" PRIu64 "~%" PRIu64 ": %s", + off, len, cpp_strerror(r).c_str()); + return r; + } + } + + return 0; +} + +/************************ rbd_id object methods **************************/ + +/** + * Input: + * @param in ignored + * + * Output: + * @param id the id stored in the object + * @returns 0 on success, negative error code on failure + */ +int get_id(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t size; + int r = cls_cxx_stat(hctx, &size, NULL); + if (r < 0) + return r; + + if (size == 0) + return -ENOENT; + + bufferlist read_bl; + r = cls_cxx_read(hctx, 0, size, &read_bl); + if (r < 0) { + CLS_ERR("get_id: could not read id: %s", cpp_strerror(r).c_str()); + return r; + } + + string id; + try { + auto iter = read_bl.cbegin(); + decode(id, iter); + } catch (const ceph::buffer::error &err) { + return -EIO; + } + + encode(id, *out); + return 0; +} + +/** + * Set the id of an image. The object must already exist. 
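+ * The id object is expected to be created empty first; if anything has
+ * already written a non-zero length to it, this method fails with -EEXIST.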
+ * + * Input: + * @param id the id of the image, as an alpha-numeric string + * + * Output: + * @returns 0 on success, -EEXIST if the atomic create fails, + * negative error code on other error + */ +int set_id(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + int r = check_exists(hctx); + if (r < 0) + return r; + + string id; + try { + auto iter = in->cbegin(); + decode(id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + if (!is_valid_id(id)) { + CLS_ERR("set_id: invalid id '%s'", id.c_str()); + return -EINVAL; + } + + uint64_t size; + r = cls_cxx_stat(hctx, &size, NULL); + if (r < 0) + return r; + if (size != 0) + return -EEXIST; + + CLS_LOG(20, "set_id: id=%s", id.c_str()); + + bufferlist write_bl; + encode(id, write_bl); + return cls_cxx_write(hctx, 0, write_bl.length(), &write_bl); +} + +/** + * Update the access timestamp of an image + * + * Input: + * @param none + * + * Output: + * @returns 0 on success, negative error code on other error + */ +int set_access_timestamp(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + int r = check_exists(hctx); + if(r < 0) + return r; + + utime_t timestamp = ceph_clock_now(); + r = write_key(hctx, "access_timestamp", timestamp); + if(r < 0) { + CLS_ERR("error setting access_timestamp"); + return r; + } + + return 0; +} + +/** + * Update the modify timestamp of an image + * + * Input: + * @param none + * + * Output: + * @returns 0 on success, negative error code on other error + */ + +int set_modify_timestamp(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + int r = check_exists(hctx); + if(r < 0) + return r; + + utime_t timestamp = ceph_clock_now(); + r = write_key(hctx, "modify_timestamp", timestamp); + if(r < 0) { + CLS_ERR("error setting modify_timestamp"); + return r; + } + + return 0; +} + + + +/*********************** methods for rbd_directory ***********************/ + +static const string dir_key_for_id(const string &id) +{ + return RBD_DIR_ID_KEY_PREFIX + id; +} + +static const string dir_key_for_name(const string &name) +{ + return RBD_DIR_NAME_KEY_PREFIX + name; +} + +static const string dir_name_from_key(const string &key) +{ + return key.substr(strlen(RBD_DIR_NAME_KEY_PREFIX)); +} + +static int dir_add_image_helper(cls_method_context_t hctx, + const string &name, const string &id, + bool check_for_unique_id) +{ + if (!name.size() || !is_valid_id(id)) { + CLS_ERR("dir_add_image_helper: invalid name '%s' or id '%s'", + name.c_str(), id.c_str()); + return -EINVAL; + } + + CLS_LOG(20, "dir_add_image_helper name=%s id=%s", name.c_str(), id.c_str()); + + string tmp; + string name_key = dir_key_for_name(name); + string id_key = dir_key_for_id(id); + int r = read_key(hctx, name_key, &tmp); + if (r != -ENOENT) { + CLS_LOG(10, "name already exists"); + return -EEXIST; + } + r = read_key(hctx, id_key, &tmp); + if (r != -ENOENT && check_for_unique_id) { + CLS_LOG(10, "id already exists"); + return -EBADF; + } + bufferlist id_bl, name_bl; + encode(id, id_bl); + encode(name, name_bl); + map<string, bufferlist> omap_vals; + omap_vals[name_key] = id_bl; + omap_vals[id_key] = name_bl; + return cls_cxx_map_set_vals(hctx, &omap_vals); +} + +static int dir_remove_image_helper(cls_method_context_t hctx, + const string &name, const string &id) +{ + CLS_LOG(20, "dir_remove_image_helper name=%s id=%s", + name.c_str(), id.c_str()); + + string stored_name, stored_id; + string name_key = dir_key_for_name(name); + string id_key = dir_key_for_id(id); + int r = read_key(hctx, name_key, 
+  if (r < 0) {
+    if (r != -ENOENT)
+      CLS_ERR("error reading name to id mapping: %s", cpp_strerror(r).c_str());
+    return r;
+  }
+  r = read_key(hctx, id_key, &stored_name);
+  if (r < 0) {
+    CLS_ERR("error reading id to name mapping: %s", cpp_strerror(r).c_str());
+    return r;
+  }
+
+  // check if this op raced with a rename
+  if (stored_name != name || stored_id != id) {
+    CLS_ERR("stored name '%s' and id '%s' do not match args '%s' and '%s'",
+            stored_name.c_str(), stored_id.c_str(), name.c_str(), id.c_str());
+    return -ESTALE;
+  }
+
+  r = cls_cxx_map_remove_key(hctx, name_key);
+  if (r < 0) {
+    CLS_ERR("error removing name: %s", cpp_strerror(r).c_str());
+    return r;
+  }
+
+  r = cls_cxx_map_remove_key(hctx, id_key);
+  if (r < 0) {
+    CLS_ERR("error removing id: %s", cpp_strerror(r).c_str());
+    return r;
+  }
+
+  return 0;
+}
+
+/**
+ * Rename an image in the directory, updating both indexes
+ * atomically. This can't be done from the client calling
+ * dir_add_image and dir_remove_image in one transaction because the
+ * results of the first method are not visible to later steps.
+ *
+ * Input:
+ * @param src original name of the image
+ * @param dest new name of the image
+ * @param id the id of the image
+ *
+ * Output:
+ * @returns -ESTALE if src and id do not map to each other
+ * @returns -ENOENT if src or id are not in the directory
+ * @returns -EEXIST if dest already exists
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_rename_image(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  string src, dest, id;
+  try {
+    auto iter = in->cbegin();
+    decode(src, iter);
+    decode(dest, iter);
+    decode(id, iter);
+  } catch (const ceph::buffer::error &err) {
+    return -EINVAL;
+  }
+
+  int r = dir_remove_image_helper(hctx, src, id);
+  if (r < 0)
+    return r;
+  // ignore duplicate id because the result of
+  // remove_image_helper is not visible yet
+  return dir_add_image_helper(hctx, dest, id, false);
+}
+
+/**
+ * Get the id of an image given its name.
+ *
+ * Input:
+ * @param name the name of the image
+ *
+ * Output:
+ * @param id the id of the image
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_get_id(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  string name;
+
+  try {
+    auto iter = in->cbegin();
+    decode(name, iter);
+  } catch (const ceph::buffer::error &err) {
+    return -EINVAL;
+  }
+
+  CLS_LOG(20, "dir_get_id: name=%s", name.c_str());
+
+  string id;
+  int r = read_key(hctx, dir_key_for_name(name), &id);
+  if (r < 0) {
+    if (r != -ENOENT)
+      CLS_ERR("error reading id for name '%s': %s", name.c_str(), cpp_strerror(r).c_str());
+    return r;
+  }
+  encode(id, *out);
+  return 0;
+}
+
+/**
+ * Get the name of an image given its id.
+ *
+ * Input:
+ * @param id the id of the image
+ *
+ * Output:
+ * @param name the name of the image
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_get_name(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  string id;
+
+  try {
+    auto iter = in->cbegin();
+    decode(id, iter);
+  } catch (const ceph::buffer::error &err) {
+    return -EINVAL;
+  }
+
+  CLS_LOG(20, "dir_get_name: id=%s", id.c_str());
+
+  string name;
+  int r = read_key(hctx, dir_key_for_id(id), &name);
+  if (r < 0) {
+    if (r != -ENOENT) {
+      CLS_ERR("error reading name for id '%s': %s", id.c_str(),
+              cpp_strerror(r).c_str());
+    }
+    return r;
+  }
+  encode(name, *out);
+  return 0;
+}
+
+/**
+ * List the names and ids of the images in the directory, sorted by
+ * name.
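+ *
+ * Large directories are read in pages: pass the last name of the
+ * previous reply as start_after on the next call. A hypothetical
+ * caller loop (names assumed, not defined here):
+ *
+ *   std::string start_after;
+ *   for (;;) {
+ *     // exec "rbd", "dir_list" with (start_after, max_return) and
+ *     // decode the returned map<name, id>; stop when fewer than
+ *     // max_return entries come back, otherwise set start_after to
+ *     // the last name returned
+ *   }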
+ * + * Input: + * @param start_after which name to begin listing after + * (use the empty string to start at the beginning) + * @param max_return the maximum number of names to list + * + * Output: + * @param images map from name to id of up to max_return images + * @returns 0 on success, negative error code on failure + */ +int dir_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + string start_after; + uint64_t max_return; + + try { + auto iter = in->cbegin(); + decode(start_after, iter); + decode(max_return, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int max_read = RBD_MAX_KEYS_READ; + map<string, string> images; + string last_read = dir_key_for_name(start_after); + bool more = true; + + while (more && images.size() < max_return) { + map<string, bufferlist> vals; + CLS_LOG(20, "last_read = '%s'", last_read.c_str()); + int r = cls_cxx_map_get_vals(hctx, last_read, RBD_DIR_NAME_KEY_PREFIX, + max_read, &vals, &more); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading directory by name: %s", cpp_strerror(r).c_str()); + } + return r; + } + + for (auto it = vals.begin(); it != vals.end(); ++it) { + string id; + auto iter = it->second.cbegin(); + try { + decode(id, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode id of image '%s'", it->first.c_str()); + return -EIO; + } + CLS_LOG(20, "adding '%s' -> '%s'", dir_name_from_key(it->first).c_str(), id.c_str()); + images[dir_name_from_key(it->first)] = id; + if (images.size() >= max_return) + break; + } + if (!vals.empty()) { + last_read = dir_key_for_name(images.rbegin()->first); + } + } + + encode(images, *out); + + return 0; +} + +/** + * Add an image to the rbd directory. Creates the directory object if + * needed, and updates the index from id to name and name to id. + * + * Input: + * @param name the name of the image + * @param id the id of the image + * + * Output: + * @returns -EEXIST if the image name is already in the directory + * @returns -EBADF if the image id is already in the directory + * @returns 0 on success, negative error code on failure + */ +int dir_add_image(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + int r = cls_cxx_create(hctx, false); + if (r < 0) { + CLS_ERR("could not create directory: %s", cpp_strerror(r).c_str()); + return r; + } + + string name, id; + try { + auto iter = in->cbegin(); + decode(name, iter); + decode(id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + return dir_add_image_helper(hctx, name, id, true); +} + +/** + * Remove an image from the rbd directory. 
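+ *
+ * Both the name->id and the id->name index entries are removed; the
+ * helper re-reads both mappings first and fails with -ESTALE if either
+ * one disagrees with the supplied pair (e.g. because a rename raced
+ * with this call), so the index is never left half-updated.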
+ *
+ * Input:
+ * @param name the name of the image
+ * @param id the id of the image
+ *
+ * Output:
+ * @returns -ESTALE if the name and id do not map to each other
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_remove_image(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  string name, id;
+  try {
+    auto iter = in->cbegin();
+    decode(name, iter);
+    decode(id, iter);
+  } catch (const ceph::buffer::error &err) {
+    return -EINVAL;
+  }
+
+  return dir_remove_image_helper(hctx, name, id);
+}
+
+/**
+ * Verify the current state of the directory
+ *
+ * Input:
+ * @param state the DirectoryState of the directory
+ *
+ * Output:
+ * @returns -ENOENT if the state does not match
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_state_assert(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  cls::rbd::DirectoryState directory_state = cls::rbd::DIRECTORY_STATE_READY;
+  try {
+    auto iter = in->cbegin();
+    decode(directory_state, iter);
+  } catch (const ceph::buffer::error &err) {
+    return -EINVAL;
+  }
+
+  cls::rbd::DirectoryState on_disk_directory_state = directory_state;
+  int r = read_key(hctx, "state", &on_disk_directory_state);
+  if (r < 0) {
+    return r;
+  }
+
+  if (directory_state != on_disk_directory_state) {
+    return -ENOENT;
+  }
+  return 0;
+}
+
+/**
+ * Set the current state of the directory
+ *
+ * Input:
+ * @param state the DirectoryState of the directory
+ *
+ * Output:
+ * @returns -ENOENT if adds are being disabled on a non-existent directory
+ * @returns 0 on success, negative error code on failure
+ */
+int dir_state_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  cls::rbd::DirectoryState directory_state;
+  try {
+    auto iter = in->cbegin();
+    decode(directory_state, iter);
+  } catch (const ceph::buffer::error &err) {
+    return -EINVAL;
+  }
+
+  int r = check_exists(hctx);
+  if (r < 0 && r != -ENOENT) {
+    return r;
+  }
+
+  switch (directory_state) {
+  case cls::rbd::DIRECTORY_STATE_READY:
+    break;
+  case cls::rbd::DIRECTORY_STATE_ADD_DISABLED:
+    {
+      if (r == -ENOENT) {
+        return r;
+      }
+
+      // verify that the directory is empty
+      std::map<std::string, bufferlist> vals;
+      bool more;
+      r = cls_cxx_map_get_vals(hctx, RBD_DIR_NAME_KEY_PREFIX,
+                               RBD_DIR_NAME_KEY_PREFIX, 1, &vals, &more);
+      if (r < 0) {
+        return r;
+      } else if (!vals.empty()) {
+        return -EBUSY;
+      }
+    }
+    break;
+  default:
+    return -EINVAL;
+  }
+
+  r = write_key(hctx, "state", directory_state);
+  if (r < 0) {
+    return r;
+  }
+
+  return 0;
+}
+
+int object_map_read(cls_method_context_t hctx, BitVector<2> &object_map)
+{
+  uint64_t size;
+  int r = cls_cxx_stat(hctx, &size, NULL);
+  if (r < 0) {
+    return r;
+  }
+  if (size == 0) {
+    return -ENOENT;
+  }
+
+  bufferlist bl;
+  r = cls_cxx_read(hctx, 0, size, &bl);
+  if (r < 0) {
+    return r;
+  }
+
+  try {
+    auto iter = bl.cbegin();
+    decode(object_map, iter);
+  } catch (const ceph::buffer::error &err) {
+    CLS_ERR("failed to decode object map: %s", err.what());
+    return -EINVAL;
+  }
+  return 0;
+}
+
+/**
+ * Load an rbd image's object map
+ *
+ * Input:
+ * none
+ *
+ * Output:
+ * @param object map bit vector
+ * @returns 0 on success, negative error code on failure
+ */
+int object_map_load(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  BitVector<2> object_map;
+  int r = object_map_read(hctx, object_map);
+  if (r < 0) {
+    return r;
+  }
+
+  object_map.set_crc_enabled(false);
+  encode(object_map, *out);
+  return 0;
+}
+
+/**
+ * Save an rbd image's object map
+ *
+ * Input:
+ * @param object map bit vector
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int object_map_save(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  BitVector<2> object_map;
+  try {
+    auto iter = in->cbegin();
+    decode(object_map, iter);
+  } catch (const ceph::buffer::error &err) {
+    return -EINVAL;
+  }
+
+  object_map.set_crc_enabled(true);
+
+  bufferlist bl;
+  encode(object_map, bl);
+  CLS_LOG(20, "object_map_save: object size=%" PRIu64 ", byte size=%u",
+          object_map.size(), bl.length());
+  return cls_cxx_write_full(hctx, &bl);
+}
+
+/**
+ * Resize an rbd image's object map
+ *
+ * Input:
+ * @param object_count the max number of objects in the image
+ * @param default_state the default state of newly created objects
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int object_map_resize(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  uint64_t object_count;
+  uint8_t default_state;
+  try {
+    auto iter = in->cbegin();
+    decode(object_count, iter);
+    decode(default_state, iter);
+  } catch (const ceph::buffer::error &err) {
+    return -EINVAL;
+  }
+
+  // protect against excessive memory requirements
+  if (object_count > cls::rbd::MAX_OBJECT_MAP_OBJECT_COUNT) {
+    CLS_ERR("object map too large: %" PRIu64, object_count);
+    return -EINVAL;
+  }
+
+  BitVector<2> object_map;
+  int r = object_map_read(hctx, object_map);
+  if ((r < 0) && (r != -ENOENT)) {
+    return r;
+  }
+
+  size_t orig_object_map_size = object_map.size();
+  if (object_count < orig_object_map_size) {
+    auto it = object_map.begin() + object_count;
+    auto end_it = object_map.end();
+    uint64_t i = object_count;
+    for (; it != end_it; ++it, ++i) {
+      if (*it != default_state) {
+        CLS_ERR("object map indicates object still exists: %" PRIu64, i);
+        return -ESTALE;
+      }
+    }
+    object_map.resize(object_count);
+  } else if (object_count > orig_object_map_size) {
+    object_map.resize(object_count);
+    auto it = object_map.begin() + orig_object_map_size;
+    auto end_it = object_map.end();
+    for (; it != end_it; ++it) {
+      *it = default_state;
+    }
+  }
+
+  bufferlist map;
+  encode(object_map, map);
+  CLS_LOG(20, "object_map_resize: object size=%" PRIu64 ", byte size=%u",
+          object_count, map.length());
+  return cls_cxx_write_full(hctx, &map);
+}
+
+/**
+ * Update an rbd image's object map
+ *
+ * Input:
+ * @param start_object_no the start object iterator
+ * @param end_object_no the end object iterator
+ * @param new_object_state the new object state
+ * @param current_object_state optional current object state filter
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int object_map_update(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  uint64_t start_object_no;
+  uint64_t end_object_no;
+  uint8_t new_object_state;
+  boost::optional<uint8_t> current_object_state;
+  try {
+    auto iter = in->cbegin();
+    decode(start_object_no, iter);
+    decode(end_object_no, iter);
+    decode(new_object_state, iter);
+    decode(current_object_state, iter);
+  } catch (const ceph::buffer::error &err) {
+    CLS_ERR("failed to decode message");
+    return -EINVAL;
+  }
+
+  uint64_t size;
+  int r = cls_cxx_stat(hctx, &size, NULL);
+  if (r < 0) {
+    return r;
+  }
+
+  BitVector<2> object_map;
+  bufferlist header_bl;
+  r = cls_cxx_read2(hctx, 0, object_map.get_header_length(), &header_bl,
+                    CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
+  if (r < 0) {
+    CLS_ERR("object map header read failed");
+    return r;
+  }
+
+  try {
+    auto it = header_bl.cbegin();
+    object_map.decode_header(it);
+  } catch (const ceph::buffer::error &err) {
+    CLS_ERR("failed to decode object map header: %s", err.what());
+    return -EINVAL;
+  }
+
+  uint64_t object_byte_offset;
+  uint64_t byte_length;
+  object_map.get_header_crc_extents(&object_byte_offset, &byte_length);
+
+  bufferlist footer_bl;
+  r = cls_cxx_read2(hctx, object_byte_offset, byte_length, &footer_bl,
+                    CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
+  if (r < 0) {
+    CLS_ERR("object map header CRC read failed");
+    return r;
+  }
+
+  try {
+    auto it = footer_bl.cbegin();
+    object_map.decode_header_crc(it);
+  } catch (const ceph::buffer::error &err) {
+    CLS_ERR("failed to decode object map header CRC: %s", err.what());
+  }
+
+  if (start_object_no >= end_object_no || end_object_no > object_map.size()) {
+    return -ERANGE;
+  }
+
+  uint64_t object_count = end_object_no - start_object_no;
+  object_map.get_data_crcs_extents(start_object_no, object_count,
+                                   &object_byte_offset, &byte_length);
+  const auto footer_object_offset = object_byte_offset;
+
+  footer_bl.clear();
+  r = cls_cxx_read2(hctx, object_byte_offset, byte_length, &footer_bl,
+                    CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
+  if (r < 0) {
+    CLS_ERR("object map data CRCs read failed");
+    return r;
+  }
+
+  try {
+    auto it = footer_bl.cbegin();
+    object_map.decode_data_crcs(it, start_object_no);
+  } catch (const ceph::buffer::error &err) {
+    CLS_ERR("failed to decode object map data CRCs: %s", err.what());
+  }
+
+  uint64_t data_byte_offset;
+  object_map.get_data_extents(start_object_no, object_count,
+                              &data_byte_offset, &object_byte_offset,
+                              &byte_length);
+
+  bufferlist data_bl;
+  r = cls_cxx_read2(hctx, object_byte_offset, byte_length, &data_bl,
+                    CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
+  if (r < 0) {
+    CLS_ERR("object map data read failed");
+    return r;
+  }
+
+  try {
+    auto it = data_bl.cbegin();
+    object_map.decode_data(it, data_byte_offset);
+  } catch (const ceph::buffer::error &err) {
+    CLS_ERR("failed to decode data chunk [%" PRIu64 "]: %s",
+            data_byte_offset, err.what());
+    return -EINVAL;
+  }
+
+  bool updated = false;
+  auto it = object_map.begin() + start_object_no;
+  auto end_it = object_map.begin() + end_object_no;
+  for (; it != end_it; ++it) {
+    uint8_t state = *it;
+    if ((!current_object_state || state == *current_object_state ||
+         (*current_object_state == OBJECT_EXISTS &&
+          state == OBJECT_EXISTS_CLEAN)) && state != new_object_state) {
+      *it = new_object_state;
+      updated = true;
+    }
+  }
+
+  if (updated) {
+    CLS_LOG(20, "object_map_update: %" PRIu64 "~%" PRIu64 " -> %" PRIu64,
+            data_byte_offset, byte_length, object_byte_offset);
+
+    bufferlist data_bl;
+    object_map.encode_data(data_bl, data_byte_offset, byte_length);
+    r = cls_cxx_write2(hctx, object_byte_offset, data_bl.length(), &data_bl,
+                       CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
+    if (r < 0) {
+      CLS_ERR("failed to write object map data: %s", cpp_strerror(r).c_str());
+      return r;
+    }
+
+    footer_bl.clear();
+    object_map.encode_data_crcs(footer_bl, start_object_no, object_count);
+    r = cls_cxx_write2(hctx, footer_object_offset, footer_bl.length(),
+                       &footer_bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
+    if (r < 0) {
+      CLS_ERR("failed to write object map footer: %s", cpp_strerror(r).c_str());
+      return r;
+    }
+  } else {
+    CLS_LOG(20, "object_map_update: no update necessary");
+  }
+
+  return 0;
+}
+
+/**
+ * Mark all _EXISTS objects as _EXISTS_CLEAN so future writes to the
+ * image HEAD can be tracked.
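+ *
+ * For example, a map of [OBJECT_EXISTS, OBJECT_NONEXISTENT,
+ * OBJECT_EXISTS_CLEAN] becomes [OBJECT_EXISTS_CLEAN,
+ * OBJECT_NONEXISTENT, OBJECT_EXISTS_CLEAN]: only _EXISTS entries
+ * change, so the next write to object 0 flips it back to
+ * OBJECT_EXISTS, recording that it was modified after the snapshot.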
+ *
+ * Input:
+ * none
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int object_map_snap_add(cls_method_context_t hctx, bufferlist *in,
+                        bufferlist *out)
+{
+  BitVector<2> object_map;
+  int r = object_map_read(hctx, object_map);
+  if (r < 0) {
+    return r;
+  }
+
+  bool updated = false;
+  auto it = object_map.begin();
+  auto end_it = object_map.end();
+  for (; it != end_it; ++it) {
+    if (*it == OBJECT_EXISTS) {
+      *it = OBJECT_EXISTS_CLEAN;
+      updated = true;
+    }
+  }
+
+  if (updated) {
+    bufferlist bl;
+    encode(object_map, bl);
+    r = cls_cxx_write_full(hctx, &bl);
+  }
+  return r;
+}
+
+/**
+ * Mark all _EXISTS_CLEAN objects as _EXISTS in the current object map
+ * if the provided snapshot object map object is marked as _EXISTS.
+ *
+ * Input:
+ * @param snapshot object map bit vector
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int object_map_snap_remove(cls_method_context_t hctx, bufferlist *in,
+                           bufferlist *out)
+{
+  BitVector<2> src_object_map;
+  try {
+    auto iter = in->cbegin();
+    decode(src_object_map, iter);
+  } catch (const ceph::buffer::error &err) {
+    return -EINVAL;
+  }
+
+  BitVector<2> dst_object_map;
+  int r = object_map_read(hctx, dst_object_map);
+  if (r < 0) {
+    return r;
+  }
+
+  bool updated = false;
+  auto src_it = src_object_map.begin();
+  auto dst_it = dst_object_map.begin();
+  auto dst_it_end = dst_object_map.end();
+  uint64_t i = 0;
+  for (; dst_it != dst_it_end; ++dst_it) {
+    if (*dst_it == OBJECT_EXISTS_CLEAN &&
+        (i >= src_object_map.size() || *src_it == OBJECT_EXISTS)) {
+      *dst_it = OBJECT_EXISTS;
+      updated = true;
+    }
+    if (i < src_object_map.size())
+      ++src_it;
+    ++i;
+  }
+
+  if (updated) {
+    bufferlist bl;
+    encode(dst_object_map, bl);
+    r = cls_cxx_write_full(hctx, &bl);
+  }
+  return r;
+}
+
+static const string metadata_key_for_name(const string &name)
+{
+  return RBD_METADATA_KEY_PREFIX + name;
+}
+
+static const string metadata_name_from_key(const string &key)
+{
+  return key.substr(strlen(RBD_METADATA_KEY_PREFIX));
+}
+
+/**
+ * Input:
+ * @param start_after which name to begin listing after
+ *        (use the empty string to start at the beginning)
+ * @param max_return the maximum number of names to list
+ *
+ * Output:
+ * @param data map of metadata keys to values
+ * @returns 0 on success, negative error code on failure
+ */
+int metadata_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  string start_after;
+  uint64_t max_return;
+
+  try {
+    auto iter = in->cbegin();
+    decode(start_after, iter);
+    decode(max_return, iter);
+  } catch (const ceph::buffer::error &err) {
+    return -EINVAL;
+  }
+
+  // TODO remove implicit support for zero during the N-release
+  if (max_return == 0) {
+    max_return = RBD_MAX_KEYS_READ;
+  }
+
+  map<string, bufferlist> data;
+  string last_read = metadata_key_for_name(start_after);
+  bool more = true;
+
+  while (more && data.size() < max_return) {
+    map<string, bufferlist> raw_data;
+    int max_read = std::min<uint64_t>(RBD_MAX_KEYS_READ, max_return - data.size());
+    int r = cls_cxx_map_get_vals(hctx, last_read, RBD_METADATA_KEY_PREFIX,
+                                 max_read, &raw_data, &more);
+    if (r < 0) {
+      if (r != -ENOENT) {
+        CLS_ERR("failed to read the vals off of disk: %s",
+                cpp_strerror(r).c_str());
+      }
+      return r;
+    }
+
+    for (auto& kv : raw_data) {
+      data[metadata_name_from_key(kv.first)].swap(kv.second);
+    }
+
+    if (!raw_data.empty()) {
+      last_read = raw_data.rbegin()->first;
+    }
+  }
+
+  encode(data, *out);
+  return 0;
+}
+
+/**
+ * Input:
+ * @param data <map(key, value)>
+ *
+ * Output:
+ *
@returns 0 on success, negative error code on failure + */ +int metadata_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + map<string, bufferlist> data, raw_data; + + auto iter = in->cbegin(); + try { + decode(data, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + for (auto it = data.begin(); it != data.end(); ++it) { + CLS_LOG(20, "metadata_set key=%s value=%.*s", it->first.c_str(), + it->second.length(), it->second.c_str()); + raw_data[metadata_key_for_name(it->first)].swap(it->second); + } + int r = cls_cxx_map_set_vals(hctx, &raw_data); + if (r < 0) { + CLS_ERR("error writing metadata: %s", cpp_strerror(r).c_str()); + return r; + } + + return 0; +} + +/** + * Input: + * @param key + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int metadata_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + string key; + + auto iter = in->cbegin(); + try { + decode(key, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "metadata_remove key=%s", key.c_str()); + + int r = cls_cxx_map_remove_key(hctx, metadata_key_for_name(key)); + if (r < 0) { + CLS_ERR("error removing metadata: %s", cpp_strerror(r).c_str()); + return r; + } + + return 0; +} + +/** + * Input: + * @param key + * + * Output: + * @param metadata value associated with the key + * @returns 0 on success, negative error code on failure + */ +int metadata_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + string key; + bufferlist value; + + auto iter = in->cbegin(); + try { + decode(key, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "metadata_get key=%s", key.c_str()); + + int r = cls_cxx_map_get_val(hctx, metadata_key_for_name(key), &value); + if (r < 0) { + if (r != -ENOENT) + CLS_ERR("error getting metadata: %s", cpp_strerror(r).c_str()); + return r; + } + + encode(value, *out); + return 0; +} + +int snapshot_get_limit(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) +{ + uint64_t snap_limit; + int r = read_key(hctx, "snap_limit", &snap_limit); + if (r == -ENOENT) { + snap_limit = UINT64_MAX; + } else if (r < 0) { + CLS_ERR("error retrieving snapshot limit: %s", cpp_strerror(r).c_str()); + return r; + } + + CLS_LOG(20, "read snapshot limit %" PRIu64, snap_limit); + encode(snap_limit, *out); + + return 0; +} + +int snapshot_set_limit(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) +{ + int rc; + uint64_t new_limit; + bufferlist bl; + size_t snap_count = 0; + + try { + auto iter = in->cbegin(); + decode(new_limit, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + if (new_limit == UINT64_MAX) { + CLS_LOG(20, "remove snapshot limit\n"); + rc = cls_cxx_map_remove_key(hctx, "snap_limit"); + return rc; + } + + //try to read header as v1 format + rc = snap_read_header(hctx, bl); + + // error when reading header + if (rc < 0 && rc != -EINVAL) { + return rc; + } else if (rc >= 0) { + // success, the image is v1 format + struct rbd_obj_header_ondisk *header; + header = (struct rbd_obj_header_ondisk *)bl.c_str(); + snap_count = header->snap_count; + } else { + // else, the image is v2 format + int max_read = RBD_MAX_KEYS_READ; + string last_read = RBD_SNAP_KEY_PREFIX; + bool more; + + do { + set<string> keys; + rc = cls_cxx_map_get_keys(hctx, last_read, max_read, &keys, &more); + if (rc < 0) { + CLS_ERR("error retrieving snapshots: %s", cpp_strerror(rc).c_str()); + return rc; + } + for (auto& key : 
keys) { + if (key.find(RBD_SNAP_KEY_PREFIX) != 0) + break; + snap_count++; + } + if (!keys.empty()) + last_read = *(keys.rbegin()); + } while (more); + } + + if (new_limit < snap_count) { + rc = -ERANGE; + CLS_LOG(10, "snapshot limit is less than the number of snapshots.\n"); + } else { + CLS_LOG(20, "set snapshot limit to %" PRIu64 "\n", new_limit); + bl.clear(); + encode(new_limit, bl); + rc = cls_cxx_map_set_val(hctx, "snap_limit", &bl); + } + + return rc; +} + + +/** + * Input: + * @param snap id (uint64_t) parent snapshot id + * @param child spec (cls::rbd::ChildImageSpec) child image + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int child_attach(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t snap_id; + cls::rbd::ChildImageSpec child_image; + try { + auto it = in->cbegin(); + decode(snap_id, it); + decode(child_image, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "child_attach snap_id=%" PRIu64 ", child_pool_id=%" PRIi64 ", " + "child_image_id=%s", snap_id, child_image.pool_id, + child_image.image_id.c_str()); + + cls_rbd_snap snap; + std::string snapshot_key; + key_from_snap_id(snap_id, &snapshot_key); + int r = read_key(hctx, snapshot_key, &snap); + if (r < 0) { + return r; + } + + if (cls::rbd::get_snap_namespace_type(snap.snapshot_namespace) == + cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) { + // cannot attach to a deleted snapshot + return -ENOENT; + } + + auto children_key = image::snap_children_key_from_snap_id(snap_id); + cls::rbd::ChildImageSpecs child_images; + r = read_key(hctx, children_key, &child_images); + if (r < 0 && r != -ENOENT) { + CLS_ERR("error reading snapshot children: %s", cpp_strerror(r).c_str()); + return r; + } + + auto it = child_images.insert(child_image); + if (!it.second) { + // child already attached to the snapshot + return -EEXIST; + } + + r = write_key(hctx, children_key, child_images); + if (r < 0) { + CLS_ERR("error writing snapshot children: %s", cpp_strerror(r).c_str()); + return r; + } + + ++snap.child_count; + r = image::snapshot::write(hctx, snapshot_key, std::move(snap)); + if (r < 0) { + return r; + } + + r = image::set_op_features(hctx, RBD_OPERATION_FEATURE_CLONE_PARENT, + RBD_OPERATION_FEATURE_CLONE_PARENT); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Input: + * @param snap id (uint64_t) parent snapshot id + * @param child spec (cls::rbd::ChildImageSpec) child image + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int child_detach(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t snap_id; + cls::rbd::ChildImageSpec child_image; + try { + auto it = in->cbegin(); + decode(snap_id, it); + decode(child_image, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "child_detach snap_id=%" PRIu64 ", child_pool_id=%" PRIi64 ", " + "child_image_id=%s", snap_id, child_image.pool_id, + child_image.image_id.c_str()); + + cls_rbd_snap snap; + std::string snapshot_key; + key_from_snap_id(snap_id, &snapshot_key); + int r = read_key(hctx, snapshot_key, &snap); + if (r < 0) { + return r; + } + + auto children_key = image::snap_children_key_from_snap_id(snap_id); + cls::rbd::ChildImageSpecs child_images; + r = read_key(hctx, children_key, &child_images); + if (r < 0 && r != -ENOENT) { + CLS_ERR("error reading snapshot children: %s", cpp_strerror(r).c_str()); + return r; + } + + if (snap.child_count != child_images.size()) { + // children and reference 
count don't match + CLS_ERR("children reference count mismatch: %" PRIu64, snap_id); + return -EINVAL; + } + + if (child_images.erase(child_image) == 0) { + // child not attached to the snapshot + return -ENOENT; + } + + if (child_images.empty()) { + r = remove_key(hctx, children_key); + } else { + r = write_key(hctx, children_key, child_images); + if (r < 0) { + CLS_ERR("error writing snapshot children: %s", cpp_strerror(r).c_str()); + return r; + } + } + + --snap.child_count; + r = image::snapshot::write(hctx, snapshot_key, std::move(snap)); + if (r < 0) { + return r; + } + + if (snap.child_count == 0) { + auto clone_in_use_lambda = [snap_id](const cls_rbd_snap& snap_meta) { + if (snap_meta.id != snap_id && snap_meta.child_count > 0) { + return -EEXIST; + } + return 0; + }; + + r = image::snapshot::iterate(hctx, clone_in_use_lambda); + if (r < 0 && r != -EEXIST) { + return r; + } + + if (r != -EEXIST) { + // remove the clone_v2 op feature if not in-use by any other snapshots + r = image::set_op_features(hctx, 0, RBD_OPERATION_FEATURE_CLONE_PARENT); + if (r < 0) { + return r; + } + } + } + + return 0; +} + +/** + * Input: + * @param snap id (uint64_t) parent snapshot id + * + * Output: + * @param (cls::rbd::ChildImageSpecs) child images + * @returns 0 on success, negative error code on failure + */ +int children_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t snap_id; + try { + auto it = in->cbegin(); + decode(snap_id, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + CLS_LOG(20, "children_list snap_id=%" PRIu64, snap_id); + + cls_rbd_snap snap; + std::string snapshot_key; + key_from_snap_id(snap_id, &snapshot_key); + int r = read_key(hctx, snapshot_key, &snap); + if (r < 0) { + return r; + } + + auto children_key = image::snap_children_key_from_snap_id(snap_id); + cls::rbd::ChildImageSpecs child_images; + r = read_key(hctx, children_key, &child_images); + if (r == -ENOENT) { + return r; + } else if (r < 0) { + CLS_ERR("error reading snapshot children: %s", cpp_strerror(r).c_str()); + return r; + } + + encode(child_images, *out); + return 0; +} + +/** + * Set image migration. + * + * Input: + * @param migration_spec (cls::rbd::MigrationSpec) image migration spec + * + * Output: + * + * @returns 0 on success, negative error code on failure + */ +int migration_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + cls::rbd::MigrationSpec migration_spec; + try { + auto it = in->cbegin(); + decode(migration_spec, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int r = image::set_migration(hctx, migration_spec, true); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Set image migration state. 
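+ *
+ * Only the state and state_description fields of a previously stored
+ * migration spec are replaced; if no spec was recorded beforehand via
+ * migration_set, the call fails with the error from reading the spec.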
+ * + * Input: + * @param state (cls::rbd::MigrationState) migration state + * @param description (std::string) migration state description + * + * Output: + * + * @returns 0 on success, negative error code on failure + */ +int migration_set_state(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + cls::rbd::MigrationState state; + std::string description; + try { + auto it = in->cbegin(); + decode(state, it); + decode(description, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + cls::rbd::MigrationSpec migration_spec; + int r = image::read_migration(hctx, &migration_spec); + if (r < 0) { + return r; + } + + migration_spec.state = state; + migration_spec.state_description = description; + + r = image::set_migration(hctx, migration_spec, false); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Get image migration spec. + * + * Input: + * + * Output: + * @param migration_spec (cls::rbd::MigrationSpec) image migration spec + * + * @returns 0 on success, negative error code on failure + */ +int migration_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + cls::rbd::MigrationSpec migration_spec; + int r = image::read_migration(hctx, &migration_spec); + if (r < 0) { + return r; + } + + encode(migration_spec, *out); + + return 0; +} + +/** + * Remove image migration spec. + * + * Input: + * + * Output: + * + * @returns 0 on success, negative error code on failure + */ +int migration_remove(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + int r = image::remove_migration(hctx); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Ensure writer snapc state + * + * Input: + * @param snap id (uint64_t) snap context sequence id + * @param state (cls::rbd::AssertSnapcSeqState) snap context state + * + * Output: + * @returns -ERANGE if assertion fails + * @returns 0 on success, negative error code on failure + */ +int assert_snapc_seq(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) +{ + uint64_t snapc_seq; + cls::rbd::AssertSnapcSeqState state; + try { + auto it = in->cbegin(); + decode(snapc_seq, it); + decode(state, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + uint64_t snapset_seq; + int r = cls_get_snapset_seq(hctx, &snapset_seq); + if (r < 0 && r != -ENOENT) { + return r; + } + + switch (state) { + case cls::rbd::ASSERT_SNAPC_SEQ_GT_SNAPSET_SEQ: + return (r == -ENOENT || snapc_seq > snapset_seq) ? 0 : -ERANGE; + case cls::rbd::ASSERT_SNAPC_SEQ_LE_SNAPSET_SEQ: + return (r == -ENOENT || snapc_seq > snapset_seq) ? 
-ERANGE : 0; + default: + return -EOPNOTSUPP; + } +} + +/****************************** Old format *******************************/ + +int old_snapshots_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + bufferlist bl; + struct rbd_obj_header_ondisk *header; + int rc = snap_read_header(hctx, bl); + if (rc < 0) + return rc; + + header = (struct rbd_obj_header_ondisk *)bl.c_str(); + bufferptr p(header->snap_names_len); + char *buf = (char *)header; + char *name = buf + sizeof(*header) + header->snap_count * sizeof(struct rbd_obj_snap_ondisk); + char *end = name + header->snap_names_len; + memcpy(p.c_str(), + buf + sizeof(*header) + header->snap_count * sizeof(struct rbd_obj_snap_ondisk), + header->snap_names_len); + + encode(header->snap_seq, *out); + encode(header->snap_count, *out); + + for (unsigned i = 0; i < header->snap_count; i++) { + string s = name; + encode(header->snaps[i].id, *out); + encode(header->snaps[i].image_size, *out); + encode(s, *out); + + name += strlen(name) + 1; + if (name > end) + return -EIO; + } + + return 0; +} + +int old_snapshot_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + bufferlist bl; + struct rbd_obj_header_ondisk *header; + bufferlist newbl; + bufferptr header_bp(sizeof(*header)); + struct rbd_obj_snap_ondisk *new_snaps; + + int rc = snap_read_header(hctx, bl); + if (rc < 0) + return rc; + + header = (struct rbd_obj_header_ondisk *)bl.c_str(); + + int snaps_id_ofs = sizeof(*header); + int names_ofs = snaps_id_ofs + sizeof(*new_snaps) * header->snap_count; + const char *snap_name; + const char *snap_names = ((char *)header) + names_ofs; + const char *end = snap_names + header->snap_names_len; + auto iter = in->cbegin(); + string s; + uint64_t snap_id; + + try { + decode(s, iter); + decode(snap_id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + snap_name = s.c_str(); + + if (header->snap_seq > snap_id) + return -ESTALE; + + uint64_t snap_limit; + rc = read_key(hctx, "snap_limit", &snap_limit); + if (rc == -ENOENT) { + snap_limit = UINT64_MAX; + } else if (rc < 0) { + return rc; + } + + if (header->snap_count >= snap_limit) + return -EDQUOT; + + const char *cur_snap_name; + for (cur_snap_name = snap_names; cur_snap_name < end; cur_snap_name += strlen(cur_snap_name) + 1) { + if (strncmp(cur_snap_name, snap_name, end - cur_snap_name) == 0) + return -EEXIST; + } + if (cur_snap_name > end) + return -EIO; + + int snap_name_len = strlen(snap_name); + + bufferptr new_names_bp(header->snap_names_len + snap_name_len + 1); + bufferptr new_snaps_bp(sizeof(*new_snaps) * (header->snap_count + 1)); + + /* copy snap names and append to new snap name */ + char *new_snap_names = new_names_bp.c_str(); + strcpy(new_snap_names, snap_name); + memcpy(new_snap_names + snap_name_len + 1, snap_names, header->snap_names_len); + + /* append new snap id */ + new_snaps = (struct rbd_obj_snap_ondisk *)new_snaps_bp.c_str(); + memcpy(new_snaps + 1, header->snaps, sizeof(*new_snaps) * header->snap_count); + + header->snap_count = header->snap_count + 1; + header->snap_names_len = header->snap_names_len + snap_name_len + 1; + header->snap_seq = snap_id; + + new_snaps[0].id = snap_id; + new_snaps[0].image_size = header->image_size; + + memcpy(header_bp.c_str(), header, sizeof(*header)); + + newbl.push_back(header_bp); + newbl.push_back(new_snaps_bp); + newbl.push_back(new_names_bp); + + rc = cls_cxx_write_full(hctx, &newbl); + if (rc < 0) + return rc; + + return 0; +} + +int old_snapshot_remove(cls_method_context_t hctx, 
bufferlist *in, bufferlist *out) +{ + bufferlist bl; + struct rbd_obj_header_ondisk *header; + bufferlist newbl; + bufferptr header_bp(sizeof(*header)); + + int rc = snap_read_header(hctx, bl); + if (rc < 0) + return rc; + + header = (struct rbd_obj_header_ondisk *)bl.c_str(); + + int snaps_id_ofs = sizeof(*header); + int names_ofs = snaps_id_ofs + sizeof(struct rbd_obj_snap_ondisk) * header->snap_count; + const char *snap_name; + const char *snap_names = ((char *)header) + names_ofs; + const char *orig_names = snap_names; + const char *end = snap_names + header->snap_names_len; + auto iter = in->cbegin(); + string s; + unsigned i; + bool found = false; + struct rbd_obj_snap_ondisk snap; + + try { + decode(s, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + snap_name = s.c_str(); + + for (i = 0; snap_names < end; i++) { + if (strcmp(snap_names, snap_name) == 0) { + snap = header->snaps[i]; + found = true; + break; + } + snap_names += strlen(snap_names) + 1; + } + if (!found) { + CLS_ERR("couldn't find snap %s\n", snap_name); + return -ENOENT; + } + + header->snap_names_len = header->snap_names_len - (s.length() + 1); + header->snap_count = header->snap_count - 1; + + bufferptr new_names_bp(header->snap_names_len); + bufferptr new_snaps_bp(sizeof(header->snaps[0]) * header->snap_count); + + memcpy(header_bp.c_str(), header, sizeof(*header)); + newbl.push_back(header_bp); + + if (header->snap_count) { + int snaps_len = 0; + int names_len = 0; + CLS_LOG(20, "i=%u\n", i); + if (i > 0) { + snaps_len = sizeof(header->snaps[0]) * i; + names_len = snap_names - orig_names; + memcpy(new_snaps_bp.c_str(), header->snaps, snaps_len); + memcpy(new_names_bp.c_str(), orig_names, names_len); + } + snap_names += s.length() + 1; + + if (i < header->snap_count) { + memcpy(new_snaps_bp.c_str() + snaps_len, + header->snaps + i + 1, + sizeof(header->snaps[0]) * (header->snap_count - i)); + memcpy(new_names_bp.c_str() + names_len, snap_names , end - snap_names); + } + newbl.push_back(new_snaps_bp); + newbl.push_back(new_names_bp); + } + + rc = cls_cxx_write_full(hctx, &newbl); + if (rc < 0) + return rc; + + return 0; +} + +/** + * rename snapshot of old format. + * + * Input: + * @param src_snap_id old snap id of the snapshot (snapid_t) + * @param dst_snap_name new name of the snapshot (string) + * + * Output: + * @returns 0 on success, negative error code on failure. 
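+ *
+ * The v1 header stores snapshot names as one contiguous run of
+ * NUL-terminated strings, so a rename rewrites the name region; e.g.
+ * renaming "b" to "bb" in "a\0b\0c\0" yields "a\0bb\0c\0" and grows
+ * snap_names_len by one byte.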
+*/ +int old_snapshot_rename(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + bufferlist bl; + struct rbd_obj_header_ondisk *header; + bufferlist newbl; + bufferptr header_bp(sizeof(*header)); + snapid_t src_snap_id; + const char *dst_snap_name; + string dst; + + int rc = snap_read_header(hctx, bl); + if (rc < 0) + return rc; + + header = (struct rbd_obj_header_ondisk *)bl.c_str(); + + int snaps_id_ofs = sizeof(*header); + int names_ofs = snaps_id_ofs + sizeof(rbd_obj_snap_ondisk) * header->snap_count; + const char *snap_names = ((char *)header) + names_ofs; + const char *orig_names = snap_names; + const char *end = snap_names + header->snap_names_len; + auto iter = in->cbegin(); + unsigned i; + bool found = false; + + try { + decode(src_snap_id, iter); + decode(dst, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + dst_snap_name = dst.c_str(); + + const char *cur_snap_name; + for (cur_snap_name = snap_names; cur_snap_name < end; + cur_snap_name += strlen(cur_snap_name) + 1) { + if (strcmp(cur_snap_name, dst_snap_name) == 0) + return -EEXIST; + } + if (cur_snap_name > end) + return -EIO; + for (i = 0; i < header->snap_count; i++) { + if (src_snap_id == header->snaps[i].id) { + found = true; + break; + } + snap_names += strlen(snap_names) + 1; + } + if (!found) { + CLS_ERR("couldn't find snap %llu\n", (unsigned long long)src_snap_id.val); + return -ENOENT; + } + + CLS_LOG(20, "rename snap with snap id %llu to dest name %s", (unsigned long long)src_snap_id.val, dst_snap_name); + header->snap_names_len = header->snap_names_len - strlen(snap_names) + dst.length(); + + bufferptr new_names_bp(header->snap_names_len); + bufferptr new_snaps_bp(sizeof(header->snaps[0]) * header->snap_count); + + if (header->snap_count) { + int names_len = 0; + CLS_LOG(20, "i=%u\n", i); + if (i > 0) { + names_len = snap_names - orig_names; + memcpy(new_names_bp.c_str(), orig_names, names_len); + } + strcpy(new_names_bp.c_str() + names_len, dst_snap_name); + names_len += strlen(dst_snap_name) + 1; + snap_names += strlen(snap_names) + 1; + if (i < header->snap_count) { + memcpy(new_names_bp.c_str() + names_len, snap_names , end - snap_names); + } + memcpy(new_snaps_bp.c_str(), header->snaps, sizeof(header->snaps[0]) * header->snap_count); + } + + memcpy(header_bp.c_str(), header, sizeof(*header)); + newbl.push_back(header_bp); + newbl.push_back(new_snaps_bp); + newbl.push_back(new_names_bp); + + rc = cls_cxx_write_full(hctx, &newbl); + if (rc < 0) + return rc; + return 0; +} + + +namespace mirror { + +static const std::string UUID("mirror_uuid"); +static const std::string MODE("mirror_mode"); +static const std::string PEER_KEY_PREFIX("mirror_peer_"); +static const std::string IMAGE_KEY_PREFIX("image_"); +static const std::string GLOBAL_KEY_PREFIX("global_"); +static const std::string STATUS_GLOBAL_KEY_PREFIX("status_global_"); +static const std::string REMOTE_STATUS_GLOBAL_KEY_PREFIX("remote_status_global_"); +static const std::string INSTANCE_KEY_PREFIX("instance_"); +static const std::string MIRROR_IMAGE_MAP_KEY_PREFIX("image_map_"); + +std::string peer_key(const std::string &uuid) { + return PEER_KEY_PREFIX + uuid; +} + +std::string image_key(const string &image_id) { + return IMAGE_KEY_PREFIX + image_id; +} + +std::string global_key(const string &global_id) { + return GLOBAL_KEY_PREFIX + global_id; +} + +std::string remote_status_global_key(const std::string& global_id, + const std::string& mirror_uuid) { + return REMOTE_STATUS_GLOBAL_KEY_PREFIX + global_id + "_" + 
mirror_uuid; +} + +std::string status_global_key(const std::string& global_id, + const std::string& mirror_uuid) { + if (mirror_uuid == cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID) { + return STATUS_GLOBAL_KEY_PREFIX + global_id; + } else { + return remote_status_global_key(global_id, mirror_uuid); + } +} + +std::string instance_key(const string &instance_id) { + return INSTANCE_KEY_PREFIX + instance_id; +} + +std::string mirror_image_map_key(const string& global_image_id) { + return MIRROR_IMAGE_MAP_KEY_PREFIX + global_image_id; +} + +int uuid_get(cls_method_context_t hctx, std::string *mirror_uuid) { + bufferlist mirror_uuid_bl; + int r = cls_cxx_map_get_val(hctx, mirror::UUID, &mirror_uuid_bl); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading mirror uuid: %s", cpp_strerror(r).c_str()); + } + return r; + } + + *mirror_uuid = std::string(mirror_uuid_bl.c_str(), mirror_uuid_bl.length()); + return 0; +} + +int list_watchers(cls_method_context_t hctx, + std::set<entity_inst_t> *entities) { + obj_list_watch_response_t watchers; + int r = cls_cxx_list_watchers(hctx, &watchers); + if (r < 0 && r != -ENOENT) { + CLS_ERR("error listing watchers: '%s'", cpp_strerror(r).c_str()); + return r; + } + + entities->clear(); + for (auto &w : watchers.entries) { + entity_inst_t entity_inst{w.name, w.addr}; + cls::rbd::sanitize_entity_inst(&entity_inst); + + entities->insert(entity_inst); + } + return 0; +} + +int read_peers(cls_method_context_t hctx, + std::vector<cls::rbd::MirrorPeer> *peers) { + std::string last_read = PEER_KEY_PREFIX; + int max_read = RBD_MAX_KEYS_READ; + bool more = true; + while (more) { + std::map<std::string, bufferlist> vals; + int r = cls_cxx_map_get_vals(hctx, last_read, PEER_KEY_PREFIX.c_str(), + max_read, &vals, &more); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading peers: %s", cpp_strerror(r).c_str()); + } + return r; + } + + for (auto &it : vals) { + try { + auto bl_it = it.second.cbegin(); + cls::rbd::MirrorPeer peer; + decode(peer, bl_it); + peers->push_back(peer); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode peer '%s'", it.first.c_str()); + return -EIO; + } + } + + if (!vals.empty()) { + last_read = vals.rbegin()->first; + } + } + return 0; +} + +int read_peer(cls_method_context_t hctx, const std::string &id, + cls::rbd::MirrorPeer *peer) { + bufferlist bl; + int r = cls_cxx_map_get_val(hctx, peer_key(id), &bl); + if (r < 0) { + CLS_ERR("error reading peer '%s': %s", id.c_str(), + cpp_strerror(r).c_str()); + return r; + } + + try { + auto bl_it = bl.cbegin(); + decode(*peer, bl_it); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode peer '%s'", id.c_str()); + return -EIO; + } + return 0; +} + +int write_peer(cls_method_context_t hctx, const cls::rbd::MirrorPeer &peer) { + bufferlist bl; + encode(peer, bl); + + int r = cls_cxx_map_set_val(hctx, peer_key(peer.uuid), &bl); + if (r < 0) { + CLS_ERR("error writing peer '%s': %s", peer.uuid.c_str(), + cpp_strerror(r).c_str()); + return r; + } + return 0; +} + +int check_mirroring_enabled(cls_method_context_t hctx) { + uint32_t mirror_mode_decode; + int r = read_key(hctx, mirror::MODE, &mirror_mode_decode); + if (r < 0 && r != -ENOENT) { + return r; + } else if (r == -ENOENT || + mirror_mode_decode == cls::rbd::MIRROR_MODE_DISABLED) { + CLS_ERR("mirroring must be enabled on the pool"); + return -EINVAL; + } + + return 0; +} + +int peer_ping(cls_method_context_t hctx, const std::string& site_name, + const std::string& mirror_uuid) { + int r = 
check_mirroring_enabled(hctx); + if (r < 0) { + return r; + } + + if (site_name.empty() || mirror_uuid.empty()) { + return -EINVAL; + } + + std::vector<cls::rbd::MirrorPeer> peers; + r = read_peers(hctx, &peers); + if (r < 0 && r != -ENOENT) { + return r; + } + + + cls::rbd::MirrorPeer mirror_peer; + auto site_it = std::find_if(peers.begin(), peers.end(), + [&site_name](auto& peer) { + return (peer.site_name == site_name); + }); + + auto mirror_uuid_it = peers.end(); + if (site_it == peers.end() || + (!site_it->mirror_uuid.empty() && site_it->mirror_uuid != mirror_uuid)) { + // search for existing peer w/ same mirror_uuid + mirror_uuid_it = std::find_if(peers.begin(), peers.end(), + [&mirror_uuid](auto& peer) { + return (peer.mirror_uuid == mirror_uuid); + }); + } + + auto it = peers.end(); + if (site_it != peers.end() && mirror_uuid_it != peers.end()) { + // implies two peers -- match by mirror_uuid but don't update site name + it = mirror_uuid_it; + } else if (mirror_uuid_it != peers.end()) { + // implies site name has been updated in remote + mirror_uuid_it->site_name = site_name; + it = mirror_uuid_it; + } else if (site_it != peers.end()) { + // implies empty mirror_uuid in peer + site_it->mirror_uuid = mirror_uuid; + it = site_it; + } else { + CLS_LOG(10, "auto-generating new TX-only peer: %s", site_name.c_str()); + + uuid_d uuid_gen; + while (true) { + uuid_gen.generate_random(); + mirror_peer.uuid = uuid_gen.to_string(); + + bufferlist bl; + r = cls_cxx_map_get_val(hctx, peer_key(mirror_peer.uuid), &bl); + if (r == -ENOENT) { + break; + } else if (r < 0) { + CLS_ERR("failed to retrieve mirror peer: %s", cpp_strerror(r).c_str()); + return r; + } + } + + mirror_peer.mirror_peer_direction = cls::rbd::MIRROR_PEER_DIRECTION_TX; + mirror_peer.site_name = site_name; + mirror_peer.mirror_uuid = mirror_uuid; + } + + if (it != peers.end()) { + mirror_peer = *it; + + if (mirror_peer.mirror_peer_direction == + cls::rbd::MIRROR_PEER_DIRECTION_RX) { + CLS_LOG(10, "switching to RX/TX peer: %s", site_name.c_str()); + mirror_peer.mirror_peer_direction = cls::rbd::MIRROR_PEER_DIRECTION_RX_TX; + } + } + + mirror_peer.last_seen = ceph_clock_now(); + + if (!mirror_peer.is_valid()) { + CLS_ERR("attempting to update invalid peer: %s", site_name.c_str()); + return -EINVAL; + } + + r = write_peer(hctx, mirror_peer); + if (r < 0) { + return r; + } + + return 0; +} + +int peer_add(cls_method_context_t hctx, cls::rbd::MirrorPeer mirror_peer) { + int r = check_mirroring_enabled(hctx); + if (r < 0) { + return r; + } + + if (!mirror_peer.is_valid()) { + CLS_ERR("mirror peer is not valid"); + return -EINVAL; + } + + std::string mirror_uuid; + r = uuid_get(hctx, &mirror_uuid); + if (r < 0) { + CLS_ERR("error retrieving mirroring uuid: %s", cpp_strerror(r).c_str()); + return r; + } else if (mirror_peer.uuid == mirror_uuid) { + CLS_ERR("peer uuid '%s' matches pool mirroring uuid", + mirror_uuid.c_str()); + return -EINVAL; + } else if (mirror_peer.mirror_peer_direction == + cls::rbd::MIRROR_PEER_DIRECTION_TX) { + CLS_ERR("peer uuid '%s' cannot use TX-only direction", + mirror_peer.uuid.c_str()); + return -EINVAL; + } + + std::vector<cls::rbd::MirrorPeer> peers; + r = read_peers(hctx, &peers); + if (r < 0 && r != -ENOENT) { + return r; + } + + for (auto const &peer : peers) { + if (peer.uuid == mirror_peer.uuid) { + CLS_ERR("peer uuid '%s' already exists", + peer.uuid.c_str()); + return -ESTALE; + } else if (peer.site_name == mirror_peer.site_name) { + CLS_ERR("peer site name '%s' already exists", + 
peer.site_name.c_str()); + return -EEXIST; + } else if (!mirror_peer.mirror_uuid.empty() && + peer.mirror_uuid == mirror_peer.mirror_uuid) { + CLS_ERR("peer mirror uuid '%s' already exists", + peer.mirror_uuid.c_str()); + return -EEXIST; + } + } + + r = write_peer(hctx, mirror_peer); + if (r < 0) { + return r; + } + return 0; +} + +int peer_remove(cls_method_context_t hctx, const std::string& uuid) { + int r = cls_cxx_map_remove_key(hctx, peer_key(uuid)); + if (r < 0 && r != -ENOENT) { + CLS_ERR("error removing peer: %s", cpp_strerror(r).c_str()); + return r; + } + return 0; +} + +int image_get(cls_method_context_t hctx, const string &image_id, + cls::rbd::MirrorImage *mirror_image) { + bufferlist bl; + int r = cls_cxx_map_get_val(hctx, image_key(image_id), &bl); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading mirrored image '%s': '%s'", image_id.c_str(), + cpp_strerror(r).c_str()); + } + return r; + } + + try { + auto it = bl.cbegin(); + decode(*mirror_image, it); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode mirrored image '%s'", image_id.c_str()); + return -EIO; + } + + return 0; +} + +int image_set(cls_method_context_t hctx, const string &image_id, + const cls::rbd::MirrorImage &mirror_image) { + bufferlist bl; + encode(mirror_image, bl); + + cls::rbd::MirrorImage existing_mirror_image; + int r = image_get(hctx, image_id, &existing_mirror_image); + if (r == -ENOENT) { + // make sure global id doesn't already exist + std::string global_id_key = global_key(mirror_image.global_image_id); + std::string image_id; + r = read_key(hctx, global_id_key, &image_id); + if (r >= 0) { + return -EEXIST; + } else if (r != -ENOENT) { + CLS_ERR("error reading global image id: '%s': '%s'", image_id.c_str(), + cpp_strerror(r).c_str()); + return r; + } + + // make sure this was not a race for disabling + if (mirror_image.state == cls::rbd::MIRROR_IMAGE_STATE_DISABLING) { + CLS_ERR("image '%s' is already disabled", image_id.c_str()); + return r; + } + } else if (r < 0) { + CLS_ERR("error reading mirrored image '%s': '%s'", image_id.c_str(), + cpp_strerror(r).c_str()); + return r; + } else if (existing_mirror_image.global_image_id != + mirror_image.global_image_id) { + // cannot change the global id + return -EINVAL; + } + + r = cls_cxx_map_set_val(hctx, image_key(image_id), &bl); + if (r < 0) { + CLS_ERR("error adding mirrored image '%s': %s", image_id.c_str(), + cpp_strerror(r).c_str()); + return r; + } + + bufferlist image_id_bl; + encode(image_id, image_id_bl); + r = cls_cxx_map_set_val(hctx, global_key(mirror_image.global_image_id), + &image_id_bl); + if (r < 0) { + CLS_ERR("error adding global id for image '%s': %s", image_id.c_str(), + cpp_strerror(r).c_str()); + return r; + } + return 0; +} + +int image_status_remove(cls_method_context_t hctx, + const string &global_image_id); + +int image_remove(cls_method_context_t hctx, const string &image_id) { + bufferlist bl; + cls::rbd::MirrorImage mirror_image; + int r = image_get(hctx, image_id, &mirror_image); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading mirrored image '%s': '%s'", image_id.c_str(), + cpp_strerror(r).c_str()); + } + return r; + } + + if (mirror_image.state != cls::rbd::MIRROR_IMAGE_STATE_DISABLING) { + return -EBUSY; + } + + r = cls_cxx_map_remove_key(hctx, image_key(image_id)); + if (r < 0) { + CLS_ERR("error removing mirrored image '%s': %s", image_id.c_str(), + cpp_strerror(r).c_str()); + return r; + } + + r = cls_cxx_map_remove_key(hctx, global_key(mirror_image.global_image_id)); 
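+  // the global id entry may already be gone if an earlier removal was
+  // interrupted, so -ENOENT is tolerated by the check below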
+ if (r < 0 && r != -ENOENT) { + CLS_ERR("error removing global id for image '%s': %s", image_id.c_str(), + cpp_strerror(r).c_str()); + return r; + } + + r = image_status_remove(hctx, mirror_image.global_image_id); + if (r < 0) { + return r; + } + + return 0; +} + +int image_status_set(cls_method_context_t hctx, const string &global_image_id, + const cls::rbd::MirrorImageSiteStatus &status) { + cls::rbd::MirrorImageSiteStatusOnDisk ondisk_status(status); + ondisk_status.mirror_uuid = ""; // mirror_uuid stored in key + ondisk_status.up = false; + ondisk_status.last_update = ceph_clock_now(); + + std::string global_id_key = global_key(global_image_id); + std::string image_id; + int r = read_key(hctx, global_id_key, &image_id); + if (r < 0) { + return 0; + } + cls::rbd::MirrorImage mirror_image; + r = image_get(hctx, image_id, &mirror_image); + if (r < 0) { + return 0; + } + if (mirror_image.state != cls::rbd::MIRROR_IMAGE_STATE_ENABLED) { + return 0; + } + + r = cls_get_request_origin(hctx, &ondisk_status.origin); + ceph_assert(r == 0); + + bufferlist bl; + encode(ondisk_status, bl, cls_get_features(hctx)); + + r = cls_cxx_map_set_val(hctx, status_global_key(global_image_id, + status.mirror_uuid), &bl); + if (r < 0) { + CLS_ERR("error setting status for mirrored image, global id '%s', " + "site '%s': %s", global_image_id.c_str(), + status.mirror_uuid.c_str(), + cpp_strerror(r).c_str()); + return r; + } + return 0; +} + +int get_remote_image_status_mirror_uuids(cls_method_context_t hctx, + const std::string& global_image_id, + std::set<std::string>* mirror_uuids) { + std::string filter = remote_status_global_key(global_image_id, ""); + std::string last_read = filter; + int max_read = 4; // we don't expect lots of peers + bool more = true; + + do { + std::set<std::string> keys; + int r = cls_cxx_map_get_keys(hctx, last_read, max_read, &keys, &more); + if (r < 0) { + return r; + } + + for (auto& key : keys) { + if (!boost::starts_with(key, filter)) { + more = false; + break; + } + + mirror_uuids->insert(key.substr(filter.length())); + } + + if (!keys.empty()) { + last_read = *keys.rbegin(); + } + } while (more); + + return 0; +} + +int image_status_remove(cls_method_context_t hctx, + const string &global_image_id) { + // remove all local/remote image statuses + std::set<std::string> mirror_uuids; + int r = get_remote_image_status_mirror_uuids(hctx, global_image_id, + &mirror_uuids); + if (r < 0 && r != -ENOENT) { + return r; + } + + mirror_uuids.insert(cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID); + for (auto& mirror_uuid : mirror_uuids) { + CLS_LOG(20, "removing status object for mirror_uuid %s", + mirror_uuid.c_str()); + auto key = status_global_key(global_image_id, mirror_uuid); + r = cls_cxx_map_remove_key(hctx, key); + if (r < 0 && r != -ENOENT) { + CLS_ERR("error removing stale status for key '%s': %s", + key.c_str(), cpp_strerror(r).c_str()); + return r; + } + } + + return 0; +} + +int image_status_get(cls_method_context_t hctx, const string &global_image_id, + const std::string& mirror_uuid, const bufferlist& bl, + const std::set<entity_inst_t> &watchers, + cls::rbd::MirrorImageStatus* status) { + cls::rbd::MirrorImageSiteStatusOnDisk ondisk_status; + try { + auto it = bl.cbegin(); + decode(ondisk_status, it); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode status for mirrored image, global id '%s', " + "site '%s'", + global_image_id.c_str(), mirror_uuid.c_str()); + return -EIO; + } + + auto site_status = static_cast<cls::rbd::MirrorImageSiteStatus>( + 
ondisk_status); + site_status.up = (watchers.find(ondisk_status.origin) != watchers.end()); + site_status.mirror_uuid = mirror_uuid; + status->mirror_image_site_statuses.push_back(site_status); + return 0; +} + +int image_status_get_local(cls_method_context_t hctx, + const string &global_image_id, + const std::set<entity_inst_t> &watchers, + cls::rbd::MirrorImageStatus *status) { + bufferlist bl; + int r = cls_cxx_map_get_val( + hctx, status_global_key(global_image_id, + cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID), + &bl); + if (r == -ENOENT) { + return 0; + } else if (r < 0) { + CLS_ERR("error reading status for mirrored image, global id '%s', " + "site '%s': '%s'", + global_image_id.c_str(), + cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID.c_str(), + cpp_strerror(r).c_str()); + return r; + } + + return image_status_get(hctx, global_image_id, + cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID, + bl, watchers, status); +} + +int image_status_get_remote(cls_method_context_t hctx, + const string &global_image_id, + const std::set<entity_inst_t> &watchers, + cls::rbd::MirrorImageStatus *status) { + std::string filter = remote_status_global_key(global_image_id, ""); + std::string last_read = filter; + int max_read = RBD_MAX_KEYS_READ; + bool more = true; + + do { + std::map<std::string, bufferlist> vals; + CLS_LOG(20, "last_read = '%s'", last_read.c_str()); + int r = cls_cxx_map_get_vals(hctx, last_read, filter, max_read, &vals, + &more); + if (r == -ENOENT) { + return 0; + } else if (r < 0) { + return r; + } + + for (auto& it : vals) { + auto mirror_uuid = it.first.substr(filter.length()); + CLS_LOG(20, "mirror_uuid = '%s'", mirror_uuid.c_str()); + r = image_status_get(hctx, global_image_id, mirror_uuid, it.second, + watchers, status); + if (r < 0) { + return r; + } + } + + if (!vals.empty()) { + last_read = vals.rbegin()->first; + } + } while (more); + + return 0; +} + +int image_status_get(cls_method_context_t hctx, const string &global_image_id, + const std::set<entity_inst_t> &watchers, + cls::rbd::MirrorImageStatus *status) { + status->mirror_image_site_statuses.clear(); + + // collect local site status + int r = image_status_get_local(hctx, global_image_id, watchers, status); + if (r < 0) { + return r; + } + + // collect remote site status (TX to peer) + r = image_status_get_remote(hctx, global_image_id, watchers, status); + if (r < 0) { + return r; + } + + if (status->mirror_image_site_statuses.empty()) { + return -ENOENT; + } + + return 0; +} + +int image_status_list(cls_method_context_t hctx, + const std::string &start_after, uint64_t max_return, + map<std::string, cls::rbd::MirrorImage> *mirror_images, + map<std::string, cls::rbd::MirrorImageStatus> *mirror_statuses) { + std::string last_read = image_key(start_after); + int max_read = RBD_MAX_KEYS_READ; + bool more = true; + + std::set<entity_inst_t> watchers; + int r = list_watchers(hctx, &watchers); + if (r < 0) { + return r; + } + + while (more && mirror_images->size() < max_return) { + std::map<std::string, bufferlist> vals; + CLS_LOG(20, "last_read = '%s'", last_read.c_str()); + r = cls_cxx_map_get_vals(hctx, last_read, IMAGE_KEY_PREFIX, max_read, &vals, + &more); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading mirror image directory by name: %s", + cpp_strerror(r).c_str()); + } + return r; + } + + for (auto it = vals.begin(); it != vals.end() && + mirror_images->size() < max_return; ++it) { + const std::string &image_id = it->first.substr(IMAGE_KEY_PREFIX.size()); + cls::rbd::MirrorImage 
mirror_image; + auto iter = it->second.cbegin(); + try { + decode(mirror_image, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode mirror image payload of image '%s'", + image_id.c_str()); + return -EIO; + } + + (*mirror_images)[image_id] = mirror_image; + + cls::rbd::MirrorImageStatus status; + int r1 = image_status_get(hctx, mirror_image.global_image_id, watchers, + &status); + if (r1 < 0) { + continue; + } + + (*mirror_statuses)[image_id] = status; + } + if (!vals.empty()) { + last_read = image_key(mirror_images->rbegin()->first); + } + } + + return 0; +} + +cls::rbd::MirrorImageStatusState compute_image_status_summary_state( + cls::rbd::MirrorPeerDirection mirror_peer_direction, + const std::set<std::string>& tx_peer_mirror_uuids, + const cls::rbd::MirrorImageStatus& status) { + std::optional<cls::rbd::MirrorImageStatusState> state = {}; + + cls::rbd::MirrorImageSiteStatus local_status; + status.get_local_mirror_image_site_status(&local_status); + + uint64_t unmatched_tx_peers = 0; + switch (mirror_peer_direction) { + case cls::rbd::MIRROR_PEER_DIRECTION_RX: + // if we are RX-only, summary is based on our local status + if (local_status.up) { + state = local_status.state; + } + break; + case cls::rbd::MIRROR_PEER_DIRECTION_RX_TX: + // if we are RX/TX, combine all statuses + if (local_status.up) { + state = local_status.state; + } + [[fallthrough]]; + case cls::rbd::MIRROR_PEER_DIRECTION_TX: + // if we are TX-only, summary is based on remote status + unmatched_tx_peers = tx_peer_mirror_uuids.size(); + for (auto& remote_status : status.mirror_image_site_statuses) { + if (remote_status.mirror_uuid == + cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID) { + continue; + } + + if (unmatched_tx_peers > 0 && + tx_peer_mirror_uuids.count(remote_status.mirror_uuid) > 0) { + --unmatched_tx_peers; + } + + auto remote_state = (remote_status.up ? 
+ remote_status.state : cls::rbd::MIRROR_IMAGE_STATUS_STATE_UNKNOWN); + if (remote_status.state == cls::rbd::MIRROR_IMAGE_STATUS_STATE_ERROR) { + state = remote_status.state; + } else if (!state) { + state = remote_state; + } else if (*state != cls::rbd::MIRROR_IMAGE_STATUS_STATE_ERROR) { + state = std::min(*state, remote_state); + } + } + break; + default: + break; + } + + if (!state || unmatched_tx_peers > 0) { + state = cls::rbd::MIRROR_IMAGE_STATUS_STATE_UNKNOWN; + } + return *state; +} + +int image_status_get_summary( + cls_method_context_t hctx, + cls::rbd::MirrorPeerDirection mirror_peer_direction, + const std::set<std::string>& tx_peer_mirror_uuids, + std::map<cls::rbd::MirrorImageStatusState, int32_t> *states) { + std::set<entity_inst_t> watchers; + int r = list_watchers(hctx, &watchers); + if (r < 0) { + return r; + } + + states->clear(); + + string last_read = IMAGE_KEY_PREFIX; + int max_read = RBD_MAX_KEYS_READ; + bool more = true; + while (more) { + map<string, bufferlist> vals; + r = cls_cxx_map_get_vals(hctx, last_read, IMAGE_KEY_PREFIX, + max_read, &vals, &more); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading mirrored images: %s", cpp_strerror(r).c_str()); + } + return r; + } + + for (auto &list_it : vals) { + const string &key = list_it.first; + + if (0 != key.compare(0, IMAGE_KEY_PREFIX.size(), IMAGE_KEY_PREFIX)) { + break; + } + + cls::rbd::MirrorImage mirror_image; + auto iter = list_it.second.cbegin(); + try { + decode(mirror_image, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode mirror image payload for key '%s'", + key.c_str()); + return -EIO; + } + + cls::rbd::MirrorImageStatus status; + r = image_status_get(hctx, mirror_image.global_image_id, watchers, + &status); + if (r < 0 && r != -ENOENT) { + return r; + } + + auto state = compute_image_status_summary_state( + mirror_peer_direction, tx_peer_mirror_uuids, status); + (*states)[state]++; + } + + if (!vals.empty()) { + last_read = vals.rbegin()->first; + } + } + + return 0; +} + +int image_status_remove_down(cls_method_context_t hctx) { + std::set<entity_inst_t> watchers; + int r = list_watchers(hctx, &watchers); + if (r < 0) { + return r; + } + + std::vector<std::string> prefixes = { + STATUS_GLOBAL_KEY_PREFIX, REMOTE_STATUS_GLOBAL_KEY_PREFIX}; + for (auto& prefix : prefixes) { + std::string last_read = prefix; + int max_read = RBD_MAX_KEYS_READ; + bool more = true; + while (more) { + std::map<std::string, bufferlist> vals; + r = cls_cxx_map_get_vals(hctx, last_read, prefix, max_read, &vals, &more); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading mirrored images: %s", cpp_strerror(r).c_str()); + } + return r; + } + + for (auto &list_it : vals) { + const std::string &key = list_it.first; + + if (0 != key.compare(0, prefix.size(), prefix)) { + break; + } + + cls::rbd::MirrorImageSiteStatusOnDisk status; + try { + auto it = list_it.second.cbegin(); + status.decode_meta(it); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode status metadata for mirrored image '%s'", + key.c_str()); + return -EIO; + } + + if (watchers.find(status.origin) == watchers.end()) { + CLS_LOG(20, "removing stale status object for key %s", + key.c_str()); + int r1 = cls_cxx_map_remove_key(hctx, key); + if (r1 < 0) { + CLS_ERR("error removing stale status for key '%s': %s", + key.c_str(), cpp_strerror(r1).c_str()); + return r1; + } + } + } + + if (!vals.empty()) { + last_read = vals.rbegin()->first; + } + } + } + + return 0; +} + +int 
image_instance_get(cls_method_context_t hctx, + const string &global_image_id, + const std::set<entity_inst_t> &watchers, + entity_inst_t *instance) { + // instance details only available for local site + bufferlist bl; + int r = cls_cxx_map_get_val( + hctx, status_global_key(global_image_id, + cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID), + &bl); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading status for mirrored image, global id '%s': '%s'", + global_image_id.c_str(), cpp_strerror(r).c_str()); + } + return r; + } + + cls::rbd::MirrorImageSiteStatusOnDisk ondisk_status; + try { + auto it = bl.cbegin(); + decode(ondisk_status, it); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode status for mirrored image, global id '%s'", + global_image_id.c_str()); + return -EIO; + } + + if (watchers.find(ondisk_status.origin) == watchers.end()) { + return -ESTALE; + } + + *instance = ondisk_status.origin; + return 0; +} + +int image_instance_list(cls_method_context_t hctx, + const std::string &start_after, + uint64_t max_return, + map<std::string, entity_inst_t> *instances) { + std::string last_read = image_key(start_after); + int max_read = RBD_MAX_KEYS_READ; + bool more = true; + + std::set<entity_inst_t> watchers; + int r = list_watchers(hctx, &watchers); + if (r < 0) { + return r; + } + + while (more && instances->size() < max_return) { + std::map<std::string, bufferlist> vals; + CLS_LOG(20, "last_read = '%s'", last_read.c_str()); + r = cls_cxx_map_get_vals(hctx, last_read, IMAGE_KEY_PREFIX, max_read, &vals, + &more); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading mirror image directory by name: %s", + cpp_strerror(r).c_str()); + } + return r; + } + + for (auto it = vals.begin(); it != vals.end() && + instances->size() < max_return; ++it) { + const std::string &image_id = it->first.substr(IMAGE_KEY_PREFIX.size()); + cls::rbd::MirrorImage mirror_image; + auto iter = it->second.cbegin(); + try { + decode(mirror_image, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode mirror image payload of image '%s'", + image_id.c_str()); + return -EIO; + } + + entity_inst_t instance; + r = image_instance_get(hctx, mirror_image.global_image_id, watchers, + &instance); + if (r < 0) { + continue; + } + + (*instances)[image_id] = instance; + } + if (!vals.empty()) { + last_read = vals.rbegin()->first; + } + } + + return 0; +} + +int instances_list(cls_method_context_t hctx, + std::vector<std::string> *instance_ids) { + std::string last_read = INSTANCE_KEY_PREFIX; + int max_read = RBD_MAX_KEYS_READ; + bool more = true; + while (more) { + std::map<std::string, bufferlist> vals; + int r = cls_cxx_map_get_vals(hctx, last_read, INSTANCE_KEY_PREFIX.c_str(), + max_read, &vals, &more); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading mirror instances: %s", cpp_strerror(r).c_str()); + } + return r; + } + + for (auto &it : vals) { + instance_ids->push_back(it.first.substr(INSTANCE_KEY_PREFIX.size())); + } + + if (!vals.empty()) { + last_read = vals.rbegin()->first; + } + } + return 0; +} + +int instances_add(cls_method_context_t hctx, const string &instance_id) { + bufferlist bl; + + int r = cls_cxx_map_set_val(hctx, instance_key(instance_id), &bl); + if (r < 0) { + CLS_ERR("error setting mirror instance %s: %s", instance_id.c_str(), + cpp_strerror(r).c_str()); + return r; + } + return 0; +} + +int instances_remove(cls_method_context_t hctx, const string &instance_id) { + + int r = cls_cxx_map_remove_key(hctx, 
instance_key(instance_id)); + if (r < 0) { + CLS_ERR("error removing mirror instance %s: %s", instance_id.c_str(), + cpp_strerror(r).c_str()); + return r; + } + return 0; +} + +int mirror_image_map_list(cls_method_context_t hctx, + const std::string &start_after, + uint64_t max_return, + std::map<std::string, cls::rbd::MirrorImageMap> *image_mapping) { + bool more = true; + std::string last_read = mirror_image_map_key(start_after); + + while (more && image_mapping->size() < max_return) { + std::map<std::string, bufferlist> vals; + CLS_LOG(20, "last read: '%s'", last_read.c_str()); + + int max_read = std::min<uint64_t>(RBD_MAX_KEYS_READ, max_return - image_mapping->size()); + int r = cls_cxx_map_get_vals(hctx, last_read, MIRROR_IMAGE_MAP_KEY_PREFIX, + max_read, &vals, &more); + if (r < 0) { + CLS_ERR("error reading image map: %s", cpp_strerror(r).c_str()); + return r; + } + + if (vals.empty()) { + return 0; + } + + for (auto it = vals.begin(); it != vals.end(); ++it) { + const std::string &global_image_id = + it->first.substr(MIRROR_IMAGE_MAP_KEY_PREFIX.size()); + + cls::rbd::MirrorImageMap mirror_image_map; + auto iter = it->second.cbegin(); + try { + decode(mirror_image_map, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode image map payload: %s", + cpp_strerror(r).c_str()); + return -EINVAL; + } + + image_mapping->insert(std::make_pair(global_image_id, mirror_image_map)); + } + + if (!vals.empty()) { + last_read = vals.rbegin()->first; + } + } + + return 0; +} + +int image_snapshot_unlink_peer(cls_method_context_t hctx, + uint64_t snap_id, + std::string mirror_peer_uuid) { + cls_rbd_snap snap; + std::string snap_key; + key_from_snap_id(snap_id, &snap_key); + int r = read_key(hctx, snap_key, &snap); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("Could not read snapshot meta off disk: %s", + cpp_strerror(r).c_str()); + } + return r; + } + + auto mirror_ns = std::get_if<cls::rbd::MirrorSnapshotNamespace>( + &snap.snapshot_namespace); + if (mirror_ns == nullptr) { + CLS_LOG(5, "mirror_image_snapshot_unlink_peer " \ + "not mirroring snapshot snap_id=%" PRIu64, snap_id); + return -EINVAL; + } + + if (mirror_ns->mirror_peer_uuids.count(mirror_peer_uuid) == 0) { + return -ENOENT; + } + + mirror_ns->mirror_peer_uuids.erase(mirror_peer_uuid); + + r = image::snapshot::write(hctx, snap_key, std::move(snap)); + if (r < 0) { + return r; + } + + return 0; +} + +int image_snapshot_set_copy_progress(cls_method_context_t hctx, + uint64_t snap_id, bool complete, + uint64_t last_copied_object_number) { + cls_rbd_snap snap; + std::string snap_key; + key_from_snap_id(snap_id, &snap_key); + int r = read_key(hctx, snap_key, &snap); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("Could not read snapshot meta off disk: %s", + cpp_strerror(r).c_str()); + } + return r; + } + + auto mirror_ns = std::get_if<cls::rbd::MirrorSnapshotNamespace>( + &snap.snapshot_namespace); + if (mirror_ns == nullptr) { + CLS_LOG(5, "mirror_image_snapshot_set_copy_progress " \ + "not mirroring snapshot snap_id=%" PRIu64, snap_id); + return -EINVAL; + } + + mirror_ns->complete = complete; + if (mirror_ns->state == cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY || + mirror_ns->state == cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED) { + mirror_ns->last_copied_object_number = last_copied_object_number; + } + + r = image::snapshot::write(hctx, snap_key, std::move(snap)); + if (r < 0) { + return r; + } + + return 0; +} + +} // namespace mirror + +/** + * Input: + * none + * + * Output: + * @param uuid 
(std::string) + * @returns 0 on success, negative error code on failure + */ +int mirror_uuid_get(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string mirror_uuid; + int r = mirror::uuid_get(hctx, &mirror_uuid); + if (r < 0) { + return r; + } + + encode(mirror_uuid, *out); + return 0; +} + +/** + * Input: + * @param mirror_uuid (std::string) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int mirror_uuid_set(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string mirror_uuid; + try { + auto bl_it = in->cbegin(); + decode(mirror_uuid, bl_it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + if (mirror_uuid.empty()) { + CLS_ERR("cannot set empty mirror uuid"); + return -EINVAL; + } + + uint32_t mirror_mode; + int r = read_key(hctx, mirror::MODE, &mirror_mode); + if (r < 0 && r != -ENOENT) { + return r; + } else if (r == 0 && mirror_mode != cls::rbd::MIRROR_MODE_DISABLED) { + CLS_ERR("cannot set mirror uuid while mirroring enabled"); + return -EINVAL; + } + + bufferlist mirror_uuid_bl; + mirror_uuid_bl.append(mirror_uuid); + r = cls_cxx_map_set_val(hctx, mirror::UUID, &mirror_uuid_bl); + if (r < 0) { + CLS_ERR("failed to set mirror uuid"); + return r; + } + return 0; +} + +/** + * Input: + * none + * + * Output: + * @param cls::rbd::MirrorMode (uint32_t) + * @returns 0 on success, negative error code on failure + */ +int mirror_mode_get(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint32_t mirror_mode_decode; + int r = read_key(hctx, mirror::MODE, &mirror_mode_decode); + if (r < 0) { + return r; + } + + encode(mirror_mode_decode, *out); + return 0; +} + +/** + * Input: + * @param mirror_mode (cls::rbd::MirrorMode) (uint32_t) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int mirror_mode_set(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + uint32_t mirror_mode_decode; + try { + auto bl_it = in->cbegin(); + decode(mirror_mode_decode, bl_it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + bool enabled; + switch (static_cast<cls::rbd::MirrorMode>(mirror_mode_decode)) { + case cls::rbd::MIRROR_MODE_DISABLED: + enabled = false; + break; + case cls::rbd::MIRROR_MODE_IMAGE: + case cls::rbd::MIRROR_MODE_POOL: + enabled = true; + break; + default: + CLS_ERR("invalid mirror mode: %d", mirror_mode_decode); + return -EINVAL; + } + + int r; + if (enabled) { + std::string mirror_uuid; + r = mirror::uuid_get(hctx, &mirror_uuid); + if (r == -ENOENT) { + return -EINVAL; + } else if (r < 0) { + return r; + } + + bufferlist bl; + encode(mirror_mode_decode, bl); + + r = cls_cxx_map_set_val(hctx, mirror::MODE, &bl); + if (r < 0) { + CLS_ERR("error enabling mirroring: %s", cpp_strerror(r).c_str()); + return r; + } + } else { + std::vector<cls::rbd::MirrorPeer> peers; + r = mirror::read_peers(hctx, &peers); + if (r < 0 && r != -ENOENT) { + return r; + } + + if (!peers.empty()) { + CLS_ERR("mirroring peers still registered"); + return -EBUSY; + } + + r = remove_key(hctx, mirror::MODE); + if (r < 0) { + return r; + } + + r = remove_key(hctx, mirror::UUID); + if (r < 0) { + return r; + } + } + return 0; +} + +/** + * Input: + * @param unique peer site name (std::string) + * @param mirror_uuid (std::string) + * @param direction (MirrorPeerDirection) -- future use + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int mirror_peer_ping(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + 
std::string site_name; + std::string mirror_uuid; + cls::rbd::MirrorPeerDirection mirror_peer_direction; + try { + auto it = in->cbegin(); + decode(site_name, it); + decode(mirror_uuid, it); + + uint8_t direction; + decode(direction, it); + mirror_peer_direction = static_cast<cls::rbd::MirrorPeerDirection>( + direction); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + if (mirror_peer_direction != cls::rbd::MIRROR_PEER_DIRECTION_TX) { + return -EINVAL; + } + + int r = mirror::peer_ping(hctx, site_name, mirror_uuid); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Input: + * none + * + * Output: + * @param std::vector<cls::rbd::MirrorPeer>: collection of peers + * @returns 0 on success, negative error code on failure + */ +int mirror_peer_list(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::vector<cls::rbd::MirrorPeer> peers; + int r = mirror::read_peers(hctx, &peers); + if (r < 0 && r != -ENOENT) { + return r; + } + + encode(peers, *out); + return 0; +} + +/** + * Input: + * @param mirror_peer (cls::rbd::MirrorPeer) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int mirror_peer_add(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + cls::rbd::MirrorPeer mirror_peer; + try { + auto it = in->cbegin(); + decode(mirror_peer, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int r = mirror::peer_add(hctx, mirror_peer); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Input: + * @param uuid (std::string) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int mirror_peer_remove(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string uuid; + try { + auto it = in->cbegin(); + decode(uuid, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int r = mirror::peer_remove(hctx, uuid); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param uuid (std::string) + * @param client_name (std::string) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int mirror_peer_set_client(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string uuid; + std::string client_name; + try { + auto it = in->cbegin(); + decode(uuid, it); + decode(client_name, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + cls::rbd::MirrorPeer peer; + int r = mirror::read_peer(hctx, uuid, &peer); + if (r < 0) { + return r; + } + + peer.client_name = client_name; + r = mirror::write_peer(hctx, peer); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param uuid (std::string) + * @param site_name (std::string) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int mirror_peer_set_cluster(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string uuid; + std::string site_name; + try { + auto it = in->cbegin(); + decode(uuid, it); + decode(site_name, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + cls::rbd::MirrorPeer* peer = nullptr; + std::vector<cls::rbd::MirrorPeer> peers; + int r = mirror::read_peers(hctx, &peers); + if (r < 0 && r != -ENOENT) { + return r; + } + + for (auto& p : peers) { + if (p.uuid == uuid) { + peer = &p; + } else if (p.site_name == site_name) { + return -EEXIST; + } + } + + if (peer == nullptr) { + return -ENOENT; + } + + peer->site_name = site_name; + r = mirror::write_peer(hctx, *peer); + if (r < 0) { + return r; + } + 
return 0; +} + +/** + * Input: + * @param uuid (std::string) + * @param direction (uint8_t) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int mirror_peer_set_direction(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string uuid; + cls::rbd::MirrorPeerDirection mirror_peer_direction; + try { + auto it = in->cbegin(); + decode(uuid, it); + uint8_t direction; + decode(direction, it); + mirror_peer_direction = static_cast<cls::rbd::MirrorPeerDirection>( + direction); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + cls::rbd::MirrorPeer peer; + int r = mirror::read_peer(hctx, uuid, &peer); + if (r < 0) { + return r; + } + + peer.mirror_peer_direction = mirror_peer_direction; + r = mirror::write_peer(hctx, peer); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param start_after which name to begin listing after + * (use the empty string to start at the beginning) + * @param max_return the maximum number of names to list + * + * Output: + * @param std::map<std::string, std::string>: local id to global id map + * @returns 0 on success, negative error code on failure + */ +int mirror_image_list(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string start_after; + uint64_t max_return; + try { + auto iter = in->cbegin(); + decode(start_after, iter); + decode(max_return, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int max_read = RBD_MAX_KEYS_READ; + bool more = true; + std::map<std::string, std::string> mirror_images; + std::string last_read = mirror::image_key(start_after); + + while (more && mirror_images.size() < max_return) { + std::map<std::string, bufferlist> vals; + CLS_LOG(20, "last_read = '%s'", last_read.c_str()); + int r = cls_cxx_map_get_vals(hctx, last_read, mirror::IMAGE_KEY_PREFIX, + max_read, &vals, &more); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error reading mirror image directory by name: %s", + cpp_strerror(r).c_str()); + } + return r; + } + + for (auto it = vals.begin(); it != vals.end(); ++it) { + const std::string &image_id = + it->first.substr(mirror::IMAGE_KEY_PREFIX.size()); + cls::rbd::MirrorImage mirror_image; + auto iter = it->second.cbegin(); + try { + decode(mirror_image, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("could not decode mirror image payload of image '%s'", + image_id.c_str()); + return -EIO; + } + + mirror_images[image_id] = mirror_image.global_image_id; + if (mirror_images.size() >= max_return) { + break; + } + } + if (!vals.empty()) { + last_read = mirror::image_key(mirror_images.rbegin()->first); + } + } + + encode(mirror_images, *out); + return 0; +} + +/** + * Input: + * @param global_id (std::string) + * + * Output: + * @param std::string - image id + * @returns 0 on success, negative error code on failure + */ +int mirror_image_get_image_id(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string global_id; + try { + auto it = in->cbegin(); + decode(global_id, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + std::string image_id; + int r = read_key(hctx, mirror::global_key(global_id), &image_id); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("error retrieving image id for global id '%s': %s", + global_id.c_str(), cpp_strerror(r).c_str()); + } + return r; + } + + encode(image_id, *out); + return 0; +} + +/** + * Input: + * @param image_id (std::string) + * + * Output: + * @param cls::rbd::MirrorImage - metadata associated 
with the image_id + * @returns 0 on success, negative error code on failure + */ +int mirror_image_get(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + string image_id; + try { + auto it = in->cbegin(); + decode(image_id, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + cls::rbd::MirrorImage mirror_image; + int r = mirror::image_get(hctx, image_id, &mirror_image); + if (r < 0) { + return r; + } + + encode(mirror_image, *out); + return 0; +} + +/** + * Input: + * @param image_id (std::string) + * @param mirror_image (cls::rbd::MirrorImage) + * + * Output: + * @returns 0 on success, negative error code on failure + * @returns -EEXIST if there's an existing image_id with a different global_image_id + */ +int mirror_image_set(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + string image_id; + cls::rbd::MirrorImage mirror_image; + try { + auto it = in->cbegin(); + decode(image_id, it); + decode(mirror_image, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int r = mirror::image_set(hctx, image_id, mirror_image); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param image_id (std::string) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int mirror_image_remove(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + string image_id; + try { + auto it = in->cbegin(); + decode(image_id, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int r = mirror::image_remove(hctx, image_id); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param global_image_id (std::string) + * @param status (cls::rbd::MirrorImageSiteStatus) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int mirror_image_status_set(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + string global_image_id; + cls::rbd::MirrorImageSiteStatus status; + try { + auto it = in->cbegin(); + decode(global_image_id, it); + decode(status, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int r = mirror::image_status_set(hctx, global_image_id, status); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param global_image_id (std::string) + * + * Output: + * @returns 0 on success, negative error code on failure + * + */ +int mirror_image_status_remove(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + string global_image_id; + try { + auto it = in->cbegin(); + decode(global_image_id, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int r = mirror::image_status_remove(hctx, global_image_id); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param global_image_id (std::string) + * + * Output: + * @param cls::rbd::MirrorImageStatus - metadata associated with the global_image_id + * @returns 0 on success, negative error code on failure + */ +int mirror_image_status_get(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + string global_image_id; + try { + auto it = in->cbegin(); + decode(global_image_id, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + std::set<entity_inst_t> watchers; + int r = mirror::list_watchers(hctx, &watchers); + if (r < 0) { + return r; + } + + cls::rbd::MirrorImageStatus status; + r = mirror::image_status_get(hctx, global_image_id, watchers, &status); + if (r < 0) { + return r; + } + + encode(status, *out); + return 0; +} + +/** + * Input: + * @param 
start_after which name to begin listing after
+ * (use the empty string to start at the beginning)
+ * @param max_return the maximum number of names to list
+ *
+ * Output:
+ * @param std::map<std::string, cls::rbd::MirrorImage>: image id to image map
+ * @param std::map<std::string, cls::rbd::MirrorImageStatus>: image id to status map
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_status_list(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string start_after;
+ uint64_t max_return;
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ map<std::string, cls::rbd::MirrorImage> images;
+ map<std::string, cls::rbd::MirrorImageStatus> statuses;
+ int r = mirror::image_status_list(hctx, start_after, max_return, &images,
+ &statuses);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(images, *out);
+ encode(statuses, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * @param std::vector<cls::rbd::MirrorPeer> - optional peers (backwards compatibility)
+ *
+ * Output:
+ * @param std::map<cls::rbd::MirrorImageStatusState, int32_t>: states counts
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_status_get_summary(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::vector<cls::rbd::MirrorPeer> peers;
+ try {
+ auto iter = in->cbegin();
+ if (!iter.end()) {
+ decode(peers, iter);
+ }
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ auto mirror_peer_direction = cls::rbd::MIRROR_PEER_DIRECTION_RX;
+ if (!peers.empty()) {
+ mirror_peer_direction = peers.begin()->mirror_peer_direction;
+ }
+
+ std::set<std::string> tx_peer_mirror_uuids;
+ for (auto& peer : peers) {
+ if (peer.mirror_peer_direction == cls::rbd::MIRROR_PEER_DIRECTION_RX) {
+ continue;
+ }
+
+ tx_peer_mirror_uuids.insert(peer.mirror_uuid);
+ if (mirror_peer_direction != cls::rbd::MIRROR_PEER_DIRECTION_RX_TX &&
+ mirror_peer_direction != peer.mirror_peer_direction) {
+ mirror_peer_direction = cls::rbd::MIRROR_PEER_DIRECTION_RX_TX;
+ }
+ }
+
+ std::map<cls::rbd::MirrorImageStatusState, int32_t> states;
+ int r = mirror::image_status_get_summary(hctx, mirror_peer_direction,
+ tx_peer_mirror_uuids, &states);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(states, *out);
+ return 0;
+}
+
+/**
+ * Input:
+ * none
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_status_remove_down(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ int r = mirror::image_status_remove_down(hctx);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param global_image_id (std::string)
+ *
+ * Output:
+ * @param entity_inst_t - instance
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_instance_get(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ string global_image_id;
+ try {
+ auto it = in->cbegin();
+ decode(global_image_id, it);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ std::set<entity_inst_t> watchers;
+ int r = mirror::list_watchers(hctx, &watchers);
+ if (r < 0) {
+ return r;
+ }
+
+ entity_inst_t instance;
+ r = mirror::image_instance_get(hctx, global_image_id, watchers, &instance);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(instance, *out, cls_get_features(hctx));
+ return 0;
+}
+
+/**
+ * Input:
+ * @param start_after which name to begin listing after
+ * (use the empty string to start at the 
beginning) + * @param max_return the maximum number of names to list + * + * Output: + * @param std::map<std::string, entity_inst_t>: image id to instance map + * @returns 0 on success, negative error code on failure + */ +int mirror_image_instance_list(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string start_after; + uint64_t max_return; + try { + auto iter = in->cbegin(); + decode(start_after, iter); + decode(max_return, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + map<std::string, entity_inst_t> instances; + int r = mirror::image_instance_list(hctx, start_after, max_return, + &instances); + if (r < 0) { + return r; + } + + encode(instances, *out, cls_get_features(hctx)); + return 0; +} + +/** + * Input: + * none + * + * Output: + * @param std::vector<std::string>: instance ids + * @returns 0 on success, negative error code on failure + */ +int mirror_instances_list(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::vector<std::string> instance_ids; + + int r = mirror::instances_list(hctx, &instance_ids); + if (r < 0) { + return r; + } + + encode(instance_ids, *out); + return 0; +} + +/** + * Input: + * @param instance_id (std::string) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int mirror_instances_add(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string instance_id; + try { + auto iter = in->cbegin(); + decode(instance_id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int r = mirror::instances_add(hctx, instance_id); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param instance_id (std::string) + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int mirror_instances_remove(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string instance_id; + try { + auto iter = in->cbegin(); + decode(instance_id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int r = mirror::instances_remove(hctx, instance_id); + if (r < 0) { + return r; + } + return 0; +} + +/** + * Input: + * @param start_after: key to start after + * @param max_return: max return items + * + * Output: + * @param std::map<std::string, cls::rbd::MirrorImageMap>: image mapping + * @returns 0 on success, negative error code on failure + */ +int mirror_image_map_list(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string start_after; + uint64_t max_return; + try { + auto it = in->cbegin(); + decode(start_after, it); + decode(max_return, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + std::map<std::string, cls::rbd::MirrorImageMap> image_mapping; + int r = mirror::mirror_image_map_list(hctx, start_after, max_return, &image_mapping); + if (r < 0) { + return r; + } + + encode(image_mapping, *out); + return 0; +} + +/** + * Input: + * @param global_image_id: global image id + * @param image_map: image map + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int mirror_image_map_update(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) { + std::string global_image_id; + cls::rbd::MirrorImageMap image_map; + + try { + auto it = in->cbegin(); + decode(global_image_id, it); + decode(image_map, it); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + bufferlist bl; + encode(image_map, bl); + + const std::string key = mirror::mirror_image_map_key(global_image_id); + int 
r = cls_cxx_map_set_val(hctx, key, &bl);
+ if (r < 0) {
+ CLS_ERR("error updating image map %s: %s", key.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Input:
+ * @param global_image_id: global image id
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_map_remove(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ std::string global_image_id;
+
+ try {
+ auto it = in->cbegin();
+ decode(global_image_id, it);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ const std::string key = mirror::mirror_image_map_key(global_image_id);
+ int r = cls_cxx_map_remove_key(hctx, key);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("error removing image map %s: %s", key.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+
+/**
+ * Input:
+ * @param snap_id: snapshot id
+ * @param mirror_peer_uuid: mirror peer uuid
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_snapshot_unlink_peer(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out) {
+ uint64_t snap_id;
+ std::string mirror_peer_uuid;
+ try {
+ auto iter = in->cbegin();
+ decode(snap_id, iter);
+ decode(mirror_peer_uuid, iter);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20,
+ "mirror_image_snapshot_unlink_peer snap_id=%" PRIu64 " peer_uuid=%s",
+ snap_id, mirror_peer_uuid.c_str());
+
+ int r = mirror::image_snapshot_unlink_peer(hctx, snap_id, mirror_peer_uuid);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * Input:
+ * @param snap_id: snapshot id
+ * @param complete: true if snapshot fully copied/complete
+ * @param last_copied_object_number: last copied object number
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int mirror_image_snapshot_set_copy_progress(cls_method_context_t hctx,
+ bufferlist *in,
+ bufferlist *out) {
+ uint64_t snap_id;
+ bool complete;
+ uint64_t last_copied_object_number;
+ try {
+ auto iter = in->cbegin();
+ decode(snap_id, iter);
+ decode(complete, iter);
+ decode(last_copied_object_number, iter);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "mirror_image_snapshot_set_copy_progress snap_id=%" PRIu64 \
+ " complete=%d last_copied_object_number=%" PRIu64, snap_id, complete,
+ last_copied_object_number);
+
+ int r = mirror::image_snapshot_set_copy_progress(hctx, snap_id, complete,
+ last_copied_object_number);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
+namespace group {
+
+/********************** methods for rbd_group_directory ***********************/
+
+int dir_add(cls_method_context_t hctx,
+ const string &name, const string &id,
+ bool check_for_unique_id)
+{
+ if (!name.size() || !is_valid_id(id)) {
+ CLS_ERR("invalid group name '%s' or id '%s'",
+ name.c_str(), id.c_str());
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "dir_add name=%s id=%s", name.c_str(), id.c_str());
+
+ string name_key = dir_key_for_name(name);
+ string id_key = dir_key_for_id(id);
+ string tmp;
+ int r = read_key(hctx, name_key, &tmp);
+ if (r != -ENOENT) {
+ CLS_LOG(10, "name already exists");
+ return -EEXIST;
+ }
+ r = read_key(hctx, id_key, &tmp);
+ if (r != -ENOENT && check_for_unique_id) {
+ CLS_LOG(10, "id already exists");
+ return -EBADF;
+ }
+ bufferlist id_bl, name_bl;
+ encode(id, id_bl);
+ encode(name, name_bl);
+ map<string, bufferlist> omap_vals;
+ omap_vals[name_key] = id_bl;
+ omap_vals[id_key] = name_bl;
+ return 
cls_cxx_map_set_vals(hctx, &omap_vals); +} + +int dir_remove(cls_method_context_t hctx, + const string &name, const string &id) +{ + CLS_LOG(20, "dir_remove name=%s id=%s", name.c_str(), id.c_str()); + + string name_key = dir_key_for_name(name); + string id_key = dir_key_for_id(id); + string stored_name, stored_id; + + int r = read_key(hctx, name_key, &stored_id); + if (r < 0) { + if (r != -ENOENT) + CLS_ERR("error reading name to id mapping: %s", cpp_strerror(r).c_str()); + return r; + } + r = read_key(hctx, id_key, &stored_name); + if (r < 0) { + if (r != -ENOENT) + CLS_ERR("error reading id to name mapping: %s", cpp_strerror(r).c_str()); + return r; + } + + // check if this op raced with a rename + if (stored_name != name || stored_id != id) { + CLS_ERR("stored name '%s' and id '%s' do not match args '%s' and '%s'", + stored_name.c_str(), stored_id.c_str(), name.c_str(), id.c_str()); + return -ESTALE; + } + + r = cls_cxx_map_remove_key(hctx, name_key); + if (r < 0) { + CLS_ERR("error removing name: %s", cpp_strerror(r).c_str()); + return r; + } + + r = cls_cxx_map_remove_key(hctx, id_key); + if (r < 0) { + CLS_ERR("error removing id: %s", cpp_strerror(r).c_str()); + return r; + } + + return 0; +} + +static const string RBD_GROUP_SNAP_KEY_PREFIX = "snapshot_"; + +std::string snap_key(const std::string &snap_id) { + ostringstream oss; + oss << RBD_GROUP_SNAP_KEY_PREFIX << snap_id; + return oss.str(); +} + +int snap_list(cls_method_context_t hctx, cls::rbd::GroupSnapshot start_after, + uint64_t max_return, + std::vector<cls::rbd::GroupSnapshot> *group_snaps) +{ + int max_read = RBD_MAX_KEYS_READ; + std::map<string, bufferlist> vals; + string last_read = snap_key(start_after.id); + + group_snaps->clear(); + + bool more; + do { + int r = cls_cxx_map_get_vals(hctx, last_read, + RBD_GROUP_SNAP_KEY_PREFIX, + max_read, &vals, &more); + if (r < 0) + return r; + + for (auto it = vals.begin(); it != vals.end() && group_snaps->size() < max_return; ++it) { + + auto iter = it->second.cbegin(); + cls::rbd::GroupSnapshot snap; + try { + decode(snap, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("error decoding snapshot: %s", it->first.c_str()); + return -EIO; + } + CLS_LOG(20, "Discovered snapshot %s %s", + snap.name.c_str(), + snap.id.c_str()); + group_snaps->push_back(snap); + } + + if (!vals.empty()) { + last_read = vals.rbegin()->first; + } + } while (more && (group_snaps->size() < max_return)); + + return 0; +} + +static int check_duplicate_snap_name(cls_method_context_t hctx, + const std::string &snap_name, + const std::string &snap_id) +{ + const int max_read = 1024; + cls::rbd::GroupSnapshot snap_last; + std::vector<cls::rbd::GroupSnapshot> page; + + for (;;) { + int r = snap_list(hctx, snap_last, max_read, &page); + if (r < 0) { + return r; + } + for (auto& snap: page) { + if (snap.name == snap_name && snap.id != snap_id) { + return -EEXIST; + } + } + + if (page.size() < max_read) { + break; + } + + snap_last = *page.rbegin(); + } + + return 0; +} + +} // namespace group + +/** + * List groups from the directory. 
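+ *
+ * Listing is paginated: at most max_return entries are returned per call
+ * and the caller passes the last name received as start_after on the
+ * next call. A client-side sketch of the loop (hypothetical wrapper
+ * name, assuming the usual cls invocation plumbing):
+ *
+ *   std::map<std::string, std::string> page;
+ *   std::string start_after;
+ *   do {
+ *     page.clear();
+ *     group_dir_list(ioctx, "rbd_group_directory", start_after, 64, &page);
+ *     if (!page.empty()) {
+ *       start_after = page.rbegin()->first;
+ *     }
+ *   } while (!page.empty());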
+ *
+ * Input:
+ * @param start_after (std::string)
+ * @param max_return (int64_t)
+ *
+ * Output:
+ * @param map of groups (name, id)
+ * @return 0 on success, negative error code on failure
+ */
+int group_dir_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string start_after;
+ uint64_t max_return;
+
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int max_read = RBD_MAX_KEYS_READ;
+ bool more = true;
+ map<string, string> groups;
+ string last_read = dir_key_for_name(start_after);
+
+ while (more && groups.size() < max_return) {
+ map<string, bufferlist> vals;
+ CLS_LOG(20, "last_read = '%s'", last_read.c_str());
+ int r = cls_cxx_map_get_vals(hctx, last_read, RBD_DIR_NAME_KEY_PREFIX,
+ max_read, &vals, &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading directory by name: %s", cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ for (auto val : vals) {
+ string id;
+ auto iter = val.second.cbegin();
+ try {
+ decode(id, iter);
+ } catch (const ceph::buffer::error &err) {
+ CLS_ERR("could not decode id of group '%s'", val.first.c_str());
+ return -EIO;
+ }
+ CLS_LOG(20, "adding '%s' -> '%s'", dir_name_from_key(val.first).c_str(), id.c_str());
+ groups[dir_name_from_key(val.first)] = id;
+ if (groups.size() >= max_return)
+ break;
+ }
+ if (!vals.empty()) {
+ last_read = dir_key_for_name(groups.rbegin()->first);
+ }
+ }
+
+ encode(groups, *out);
+
+ return 0;
+}
+
+/**
+ * Add a group to the directory.
+ *
+ * Input:
+ * @param name (std::string)
+ * @param id (std::string)
+ *
+ * Output:
+ * @return 0 on success, negative error code on failure
+ */
+int group_dir_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ int r = cls_cxx_create(hctx, false);
+
+ if (r < 0) {
+ CLS_ERR("could not create group directory: %s",
+ cpp_strerror(r).c_str());
+ return r;
+ }
+
+ string name, id;
+ try {
+ auto iter = in->cbegin();
+ decode(name, iter);
+ decode(id, iter);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ return group::dir_add(hctx, name, id, true);
+}
+
+/**
+ * Rename a group in the directory.
+ *
+ * Input:
+ * @param src original name of the group (std::string)
+ * @param dest new name of the group (std::string)
+ * @param id the id of the group (std::string)
+ *
+ * Output:
+ * @return 0 on success, negative error code on failure
+ */
+int group_dir_rename(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string src, dest, id;
+ try {
+ auto iter = in->cbegin();
+ decode(src, iter);
+ decode(dest, iter);
+ decode(id, iter);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ int r = group::dir_remove(hctx, src, id);
+ if (r < 0)
+ return r;
+
+ return group::dir_add(hctx, dest, id, false);
+}
+
+/**
+ * Remove a group from the directory.
+ *
+ * Input:
+ * @param name (std::string)
+ * @param id (std::string)
+ *
+ * Output:
+ * @return 0 on success, negative error code on failure
+ */
+int group_dir_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string name, id;
+ try {
+ auto iter = in->cbegin();
+ decode(name, iter);
+ decode(id, iter);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ return group::dir_remove(hctx, name, id);
+}
+
+/**
+ * Set state of an image in the group. 
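+ *
+ * (The link state is stored in the group object's omap under a key
+ * derived from the image spec, so setting an image that is already a
+ * member simply overwrites its state.)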
+ * + * Input: + * @param image_status (cls::rbd::GroupImageStatus) + * + * Output: + * @return 0 on success, negative error code on failure + */ +int group_image_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "group_image_set"); + + cls::rbd::GroupImageStatus st; + try { + auto iter = in->cbegin(); + decode(st, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + string image_key = st.spec.image_key(); + + bufferlist image_val_bl; + encode(st.state, image_val_bl); + int r = cls_cxx_map_set_val(hctx, image_key, &image_val_bl); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Remove reference to an image from the group. + * + * Input: + * @param spec (cls::rbd::GroupImageSpec) + * + * Output: + * @return 0 on success, negative error code on failure + */ +int group_image_remove(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "group_image_remove"); + cls::rbd::GroupImageSpec spec; + try { + auto iter = in->cbegin(); + decode(spec, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + string image_key = spec.image_key(); + + int r = cls_cxx_map_remove_key(hctx, image_key); + if (r < 0) { + CLS_ERR("error removing image from group: %s", cpp_strerror(r).c_str()); + return r; + } + + return 0; +} + +/* + * List images in the group. + * + * Input: + * @param start_after which name to begin listing after + * (use the empty string to start at the beginning) + * @param max_return the maximum number of names to list + * + * Output: + * @param tuples of descriptions of the images: image_id, pool_id, image reference state. + * @return 0 on success, negative error code on failure + */ +int group_image_list(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "group_image_list"); + cls::rbd::GroupImageSpec start_after; + uint64_t max_return; + try { + auto iter = in->cbegin(); + decode(start_after, iter); + decode(max_return, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int max_read = RBD_MAX_KEYS_READ; + std::map<string, bufferlist> vals; + string last_read = start_after.image_key(); + std::vector<cls::rbd::GroupImageStatus> res; + bool more; + do { + int r = cls_cxx_map_get_vals(hctx, last_read, + cls::rbd::RBD_GROUP_IMAGE_KEY_PREFIX, + max_read, &vals, &more); + if (r < 0) + return r; + + for (auto it = vals.begin(); it != vals.end() && res.size() < max_return; ++it) { + + auto iter = it->second.cbegin(); + cls::rbd::GroupImageLinkState state; + try { + decode(state, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("error decoding state for image: %s", it->first.c_str()); + return -EIO; + } + cls::rbd::GroupImageSpec spec; + int r = cls::rbd::GroupImageSpec::from_key(it->first, &spec); + if (r < 0) + return r; + + CLS_LOG(20, "Discovered image %s %" PRId64 " %d", spec.image_id.c_str(), + spec.pool_id, + (int)state); + res.push_back(cls::rbd::GroupImageStatus(spec, state)); + } + if (res.size() > 0) { + last_read = res.rbegin()->spec.image_key(); + } + + } while (more && (res.size() < max_return)); + encode(res, *out); + + return 0; +} + +/** + * Reference the group this image belongs to. 
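+ *
+ * (Linking is idempotent: re-adding the image to the group it already
+ * belongs to returns 0, while a reference to a different group fails
+ * with -EEXIST. On success the RBD_OPERATION_FEATURE_GROUP op feature
+ * is set on the image.)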
+ * + * Input: + * @param group_id (std::string) + * @param pool_id (int64_t) + * + * Output: + * @return 0 on success, negative error code on failure + */ +int image_group_add(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "image_group_add"); + cls::rbd::GroupSpec new_group; + try { + auto iter = in->cbegin(); + decode(new_group, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + bufferlist existing_refbl; + + int r = cls_cxx_map_get_val(hctx, RBD_GROUP_REF, &existing_refbl); + if (r == 0) { + // If we are trying to link this image to the same group then return + // success. If this image already belongs to another group then abort. + cls::rbd::GroupSpec old_group; + try { + auto iter = existing_refbl.cbegin(); + decode(old_group, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + if ((old_group.group_id != new_group.group_id) || + (old_group.pool_id != new_group.pool_id)) { + return -EEXIST; + } else { + return 0; // In this case the values are already correct + } + } else if (r < 0 && r != -ENOENT) { + // No entry means this image is not a member of any group. + return r; + } + + r = image::set_op_features(hctx, RBD_OPERATION_FEATURE_GROUP, + RBD_OPERATION_FEATURE_GROUP); + if (r < 0) { + return r; + } + + bufferlist refbl; + encode(new_group, refbl); + r = cls_cxx_map_set_val(hctx, RBD_GROUP_REF, &refbl); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Remove image's pointer to the group. + * + * Input: + * @param cg_id (std::string) + * @param pool_id (int64_t) + * + * Output: + * @return 0 on success, negative error code on failure + */ +int image_group_remove(cls_method_context_t hctx, + bufferlist *in, + bufferlist *out) +{ + CLS_LOG(20, "image_group_remove"); + cls::rbd::GroupSpec spec; + try { + auto iter = in->cbegin(); + decode(spec, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + bufferlist refbl; + int r = cls_cxx_map_get_val(hctx, RBD_GROUP_REF, &refbl); + if (r < 0) { + return r; + } + + cls::rbd::GroupSpec ref_spec; + auto iter = refbl.cbegin(); + try { + decode(ref_spec, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + if (ref_spec.pool_id != spec.pool_id || ref_spec.group_id != spec.group_id) { + return -EBADF; + } + + r = cls_cxx_map_remove_key(hctx, RBD_GROUP_REF); + if (r < 0) { + return r; + } + + r = image::set_op_features(hctx, 0, RBD_OPERATION_FEATURE_GROUP); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Retrieve the id and pool of the group this image belongs to. + * + * Input: + * none + * + * Output: + * @param GroupSpec + * @return 0 on success, negative error code on failure + */ +int image_group_get(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "image_group_get"); + bufferlist refbl; + int r = cls_cxx_map_get_val(hctx, RBD_GROUP_REF, &refbl); + if (r < 0 && r != -ENOENT) { + return r; + } + + cls::rbd::GroupSpec spec; + + if (r != -ENOENT) { + auto iter = refbl.cbegin(); + try { + decode(spec, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + } + + encode(spec, *out); + return 0; +} + +/** + * Save initial snapshot record. 
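+ *
+ * (A record in the INCOMPLETE state can only be created, never
+ * overwritten: if an entry with the same id already exists the call
+ * fails with -EEXIST. Records in any other state replace the existing
+ * entry for that id. Duplicate snapshot names are rejected in either
+ * case.)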
+ * + * Input: + * @param GroupSnapshot + * + * Output: + * @return 0 on success, negative error code on failure + */ +int group_snap_set(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "group_snap_set"); + cls::rbd::GroupSnapshot group_snap; + try { + auto iter = in->cbegin(); + decode(group_snap, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + if (group_snap.name.empty()) { + CLS_ERR("group snapshot name is empty"); + return -EINVAL; + } + if (group_snap.id.empty()) { + CLS_ERR("group snapshot id is empty"); + return -EINVAL; + } + + int r = group::check_duplicate_snap_name(hctx, group_snap.name, + group_snap.id); + if (r < 0) { + return r; + } + + std::string key = group::snap_key(group_snap.id); + if (group_snap.state == cls::rbd::GROUP_SNAPSHOT_STATE_INCOMPLETE) { + bufferlist snap_bl; + r = cls_cxx_map_get_val(hctx, key, &snap_bl); + if (r < 0 && r != -ENOENT) { + return r; + } else if (r >= 0) { + return -EEXIST; + } + } + + bufferlist obl; + encode(group_snap, obl); + r = cls_cxx_map_set_val(hctx, key, &obl); + return r; +} + +/** + * Remove snapshot record. + * + * Input: + * @param id Snapshot id + * + * Output: + * @return 0 on success, negative error code on failure + */ +int group_snap_remove(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "group_snap_remove"); + std::string snap_id; + try { + auto iter = in->cbegin(); + decode(snap_id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + std::string snap_key = group::snap_key(snap_id); + + CLS_LOG(20, "removing snapshot with key %s", snap_key.c_str()); + int r = cls_cxx_map_remove_key(hctx, snap_key); + return r; +} + +/** + * Get group's snapshot by id. + * + * Input: + * @param snapshot_id the id of the snapshot to look for. + * + * Output: + * @param GroupSnapshot the requested snapshot + * @return 0 on success, negative error code on failure + */ +int group_snap_get_by_id(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + CLS_LOG(20, "group_snap_get_by_id"); + + std::string snap_id; + try { + auto iter = in->cbegin(); + decode(snap_id, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + bufferlist snapbl; + + int r = cls_cxx_map_get_val(hctx, group::snap_key(snap_id), &snapbl); + if (r < 0) { + return r; + } + + cls::rbd::GroupSnapshot group_snap; + auto iter = snapbl.cbegin(); + try { + decode(group_snap, iter); + } catch (const ceph::buffer::error &err) { + CLS_ERR("error decoding snapshot: %s", snap_id.c_str()); + return -EIO; + } + + encode(group_snap, *out); + + return 0; +} + +/** + * List group's snapshots. 
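+ *
+ * (Pagination follows the same start_after/max_return contract as the
+ * other list methods: pass the last snapshot returned to continue the
+ * listing.)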
+ *
+ * Input:
+ * @param start_after which name to begin listing after
+ * (use the empty string to start at the beginning)
+ * @param max_return the maximum number of snapshots to list
+ *
+ * Output:
+ * @param list of snapshots
+ * @return 0 on success, negative error code on failure
+ */
+int group_snap_list(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(20, "group_snap_list");
+
+ cls::rbd::GroupSnapshot start_after;
+ uint64_t max_return;
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+ std::vector<cls::rbd::GroupSnapshot> group_snaps;
+ int r = group::snap_list(hctx, start_after, max_return, &group_snaps);
+ if (r < 0) {
+ return r;
+ }
+
+ encode(group_snaps, *out);
+
+ return 0;
+}
+
+namespace trash {
+
+static const std::string IMAGE_KEY_PREFIX("id_");
+
+std::string image_key(const std::string &image_id) {
+ return IMAGE_KEY_PREFIX + image_id;
+}
+
+std::string image_id_from_key(const std::string &key) {
+ return key.substr(IMAGE_KEY_PREFIX.size());
+}
+
+} // namespace trash
+
+/**
+ * Add an image entry to the rbd trash. Creates the trash object if
+ * needed, and stores the trash spec information of the deleted image.
+ *
+ * Input:
+ * @param id the id of the image
+ * @param trash_spec the spec info of the deleted image
+ *
+ * Output:
+ * @returns -EEXIST if the image id is already in the trash
+ * @returns 0 on success, negative error code on failure
+ */
+int trash_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ int r = cls_cxx_create(hctx, false);
+ if (r < 0) {
+ CLS_ERR("could not create trash: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ string id;
+ cls::rbd::TrashImageSpec trash_spec;
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ decode(trash_spec, iter);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ if (!is_valid_id(id)) {
+ CLS_ERR("trash_add: invalid id '%s'", id.c_str());
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "trash_add id=%s", id.c_str());
+
+ string key = trash::image_key(id);
+ cls::rbd::TrashImageSpec tmp;
+ r = read_key(hctx, key, &tmp);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("could not read key %s entry from trash: %s", key.c_str(),
+ cpp_strerror(r).c_str());
+ return r;
+ } else if (r == 0) {
+ CLS_LOG(10, "id already exists");
+ return -EEXIST;
+ }
+
+ map<string, bufferlist> omap_vals;
+ encode(trash_spec, omap_vals[key]);
+ return cls_cxx_map_set_vals(hctx, &omap_vals);
+}
+
+/**
+ * Removes an image entry from the rbd trash object.
+ *
+ * Input:
+ * @param id the id of the image
+ *
+ * Output:
+ * @returns -ENOENT if the image id does not exist in the trash
+ * @returns 0 on success, negative error code on failure
+ */
+int trash_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string id;
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "trash_remove id=%s", id.c_str());
+
+ string key = trash::image_key(id);
+ bufferlist tmp;
+ int r = cls_cxx_map_get_val(hctx, key, &tmp);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("error reading entry key %s: %s", key.c_str(), cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+
+ r = cls_cxx_map_remove_key(hctx, key);
+ if (r < 0) {
+ CLS_ERR("error removing entry: %s", cpp_strerror(r).c_str());
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Returns the list of trash spec entries registered in the rbd_trash
+ * object. 
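+ *
+ * (Entries are stored under "id_"-prefixed omap keys; the prefix is
+ * stripped from the image ids in the returned map.)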
+ *
+ * Input:
+ * @param start_after which name to begin listing after
+ * (use the empty string to start at the beginning)
+ * @param max_return the maximum number of names to list
+ *
+ * Output:
+ * @param data the map between image id and trash spec info
+ *
+ * @returns 0 on success, negative error code on failure
+ */
+int trash_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string start_after;
+ uint64_t max_return;
+
+ try {
+ auto iter = in->cbegin();
+ decode(start_after, iter);
+ decode(max_return, iter);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ map<string, cls::rbd::TrashImageSpec> data;
+ string last_read = trash::image_key(start_after);
+ bool more = true;
+
+ CLS_LOG(20, "trash_get_images");
+ while (data.size() < max_return) {
+ map<string, bufferlist> raw_data;
+ int max_read = std::min<int32_t>(RBD_MAX_KEYS_READ,
+ max_return - data.size());
+ int r = cls_cxx_map_get_vals(hctx, last_read, trash::IMAGE_KEY_PREFIX,
+ max_read, &raw_data, &more);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ CLS_ERR("failed to read the vals off of disk: %s",
+ cpp_strerror(r).c_str());
+ }
+ return r;
+ }
+ if (raw_data.empty()) {
+ break;
+ }
+
+ for (auto it = raw_data.begin(); it != raw_data.end(); ++it) {
+ try {
+ decode(data[trash::image_id_from_key(it->first)], it->second);
+ } catch (const ceph::buffer::error &err) {
+ CLS_ERR("could not decode trash image spec for id '%s'",
+ trash::image_id_from_key(it->first).c_str());
+ return -EIO;
+ }
+ }
+
+ if (!more) {
+ break;
+ }
+
+ last_read = raw_data.rbegin()->first;
+ }
+
+ encode(data, *out);
+ return 0;
+}
+
+/**
+ * Returns the trash spec entry of an image registered in the rbd_trash
+ * object.
+ *
+ * Input:
+ * @param id the id of the image
+ *
+ * Output:
+ * @param out the trash spec entry
+ *
+ * @returns 0 on success, negative error code on failure
+ */
+int trash_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ string id;
+ try {
+ auto iter = in->cbegin();
+ decode(id, iter);
+ } catch (const ceph::buffer::error &err) {
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "trash_get_image id=%s", id.c_str());
+
+ string key = trash::image_key(id);
+ int r = cls_cxx_map_get_val(hctx, key, out);
+ if (r < 0 && r != -ENOENT) {
+ CLS_ERR("error reading image from trash '%s': '%s'", id.c_str(),
+ cpp_strerror(r).c_str());
+ }
+ return r;
+}
+
+/**
+ * Set state of an image in the rbd_trash object. 
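+ *
+ * This is a compare-and-swap: the state is only updated when the
+ * stored state matches expect_state. Setting a state equal to the
+ * current one is a no-op; any other mismatch fails with -ESTALE.
+ * A caller-side sketch (hypothetical wrapper name):
+ *
+ *   // e.g. NORMAL -> MOVING; -ESTALE if another client raced us
+ *   trash_state_set(ioctx, image_id, cls::rbd::TRASH_IMAGE_STATE_MOVING,
+ *                   cls::rbd::TRASH_IMAGE_STATE_NORMAL);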
+ *
+ * Input:
+ * @param id the id of the image
+ * @param trash_state the state of the image to be set
+ * @param expect_state the expected state of the image
+ *
+ * Output:
+ * @returns 0 on success, negative error code on failure
+ */
+int trash_state_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  string id;
+  cls::rbd::TrashImageState trash_state;
+  cls::rbd::TrashImageState expect_state;
+  try {
+    auto iter = in->cbegin();
+    decode(id, iter);
+    decode(trash_state, iter);
+    decode(expect_state, iter);
+  } catch (const ceph::buffer::error &err) {
+    return -EINVAL;
+  }
+
+  CLS_LOG(20, "trash_state_set id=%s", id.c_str());
+
+  string key = trash::image_key(id);
+  cls::rbd::TrashImageSpec trash_spec;
+  int r = read_key(hctx, key, &trash_spec);
+  if (r < 0) {
+    if (r != -ENOENT) {
+      CLS_ERR("Could not read trash image spec off disk: %s",
+              cpp_strerror(r).c_str());
+    }
+    return r;
+  }
+
+  if (trash_spec.state == expect_state) {
+    trash_spec.state = trash_state;
+    r = write_key(hctx, key, trash_spec);
+    if (r < 0) {
+      CLS_ERR("error setting trash image state: %s", cpp_strerror(r).c_str());
+      return r;
+    }
+
+    return 0;
+  } else if (trash_spec.state == trash_state) {
+    return 0;
+  } else {
+    CLS_ERR("Current trash state: %d does not match expected: %d or set: %d",
+            trash_spec.state, expect_state, trash_state);
+    return -ESTALE;
+  }
+}
+
+namespace nspace {
+
+const std::string NAME_KEY_PREFIX("name_");
+
+std::string key_for_name(const std::string& name) {
+  return NAME_KEY_PREFIX + name;
+}
+
+std::string name_from_key(const std::string &key) {
+  return key.substr(NAME_KEY_PREFIX.size());
+}
+
+} // namespace nspace
+
+/**
+ * Add a namespace to the namespace directory.
+ *
+ * Input:
+ * @param name the name of the namespace
+ *
+ * Output:
+ * @returns -EEXIST if the namespace already exists
+ * @returns 0 on success, negative error code on failure
+ */
+int namespace_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  std::string name;
+  try {
+    auto iter = in->cbegin();
+    decode(name, iter);
+  } catch (const ceph::buffer::error &err) {
+    return -EINVAL;
+  }
+
+  std::string key(nspace::key_for_name(name));
+  bufferlist value;
+  int r = cls_cxx_map_get_val(hctx, key, &value);
+  if (r < 0 && r != -ENOENT) {
+    return r;
+  } else if (r == 0) {
+    return -EEXIST;
+  }
+
+  r = cls_cxx_map_set_val(hctx, key, &value);
+  if (r < 0) {
+    CLS_ERR("failed to set omap key: %s", key.c_str());
+    return r;
+  }
+
+  return 0;
+}
+
+/**
+ * Remove a namespace from the namespace directory.
+ * + * Input: + * @param name the name of the namespace + * + * Output: + * @returns 0 on success, negative error code on failure + */ +int namespace_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + std::string name; + try { + auto iter = in->cbegin(); + decode(name, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + std::string key(nspace::key_for_name(name)); + bufferlist bl; + int r = cls_cxx_map_get_val(hctx, key, &bl); + if (r < 0) { + return r; + } + + r = cls_cxx_map_remove_key(hctx, key); + if (r < 0) { + return r; + } + + return 0; +} + +/** + * Returns the list of namespaces in the rbd_namespace object + * + * Input: + * @param start_after which name to begin listing after + * (use the empty string to start at the beginning) + * @param max_return the maximum number of names to list + * + * Output: + * @param data list of namespace names + * @returns 0 on success, negative error code on failure + */ +int namespace_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + string start_after; + uint64_t max_return; + try { + auto iter = in->cbegin(); + decode(start_after, iter); + decode(max_return, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + std::list<std::string> data; + std::string last_read = nspace::key_for_name(start_after); + bool more = true; + + CLS_LOG(20, "namespace_list"); + while (data.size() < max_return) { + std::map<std::string, bufferlist> raw_data; + int max_read = std::min<int32_t>(RBD_MAX_KEYS_READ, + max_return - data.size()); + int r = cls_cxx_map_get_vals(hctx, last_read, nspace::NAME_KEY_PREFIX, + max_read, &raw_data, &more); + if (r < 0) { + if (r != -ENOENT) { + CLS_ERR("failed to read the vals off of disk: %s", + cpp_strerror(r).c_str()); + } + return r; + } + + for (auto& it : raw_data) { + data.push_back(nspace::name_from_key(it.first)); + } + + if (raw_data.empty() || !more) { + break; + } + + last_read = raw_data.rbegin()->first; + } + + encode(data, *out); + return 0; +} + +/** + * Reclaim space for zeroed extents + * + * Input: + * @param sparse_size minimal zeroed block to sparse + * @param remove_empty boolean, true if the object should be removed if empty + * + * Output: + * @returns -ENOENT if the object does not exist or has been removed + * @returns 0 on success, negative error code on failure + */ +int sparsify(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + uint64_t sparse_size; + bool remove_empty; + try { + auto iter = in->cbegin(); + decode(sparse_size, iter); + decode(remove_empty, iter); + } catch (const ceph::buffer::error &err) { + return -EINVAL; + } + + int r = check_exists(hctx); + if (r < 0) { + return r; + } + + bufferlist bl; + r = cls_cxx_read(hctx, 0, 0, &bl); + if (r < 0) { + CLS_ERR("failed to read data off of disk: %s", cpp_strerror(r).c_str()); + return r; + } + + if (bl.is_zero()) { + if (remove_empty) { + CLS_LOG(20, "remove"); + r = cls_cxx_remove(hctx); + if (r < 0) { + CLS_ERR("remove failed: %s", cpp_strerror(r).c_str()); + return r; + } + } else if (bl.length() > 0) { + CLS_LOG(20, "truncate"); + bufferlist write_bl; + r = cls_cxx_replace(hctx, 0, 0, &write_bl); + if (r < 0) { + CLS_ERR("truncate failed: %s", cpp_strerror(r).c_str()); + return r; + } + } else { + CLS_LOG(20, "skip empty"); + } + return 0; + } + + bl.rebuild(ceph::buffer::ptr_node::create(bl.length())); + size_t write_offset = 0; + size_t write_length = 0; + size_t offset = 0; + size_t length = bl.length(); + const auto& ptr = bl.front(); 
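+  // Write-back phase: each extent that calc_sparse_extent() reports is
+  // flushed below. The first flush goes through cls_cxx_replace(), which
+  // rewrites the object so that every skipped (zeroed) range becomes a
+  // hole; subsequent extents are flushed with plain cls_cxx_write(). If
+  // the first extent already spans the whole object, nothing is written.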
+ bool replace = true; + while (offset < length) { + if (calc_sparse_extent(ptr, sparse_size, length, &write_offset, + &write_length, &offset)) { + if (write_offset == 0 && write_length == length) { + CLS_LOG(20, "nothing to do"); + return 0; + } + CLS_LOG(20, "write%s %" PRIu64 "~%" PRIu64, (replace ? "(replace)" : ""), + write_offset, write_length); + bufferlist write_bl; + write_bl.push_back(ceph::buffer::ptr_node::create(ptr, write_offset, + write_length)); + if (replace) { + r = cls_cxx_replace(hctx, write_offset, write_length, &write_bl); + replace = false; + } else { + r = cls_cxx_write(hctx, write_offset, write_length, &write_bl); + } + if (r < 0) { + CLS_ERR("write failed: %s", cpp_strerror(r).c_str()); + return r; + } + write_offset = offset; + write_length = 0; + } + } + + return 0; +} + +CLS_INIT(rbd) +{ + CLS_LOG(20, "Loaded rbd class!"); + + cls_handle_t h_class; + cls_method_handle_t h_create; + cls_method_handle_t h_get_features; + cls_method_handle_t h_set_features; + cls_method_handle_t h_get_size; + cls_method_handle_t h_set_size; + cls_method_handle_t h_get_parent; + cls_method_handle_t h_set_parent; + cls_method_handle_t h_remove_parent; + cls_method_handle_t h_parent_get; + cls_method_handle_t h_parent_overlap_get; + cls_method_handle_t h_parent_attach; + cls_method_handle_t h_parent_detach; + cls_method_handle_t h_get_protection_status; + cls_method_handle_t h_set_protection_status; + cls_method_handle_t h_get_stripe_unit_count; + cls_method_handle_t h_set_stripe_unit_count; + cls_method_handle_t h_get_create_timestamp; + cls_method_handle_t h_get_access_timestamp; + cls_method_handle_t h_get_modify_timestamp; + cls_method_handle_t h_get_flags; + cls_method_handle_t h_set_flags; + cls_method_handle_t h_op_features_get; + cls_method_handle_t h_op_features_set; + cls_method_handle_t h_add_child; + cls_method_handle_t h_remove_child; + cls_method_handle_t h_get_children; + cls_method_handle_t h_get_snapcontext; + cls_method_handle_t h_get_object_prefix; + cls_method_handle_t h_get_data_pool; + cls_method_handle_t h_get_snapshot_name; + cls_method_handle_t h_get_snapshot_timestamp; + cls_method_handle_t h_snapshot_get; + cls_method_handle_t h_snapshot_add; + cls_method_handle_t h_snapshot_remove; + cls_method_handle_t h_snapshot_rename; + cls_method_handle_t h_snapshot_trash_add; + cls_method_handle_t h_get_all_features; + cls_method_handle_t h_get_id; + cls_method_handle_t h_set_id; + cls_method_handle_t h_set_modify_timestamp; + cls_method_handle_t h_set_access_timestamp; + cls_method_handle_t h_dir_get_id; + cls_method_handle_t h_dir_get_name; + cls_method_handle_t h_dir_list; + cls_method_handle_t h_dir_add_image; + cls_method_handle_t h_dir_remove_image; + cls_method_handle_t h_dir_rename_image; + cls_method_handle_t h_dir_state_assert; + cls_method_handle_t h_dir_state_set; + cls_method_handle_t h_object_map_load; + cls_method_handle_t h_object_map_save; + cls_method_handle_t h_object_map_resize; + cls_method_handle_t h_object_map_update; + cls_method_handle_t h_object_map_snap_add; + cls_method_handle_t h_object_map_snap_remove; + cls_method_handle_t h_metadata_set; + cls_method_handle_t h_metadata_remove; + cls_method_handle_t h_metadata_list; + cls_method_handle_t h_metadata_get; + cls_method_handle_t h_snapshot_get_limit; + cls_method_handle_t h_snapshot_set_limit; + cls_method_handle_t h_child_attach; + cls_method_handle_t h_child_detach; + cls_method_handle_t h_children_list; + cls_method_handle_t h_migration_set; + cls_method_handle_t h_migration_set_state; 
+ cls_method_handle_t h_migration_get; + cls_method_handle_t h_migration_remove; + cls_method_handle_t h_old_snapshots_list; + cls_method_handle_t h_old_snapshot_add; + cls_method_handle_t h_old_snapshot_remove; + cls_method_handle_t h_old_snapshot_rename; + cls_method_handle_t h_mirror_uuid_get; + cls_method_handle_t h_mirror_uuid_set; + cls_method_handle_t h_mirror_mode_get; + cls_method_handle_t h_mirror_mode_set; + cls_method_handle_t h_mirror_peer_ping; + cls_method_handle_t h_mirror_peer_list; + cls_method_handle_t h_mirror_peer_add; + cls_method_handle_t h_mirror_peer_remove; + cls_method_handle_t h_mirror_peer_set_client; + cls_method_handle_t h_mirror_peer_set_cluster; + cls_method_handle_t h_mirror_peer_set_direction; + cls_method_handle_t h_mirror_image_list; + cls_method_handle_t h_mirror_image_get_image_id; + cls_method_handle_t h_mirror_image_get; + cls_method_handle_t h_mirror_image_set; + cls_method_handle_t h_mirror_image_remove; + cls_method_handle_t h_mirror_image_status_set; + cls_method_handle_t h_mirror_image_status_remove; + cls_method_handle_t h_mirror_image_status_get; + cls_method_handle_t h_mirror_image_status_list; + cls_method_handle_t h_mirror_image_status_get_summary; + cls_method_handle_t h_mirror_image_status_remove_down; + cls_method_handle_t h_mirror_image_instance_get; + cls_method_handle_t h_mirror_image_instance_list; + cls_method_handle_t h_mirror_instances_list; + cls_method_handle_t h_mirror_instances_add; + cls_method_handle_t h_mirror_instances_remove; + cls_method_handle_t h_mirror_image_map_list; + cls_method_handle_t h_mirror_image_map_update; + cls_method_handle_t h_mirror_image_map_remove; + cls_method_handle_t h_mirror_image_snapshot_unlink_peer; + cls_method_handle_t h_mirror_image_snapshot_set_copy_progress; + cls_method_handle_t h_group_dir_list; + cls_method_handle_t h_group_dir_add; + cls_method_handle_t h_group_dir_remove; + cls_method_handle_t h_group_dir_rename; + cls_method_handle_t h_group_image_remove; + cls_method_handle_t h_group_image_list; + cls_method_handle_t h_group_image_set; + cls_method_handle_t h_image_group_add; + cls_method_handle_t h_image_group_remove; + cls_method_handle_t h_image_group_get; + cls_method_handle_t h_group_snap_set; + cls_method_handle_t h_group_snap_remove; + cls_method_handle_t h_group_snap_get_by_id; + cls_method_handle_t h_group_snap_list; + cls_method_handle_t h_trash_add; + cls_method_handle_t h_trash_remove; + cls_method_handle_t h_trash_list; + cls_method_handle_t h_trash_get; + cls_method_handle_t h_trash_state_set; + cls_method_handle_t h_namespace_add; + cls_method_handle_t h_namespace_remove; + cls_method_handle_t h_namespace_list; + cls_method_handle_t h_copyup; + cls_method_handle_t h_sparse_copyup; + cls_method_handle_t h_assert_snapc_seq; + cls_method_handle_t h_sparsify; + + cls_register("rbd", &h_class); + cls_register_cxx_method(h_class, "create", + CLS_METHOD_RD | CLS_METHOD_WR, + create, &h_create); + cls_register_cxx_method(h_class, "get_features", + CLS_METHOD_RD, + get_features, &h_get_features); + cls_register_cxx_method(h_class, "set_features", + CLS_METHOD_RD | CLS_METHOD_WR, + set_features, &h_set_features); + cls_register_cxx_method(h_class, "get_size", + CLS_METHOD_RD, + get_size, &h_get_size); + cls_register_cxx_method(h_class, "set_size", + CLS_METHOD_RD | CLS_METHOD_WR, + set_size, &h_set_size); + cls_register_cxx_method(h_class, "get_snapcontext", + CLS_METHOD_RD, + get_snapcontext, &h_get_snapcontext); + cls_register_cxx_method(h_class, "get_object_prefix", + 
CLS_METHOD_RD, + get_object_prefix, &h_get_object_prefix); + cls_register_cxx_method(h_class, "get_data_pool", CLS_METHOD_RD, + get_data_pool, &h_get_data_pool); + cls_register_cxx_method(h_class, "get_snapshot_name", + CLS_METHOD_RD, + get_snapshot_name, &h_get_snapshot_name); + cls_register_cxx_method(h_class, "get_snapshot_timestamp", + CLS_METHOD_RD, + get_snapshot_timestamp, &h_get_snapshot_timestamp); + cls_register_cxx_method(h_class, "snapshot_get", + CLS_METHOD_RD, + snapshot_get, &h_snapshot_get); + cls_register_cxx_method(h_class, "snapshot_add", + CLS_METHOD_RD | CLS_METHOD_WR, + snapshot_add, &h_snapshot_add); + cls_register_cxx_method(h_class, "snapshot_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + snapshot_remove, &h_snapshot_remove); + cls_register_cxx_method(h_class, "snapshot_rename", + CLS_METHOD_RD | CLS_METHOD_WR, + snapshot_rename, &h_snapshot_rename); + cls_register_cxx_method(h_class, "snapshot_trash_add", + CLS_METHOD_RD | CLS_METHOD_WR, + snapshot_trash_add, &h_snapshot_trash_add); + cls_register_cxx_method(h_class, "get_all_features", + CLS_METHOD_RD, + get_all_features, &h_get_all_features); + + // NOTE: deprecate v1 parent APIs after mimic EOLed + cls_register_cxx_method(h_class, "get_parent", + CLS_METHOD_RD, + get_parent, &h_get_parent); + cls_register_cxx_method(h_class, "set_parent", + CLS_METHOD_RD | CLS_METHOD_WR, + set_parent, &h_set_parent); + cls_register_cxx_method(h_class, "remove_parent", + CLS_METHOD_RD | CLS_METHOD_WR, + remove_parent, &h_remove_parent); + + cls_register_cxx_method(h_class, "parent_get", + CLS_METHOD_RD, parent_get, &h_parent_get); + cls_register_cxx_method(h_class, "parent_overlap_get", + CLS_METHOD_RD, parent_overlap_get, + &h_parent_overlap_get); + cls_register_cxx_method(h_class, "parent_attach", + CLS_METHOD_RD | CLS_METHOD_WR, + parent_attach, &h_parent_attach); + cls_register_cxx_method(h_class, "parent_detach", + CLS_METHOD_RD | CLS_METHOD_WR, + parent_detach, &h_parent_detach); + + cls_register_cxx_method(h_class, "set_protection_status", + CLS_METHOD_RD | CLS_METHOD_WR, + set_protection_status, &h_set_protection_status); + cls_register_cxx_method(h_class, "get_protection_status", + CLS_METHOD_RD, + get_protection_status, &h_get_protection_status); + cls_register_cxx_method(h_class, "get_stripe_unit_count", + CLS_METHOD_RD, + get_stripe_unit_count, &h_get_stripe_unit_count); + cls_register_cxx_method(h_class, "set_stripe_unit_count", + CLS_METHOD_RD | CLS_METHOD_WR, + set_stripe_unit_count, &h_set_stripe_unit_count); + cls_register_cxx_method(h_class, "get_create_timestamp", + CLS_METHOD_RD, + get_create_timestamp, &h_get_create_timestamp); + cls_register_cxx_method(h_class, "get_access_timestamp", + CLS_METHOD_RD, + get_access_timestamp, &h_get_access_timestamp); + cls_register_cxx_method(h_class, "get_modify_timestamp", + CLS_METHOD_RD, + get_modify_timestamp, &h_get_modify_timestamp); + cls_register_cxx_method(h_class, "get_flags", + CLS_METHOD_RD, + get_flags, &h_get_flags); + cls_register_cxx_method(h_class, "set_flags", + CLS_METHOD_RD | CLS_METHOD_WR, + set_flags, &h_set_flags); + cls_register_cxx_method(h_class, "op_features_get", CLS_METHOD_RD, + op_features_get, &h_op_features_get); + cls_register_cxx_method(h_class, "op_features_set", + CLS_METHOD_RD | CLS_METHOD_WR, + op_features_set, &h_op_features_set); + cls_register_cxx_method(h_class, "metadata_list", + CLS_METHOD_RD, + metadata_list, &h_metadata_list); + cls_register_cxx_method(h_class, "metadata_set", + CLS_METHOD_RD | CLS_METHOD_WR, + metadata_set, 
&h_metadata_set); + cls_register_cxx_method(h_class, "metadata_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + metadata_remove, &h_metadata_remove); + cls_register_cxx_method(h_class, "metadata_get", + CLS_METHOD_RD, + metadata_get, &h_metadata_get); + cls_register_cxx_method(h_class, "snapshot_get_limit", + CLS_METHOD_RD, + snapshot_get_limit, &h_snapshot_get_limit); + cls_register_cxx_method(h_class, "snapshot_set_limit", + CLS_METHOD_RD | CLS_METHOD_WR, + snapshot_set_limit, &h_snapshot_set_limit); + cls_register_cxx_method(h_class, "child_attach", + CLS_METHOD_RD | CLS_METHOD_WR, + child_attach, &h_child_attach); + cls_register_cxx_method(h_class, "child_detach", + CLS_METHOD_RD | CLS_METHOD_WR, + child_detach, &h_child_detach); + cls_register_cxx_method(h_class, "children_list", + CLS_METHOD_RD, + children_list, &h_children_list); + cls_register_cxx_method(h_class, "migration_set", + CLS_METHOD_RD | CLS_METHOD_WR, + migration_set, &h_migration_set); + cls_register_cxx_method(h_class, "migration_set_state", + CLS_METHOD_RD | CLS_METHOD_WR, + migration_set_state, &h_migration_set_state); + cls_register_cxx_method(h_class, "migration_get", + CLS_METHOD_RD, + migration_get, &h_migration_get); + cls_register_cxx_method(h_class, "migration_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + migration_remove, &h_migration_remove); + + cls_register_cxx_method(h_class, "set_modify_timestamp", + CLS_METHOD_RD | CLS_METHOD_WR, + set_modify_timestamp, &h_set_modify_timestamp); + + cls_register_cxx_method(h_class, "set_access_timestamp", + CLS_METHOD_RD | CLS_METHOD_WR, + set_access_timestamp, &h_set_access_timestamp); + + /* methods for the rbd_children object */ + cls_register_cxx_method(h_class, "add_child", + CLS_METHOD_RD | CLS_METHOD_WR, + add_child, &h_add_child); + cls_register_cxx_method(h_class, "remove_child", + CLS_METHOD_RD | CLS_METHOD_WR, + remove_child, &h_remove_child); + cls_register_cxx_method(h_class, "get_children", + CLS_METHOD_RD, + get_children, &h_get_children); + + /* methods for the rbd_id.$image_name objects */ + cls_register_cxx_method(h_class, "get_id", + CLS_METHOD_RD, + get_id, &h_get_id); + cls_register_cxx_method(h_class, "set_id", + CLS_METHOD_RD | CLS_METHOD_WR, + set_id, &h_set_id); + + /* methods for the rbd_directory object */ + cls_register_cxx_method(h_class, "dir_get_id", + CLS_METHOD_RD, + dir_get_id, &h_dir_get_id); + cls_register_cxx_method(h_class, "dir_get_name", + CLS_METHOD_RD, + dir_get_name, &h_dir_get_name); + cls_register_cxx_method(h_class, "dir_list", + CLS_METHOD_RD, + dir_list, &h_dir_list); + cls_register_cxx_method(h_class, "dir_add_image", + CLS_METHOD_RD | CLS_METHOD_WR, + dir_add_image, &h_dir_add_image); + cls_register_cxx_method(h_class, "dir_remove_image", + CLS_METHOD_RD | CLS_METHOD_WR, + dir_remove_image, &h_dir_remove_image); + cls_register_cxx_method(h_class, "dir_rename_image", + CLS_METHOD_RD | CLS_METHOD_WR, + dir_rename_image, &h_dir_rename_image); + cls_register_cxx_method(h_class, "dir_state_assert", CLS_METHOD_RD, + dir_state_assert, &h_dir_state_assert); + cls_register_cxx_method(h_class, "dir_state_set", + CLS_METHOD_RD | CLS_METHOD_WR, + dir_state_set, &h_dir_state_set); + + /* methods for the rbd_object_map.$image_id object */ + cls_register_cxx_method(h_class, "object_map_load", + CLS_METHOD_RD, + object_map_load, &h_object_map_load); + cls_register_cxx_method(h_class, "object_map_save", + CLS_METHOD_RD | CLS_METHOD_WR, + object_map_save, &h_object_map_save); + cls_register_cxx_method(h_class, "object_map_resize", + 
CLS_METHOD_RD | CLS_METHOD_WR, + object_map_resize, &h_object_map_resize); + cls_register_cxx_method(h_class, "object_map_update", + CLS_METHOD_RD | CLS_METHOD_WR, + object_map_update, &h_object_map_update); + cls_register_cxx_method(h_class, "object_map_snap_add", + CLS_METHOD_RD | CLS_METHOD_WR, + object_map_snap_add, &h_object_map_snap_add); + cls_register_cxx_method(h_class, "object_map_snap_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + object_map_snap_remove, &h_object_map_snap_remove); + + /* methods for the old format */ + cls_register_cxx_method(h_class, "snap_list", + CLS_METHOD_RD, + old_snapshots_list, &h_old_snapshots_list); + cls_register_cxx_method(h_class, "snap_add", + CLS_METHOD_RD | CLS_METHOD_WR, + old_snapshot_add, &h_old_snapshot_add); + cls_register_cxx_method(h_class, "snap_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + old_snapshot_remove, &h_old_snapshot_remove); + cls_register_cxx_method(h_class, "snap_rename", + CLS_METHOD_RD | CLS_METHOD_WR, + old_snapshot_rename, &h_old_snapshot_rename); + + /* methods for the rbd_mirroring object */ + cls_register_cxx_method(h_class, "mirror_uuid_get", CLS_METHOD_RD, + mirror_uuid_get, &h_mirror_uuid_get); + cls_register_cxx_method(h_class, "mirror_uuid_set", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_uuid_set, &h_mirror_uuid_set); + cls_register_cxx_method(h_class, "mirror_mode_get", CLS_METHOD_RD, + mirror_mode_get, &h_mirror_mode_get); + cls_register_cxx_method(h_class, "mirror_mode_set", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_mode_set, &h_mirror_mode_set); + cls_register_cxx_method(h_class, "mirror_peer_ping", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_peer_ping, &h_mirror_peer_ping); + cls_register_cxx_method(h_class, "mirror_peer_list", CLS_METHOD_RD, + mirror_peer_list, &h_mirror_peer_list); + cls_register_cxx_method(h_class, "mirror_peer_add", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_peer_add, &h_mirror_peer_add); + cls_register_cxx_method(h_class, "mirror_peer_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_peer_remove, &h_mirror_peer_remove); + cls_register_cxx_method(h_class, "mirror_peer_set_client", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_peer_set_client, &h_mirror_peer_set_client); + cls_register_cxx_method(h_class, "mirror_peer_set_cluster", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_peer_set_cluster, &h_mirror_peer_set_cluster); + cls_register_cxx_method(h_class, "mirror_peer_set_direction", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_peer_set_direction, + &h_mirror_peer_set_direction); + cls_register_cxx_method(h_class, "mirror_image_list", CLS_METHOD_RD, + mirror_image_list, &h_mirror_image_list); + cls_register_cxx_method(h_class, "mirror_image_get_image_id", CLS_METHOD_RD, + mirror_image_get_image_id, + &h_mirror_image_get_image_id); + cls_register_cxx_method(h_class, "mirror_image_get", CLS_METHOD_RD, + mirror_image_get, &h_mirror_image_get); + cls_register_cxx_method(h_class, "mirror_image_set", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_image_set, &h_mirror_image_set); + cls_register_cxx_method(h_class, "mirror_image_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_image_remove, &h_mirror_image_remove); + cls_register_cxx_method(h_class, "mirror_image_status_set", + CLS_METHOD_RD | CLS_METHOD_WR | CLS_METHOD_PROMOTE, + mirror_image_status_set, &h_mirror_image_status_set); + cls_register_cxx_method(h_class, "mirror_image_status_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_image_status_remove, + &h_mirror_image_status_remove); + cls_register_cxx_method(h_class, "mirror_image_status_get", 
CLS_METHOD_RD, + mirror_image_status_get, &h_mirror_image_status_get); + cls_register_cxx_method(h_class, "mirror_image_status_list", CLS_METHOD_RD, + mirror_image_status_list, + &h_mirror_image_status_list); + cls_register_cxx_method(h_class, "mirror_image_status_get_summary", + CLS_METHOD_RD, mirror_image_status_get_summary, + &h_mirror_image_status_get_summary); + cls_register_cxx_method(h_class, "mirror_image_status_remove_down", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_image_status_remove_down, + &h_mirror_image_status_remove_down); + cls_register_cxx_method(h_class, "mirror_image_instance_get", CLS_METHOD_RD, + mirror_image_instance_get, + &h_mirror_image_instance_get); + cls_register_cxx_method(h_class, "mirror_image_instance_list", CLS_METHOD_RD, + mirror_image_instance_list, + &h_mirror_image_instance_list); + cls_register_cxx_method(h_class, "mirror_instances_list", CLS_METHOD_RD, + mirror_instances_list, &h_mirror_instances_list); + cls_register_cxx_method(h_class, "mirror_instances_add", + CLS_METHOD_RD | CLS_METHOD_WR | CLS_METHOD_PROMOTE, + mirror_instances_add, &h_mirror_instances_add); + cls_register_cxx_method(h_class, "mirror_instances_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_instances_remove, + &h_mirror_instances_remove); + cls_register_cxx_method(h_class, "mirror_image_map_list", + CLS_METHOD_RD, mirror_image_map_list, + &h_mirror_image_map_list); + cls_register_cxx_method(h_class, "mirror_image_map_update", + CLS_METHOD_WR, mirror_image_map_update, + &h_mirror_image_map_update); + cls_register_cxx_method(h_class, "mirror_image_map_remove", + CLS_METHOD_WR, mirror_image_map_remove, + &h_mirror_image_map_remove); + cls_register_cxx_method(h_class, "mirror_image_snapshot_unlink_peer", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_image_snapshot_unlink_peer, + &h_mirror_image_snapshot_unlink_peer); + cls_register_cxx_method(h_class, "mirror_image_snapshot_set_copy_progress", + CLS_METHOD_RD | CLS_METHOD_WR, + mirror_image_snapshot_set_copy_progress, + &h_mirror_image_snapshot_set_copy_progress); + + /* methods for the groups feature */ + cls_register_cxx_method(h_class, "group_dir_list", + CLS_METHOD_RD, + group_dir_list, &h_group_dir_list); + cls_register_cxx_method(h_class, "group_dir_add", + CLS_METHOD_RD | CLS_METHOD_WR, + group_dir_add, &h_group_dir_add); + cls_register_cxx_method(h_class, "group_dir_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + group_dir_remove, &h_group_dir_remove); + cls_register_cxx_method(h_class, "group_dir_rename", + CLS_METHOD_RD | CLS_METHOD_WR, + group_dir_rename, &h_group_dir_rename); + cls_register_cxx_method(h_class, "group_image_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + group_image_remove, &h_group_image_remove); + cls_register_cxx_method(h_class, "group_image_list", + CLS_METHOD_RD, + group_image_list, &h_group_image_list); + cls_register_cxx_method(h_class, "group_image_set", + CLS_METHOD_RD | CLS_METHOD_WR, + group_image_set, &h_group_image_set); + cls_register_cxx_method(h_class, "image_group_add", + CLS_METHOD_RD | CLS_METHOD_WR, + image_group_add, &h_image_group_add); + cls_register_cxx_method(h_class, "image_group_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + image_group_remove, &h_image_group_remove); + cls_register_cxx_method(h_class, "image_group_get", + CLS_METHOD_RD, + image_group_get, &h_image_group_get); + cls_register_cxx_method(h_class, "group_snap_set", + CLS_METHOD_RD | CLS_METHOD_WR, + group_snap_set, &h_group_snap_set); + cls_register_cxx_method(h_class, "group_snap_remove", + CLS_METHOD_RD | CLS_METHOD_WR, + 
group_snap_remove, &h_group_snap_remove);
+  cls_register_cxx_method(h_class, "group_snap_get_by_id",
+                          CLS_METHOD_RD,
+                          group_snap_get_by_id, &h_group_snap_get_by_id);
+  cls_register_cxx_method(h_class, "group_snap_list",
+                          CLS_METHOD_RD,
+                          group_snap_list, &h_group_snap_list);
+
+  /* rbd_trash object methods */
+  cls_register_cxx_method(h_class, "trash_add",
+                          CLS_METHOD_RD | CLS_METHOD_WR,
+                          trash_add, &h_trash_add);
+  cls_register_cxx_method(h_class, "trash_remove",
+                          CLS_METHOD_RD | CLS_METHOD_WR,
+                          trash_remove, &h_trash_remove);
+  cls_register_cxx_method(h_class, "trash_list",
+                          CLS_METHOD_RD,
+                          trash_list, &h_trash_list);
+  cls_register_cxx_method(h_class, "trash_get",
+                          CLS_METHOD_RD,
+                          trash_get, &h_trash_get);
+  cls_register_cxx_method(h_class, "trash_state_set",
+                          CLS_METHOD_RD | CLS_METHOD_WR,
+                          trash_state_set, &h_trash_state_set);
+
+  /* rbd_namespace object methods */
+  cls_register_cxx_method(h_class, "namespace_add",
+                          CLS_METHOD_RD | CLS_METHOD_WR,
+                          namespace_add, &h_namespace_add);
+  cls_register_cxx_method(h_class, "namespace_remove",
+                          CLS_METHOD_RD | CLS_METHOD_WR,
+                          namespace_remove, &h_namespace_remove);
+  cls_register_cxx_method(h_class, "namespace_list", CLS_METHOD_RD,
+                          namespace_list, &h_namespace_list);
+
+  /* data object methods */
+  cls_register_cxx_method(h_class, "copyup",
+                          CLS_METHOD_RD | CLS_METHOD_WR,
+                          copyup, &h_copyup);
+  cls_register_cxx_method(h_class, "sparse_copyup",
+                          CLS_METHOD_RD | CLS_METHOD_WR,
+                          sparse_copyup, &h_sparse_copyup);
+  cls_register_cxx_method(h_class, "assert_snapc_seq",
+                          CLS_METHOD_RD | CLS_METHOD_WR,
+                          assert_snapc_seq,
+                          &h_assert_snapc_seq);
+  cls_register_cxx_method(h_class, "sparsify",
+                          CLS_METHOD_RD | CLS_METHOD_WR,
+                          sparsify, &h_sparsify);
+}
diff --git a/src/cls/rbd/cls_rbd.h b/src/cls/rbd/cls_rbd.h
new file mode 100644
index 000000000..f0cadf8ae
--- /dev/null
+++ b/src/cls/rbd/cls_rbd.h
@@ -0,0 +1,247 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef __CEPH_CLS_RBD_H
+#define __CEPH_CLS_RBD_H
+
+#include "include/types.h"
+#include "include/buffer_fwd.h"
+#include "include/rbd_types.h"
+#include "common/Formatter.h"
+#include "cls/rbd/cls_rbd_types.h"
+
+/// information about our parent image, if any
+struct cls_rbd_parent {
+  int64_t pool_id = -1;
+  std::string pool_namespace;
+  std::string image_id;
+  snapid_t snap_id = CEPH_NOSNAP;
+  std::optional<uint64_t> head_overlap = std::nullopt;
+
+  cls_rbd_parent() {
+  }
+  cls_rbd_parent(const cls::rbd::ParentImageSpec& parent_image_spec,
+                 const std::optional<uint64_t>& head_overlap)
+    : pool_id(parent_image_spec.pool_id),
+      pool_namespace(parent_image_spec.pool_namespace),
+      image_id(parent_image_spec.image_id), snap_id(parent_image_spec.snap_id),
+      head_overlap(head_overlap) {
+  }
+
+  inline bool exists() const {
+    return (pool_id >= 0 && !image_id.empty() && snap_id != CEPH_NOSNAP);
+  }
+
+  inline bool operator==(const cls_rbd_parent& rhs) const {
+    return (pool_id == rhs.pool_id &&
+            pool_namespace == rhs.pool_namespace &&
+            image_id == rhs.image_id &&
+            snap_id == rhs.snap_id);
+  }
+  inline bool operator!=(const cls_rbd_parent& rhs) const {
+    return !(*this == rhs);
+  }
+
+  void encode(ceph::buffer::list& bl, uint64_t features) const {
+    // NOTE: remove support for version 1 after Nautilus EOLed
+    uint8_t version = 1;
+    if ((features & CEPH_FEATURE_SERVER_NAUTILUS) != 0ULL) {
+      // break backwards compatibility when using nautilus or later OSDs
+      version = 2;
+    }
+
+    ENCODE_START(version, version, bl);
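+    // struct_v and struct_compat are both set to `version`: a v2
+    // (nautilus+) encoding adds pool_namespace and an optional
+    // head_overlap, and is deliberately not decodable by pre-nautilus
+    // code.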
+    encode(pool_id, bl);
+    if (version >= 2) {
+      encode(pool_namespace, bl);
+    }
+    encode(image_id, bl);
+    encode(snap_id, bl);
+    if (version == 1) {
+      encode(head_overlap.value_or(0ULL), bl);
+    } else {
+      encode(head_overlap, bl);
+    }
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(2, bl);
+    decode(pool_id, bl);
+    if (struct_v >= 2) {
+      decode(pool_namespace, bl);
+    }
+    decode(image_id, bl);
+    decode(snap_id, bl);
+    if (struct_v == 1) {
+      uint64_t overlap;
+      decode(overlap, bl);
+      head_overlap = overlap;
+    } else {
+      decode(head_overlap, bl);
+    }
+    DECODE_FINISH(bl);
+  }
+
+  void dump(ceph::Formatter *f) const {
+    f->dump_int("pool_id", pool_id);
+    f->dump_string("pool_namespace", pool_namespace);
+    f->dump_string("image_id", image_id);
+    f->dump_unsigned("snap_id", snap_id);
+    if (head_overlap) {
+      f->dump_unsigned("head_overlap", *head_overlap);
+    }
+  }
+
+  static void generate_test_instances(std::list<cls_rbd_parent*>& o) {
+    o.push_back(new cls_rbd_parent{});
+    o.push_back(new cls_rbd_parent{{1, "", "image id", 234}, {}});
+    o.push_back(new cls_rbd_parent{{1, "", "image id", 234}, {123}});
+    o.push_back(new cls_rbd_parent{{1, "ns", "image id", 234}, {123}});
+  }
+};
+WRITE_CLASS_ENCODER_FEATURES(cls_rbd_parent)
+
+struct cls_rbd_snap {
+  snapid_t id = CEPH_NOSNAP;
+  std::string name;
+  uint64_t image_size = 0;
+  uint8_t protection_status = RBD_PROTECTION_STATUS_UNPROTECTED;
+  cls_rbd_parent parent;
+  uint64_t flags = 0;
+  utime_t timestamp;
+  cls::rbd::SnapshotNamespace snapshot_namespace = {
+    cls::rbd::UserSnapshotNamespace{}};
+  uint32_t child_count = 0;
+  std::optional<uint64_t> parent_overlap = std::nullopt;
+
+  cls_rbd_snap() {
+  }
+  cls_rbd_snap(snapid_t id, const std::string& name, uint64_t image_size,
+               uint8_t protection_status, const cls_rbd_parent& parent,
+               uint64_t flags, utime_t timestamp,
+               const cls::rbd::SnapshotNamespace& snapshot_namespace,
+               uint32_t child_count,
+               const std::optional<uint64_t>& parent_overlap)
+    : id(id), name(name), image_size(image_size),
+      protection_status(protection_status), parent(parent), flags(flags),
+      timestamp(timestamp), snapshot_namespace(snapshot_namespace),
+      child_count(child_count), parent_overlap(parent_overlap) {
+  }
+
+  bool migrate_parent_format(uint64_t features) const {
+    return (((features & CEPH_FEATURE_SERVER_NAUTILUS) != 0) &&
+            (parent.exists()));
+  }
+
+  void encode(ceph::buffer::list& bl, uint64_t features) const {
+    // NOTE: remove support for versions < 8 after Nautilus EOLed
+    uint8_t min_version = 1;
+    if ((features & CEPH_FEATURE_SERVER_NAUTILUS) != 0ULL) {
+      // break backwards compatibility when using nautilus or later OSDs
+      min_version = 8;
+    }
+
+    ENCODE_START(8, min_version, bl);
+    encode(id, bl);
+    encode(name, bl);
+    encode(image_size, bl);
+    if (min_version < 8) {
+      uint64_t image_features = 0;
+      encode(image_features, bl); // unused -- preserve ABI
+      encode(parent, bl, features);
+    }
+    encode(protection_status, bl);
+    encode(flags, bl);
+    encode(snapshot_namespace, bl);
+    encode(timestamp, bl);
+    encode(child_count, bl);
+    encode(parent_overlap, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& p) {
+    DECODE_START(8, p);
+    decode(id, p);
+    decode(name, p);
+    decode(image_size, p);
+    if (struct_compat < 8) {
+      uint64_t features;
+      decode(features, p); // unused -- preserve ABI
+    }
+    if (struct_v >= 2 && struct_compat < 8) {
+      decode(parent, p);
+    }
+    if (struct_v >= 3) {
+      decode(protection_status, p);
+    }
+    if (struct_v >=
4) { + decode(flags, p); + } + if (struct_v >= 5) { + decode(snapshot_namespace, p); + } + if (struct_v >= 6) { + decode(timestamp, p); + } + if (struct_v >= 7) { + decode(child_count, p); + } + if (struct_v >= 8) { + decode(parent_overlap, p); + } + DECODE_FINISH(p); + } + + void dump(ceph::Formatter *f) const { + f->dump_unsigned("id", id); + f->dump_string("name", name); + f->dump_unsigned("image_size", image_size); + if (parent.exists()) { + f->open_object_section("parent"); + parent.dump(f); + f->close_section(); + } + switch (protection_status) { + case RBD_PROTECTION_STATUS_UNPROTECTED: + f->dump_string("protection_status", "unprotected"); + break; + case RBD_PROTECTION_STATUS_UNPROTECTING: + f->dump_string("protection_status", "unprotecting"); + break; + case RBD_PROTECTION_STATUS_PROTECTED: + f->dump_string("protection_status", "protected"); + break; + default: + ceph_abort(); + } + f->open_object_section("namespace"); + snapshot_namespace.dump(f); + f->close_section(); + f->dump_stream("timestamp") << timestamp; + f->dump_unsigned("child_count", child_count); + if (parent_overlap) { + f->dump_unsigned("parent_overlap", *parent_overlap); + } + } + + static void generate_test_instances(std::list<cls_rbd_snap*>& o) { + o.push_back(new cls_rbd_snap{}); + o.push_back(new cls_rbd_snap{1, "snap", 123456, + RBD_PROTECTION_STATUS_PROTECTED, + {{1, "", "image", 123}, 234}, 31, {}, + cls::rbd::UserSnapshotNamespace{}, 543, {}}); + o.push_back(new cls_rbd_snap{1, "snap", 123456, + RBD_PROTECTION_STATUS_PROTECTED, + {{1, "", "image", 123}, 234}, 31, {}, + cls::rbd::UserSnapshotNamespace{}, 543, {0}}); + o.push_back(new cls_rbd_snap{1, "snap", 123456, + RBD_PROTECTION_STATUS_PROTECTED, + {{1, "ns", "image", 123}, 234}, 31, {}, + cls::rbd::UserSnapshotNamespace{}, 543, + {123}}); + } +}; +WRITE_CLASS_ENCODER_FEATURES(cls_rbd_snap) + +#endif // __CEPH_CLS_RBD_H diff --git a/src/cls/rbd/cls_rbd_client.cc b/src/cls/rbd/cls_rbd_client.cc new file mode 100644 index 000000000..2f1f37eaa --- /dev/null +++ b/src/cls/rbd/cls_rbd_client.cc @@ -0,0 +1,3010 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "cls/rbd/cls_rbd_client.h" +#include "cls/lock/cls_lock_client.h" +#include "include/buffer.h" +#include "include/encoding.h" +#include "include/rbd_types.h" +#include "include/rados/librados.hpp" +#include "include/neorados/RADOS.hpp" +#include "common/bit_vector.hpp" + +#include <errno.h> + +namespace librbd { +namespace cls_client { + +using std::map; +using std::set; +using std::string; + +using ceph::bufferlist; +using ceph::decode; +using ceph::encode; + +void create_image(librados::ObjectWriteOperation *op, uint64_t size, + uint8_t order, uint64_t features, + const std::string &object_prefix, int64_t data_pool_id) +{ + bufferlist bl; + encode(size, bl); + encode(order, bl); + encode(features, bl); + encode(object_prefix, bl); + encode(data_pool_id, bl); + + op->exec("rbd", "create", bl); +} + +int create_image(librados::IoCtx *ioctx, const std::string &oid, + uint64_t size, uint8_t order, uint64_t features, + const std::string &object_prefix, int64_t data_pool_id) +{ + librados::ObjectWriteOperation op; + create_image(&op, size, order, features, object_prefix, data_pool_id); + + return ioctx->operate(oid, &op); +} + +void get_features_start(librados::ObjectReadOperation *op, bool read_only) +{ + bufferlist bl; + encode(static_cast<uint64_t>(CEPH_NOSNAP), bl); + encode(read_only, bl); + op->exec("rbd", "get_features", bl); +} + +int 
get_features_finish(bufferlist::const_iterator *it, uint64_t *features, + uint64_t *incompatible_features) +{ + try { + decode(*features, *it); + decode(*incompatible_features, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + + return 0; +} + +int get_features(librados::IoCtx *ioctx, const std::string &oid, + bool read_only, uint64_t *features, + uint64_t *incompatible_features) +{ + librados::ObjectReadOperation op; + get_features_start(&op, read_only); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_features_finish(&it, features, incompatible_features); +} + +void set_features(librados::ObjectWriteOperation *op, uint64_t features, + uint64_t mask) +{ + bufferlist bl; + encode(features, bl); + encode(mask, bl); + + op->exec("rbd", "set_features", bl); +} + +int set_features(librados::IoCtx *ioctx, const std::string &oid, + uint64_t features, uint64_t mask) +{ + librados::ObjectWriteOperation op; + set_features(&op, features, mask); + + return ioctx->operate(oid, &op); +} + +void get_object_prefix_start(librados::ObjectReadOperation *op) +{ + bufferlist bl; + op->exec("rbd", "get_object_prefix", bl); +} + +int get_object_prefix_finish(bufferlist::const_iterator *it, + std::string *object_prefix) +{ + try { + decode(*object_prefix, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_object_prefix(librados::IoCtx *ioctx, const std::string &oid, + std::string *object_prefix) +{ + librados::ObjectReadOperation op; + get_object_prefix_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_object_prefix_finish(&it, object_prefix); +} + +void get_data_pool_start(librados::ObjectReadOperation *op) { + bufferlist bl; + op->exec("rbd", "get_data_pool", bl); +} + +int get_data_pool_finish(bufferlist::const_iterator *it, int64_t *data_pool_id) { + try { + decode(*data_pool_id, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_data_pool(librados::IoCtx *ioctx, const std::string &oid, + int64_t *data_pool_id) { + librados::ObjectReadOperation op; + get_data_pool_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_data_pool_finish(&it, data_pool_id); +} + +void get_size_start(librados::ObjectReadOperation *op, snapid_t snap_id) +{ + bufferlist bl; + encode(snap_id, bl); + op->exec("rbd", "get_size", bl); +} + +int get_size_finish(bufferlist::const_iterator *it, uint64_t *size, + uint8_t *order) +{ + try { + decode(*order, *it); + decode(*size, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_size(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, uint64_t *size, uint8_t *order) +{ + librados::ObjectReadOperation op; + get_size_start(&op, snap_id); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_size_finish(&it, size, order); +} + +int set_size(librados::IoCtx *ioctx, const std::string &oid, + uint64_t size) +{ + librados::ObjectWriteOperation op; + set_size(&op, size); + return ioctx->operate(oid, &op); +} + +void set_size(librados::ObjectWriteOperation *op, uint64_t size) +{ + bufferlist bl; + encode(size, bl); + op->exec("rbd", 
"set_size", bl); +} + +void get_flags_start(librados::ObjectReadOperation *op, snapid_t snap_id) { + bufferlist in_bl; + encode(static_cast<snapid_t>(snap_id), in_bl); + op->exec("rbd", "get_flags", in_bl); +} + +int get_flags_finish(bufferlist::const_iterator *it, uint64_t *flags) { + try { + decode(*flags, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_flags(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, uint64_t *flags) +{ + librados::ObjectReadOperation op; + get_flags_start(&op, snap_id); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_flags_finish(&it, flags); +} + +void set_flags(librados::ObjectWriteOperation *op, snapid_t snap_id, + uint64_t flags, uint64_t mask) +{ + bufferlist inbl; + encode(flags, inbl); + encode(mask, inbl); + encode(snap_id, inbl); + op->exec("rbd", "set_flags", inbl); +} + +void op_features_get_start(librados::ObjectReadOperation *op) +{ + bufferlist in_bl; + op->exec("rbd", "op_features_get", in_bl); +} + +int op_features_get_finish(bufferlist::const_iterator *it, uint64_t *op_features) +{ + try { + decode(*op_features, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int op_features_get(librados::IoCtx *ioctx, const std::string &oid, + uint64_t *op_features) +{ + librados::ObjectReadOperation op; + op_features_get_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return op_features_get_finish(&it, op_features); +} + +void op_features_set(librados::ObjectWriteOperation *op, + uint64_t op_features, uint64_t mask) +{ + bufferlist inbl; + encode(op_features, inbl); + encode(mask, inbl); + op->exec("rbd", "op_features_set", inbl); +} + +int op_features_set(librados::IoCtx *ioctx, const std::string &oid, + uint64_t op_features, uint64_t mask) +{ + librados::ObjectWriteOperation op; + op_features_set(&op, op_features, mask); + + return ioctx->operate(oid, &op); +} + +void get_parent_start(librados::ObjectReadOperation *op, snapid_t snap_id) +{ + bufferlist bl; + encode(snap_id, bl); + op->exec("rbd", "get_parent", bl); +} + +int get_parent_finish(bufferlist::const_iterator *it, + cls::rbd::ParentImageSpec *pspec, + uint64_t *parent_overlap) +{ + *pspec = {}; + try { + decode(pspec->pool_id, *it); + decode(pspec->image_id, *it); + decode(pspec->snap_id, *it); + decode(*parent_overlap, *it); + } catch (const ceph::buffer::error &) { + return -EBADMSG; + } + return 0; +} + +int get_parent(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, cls::rbd::ParentImageSpec *pspec, + uint64_t *parent_overlap) +{ + librados::ObjectReadOperation op; + get_parent_start(&op, snap_id); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_parent_finish(&it, pspec, parent_overlap); +} + +int set_parent(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::ParentImageSpec &pspec, uint64_t parent_overlap) +{ + librados::ObjectWriteOperation op; + set_parent(&op, pspec, parent_overlap); + return ioctx->operate(oid, &op); +} + +void set_parent(librados::ObjectWriteOperation *op, + const cls::rbd::ParentImageSpec &pspec, + uint64_t parent_overlap) { + assert(pspec.pool_namespace.empty()); + + bufferlist in_bl; + encode(pspec.pool_id, in_bl); + encode(pspec.image_id, 
in_bl); + encode(pspec.snap_id, in_bl); + encode(parent_overlap, in_bl); + + op->exec("rbd", "set_parent", in_bl); +} + +int remove_parent(librados::IoCtx *ioctx, const std::string &oid) +{ + librados::ObjectWriteOperation op; + remove_parent(&op); + return ioctx->operate(oid, &op); +} + +void remove_parent(librados::ObjectWriteOperation *op) +{ + bufferlist inbl; + op->exec("rbd", "remove_parent", inbl); +} + +void parent_get_start(librados::ObjectReadOperation* op) { + bufferlist in_bl; + op->exec("rbd", "parent_get", in_bl); +} + +int parent_get_finish(bufferlist::const_iterator* it, + cls::rbd::ParentImageSpec* parent_image_spec) { + try { + decode(*parent_image_spec, *it); + } catch (const ceph::buffer::error &) { + return -EBADMSG; + } + return 0; +} + +int parent_get(librados::IoCtx* ioctx, const std::string &oid, + cls::rbd::ParentImageSpec* parent_image_spec) { + librados::ObjectReadOperation op; + parent_get_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + r = parent_get_finish(&it, parent_image_spec); + if (r < 0) { + return r; + } + return 0; +} + +void parent_overlap_get_start(librados::ObjectReadOperation* op, + snapid_t snap_id) { + bufferlist in_bl; + encode(snap_id, in_bl); + op->exec("rbd", "parent_overlap_get", in_bl); +} + +int parent_overlap_get_finish(bufferlist::const_iterator* it, + std::optional<uint64_t>* parent_overlap) { + try { + decode(*parent_overlap, *it); + } catch (const ceph::buffer::error &) { + return -EBADMSG; + } + return 0; +} + +int parent_overlap_get(librados::IoCtx* ioctx, const std::string &oid, + snapid_t snap_id, + std::optional<uint64_t>* parent_overlap) { + librados::ObjectReadOperation op; + parent_overlap_get_start(&op, snap_id); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + r = parent_overlap_get_finish(&it, parent_overlap); + if (r < 0) { + return r; + } + return 0; +} + +void parent_attach(librados::ObjectWriteOperation* op, + const cls::rbd::ParentImageSpec& parent_image_spec, + uint64_t parent_overlap, bool reattach) { + bufferlist in_bl; + encode(parent_image_spec, in_bl); + encode(parent_overlap, in_bl); + encode(reattach, in_bl); + op->exec("rbd", "parent_attach", in_bl); +} + +int parent_attach(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::ParentImageSpec& parent_image_spec, + uint64_t parent_overlap, bool reattach) { + librados::ObjectWriteOperation op; + parent_attach(&op, parent_image_spec, parent_overlap, reattach); + return ioctx->operate(oid, &op); +} + +void parent_detach(librados::ObjectWriteOperation* op) { + bufferlist in_bl; + op->exec("rbd", "parent_detach", in_bl); +} + +int parent_detach(librados::IoCtx *ioctx, const std::string &oid) { + librados::ObjectWriteOperation op; + parent_detach(&op); + return ioctx->operate(oid, &op); +} + +int add_child(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::ParentImageSpec &pspec, + const std::string &c_imageid) +{ + librados::ObjectWriteOperation op; + add_child(&op, pspec, c_imageid); + return ioctx->operate(oid, &op); +} + +void add_child(librados::ObjectWriteOperation *op, + const cls::rbd::ParentImageSpec& pspec, + const std::string &c_imageid) +{ + assert(pspec.pool_namespace.empty()); + + bufferlist in; + encode(pspec.pool_id, in); + encode(pspec.image_id, in); + encode(pspec.snap_id, in); + encode(c_imageid, in); + + op->exec("rbd", "add_child", in); +} + 
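
The *_start()/*_finish() pairs above let a caller stage several cls calls on one ObjectReadOperation and decode the concatenated replies in staging order, for a single operate() round trip in total. A minimal sketch of that pattern using the parent helpers; the ioctx handle and the "rbd_header.1234" object name are assumptions for illustration:

  // Sketch: fetch the parent spec and the HEAD overlap in one round trip.
  int read_parent_info(librados::IoCtx& ioctx) {
    librados::ObjectReadOperation op;
    librbd::cls_client::parent_get_start(&op);
    librbd::cls_client::parent_overlap_get_start(&op, CEPH_NOSNAP);

    ceph::bufferlist out_bl;
    int r = ioctx.operate("rbd_header.1234", &op, &out_bl);
    if (r < 0) {
      return r;
    }

    // replies are appended to out_bl in the order the calls were staged
    cls::rbd::ParentImageSpec parent_spec;
    std::optional<uint64_t> head_overlap;
    auto it = out_bl.cbegin();
    r = librbd::cls_client::parent_get_finish(&it, &parent_spec);
    if (r == 0) {
      r = librbd::cls_client::parent_overlap_get_finish(&it, &head_overlap);
    }
    return r;
  }

This is the same composition that the synchronous parent_get() and parent_overlap_get() wrappers in this file perform one call at a time.
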
+void remove_child(librados::ObjectWriteOperation *op, + const cls::rbd::ParentImageSpec &pspec, + const std::string &c_imageid) +{ + assert(pspec.pool_namespace.empty()); + + bufferlist in; + encode(pspec.pool_id, in); + encode(pspec.image_id, in); + encode(pspec.snap_id, in); + encode(c_imageid, in); + op->exec("rbd", "remove_child", in); +} + +int remove_child(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::ParentImageSpec &pspec, + const std::string &c_imageid) +{ + librados::ObjectWriteOperation op; + remove_child(&op, pspec, c_imageid); + return ioctx->operate(oid, &op); +} + +void get_children_start(librados::ObjectReadOperation *op, + const cls::rbd::ParentImageSpec &pspec) { + bufferlist in_bl; + encode(pspec.pool_id, in_bl); + encode(pspec.image_id, in_bl); + encode(pspec.snap_id, in_bl); + op->exec("rbd", "get_children", in_bl); +} + +int get_children_finish(bufferlist::const_iterator *it, + std::set<std::string>* children) { + try { + decode(*children, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_children(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::ParentImageSpec &pspec, set<string>& children) +{ + librados::ObjectReadOperation op; + get_children_start(&op, pspec); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_children_finish(&it, &children); +} + +void snapshot_get_start(librados::ObjectReadOperation *op, snapid_t snap_id) +{ + bufferlist bl; + encode(snap_id, bl); + op->exec("rbd", "snapshot_get", bl); +} + +int snapshot_get_finish(bufferlist::const_iterator* it, + cls::rbd::SnapshotInfo* snap_info) +{ + try { + decode(*snap_info, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int snapshot_get(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, cls::rbd::SnapshotInfo* snap_info) +{ + librados::ObjectReadOperation op; + snapshot_get_start(&op, snap_id); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return snapshot_get_finish(&it, snap_info); +} + +void snapshot_add(librados::ObjectWriteOperation *op, snapid_t snap_id, + const std::string &snap_name, + const cls::rbd::SnapshotNamespace &snap_namespace) +{ + bufferlist bl; + encode(snap_name, bl); + encode(snap_id, bl); + encode(snap_namespace, bl); + op->exec("rbd", "snapshot_add", bl); +} + +void snapshot_remove(librados::ObjectWriteOperation *op, snapid_t snap_id) +{ + bufferlist bl; + encode(snap_id, bl); + op->exec("rbd", "snapshot_remove", bl); +} + +void snapshot_rename(librados::ObjectWriteOperation *op, + snapid_t src_snap_id, + const std::string &dst_name) +{ + bufferlist bl; + encode(src_snap_id, bl); + encode(dst_name, bl); + op->exec("rbd", "snapshot_rename", bl); +} + +void snapshot_trash_add(librados::ObjectWriteOperation *op, + snapid_t snap_id) +{ + bufferlist bl; + encode(snap_id, bl); + op->exec("rbd", "snapshot_trash_add", bl); +} + +void get_snapcontext_start(librados::ObjectReadOperation *op) +{ + bufferlist bl; + op->exec("rbd", "get_snapcontext", bl); +} + +int get_snapcontext_finish(bufferlist::const_iterator *it, + ::SnapContext *snapc) +{ + try { + decode(*snapc, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + if (!snapc->is_valid()) { + return -EBADMSG; + } + return 0; +} + +int get_snapcontext(librados::IoCtx *ioctx, const std::string 
&oid, + ::SnapContext *snapc) +{ + librados::ObjectReadOperation op; + get_snapcontext_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto bl_it = out_bl.cbegin(); + return get_snapcontext_finish(&bl_it, snapc); +} + +void get_snapshot_name_start(librados::ObjectReadOperation *op, + snapid_t snap_id) +{ + bufferlist bl; + encode(snap_id, bl); + op->exec("rbd", "get_snapshot_name", bl); +} + +int get_snapshot_name_finish(bufferlist::const_iterator *it, + std::string *name) +{ + try { + decode(*name, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_snapshot_name(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, std::string *name) +{ + librados::ObjectReadOperation op; + get_snapshot_name_start(&op, snap_id); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_snapshot_name_finish(&it, name); +} + +void get_snapshot_timestamp_start(librados::ObjectReadOperation *op, + snapid_t snap_id) +{ + bufferlist bl; + encode(snap_id, bl); + op->exec("rbd", "get_snapshot_timestamp", bl); +} + +int get_snapshot_timestamp_finish(bufferlist::const_iterator *it, + utime_t *timestamp) +{ + try { + decode(*timestamp, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_snapshot_timestamp(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, utime_t *timestamp) +{ + librados::ObjectReadOperation op; + get_snapshot_timestamp_start(&op, snap_id); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_snapshot_timestamp_finish(&it, timestamp); +} + +void old_snapshot_add(librados::ObjectWriteOperation *op, + snapid_t snap_id, const std::string &snap_name) +{ + bufferlist bl; + encode(snap_name, bl); + encode(snap_id, bl); + op->exec("rbd", "snap_add", bl); +} + +void old_snapshot_remove(librados::ObjectWriteOperation *op, + const std::string &snap_name) +{ + bufferlist bl; + encode(snap_name, bl); + op->exec("rbd", "snap_remove", bl); +} + +void old_snapshot_rename(librados::ObjectWriteOperation *op, + snapid_t src_snap_id, const std::string &dst_name) +{ + bufferlist bl; + encode(src_snap_id, bl); + encode(dst_name, bl); + op->exec("rbd", "snap_rename", bl); +} + +void old_snapshot_list_start(librados::ObjectReadOperation *op) { + bufferlist in_bl; + op->exec("rbd", "snap_list", in_bl); +} + +int old_snapshot_list_finish(bufferlist::const_iterator *it, + std::vector<string> *names, + std::vector<uint64_t> *sizes, + ::SnapContext *snapc) { + try { + uint32_t num_snaps; + decode(snapc->seq, *it); + decode(num_snaps, *it); + + names->resize(num_snaps); + sizes->resize(num_snaps); + snapc->snaps.resize(num_snaps); + for (uint32_t i = 0; i < num_snaps; ++i) { + decode(snapc->snaps[i], *it); + decode((*sizes)[i], *it); + decode((*names)[i], *it); + } + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int old_snapshot_list(librados::IoCtx *ioctx, const std::string &oid, + std::vector<string> *names, + std::vector<uint64_t> *sizes, + ::SnapContext *snapc) +{ + librados::ObjectReadOperation op; + old_snapshot_list_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return old_snapshot_list_finish(&it, names, sizes, snapc); +} + 
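+// For reference, the legacy snap_list reply decoded by
+// old_snapshot_list_finish() above is laid out as: snap_seq (u64),
+// snap_count (u32), then snap_count records of {snap_id, image_size,
+// name} -- hence the up-front resize of names/sizes/snaps and the single
+// decode pass.
+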
+void get_all_features_start(librados::ObjectReadOperation *op) { + bufferlist in; + op->exec("rbd", "get_all_features", in); +} + +int get_all_features_finish(bufferlist::const_iterator *it, + uint64_t *all_features) { + try { + decode(*all_features, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_all_features(librados::IoCtx *ioctx, const std::string &oid, + uint64_t *all_features) { + librados::ObjectReadOperation op; + get_all_features_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_all_features_finish(&it, all_features); +} + +template <typename O> +void copyup(O* op, ceph::buffer::list data) { + op->exec("rbd", "copyup", data); +} + +void copyup(neorados::WriteOp* op, ceph::buffer::list data) { + copyup<neorados::WriteOp>(op, data); +} + +void copyup(librados::ObjectWriteOperation *op, bufferlist data) { + copyup<librados::ObjectWriteOperation>(op, data); +} + +int copyup(librados::IoCtx *ioctx, const std::string &oid, + bufferlist data) { + librados::ObjectWriteOperation op; + copyup(&op, data); + + return ioctx->operate(oid, &op); +} + +template <typename O, typename E> +void sparse_copyup(O* op, const E& extent_map, ceph::buffer::list data) { + bufferlist bl; + encode(extent_map, bl); + encode(data, bl); + op->exec("rbd", "sparse_copyup", bl); +} + +void sparse_copyup(neorados::WriteOp* op, + const std::vector<std::pair<uint64_t, uint64_t>>& extent_map, + ceph::buffer::list data) { + sparse_copyup<neorados::WriteOp>(op, extent_map, data); +} + +void sparse_copyup(librados::ObjectWriteOperation *op, + const std::map<uint64_t, uint64_t> &extent_map, + bufferlist data) { + sparse_copyup<librados::ObjectWriteOperation>(op, extent_map, data); +} + +int sparse_copyup(librados::IoCtx *ioctx, const std::string &oid, + const std::map<uint64_t, uint64_t> &extent_map, + bufferlist data) { + librados::ObjectWriteOperation op; + sparse_copyup(&op, extent_map, data); + + return ioctx->operate(oid, &op); +} + +void get_protection_status_start(librados::ObjectReadOperation *op, + snapid_t snap_id) +{ + bufferlist bl; + encode(snap_id, bl); + op->exec("rbd", "get_protection_status", bl); +} + +int get_protection_status_finish(bufferlist::const_iterator *it, + uint8_t *protection_status) +{ + try { + decode(*protection_status, *it); + } catch (const ceph::buffer::error &) { + return -EBADMSG; + } + return 0; +} + +int get_protection_status(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, uint8_t *protection_status) +{ + librados::ObjectReadOperation op; + get_protection_status_start(&op, snap_id); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_protection_status_finish(&it, protection_status); +} + +int set_protection_status(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, uint8_t protection_status) +{ + // TODO remove + librados::ObjectWriteOperation op; + set_protection_status(&op, snap_id, protection_status); + return ioctx->operate(oid, &op); +} + +void set_protection_status(librados::ObjectWriteOperation *op, + snapid_t snap_id, uint8_t protection_status) +{ + bufferlist in; + encode(snap_id, in); + encode(protection_status, in); + op->exec("rbd", "set_protection_status", in); +} + +void snapshot_get_limit_start(librados::ObjectReadOperation *op) +{ + bufferlist bl; + op->exec("rbd", "snapshot_get_limit", 
bl); +} + +int snapshot_get_limit_finish(bufferlist::const_iterator *it, uint64_t *limit) +{ + try { + decode(*limit, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int snapshot_get_limit(librados::IoCtx *ioctx, const std::string &oid, + uint64_t *limit) +{ + librados::ObjectReadOperation op; + snapshot_get_limit_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return snapshot_get_limit_finish(&it, limit); +} + +void snapshot_set_limit(librados::ObjectWriteOperation *op, uint64_t limit) +{ + bufferlist in; + encode(limit, in); + op->exec("rbd", "snapshot_set_limit", in); +} + +void get_stripe_unit_count_start(librados::ObjectReadOperation *op) { + bufferlist empty_bl; + op->exec("rbd", "get_stripe_unit_count", empty_bl); +} + +int get_stripe_unit_count_finish(bufferlist::const_iterator *it, + uint64_t *stripe_unit, + uint64_t *stripe_count) { + ceph_assert(stripe_unit); + ceph_assert(stripe_count); + + try { + decode(*stripe_unit, *it); + decode(*stripe_count, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_stripe_unit_count(librados::IoCtx *ioctx, const std::string &oid, + uint64_t *stripe_unit, uint64_t *stripe_count) +{ + librados::ObjectReadOperation op; + get_stripe_unit_count_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_stripe_unit_count_finish(&it, stripe_unit, stripe_count); +} + +void set_stripe_unit_count(librados::ObjectWriteOperation *op, + uint64_t stripe_unit, uint64_t stripe_count) +{ + bufferlist bl; + encode(stripe_unit, bl); + encode(stripe_count, bl); + + op->exec("rbd", "set_stripe_unit_count", bl); +} + +int set_stripe_unit_count(librados::IoCtx *ioctx, const std::string &oid, + uint64_t stripe_unit, uint64_t stripe_count) +{ + librados::ObjectWriteOperation op; + set_stripe_unit_count(&op, stripe_unit, stripe_count); + + return ioctx->operate(oid, &op); +} + +void get_create_timestamp_start(librados::ObjectReadOperation *op) { + bufferlist empty_bl; + op->exec("rbd", "get_create_timestamp", empty_bl); +} + +int get_create_timestamp_finish(bufferlist::const_iterator *it, + utime_t *timestamp) { + ceph_assert(timestamp); + + try { + decode(*timestamp, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_create_timestamp(librados::IoCtx *ioctx, const std::string &oid, + utime_t *timestamp) +{ + librados::ObjectReadOperation op; + get_create_timestamp_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_create_timestamp_finish(&it, timestamp); +} + +void get_access_timestamp_start(librados::ObjectReadOperation *op) { + bufferlist empty_bl; + op->exec("rbd", "get_access_timestamp", empty_bl); +} + +int get_access_timestamp_finish(bufferlist::const_iterator *it, + utime_t *timestamp) { + ceph_assert(timestamp); + + try { + decode(*timestamp, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_access_timestamp(librados::IoCtx *ioctx, const std::string &oid, + utime_t *timestamp) +{ + librados::ObjectReadOperation op; + get_access_timestamp_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + 
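+  // decode the utime_t access timestamp returned by the class method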
return get_access_timestamp_finish(&it, timestamp); +} + +void set_access_timestamp(librados::ObjectWriteOperation *op) +{ + bufferlist empty_bl; + op->exec("rbd","set_access_timestamp",empty_bl); +} + +int set_access_timestamp(librados::IoCtx *ioctx, const std::string &oid) +{ + librados::ObjectWriteOperation op; + set_access_timestamp(&op); + return ioctx->operate(oid, &op); +} + +void get_modify_timestamp_start(librados::ObjectReadOperation *op) { + bufferlist empty_bl; + op->exec("rbd", "get_modify_timestamp", empty_bl); +} + +int get_modify_timestamp_finish(bufferlist::const_iterator *it, + utime_t *timestamp) { + ceph_assert(timestamp); + + try { + decode(*timestamp, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_modify_timestamp(librados::IoCtx *ioctx, const std::string &oid, + utime_t *timestamp) +{ + librados::ObjectReadOperation op; + get_modify_timestamp_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_modify_timestamp_finish(&it, timestamp); +} + +void set_modify_timestamp(librados::ObjectWriteOperation *op) +{ + bufferlist empty_bl; + op->exec("rbd","set_modify_timestamp",empty_bl); +} + +int set_modify_timestamp(librados::IoCtx *ioctx, const std::string &oid) +{ + librados::ObjectWriteOperation op; + set_modify_timestamp(&op); + return ioctx->operate(oid, &op); +} + + +/************************ rbd_id object methods ************************/ + +void get_id_start(librados::ObjectReadOperation *op) { + bufferlist empty_bl; + op->exec("rbd", "get_id", empty_bl); +} + +int get_id_finish(bufferlist::const_iterator *it, std::string *id) { + try { + decode(*id, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int get_id(librados::IoCtx *ioctx, const std::string &oid, std::string *id) +{ + librados::ObjectReadOperation op; + get_id_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return get_id_finish(&it, id); +} + +void set_id(librados::ObjectWriteOperation *op, const std::string &id) +{ + bufferlist bl; + encode(id, bl); + op->exec("rbd", "set_id", bl); +} + +int set_id(librados::IoCtx *ioctx, const std::string &oid, const std::string &id) +{ + librados::ObjectWriteOperation op; + set_id(&op, id); + + return ioctx->operate(oid, &op); +} + +/******************** rbd_directory object methods ********************/ + +void dir_get_id_start(librados::ObjectReadOperation *op, + const std::string &image_name) { + bufferlist bl; + encode(image_name, bl); + + op->exec("rbd", "dir_get_id", bl); +} + +int dir_get_id_finish(bufferlist::const_iterator *iter, std::string *image_id) { + try { + decode(*image_id, *iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + + return 0; +} + +int dir_get_id(librados::IoCtx *ioctx, const std::string &oid, + const std::string &name, std::string *id) { + librados::ObjectReadOperation op; + dir_get_id_start(&op, name); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + return dir_get_id_finish(&iter, id); +} + +void dir_get_name_start(librados::ObjectReadOperation *op, + const std::string &id) { + bufferlist in_bl; + encode(id, in_bl); + op->exec("rbd", "dir_get_name", in_bl); +} + +int dir_get_name_finish(bufferlist::const_iterator *it, std::string *name) { + try 
{ + decode(*name, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int dir_get_name(librados::IoCtx *ioctx, const std::string &oid, + const std::string &id, std::string *name) { + librados::ObjectReadOperation op; + dir_get_name_start(&op, id); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return dir_get_name_finish(&it, name); +} + +void dir_list_start(librados::ObjectReadOperation *op, + const std::string &start, uint64_t max_return) +{ + bufferlist in_bl; + encode(start, in_bl); + encode(max_return, in_bl); + + op->exec("rbd", "dir_list", in_bl); +} + +int dir_list_finish(bufferlist::const_iterator *it, map<string, string> *images) +{ + try { + decode(*images, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int dir_list(librados::IoCtx *ioctx, const std::string &oid, + const std::string &start, uint64_t max_return, + map<string, string> *images) +{ + librados::ObjectReadOperation op; + dir_list_start(&op, start, max_return); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + return dir_list_finish(&iter, images); +} + +void dir_add_image(librados::ObjectWriteOperation *op, + const std::string &name, const std::string &id) +{ + bufferlist bl; + encode(name, bl); + encode(id, bl); + op->exec("rbd", "dir_add_image", bl); +} + +int dir_add_image(librados::IoCtx *ioctx, const std::string &oid, + const std::string &name, const std::string &id) +{ + librados::ObjectWriteOperation op; + dir_add_image(&op, name, id); + + return ioctx->operate(oid, &op); +} + +int dir_remove_image(librados::IoCtx *ioctx, const std::string &oid, + const std::string &name, const std::string &id) +{ + librados::ObjectWriteOperation op; + dir_remove_image(&op, name, id); + + return ioctx->operate(oid, &op); +} + +void dir_state_assert(librados::ObjectOperation *op, + cls::rbd::DirectoryState directory_state) +{ + bufferlist bl; + encode(directory_state, bl); + op->exec("rbd", "dir_state_assert", bl); +} + +int dir_state_assert(librados::IoCtx *ioctx, const std::string &oid, + cls::rbd::DirectoryState directory_state) +{ + librados::ObjectWriteOperation op; + dir_state_assert(&op, directory_state); + + return ioctx->operate(oid, &op); +} + +void dir_state_set(librados::ObjectWriteOperation *op, + cls::rbd::DirectoryState directory_state) +{ + bufferlist bl; + encode(directory_state, bl); + op->exec("rbd", "dir_state_set", bl); +} + +int dir_state_set(librados::IoCtx *ioctx, const std::string &oid, + cls::rbd::DirectoryState directory_state) +{ + librados::ObjectWriteOperation op; + dir_state_set(&op, directory_state); + + return ioctx->operate(oid, &op); +} + +void dir_remove_image(librados::ObjectWriteOperation *op, + const std::string &name, const std::string &id) +{ + bufferlist bl; + encode(name, bl); + encode(id, bl); + + op->exec("rbd", "dir_remove_image", bl); +} + +void dir_rename_image(librados::ObjectWriteOperation *op, + const std::string &src, const std::string &dest, + const std::string &id) +{ + bufferlist in; + encode(src, in); + encode(dest, in); + encode(id, in); + op->exec("rbd", "dir_rename_image", in); +} + +void object_map_load_start(librados::ObjectReadOperation *op) { + bufferlist in_bl; + op->exec("rbd", "object_map_load", in_bl); +} + +int object_map_load_finish(bufferlist::const_iterator *it, + ceph::BitVector<2> *object_map) { + try { + 
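+    // the object map is a compact 2-bit-per-object state vector (ceph::BitVector<2>)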
decode(*object_map, *it);
+  } catch (const ceph::buffer::error &err) {
+    return -EBADMSG;
+  }
+  return 0;
+}
+
+int object_map_load(librados::IoCtx *ioctx, const std::string &oid,
+                    ceph::BitVector<2> *object_map)
+{
+  librados::ObjectReadOperation op;
+  object_map_load_start(&op);
+
+  bufferlist out_bl;
+  int r = ioctx->operate(oid, &op, &out_bl);
+  if (r < 0) {
+    return r;
+  }
+
+  auto it = out_bl.cbegin();
+  return object_map_load_finish(&it, object_map);
+}
+
+void object_map_save(librados::ObjectWriteOperation *rados_op,
+                     const ceph::BitVector<2> &object_map)
+{
+  ceph::BitVector<2> object_map_copy(object_map);
+  object_map_copy.set_crc_enabled(false);
+
+  bufferlist in;
+  encode(object_map_copy, in);
+  rados_op->exec("rbd", "object_map_save", in);
+}
+
+void object_map_resize(librados::ObjectWriteOperation *rados_op,
+                       uint64_t object_count, uint8_t default_state)
+{
+  bufferlist in;
+  encode(object_count, in);
+  encode(default_state, in);
+  rados_op->exec("rbd", "object_map_resize", in);
+}
+
+void object_map_update(librados::ObjectWriteOperation *rados_op,
+                       uint64_t start_object_no, uint64_t end_object_no,
+                       uint8_t new_object_state,
+                       const boost::optional<uint8_t> &current_object_state)
+{
+  bufferlist in;
+  encode(start_object_no, in);
+  encode(end_object_no, in);
+  encode(new_object_state, in);
+  encode(current_object_state, in);
+  rados_op->exec("rbd", "object_map_update", in);
+}
+
+void object_map_snap_add(librados::ObjectWriteOperation *rados_op)
+{
+  bufferlist in;
+  rados_op->exec("rbd", "object_map_snap_add", in);
+}
+
+void object_map_snap_remove(librados::ObjectWriteOperation *rados_op,
+                            const ceph::BitVector<2> &object_map)
+{
+  ceph::BitVector<2> object_map_copy(object_map);
+  object_map_copy.set_crc_enabled(false);
+
+  bufferlist in;
+  encode(object_map_copy, in);
+  rados_op->exec("rbd", "object_map_snap_remove", in);
+}
+
+void metadata_set(librados::ObjectWriteOperation *op,
+                  const map<string, bufferlist> &data)
+{
+  bufferlist bl;
+  encode(data, bl);
+
+  op->exec("rbd", "metadata_set", bl);
+}
+
+int metadata_set(librados::IoCtx *ioctx, const std::string &oid,
+                 const map<string, bufferlist> &data)
+{
+  librados::ObjectWriteOperation op;
+  metadata_set(&op, data);
+
+  return ioctx->operate(oid, &op);
+}
+
+void metadata_remove(librados::ObjectWriteOperation *op,
+                     const std::string &key)
+{
+  bufferlist bl;
+  encode(key, bl);
+
+  op->exec("rbd", "metadata_remove", bl);
+}
+
+int metadata_remove(librados::IoCtx *ioctx, const std::string &oid,
+                    const std::string &key)
+{
+  librados::ObjectWriteOperation op;
+  metadata_remove(&op, key);
+
+  return ioctx->operate(oid, &op);
+}
+
+int metadata_list(librados::IoCtx *ioctx, const std::string &oid,
+                  const std::string &start, uint64_t max_return,
+                  map<string, bufferlist> *pairs)
+{
+  librados::ObjectReadOperation op;
+  metadata_list_start(&op, start, max_return);
+
+  bufferlist out_bl;
+  int r = ioctx->operate(oid, &op, &out_bl);
+  if (r < 0) {
+    return r;
+  }
+
+  auto it = out_bl.cbegin();
+  return metadata_list_finish(&it, pairs);
+}
+
+void metadata_list_start(librados::ObjectReadOperation *op,
+                         const std::string &start, uint64_t max_return)
+{
+  bufferlist in_bl;
+  encode(start, in_bl);
+  encode(max_return, in_bl);
+  op->exec("rbd", "metadata_list", in_bl);
+}
+
+int metadata_list_finish(bufferlist::const_iterator *it,
+                         std::map<std::string, bufferlist> *pairs)
+{
+  ceph_assert(pairs);
+  try {
+    decode(*pairs, *it);
+  } catch (const ceph::buffer::error &err) {
+    return -EBADMSG;
+  }
+  return 0;
+}
+
+void
metadata_get_start(librados::ObjectReadOperation* op, + const std::string &key) { + bufferlist bl; + encode(key, bl); + + op->exec("rbd", "metadata_get", bl); +} + +int metadata_get_finish(bufferlist::const_iterator *it, + std::string* value) { + try { + decode(*value, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int metadata_get(librados::IoCtx *ioctx, const std::string &oid, + const std::string &key, string *s) +{ + ceph_assert(s); + librados::ObjectReadOperation op; + metadata_get_start(&op, key); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + r = metadata_get_finish(&it, s); + if (r < 0) { + return r; + } + return 0; +} + +void child_attach(librados::ObjectWriteOperation *op, snapid_t snap_id, + const cls::rbd::ChildImageSpec& child_image) +{ + bufferlist bl; + encode(snap_id, bl); + encode(child_image, bl); + op->exec("rbd", "child_attach", bl); +} + +int child_attach(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, + const cls::rbd::ChildImageSpec& child_image) +{ + librados::ObjectWriteOperation op; + child_attach(&op, snap_id, child_image); + + int r = ioctx->operate(oid, &op); + if (r < 0) { + return r; + } + return 0; +} + +void child_detach(librados::ObjectWriteOperation *op, snapid_t snap_id, + const cls::rbd::ChildImageSpec& child_image) +{ + bufferlist bl; + encode(snap_id, bl); + encode(child_image, bl); + op->exec("rbd", "child_detach", bl); +} + +int child_detach(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, + const cls::rbd::ChildImageSpec& child_image) +{ + librados::ObjectWriteOperation op; + child_detach(&op, snap_id, child_image); + + int r = ioctx->operate(oid, &op); + if (r < 0) { + return r; + } + return 0; +} + +void children_list_start(librados::ObjectReadOperation *op, + snapid_t snap_id) +{ + bufferlist bl; + encode(snap_id, bl); + op->exec("rbd", "children_list", bl); +} + +int children_list_finish(bufferlist::const_iterator *it, + cls::rbd::ChildImageSpecs *child_images) +{ + child_images->clear(); + try { + decode(*child_images, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int children_list(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, + cls::rbd::ChildImageSpecs *child_images) +{ + librados::ObjectReadOperation op; + children_list_start(&op, snap_id); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + r = children_list_finish(&it, child_images); + if (r < 0) { + return r; + } + return 0; +} + +int migration_set(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::MigrationSpec &migration_spec) { + librados::ObjectWriteOperation op; + migration_set(&op, migration_spec); + return ioctx->operate(oid, &op); +} + +void migration_set(librados::ObjectWriteOperation *op, + const cls::rbd::MigrationSpec &migration_spec) { + bufferlist bl; + encode(migration_spec, bl); + op->exec("rbd", "migration_set", bl); +} + +int migration_set_state(librados::IoCtx *ioctx, const std::string &oid, + cls::rbd::MigrationState state, + const std::string &description) { + librados::ObjectWriteOperation op; + migration_set_state(&op, state, description); + return ioctx->operate(oid, &op); +} + +void migration_set_state(librados::ObjectWriteOperation *op, + cls::rbd::MigrationState state, + const std::string &description) { + bufferlist bl; + encode(state, 
bl); + encode(description, bl); + op->exec("rbd", "migration_set_state", bl); +} + +void migration_get_start(librados::ObjectReadOperation *op) { + bufferlist bl; + op->exec("rbd", "migration_get", bl); +} + +int migration_get_finish(bufferlist::const_iterator *it, + cls::rbd::MigrationSpec *migration_spec) { + try { + decode(*migration_spec, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int migration_get(librados::IoCtx *ioctx, const std::string &oid, + cls::rbd::MigrationSpec *migration_spec) { + librados::ObjectReadOperation op; + migration_get_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + r = migration_get_finish(&iter, migration_spec); + if (r < 0) { + return r; + } + return 0; +} + +int migration_remove(librados::IoCtx *ioctx, const std::string &oid) { + librados::ObjectWriteOperation op; + migration_remove(&op); + return ioctx->operate(oid, &op); +} + +void migration_remove(librados::ObjectWriteOperation *op) { + bufferlist bl; + op->exec("rbd", "migration_remove", bl); +} + +template <typename O> +void assert_snapc_seq(O* op, uint64_t snapc_seq, + cls::rbd::AssertSnapcSeqState state) { + bufferlist bl; + encode(snapc_seq, bl); + encode(state, bl); + op->exec("rbd", "assert_snapc_seq", bl); +} + +void assert_snapc_seq(neorados::WriteOp* op, + uint64_t snapc_seq, + cls::rbd::AssertSnapcSeqState state) { + assert_snapc_seq<neorados::WriteOp>(op, snapc_seq, state); +} + +void assert_snapc_seq(librados::ObjectWriteOperation *op, + uint64_t snapc_seq, + cls::rbd::AssertSnapcSeqState state) { + assert_snapc_seq<librados::ObjectWriteOperation>(op, snapc_seq, state); +} + +int assert_snapc_seq(librados::IoCtx *ioctx, const std::string &oid, + uint64_t snapc_seq, + cls::rbd::AssertSnapcSeqState state) { + librados::ObjectWriteOperation op; + assert_snapc_seq(&op, snapc_seq, state); + return ioctx->operate(oid, &op); +} + +void mirror_uuid_get_start(librados::ObjectReadOperation *op) { + bufferlist bl; + op->exec("rbd", "mirror_uuid_get", bl); +} + +int mirror_uuid_get_finish(bufferlist::const_iterator *it, + std::string *uuid) { + try { + decode(*uuid, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int mirror_uuid_get(librados::IoCtx *ioctx, std::string *uuid) { + librados::ObjectReadOperation op; + mirror_uuid_get_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + r = mirror_uuid_get_finish(&it, uuid); + if (r < 0) { + return r; + } + return 0; +} + +int mirror_uuid_set(librados::IoCtx *ioctx, const std::string &uuid) { + bufferlist in_bl; + encode(uuid, in_bl); + + bufferlist out_bl; + int r = ioctx->exec(RBD_MIRRORING, "rbd", "mirror_uuid_set", in_bl, + out_bl); + if (r < 0) { + return r; + } + return 0; +} + +void mirror_mode_get_start(librados::ObjectReadOperation *op) { + bufferlist bl; + op->exec("rbd", "mirror_mode_get", bl); +} + +int mirror_mode_get_finish(bufferlist::const_iterator *it, + cls::rbd::MirrorMode *mirror_mode) { + try { + uint32_t mirror_mode_decode; + decode(mirror_mode_decode, *it); + *mirror_mode = static_cast<cls::rbd::MirrorMode>(mirror_mode_decode); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + + return 0; +} + +int mirror_mode_get(librados::IoCtx *ioctx, + cls::rbd::MirrorMode *mirror_mode) { + librados::ObjectReadOperation op; + 
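+  // pool-level mirroring state lives on the per-pool RBD_MIRRORING object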
mirror_mode_get_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl); + if (r == -ENOENT) { + *mirror_mode = cls::rbd::MIRROR_MODE_DISABLED; + return 0; + } else if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + r = mirror_mode_get_finish(&it, mirror_mode); + if (r < 0) { + return r; + } + return 0; +} + +int mirror_mode_set(librados::IoCtx *ioctx, + cls::rbd::MirrorMode mirror_mode) { + bufferlist in_bl; + encode(static_cast<uint32_t>(mirror_mode), in_bl); + + bufferlist out_bl; + int r = ioctx->exec(RBD_MIRRORING, "rbd", "mirror_mode_set", in_bl, + out_bl); + if (r < 0) { + return r; + } + return 0; +} + +void mirror_peer_list_start(librados::ObjectReadOperation *op) { + bufferlist bl; + op->exec("rbd", "mirror_peer_list", bl); +} + +int mirror_peer_list_finish(bufferlist::const_iterator *it, + std::vector<cls::rbd::MirrorPeer> *peers) { + peers->clear(); + try { + decode(*peers, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int mirror_peer_list(librados::IoCtx *ioctx, + std::vector<cls::rbd::MirrorPeer> *peers) { + librados::ObjectReadOperation op; + mirror_peer_list_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + r = mirror_peer_list_finish(&it, peers); + if (r < 0) { + return r; + } + return 0; +} + +int mirror_peer_ping(librados::IoCtx *ioctx, + const std::string& site_name, + const std::string& fsid) { + librados::ObjectWriteOperation op; + mirror_peer_ping(&op, site_name, fsid); + + int r = ioctx->operate(RBD_MIRRORING, &op); + if (r < 0) { + return r; + } + + return 0; +} + +void mirror_peer_ping(librados::ObjectWriteOperation *op, + const std::string& site_name, + const std::string& fsid) { + bufferlist in_bl; + encode(site_name, in_bl); + encode(fsid, in_bl); + encode(static_cast<uint8_t>(cls::rbd::MIRROR_PEER_DIRECTION_TX), in_bl); + + op->exec("rbd", "mirror_peer_ping", in_bl); +} + +int mirror_peer_add(librados::IoCtx *ioctx, + const cls::rbd::MirrorPeer& mirror_peer) { + librados::ObjectWriteOperation op; + mirror_peer_add(&op, mirror_peer); + + int r = ioctx->operate(RBD_MIRRORING, &op); + if (r < 0) { + return r; + } + + return 0; +} + +void mirror_peer_add(librados::ObjectWriteOperation *op, + const cls::rbd::MirrorPeer& mirror_peer) { + bufferlist in_bl; + encode(mirror_peer, in_bl); + + op->exec("rbd", "mirror_peer_add", in_bl); +} + +int mirror_peer_remove(librados::IoCtx *ioctx, + const std::string &uuid) { + bufferlist in_bl; + encode(uuid, in_bl); + + bufferlist out_bl; + int r = ioctx->exec(RBD_MIRRORING, "rbd", "mirror_peer_remove", in_bl, + out_bl); + if (r < 0) { + return r; + } + return 0; +} + +int mirror_peer_set_client(librados::IoCtx *ioctx, + const std::string &uuid, + const std::string &client_name) { + bufferlist in_bl; + encode(uuid, in_bl); + encode(client_name, in_bl); + + bufferlist out_bl; + int r = ioctx->exec(RBD_MIRRORING, "rbd", "mirror_peer_set_client", + in_bl, out_bl); + if (r < 0) { + return r; + } + return 0; +} + +int mirror_peer_set_cluster(librados::IoCtx *ioctx, + const std::string &uuid, + const std::string &cluster_name) { + bufferlist in_bl; + encode(uuid, in_bl); + encode(cluster_name, in_bl); + + bufferlist out_bl; + int r = ioctx->exec(RBD_MIRRORING, "rbd", "mirror_peer_set_cluster", + in_bl, out_bl); + if (r < 0) { + return r; + } + return 0; +} + +int mirror_peer_set_direction( + librados::IoCtx *ioctx, const std::string &uuid, + 
cls::rbd::MirrorPeerDirection mirror_peer_direction) { + bufferlist in_bl; + encode(uuid, in_bl); + encode(static_cast<uint8_t>(mirror_peer_direction), in_bl); + + bufferlist out_bl; + int r = ioctx->exec(RBD_MIRRORING, "rbd", "mirror_peer_set_direction", + in_bl, out_bl); + if (r < 0) { + return r; + } + return 0; +} + +void mirror_image_list_start(librados::ObjectReadOperation *op, + const std::string &start, uint64_t max_return) +{ + bufferlist in_bl; + encode(start, in_bl); + encode(max_return, in_bl); + op->exec("rbd", "mirror_image_list", in_bl); +} + +int mirror_image_list_finish(bufferlist::const_iterator *it, + std::map<string, string> *mirror_image_ids) +{ + try { + decode(*mirror_image_ids, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int mirror_image_list(librados::IoCtx *ioctx, + const std::string &start, uint64_t max_return, + std::map<std::string, std::string> *mirror_image_ids) { + librados::ObjectReadOperation op; + mirror_image_list_start(&op, start, max_return); + + bufferlist out_bl; + int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl); + if (r < 0) { + return r; + } + + auto bl_it = out_bl.cbegin(); + return mirror_image_list_finish(&bl_it, mirror_image_ids); +} + +void mirror_image_get_image_id_start(librados::ObjectReadOperation *op, + const std::string &global_image_id) { + bufferlist in_bl; + encode(global_image_id, in_bl); + op->exec( "rbd", "mirror_image_get_image_id", in_bl); +} + +int mirror_image_get_image_id_finish(bufferlist::const_iterator *it, + std::string *image_id) { + try { + decode(*image_id, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int mirror_image_get_image_id(librados::IoCtx *ioctx, + const std::string &global_image_id, + std::string *image_id) { + librados::ObjectReadOperation op; + mirror_image_get_image_id_start(&op, global_image_id); + + bufferlist out_bl; + int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return mirror_image_get_image_id_finish(&it, image_id); +} + +int mirror_image_get(librados::IoCtx *ioctx, const std::string &image_id, + cls::rbd::MirrorImage *mirror_image) { + librados::ObjectReadOperation op; + mirror_image_get_start(&op, image_id); + + bufferlist out_bl; + int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + r = mirror_image_get_finish(&iter, mirror_image); + if (r < 0) { + return r; + } + return 0; +} + +void mirror_image_get_start(librados::ObjectReadOperation *op, + const std::string &image_id) { + bufferlist in_bl; + encode(image_id, in_bl); + + op->exec("rbd", "mirror_image_get", in_bl); +} + +int mirror_image_get_finish(bufferlist::const_iterator *iter, + cls::rbd::MirrorImage *mirror_image) { + try { + decode(*mirror_image, *iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +void mirror_image_set(librados::ObjectWriteOperation *op, + const std::string &image_id, + const cls::rbd::MirrorImage &mirror_image) { + bufferlist bl; + encode(image_id, bl); + encode(mirror_image, bl); + + op->exec("rbd", "mirror_image_set", bl); +} + +int mirror_image_set(librados::IoCtx *ioctx, const std::string &image_id, + const cls::rbd::MirrorImage &mirror_image) { + librados::ObjectWriteOperation op; + mirror_image_set(&op, image_id, mirror_image); + + int r = ioctx->operate(RBD_MIRRORING, &op); + if (r < 0) { + return r; + } + return 0; +} + +void 
mirror_image_remove(librados::ObjectWriteOperation *op, + const std::string &image_id) { + bufferlist bl; + encode(image_id, bl); + + op->exec("rbd", "mirror_image_remove", bl); +} + +int mirror_image_remove(librados::IoCtx *ioctx, const std::string &image_id) { + librados::ObjectWriteOperation op; + mirror_image_remove(&op, image_id); + + int r = ioctx->operate(RBD_MIRRORING, &op); + if (r < 0) { + return r; + } + return 0; +} + +int mirror_image_status_set(librados::IoCtx *ioctx, + const std::string &global_image_id, + const cls::rbd::MirrorImageSiteStatus &status) { + librados::ObjectWriteOperation op; + mirror_image_status_set(&op, global_image_id, status); + return ioctx->operate(RBD_MIRRORING, &op); +} + +void mirror_image_status_set(librados::ObjectWriteOperation *op, + const std::string &global_image_id, + const cls::rbd::MirrorImageSiteStatus &status) { + bufferlist bl; + encode(global_image_id, bl); + encode(status, bl); + op->exec("rbd", "mirror_image_status_set", bl); +} + +int mirror_image_status_get(librados::IoCtx *ioctx, + const std::string &global_image_id, + cls::rbd::MirrorImageStatus *status) { + librados::ObjectReadOperation op; + mirror_image_status_get_start(&op, global_image_id); + + bufferlist out_bl; + int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + r = mirror_image_status_get_finish(&iter, status); + if (r < 0) { + return r; + } + return 0; +} + +void mirror_image_status_get_start(librados::ObjectReadOperation *op, + const std::string &global_image_id) { + bufferlist bl; + encode(global_image_id, bl); + op->exec("rbd", "mirror_image_status_get", bl); +} + +int mirror_image_status_get_finish(bufferlist::const_iterator *iter, + cls::rbd::MirrorImageStatus *status) { + try { + decode(*status, *iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int mirror_image_status_list(librados::IoCtx *ioctx, + const std::string &start, uint64_t max_return, + std::map<std::string, cls::rbd::MirrorImage> *images, + std::map<std::string, cls::rbd::MirrorImageStatus> *statuses) { + librados::ObjectReadOperation op; + mirror_image_status_list_start(&op, start, max_return); + + bufferlist out_bl; + int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + r = mirror_image_status_list_finish(&iter, images, statuses); + if (r < 0) { + return r; + } + return 0; +} + +void mirror_image_status_list_start(librados::ObjectReadOperation *op, + const std::string &start, + uint64_t max_return) { + bufferlist bl; + encode(start, bl); + encode(max_return, bl); + op->exec("rbd", "mirror_image_status_list", bl); +} + +int mirror_image_status_list_finish(bufferlist::const_iterator *iter, + std::map<std::string, cls::rbd::MirrorImage> *images, + std::map<std::string, cls::rbd::MirrorImageStatus> *statuses) { + images->clear(); + statuses->clear(); + try { + decode(*images, *iter); + decode(*statuses, *iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int mirror_image_status_get_summary( + librados::IoCtx *ioctx, + const std::vector<cls::rbd::MirrorPeer>& mirror_peer_sites, + std::map<cls::rbd::MirrorImageStatusState, int32_t> *states) { + librados::ObjectReadOperation op; + mirror_image_status_get_summary_start(&op, mirror_peer_sites); + + bufferlist out_bl; + int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + r = 
mirror_image_status_get_summary_finish(&iter, states); + if (r < 0) { + return r; + } + return 0; +} + +void mirror_image_status_get_summary_start( + librados::ObjectReadOperation *op, + const std::vector<cls::rbd::MirrorPeer>& mirror_peer_sites) { + bufferlist bl; + encode(mirror_peer_sites, bl); + op->exec("rbd", "mirror_image_status_get_summary", bl); +} + +int mirror_image_status_get_summary_finish( + bufferlist::const_iterator *iter, + std::map<cls::rbd::MirrorImageStatusState, int32_t> *states) { + try { + decode(*states, *iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int mirror_image_status_remove(librados::IoCtx *ioctx, + const std::string &global_image_id) { + librados::ObjectWriteOperation op; + mirror_image_status_remove(&op, global_image_id); + return ioctx->operate(RBD_MIRRORING, &op); +} + +void mirror_image_status_remove(librados::ObjectWriteOperation *op, + const std::string &global_image_id) { + bufferlist bl; + encode(global_image_id, bl); + op->exec("rbd", "mirror_image_status_remove", bl); +} + +int mirror_image_status_remove_down(librados::IoCtx *ioctx) { + librados::ObjectWriteOperation op; + mirror_image_status_remove_down(&op); + return ioctx->operate(RBD_MIRRORING, &op); +} + +void mirror_image_status_remove_down(librados::ObjectWriteOperation *op) { + bufferlist bl; + op->exec("rbd", "mirror_image_status_remove_down", bl); +} + +int mirror_image_instance_get(librados::IoCtx *ioctx, + const std::string &global_image_id, + entity_inst_t *instance) { + librados::ObjectReadOperation op; + mirror_image_instance_get_start(&op, global_image_id); + + bufferlist out_bl; + int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + r = mirror_image_instance_get_finish(&iter, instance); + if (r < 0) { + return r; + } + return 0; +} + +void mirror_image_instance_get_start(librados::ObjectReadOperation *op, + const std::string &global_image_id) { + bufferlist bl; + encode(global_image_id, bl); + op->exec("rbd", "mirror_image_instance_get", bl); +} + +int mirror_image_instance_get_finish(bufferlist::const_iterator *iter, + entity_inst_t *instance) { + try { + decode(*instance, *iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int mirror_image_instance_list( + librados::IoCtx *ioctx, const std::string &start, uint64_t max_return, + std::map<std::string, entity_inst_t> *instances) { + librados::ObjectReadOperation op; + mirror_image_instance_list_start(&op, start, max_return); + + bufferlist out_bl; + int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + r = mirror_image_instance_list_finish(&iter, instances); + if (r < 0) { + return r; + } + return 0; +} + +void mirror_image_instance_list_start(librados::ObjectReadOperation *op, + const std::string &start, + uint64_t max_return) { + bufferlist bl; + encode(start, bl); + encode(max_return, bl); + op->exec("rbd", "mirror_image_instance_list", bl); +} + +int mirror_image_instance_list_finish( + bufferlist::const_iterator *iter, + std::map<std::string, entity_inst_t> *instances) { + instances->clear(); + try { + decode(*instances, *iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +void mirror_instances_list_start(librados::ObjectReadOperation *op) { + bufferlist bl; + op->exec("rbd", "mirror_instances_list", bl); +} + +int mirror_instances_list_finish(bufferlist::const_iterator 
*iter, + std::vector<std::string> *instance_ids) { + instance_ids->clear(); + try { + decode(*instance_ids, *iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int mirror_instances_list(librados::IoCtx *ioctx, + std::vector<std::string> *instance_ids) { + librados::ObjectReadOperation op; + mirror_instances_list_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(RBD_MIRROR_LEADER, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + r = mirror_instances_list_finish(&iter, instance_ids); + if (r < 0) { + return r; + } + return 0; +} + +void mirror_instances_add(librados::ObjectWriteOperation *op, + const std::string &instance_id) { + bufferlist bl; + encode(instance_id, bl); + op->exec("rbd", "mirror_instances_add", bl); +} + +int mirror_instances_add(librados::IoCtx *ioctx, + const std::string &instance_id) { + librados::ObjectWriteOperation op; + mirror_instances_add(&op, instance_id); + return ioctx->operate(RBD_MIRROR_LEADER, &op); +} + +void mirror_instances_remove(librados::ObjectWriteOperation *op, + const std::string &instance_id) { + bufferlist bl; + encode(instance_id, bl); + op->exec("rbd", "mirror_instances_remove", bl); +} + +int mirror_instances_remove(librados::IoCtx *ioctx, + const std::string &instance_id) { + librados::ObjectWriteOperation op; + mirror_instances_remove(&op, instance_id); + return ioctx->operate(RBD_MIRROR_LEADER, &op); +} + +void mirror_image_map_list_start(librados::ObjectReadOperation *op, + const std::string &start_after, + uint64_t max_read) { + bufferlist bl; + encode(start_after, bl); + encode(max_read, bl); + + op->exec("rbd", "mirror_image_map_list", bl); +} + +int mirror_image_map_list_finish(bufferlist::const_iterator *iter, + std::map<std::string, cls::rbd::MirrorImageMap> *image_mapping) { + try { + decode(*image_mapping, *iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int mirror_image_map_list( + librados::IoCtx *ioctx, const std::string &start_after, + uint64_t max_read, + std::map<std::string, cls::rbd::MirrorImageMap> *image_mapping) { + librados::ObjectReadOperation op; + mirror_image_map_list_start(&op, start_after, max_read); + + bufferlist out_bl; + int r = ioctx->operate(RBD_MIRRORING, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + return mirror_image_map_list_finish(&iter, image_mapping); +} + +void mirror_image_map_update(librados::ObjectWriteOperation *op, + const std::string &global_image_id, + const cls::rbd::MirrorImageMap &image_map) { + bufferlist bl; + encode(global_image_id, bl); + encode(image_map, bl); + + op->exec("rbd", "mirror_image_map_update", bl); +} + +void mirror_image_map_remove(librados::ObjectWriteOperation *op, + const std::string &global_image_id) { + bufferlist bl; + encode(global_image_id, bl); + + op->exec("rbd", "mirror_image_map_remove", bl); +} + +void mirror_image_snapshot_unlink_peer(librados::ObjectWriteOperation *op, + snapid_t snap_id, + const std::string &mirror_peer_uuid) { + bufferlist bl; + encode(snap_id, bl); + encode(mirror_peer_uuid, bl); + + op->exec("rbd", "mirror_image_snapshot_unlink_peer", bl); +} + +int mirror_image_snapshot_unlink_peer(librados::IoCtx *ioctx, + const std::string &oid, + snapid_t snap_id, + const std::string &mirror_peer_uuid) { + librados::ObjectWriteOperation op; + mirror_image_snapshot_unlink_peer(&op, snap_id, mirror_peer_uuid); + return ioctx->operate(oid, &op); +} + +void 
mirror_image_snapshot_set_copy_progress(librados::ObjectWriteOperation *op, + snapid_t snap_id, bool complete, + uint64_t copy_progress) { + bufferlist bl; + encode(snap_id, bl); + encode(complete, bl); + encode(copy_progress, bl); + + op->exec("rbd", "mirror_image_snapshot_set_copy_progress", bl); +} + +int mirror_image_snapshot_set_copy_progress(librados::IoCtx *ioctx, + const std::string &oid, + snapid_t snap_id, bool complete, + uint64_t copy_progress) { + librados::ObjectWriteOperation op; + mirror_image_snapshot_set_copy_progress(&op, snap_id, complete, + copy_progress); + return ioctx->operate(oid, &op); +} + +// Groups functions +int group_dir_list(librados::IoCtx *ioctx, const std::string &oid, + const std::string &start, uint64_t max_return, + map<string, string> *cgs) +{ + bufferlist in, out; + encode(start, in); + encode(max_return, in); + int r = ioctx->exec(oid, "rbd", "group_dir_list", in, out); + if (r < 0) + return r; + + auto iter = out.cbegin(); + try { + decode(*cgs, iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + + return 0; +} + +int group_dir_add(librados::IoCtx *ioctx, const std::string &oid, + const std::string &name, const std::string &id) +{ + bufferlist in, out; + encode(name, in); + encode(id, in); + return ioctx->exec(oid, "rbd", "group_dir_add", in, out); +} + +int group_dir_rename(librados::IoCtx *ioctx, const std::string &oid, + const std::string &src, const std::string &dest, + const std::string &id) +{ + bufferlist in, out; + encode(src, in); + encode(dest, in); + encode(id, in); + return ioctx->exec(oid, "rbd", "group_dir_rename", in, out); +} + +int group_dir_remove(librados::IoCtx *ioctx, const std::string &oid, + const std::string &name, const std::string &id) +{ + bufferlist in, out; + encode(name, in); + encode(id, in); + return ioctx->exec(oid, "rbd", "group_dir_remove", in, out); +} + +int group_image_remove(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::GroupImageSpec &spec) +{ + bufferlist bl, bl2; + encode(spec, bl); + + return ioctx->exec(oid, "rbd", "group_image_remove", bl, bl2); +} + +int group_image_list(librados::IoCtx *ioctx, + const std::string &oid, + const cls::rbd::GroupImageSpec &start, + uint64_t max_return, + std::vector<cls::rbd::GroupImageStatus> *images) +{ + bufferlist bl, bl2; + encode(start, bl); + encode(max_return, bl); + + int r = ioctx->exec(oid, "rbd", "group_image_list", bl, bl2); + if (r < 0) + return r; + + auto iter = bl2.cbegin(); + try { + decode(*images, iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + + return 0; +} + +int group_image_set(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::GroupImageStatus &st) +{ + bufferlist bl, bl2; + encode(st, bl); + + return ioctx->exec(oid, "rbd", "group_image_set", bl, bl2); +} + +int image_group_add(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::GroupSpec &group_spec) +{ + bufferlist bl, bl2; + encode(group_spec, bl); + + return ioctx->exec(oid, "rbd", "image_group_add", bl, bl2); +} + +int image_group_remove(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::GroupSpec &group_spec) +{ + bufferlist bl, bl2; + encode(group_spec, bl); + + return ioctx->exec(oid, "rbd", "image_group_remove", bl, bl2); +} + +void image_group_get_start(librados::ObjectReadOperation *op) +{ + bufferlist in_bl; + op->exec("rbd", "image_group_get", in_bl); +} + +int image_group_get_finish(bufferlist::const_iterator *iter, + cls::rbd::GroupSpec *group_spec) +{ + try { + 
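+    // decode the GroupSpec that links this image back to its group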
decode(*group_spec, *iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + return 0; +} + +int image_group_get(librados::IoCtx *ioctx, const std::string &oid, + cls::rbd::GroupSpec *group_spec) +{ + librados::ObjectReadOperation op; + image_group_get_start(&op); + + bufferlist out_bl; + int r = ioctx->operate(oid, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + return image_group_get_finish(&iter, group_spec); +} + +int group_snap_set(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::GroupSnapshot &snapshot) +{ + using ceph::encode; + bufferlist inbl, outbl; + encode(snapshot, inbl); + int r = ioctx->exec(oid, "rbd", "group_snap_set", inbl, outbl); + return r; +} + +int group_snap_remove(librados::IoCtx *ioctx, const std::string &oid, + const std::string &snap_id) +{ + using ceph::encode; + bufferlist inbl, outbl; + encode(snap_id, inbl); + return ioctx->exec(oid, "rbd", "group_snap_remove", inbl, outbl); +} + +int group_snap_get_by_id(librados::IoCtx *ioctx, const std::string &oid, + const std::string &snap_id, + cls::rbd::GroupSnapshot *snapshot) +{ + using ceph::encode; + using ceph::decode; + bufferlist inbl, outbl; + + encode(snap_id, inbl); + int r = ioctx->exec(oid, "rbd", "group_snap_get_by_id", inbl, outbl); + if (r < 0) { + return r; + } + + auto iter = outbl.cbegin(); + try { + decode(*snapshot, iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + + return 0; +} +int group_snap_list(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::GroupSnapshot &start, + uint64_t max_return, + std::vector<cls::rbd::GroupSnapshot> *snapshots) +{ + using ceph::encode; + using ceph::decode; + bufferlist inbl, outbl; + encode(start, inbl); + encode(max_return, inbl); + + int r = ioctx->exec(oid, "rbd", "group_snap_list", inbl, outbl); + if (r < 0) { + return r; + } + auto iter = outbl.cbegin(); + try { + decode(*snapshots, iter); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + + return 0; +} + +// rbd_trash functions +void trash_add(librados::ObjectWriteOperation *op, + const std::string &id, + const cls::rbd::TrashImageSpec &trash_spec) +{ + bufferlist bl; + encode(id, bl); + encode(trash_spec, bl); + op->exec("rbd", "trash_add", bl); +} + +int trash_add(librados::IoCtx *ioctx, const std::string &id, + const cls::rbd::TrashImageSpec &trash_spec) +{ + librados::ObjectWriteOperation op; + trash_add(&op, id, trash_spec); + + return ioctx->operate(RBD_TRASH, &op); +} + +void trash_remove(librados::ObjectWriteOperation *op, + const std::string &id) +{ + bufferlist bl; + encode(id, bl); + op->exec("rbd", "trash_remove", bl); +} + +int trash_remove(librados::IoCtx *ioctx, const std::string &id) +{ + librados::ObjectWriteOperation op; + trash_remove(&op, id); + + return ioctx->operate(RBD_TRASH, &op); +} + +void trash_list_start(librados::ObjectReadOperation *op, + const std::string &start, uint64_t max_return) +{ + bufferlist bl; + encode(start, bl); + encode(max_return, bl); + op->exec("rbd", "trash_list", bl); +} + +int trash_list_finish(bufferlist::const_iterator *it, + map<string, cls::rbd::TrashImageSpec> *entries) +{ + ceph_assert(entries); + + try { + decode(*entries, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + + return 0; +} + +int trash_list(librados::IoCtx *ioctx, + const std::string &start, uint64_t max_return, + map<string, cls::rbd::TrashImageSpec> *entries) +{ + librados::ObjectReadOperation op; + trash_list_start(&op, start, 
max_return); + + bufferlist out_bl; + int r = ioctx->operate(RBD_TRASH, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + return trash_list_finish(&iter, entries); +} + +void trash_get_start(librados::ObjectReadOperation *op, + const std::string &id) +{ + bufferlist bl; + encode(id, bl); + op->exec("rbd", "trash_get", bl); +} + +int trash_get_finish(bufferlist::const_iterator *it, + cls::rbd::TrashImageSpec *trash_spec) { + ceph_assert(trash_spec); + try { + decode(*trash_spec, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + + return 0; +} + +int trash_get(librados::IoCtx *ioctx, const std::string &id, + cls::rbd::TrashImageSpec *trash_spec) +{ + librados::ObjectReadOperation op; + trash_get_start(&op, id); + + bufferlist out_bl; + int r = ioctx->operate(RBD_TRASH, &op, &out_bl); + if (r < 0) { + return r; + } + + auto it = out_bl.cbegin(); + return trash_get_finish(&it, trash_spec); +} + +void trash_state_set(librados::ObjectWriteOperation *op, + const std::string &id, + const cls::rbd::TrashImageState &trash_state, + const cls::rbd::TrashImageState &expect_state) +{ + bufferlist bl; + encode(id, bl); + encode(trash_state, bl); + encode(expect_state, bl); + op->exec("rbd", "trash_state_set", bl); +} + +int trash_state_set(librados::IoCtx *ioctx, const std::string &id, + const cls::rbd::TrashImageState &trash_state, + const cls::rbd::TrashImageState &expect_state) +{ + librados::ObjectWriteOperation op; + trash_state_set(&op, id, trash_state, expect_state); + + return ioctx->operate(RBD_TRASH, &op); +} + +void namespace_add(librados::ObjectWriteOperation *op, + const std::string &name) +{ + bufferlist bl; + encode(name, bl); + op->exec("rbd", "namespace_add", bl); +} + +int namespace_add(librados::IoCtx *ioctx, const std::string &name) +{ + librados::ObjectWriteOperation op; + namespace_add(&op, name); + + return ioctx->operate(RBD_NAMESPACE, &op); +} + +void namespace_remove(librados::ObjectWriteOperation *op, + const std::string &name) +{ + bufferlist bl; + encode(name, bl); + op->exec("rbd", "namespace_remove", bl); +} + +int namespace_remove(librados::IoCtx *ioctx, const std::string &name) +{ + librados::ObjectWriteOperation op; + namespace_remove(&op, name); + + return ioctx->operate(RBD_NAMESPACE, &op); +} + +void namespace_list_start(librados::ObjectReadOperation *op, + const std::string &start, uint64_t max_return) +{ + bufferlist bl; + encode(start, bl); + encode(max_return, bl); + op->exec("rbd", "namespace_list", bl); +} + +int namespace_list_finish(bufferlist::const_iterator *it, + std::list<std::string> *entries) +{ + ceph_assert(entries); + + try { + decode(*entries, *it); + } catch (const ceph::buffer::error &err) { + return -EBADMSG; + } + + return 0; +} + +int namespace_list(librados::IoCtx *ioctx, + const std::string &start, uint64_t max_return, + std::list<std::string> *entries) +{ + librados::ObjectReadOperation op; + namespace_list_start(&op, start, max_return); + + bufferlist out_bl; + int r = ioctx->operate(RBD_NAMESPACE, &op, &out_bl); + if (r < 0) { + return r; + } + + auto iter = out_bl.cbegin(); + return namespace_list_finish(&iter, entries); +} + +void sparsify(librados::ObjectWriteOperation *op, uint64_t sparse_size, + bool remove_empty) +{ + bufferlist bl; + encode(sparse_size, bl); + encode(remove_empty, bl); + op->exec("rbd", "sparsify", bl); +} + +int sparsify(librados::IoCtx *ioctx, const std::string &oid, uint64_t sparse_size, + bool remove_empty) +{ + librados::ObjectWriteOperation op; + 
sparsify(&op, sparse_size, remove_empty); + + return ioctx->operate(oid, &op); +} + +} // namespace cls_client +} // namespace librbd diff --git a/src/cls/rbd/cls_rbd_client.h b/src/cls/rbd/cls_rbd_client.h new file mode 100644 index 000000000..38098805e --- /dev/null +++ b/src/cls/rbd/cls_rbd_client.h @@ -0,0 +1,667 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_LIBRBD_CLS_RBD_CLIENT_H +#define CEPH_LIBRBD_CLS_RBD_CLIENT_H + +#include "cls/lock/cls_lock_types.h" +#include "cls/rbd/cls_rbd_types.h" +#include "common/snap_types.h" +#include "include/types.h" +#include "include/rados/librados_fwd.hpp" + +class Context; +namespace ceph { template <uint8_t> class BitVector; } +namespace neorados { struct WriteOp; } + +namespace librbd { +namespace cls_client { + +// low-level interface (mainly for testing) +void create_image(librados::ObjectWriteOperation *op, uint64_t size, + uint8_t order, uint64_t features, + const std::string &object_prefix, int64_t data_pool_id); +int create_image(librados::IoCtx *ioctx, const std::string &oid, + uint64_t size, uint8_t order, uint64_t features, + const std::string &object_prefix, int64_t data_pool_id); + +void get_features_start(librados::ObjectReadOperation *op, bool read_only); +int get_features_finish(ceph::buffer::list::const_iterator *it, uint64_t *features, + uint64_t *incompatible_features); +int get_features(librados::IoCtx *ioctx, const std::string &oid, + bool read_only, uint64_t *features, + uint64_t *incompatible_features); +void set_features(librados::ObjectWriteOperation *op, uint64_t features, + uint64_t mask); +int set_features(librados::IoCtx *ioctx, const std::string &oid, + uint64_t features, uint64_t mask); + +void get_object_prefix_start(librados::ObjectReadOperation *op); +int get_object_prefix_finish(ceph::buffer::list::const_iterator *it, + std::string *object_prefix); +int get_object_prefix(librados::IoCtx *ioctx, const std::string &oid, + std::string *object_prefix); + +void get_data_pool_start(librados::ObjectReadOperation *op); +int get_data_pool_finish(ceph::buffer::list::const_iterator *it, int64_t *data_pool_id); +int get_data_pool(librados::IoCtx *ioctx, const std::string &oid, + int64_t *data_pool_id); + +void get_size_start(librados::ObjectReadOperation *op, snapid_t snap_id); +int get_size_finish(ceph::buffer::list::const_iterator *it, uint64_t *size, + uint8_t *order); +int get_size(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, uint64_t *size, uint8_t *order); +int set_size(librados::IoCtx *ioctx, const std::string &oid, + uint64_t size); +void set_size(librados::ObjectWriteOperation *op, uint64_t size); + +void get_flags_start(librados::ObjectReadOperation *op, snapid_t snap_id); +int get_flags_finish(ceph::buffer::list::const_iterator *it, uint64_t *flags); +int get_flags(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, uint64_t *flags); + +void set_flags(librados::ObjectWriteOperation *op, snapid_t snap_id, + uint64_t flags, uint64_t mask); + +void op_features_get_start(librados::ObjectReadOperation *op); +int op_features_get_finish(ceph::buffer::list::const_iterator *it, + uint64_t *op_features); +int op_features_get(librados::IoCtx *ioctx, const std::string &oid, + uint64_t *op_features); +void op_features_set(librados::ObjectWriteOperation *op, + uint64_t op_features, uint64_t mask); +int op_features_set(librados::IoCtx *ioctx, const std::string &oid, + uint64_t op_features, uint64_t mask); + +// NOTE: 
deprecate v1 parent APIs after mimic EOLed +void get_parent_start(librados::ObjectReadOperation *op, snapid_t snap_id); +int get_parent_finish(ceph::buffer::list::const_iterator *it, + cls::rbd::ParentImageSpec *pspec, + uint64_t *parent_overlap); +int get_parent(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, cls::rbd::ParentImageSpec *pspec, + uint64_t *parent_overlap); +int set_parent(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::ParentImageSpec &pspec, uint64_t parent_overlap); +void set_parent(librados::ObjectWriteOperation *op, + const cls::rbd::ParentImageSpec &pspec, + uint64_t parent_overlap); +int remove_parent(librados::IoCtx *ioctx, const std::string &oid); +void remove_parent(librados::ObjectWriteOperation *op); + +// v2 parent APIs +void parent_get_start(librados::ObjectReadOperation* op); +int parent_get_finish(ceph::buffer::list::const_iterator* it, + cls::rbd::ParentImageSpec* parent_image_spec); +int parent_get(librados::IoCtx* ioctx, const std::string &oid, + cls::rbd::ParentImageSpec* parent_image_spec); + +void parent_overlap_get_start(librados::ObjectReadOperation* op, + snapid_t snap_id); +int parent_overlap_get_finish(ceph::buffer::list::const_iterator* it, + std::optional<uint64_t>* parent_overlap); +int parent_overlap_get(librados::IoCtx* ioctx, const std::string &oid, + snapid_t snap_id, + std::optional<uint64_t>* parent_overlap); + +void parent_attach(librados::ObjectWriteOperation* op, + const cls::rbd::ParentImageSpec& parent_image_spec, + uint64_t parent_overlap, bool reattach); +int parent_attach(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::ParentImageSpec& parent_image_spec, + uint64_t parent_overlap, bool reattach); + +void parent_detach(librados::ObjectWriteOperation* op); +int parent_detach(librados::IoCtx *ioctx, const std::string &oid); + +int add_child(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::ParentImageSpec &pspec, + const std::string &c_imageid); +void add_child(librados::ObjectWriteOperation *op, + const cls::rbd::ParentImageSpec& pspec, + const std::string &c_imageid); +void remove_child(librados::ObjectWriteOperation *op, + const cls::rbd::ParentImageSpec &pspec, + const std::string &c_imageid); +int remove_child(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::ParentImageSpec &pspec, + const std::string &c_imageid); +void get_children_start(librados::ObjectReadOperation *op, + const cls::rbd::ParentImageSpec &pspec); +int get_children_finish(ceph::buffer::list::const_iterator *it, + std::set<std::string> *children); +int get_children(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::ParentImageSpec& pspec, std::set<std::string>& children); + +void snapshot_get_start(librados::ObjectReadOperation* op, + snapid_t snap_id); +int snapshot_get_finish(ceph::buffer::list::const_iterator* it, + cls::rbd::SnapshotInfo* snap_info); +int snapshot_get(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, cls::rbd::SnapshotInfo* snap_info); + +void snapshot_add(librados::ObjectWriteOperation *op, snapid_t snap_id, + const std::string &snap_name, + const cls::rbd::SnapshotNamespace &snap_namespace); +void snapshot_remove(librados::ObjectWriteOperation *op, snapid_t snap_id); +void snapshot_rename(librados::ObjectWriteOperation *op, + snapid_t src_snap_id, + const std::string &dst_name); +void snapshot_trash_add(librados::ObjectWriteOperation *op, + snapid_t snap_id); + +void get_snapcontext_start(librados::ObjectReadOperation 
*op); +int get_snapcontext_finish(ceph::buffer::list::const_iterator *it, + ::SnapContext *snapc); +int get_snapcontext(librados::IoCtx *ioctx, const std::string &oid, + ::SnapContext *snapc); + +/// NOTE: remove after Luminous is retired +void get_snapshot_name_start(librados::ObjectReadOperation *op, + snapid_t snap_id); +int get_snapshot_name_finish(ceph::buffer::list::const_iterator *it, + std::string *name); +int get_snapshot_name(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, std::string *name); + +/// NOTE: remove after Luminous is retired +void get_snapshot_timestamp_start(librados::ObjectReadOperation *op, + snapid_t snap_id); +int get_snapshot_timestamp_finish(ceph::buffer::list::const_iterator *it, + utime_t *timestamp); +int get_snapshot_timestamp(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, utime_t *timestamp); + +void get_all_features_start(librados::ObjectReadOperation *op); +int get_all_features_finish(ceph::buffer::list::const_iterator *it, + uint64_t *all_features); +int get_all_features(librados::IoCtx *ioctx, const std::string &oid, + uint64_t *all_features); + +/// NOTE: remove protection after clone v1 is retired +void get_protection_status_start(librados::ObjectReadOperation *op, + snapid_t snap_id); +int get_protection_status_finish(ceph::buffer::list::const_iterator *it, + uint8_t *protection_status); +int get_protection_status(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, uint8_t *protection_status); + +int set_protection_status(librados::IoCtx *ioctx, const std::string &oid, + snapid_t snap_id, uint8_t protection_status); +void set_protection_status(librados::ObjectWriteOperation *op, + snapid_t snap_id, uint8_t protection_status); + +void snapshot_get_limit_start(librados::ObjectReadOperation *op); +int snapshot_get_limit_finish(ceph::buffer::list::const_iterator *it, uint64_t *limit); +int snapshot_get_limit(librados::IoCtx *ioctx, const std::string &oid, + uint64_t *limit); +void snapshot_set_limit(librados::ObjectWriteOperation *op, + uint64_t limit); + +void get_stripe_unit_count_start(librados::ObjectReadOperation *op); +int get_stripe_unit_count_finish(ceph::buffer::list::const_iterator *it, + uint64_t *stripe_unit, + uint64_t *stripe_count); +int get_stripe_unit_count(librados::IoCtx *ioctx, const std::string &oid, + uint64_t *stripe_unit, uint64_t *stripe_count); + +void set_stripe_unit_count(librados::ObjectWriteOperation *op, + uint64_t stripe_unit, uint64_t stripe_count); +int set_stripe_unit_count(librados::IoCtx *ioctx, const std::string &oid, + uint64_t stripe_unit, uint64_t stripe_count); + +void get_create_timestamp_start(librados::ObjectReadOperation *op); +int get_create_timestamp_finish(ceph::buffer::list::const_iterator *it, + utime_t *timestamp); +int get_create_timestamp(librados::IoCtx *ioctx, const std::string &oid, + utime_t *timestamp); + +void get_access_timestamp_start(librados::ObjectReadOperation *op); +int get_access_timestamp_finish(ceph::buffer::list::const_iterator *it, + utime_t *timestamp); +int get_access_timestamp(librados::IoCtx *ioctx, const std::string &oid, + utime_t *timestamp); + +void set_access_timestamp(librados::ObjectWriteOperation *op); +int set_access_timestamp(librados::IoCtx *ioctx, const std::string &oid); + +void get_modify_timestamp_start(librados::ObjectReadOperation *op); +int get_modify_timestamp_finish(ceph::buffer::list::const_iterator *it, + utime_t *timestamp); +int get_modify_timestamp(librados::IoCtx *ioctx, const std::string &oid, + 
+
+void set_modify_timestamp(librados::ObjectWriteOperation *op);
+int set_modify_timestamp(librados::IoCtx *ioctx, const std::string &oid);
+
+int metadata_list(librados::IoCtx *ioctx, const std::string &oid,
+    const std::string &start, uint64_t max_return,
+    std::map<std::string, ceph::buffer::list> *pairs);
+void metadata_list_start(librados::ObjectReadOperation *op,
+    const std::string &start, uint64_t max_return);
+int metadata_list_finish(ceph::buffer::list::const_iterator *it,
+    std::map<std::string, ceph::buffer::list> *pairs);
+void metadata_set(librados::ObjectWriteOperation *op,
+    const std::map<std::string, ceph::buffer::list> &data);
+int metadata_set(librados::IoCtx *ioctx, const std::string &oid,
+    const std::map<std::string, ceph::buffer::list> &data);
+void metadata_remove(librados::ObjectWriteOperation *op,
+    const std::string &key);
+int metadata_remove(librados::IoCtx *ioctx, const std::string &oid,
+    const std::string &key);
+void metadata_get_start(librados::ObjectReadOperation* op,
+    const std::string &key);
+int metadata_get_finish(ceph::buffer::list::const_iterator *it,
+    std::string* value);
+int metadata_get(librados::IoCtx *ioctx, const std::string &oid,
+    const std::string &key, std::string *v);
+
+void child_attach(librados::ObjectWriteOperation *op, snapid_t snap_id,
+    const cls::rbd::ChildImageSpec& child_image);
+int child_attach(librados::IoCtx *ioctx, const std::string &oid,
+    snapid_t snap_id,
+    const cls::rbd::ChildImageSpec& child_image);
+void child_detach(librados::ObjectWriteOperation *op, snapid_t snap_id,
+    const cls::rbd::ChildImageSpec& child_image);
+int child_detach(librados::IoCtx *ioctx, const std::string &oid,
+    snapid_t snap_id,
+    const cls::rbd::ChildImageSpec& child_image);
+void children_list_start(librados::ObjectReadOperation *op,
+    snapid_t snap_id);
+int children_list_finish(ceph::buffer::list::const_iterator *it,
+    cls::rbd::ChildImageSpecs *child_images);
+int children_list(librados::IoCtx *ioctx, const std::string &oid,
+    snapid_t snap_id,
+    cls::rbd::ChildImageSpecs *child_images);
+int migration_set(librados::IoCtx *ioctx, const std::string &oid,
+    const cls::rbd::MigrationSpec &migration_spec);
+void migration_set(librados::ObjectWriteOperation *op,
+    const cls::rbd::MigrationSpec &migration_spec);
+int migration_set_state(librados::IoCtx *ioctx, const std::string &oid,
+    cls::rbd::MigrationState state,
+    const std::string &description);
+void migration_set_state(librados::ObjectWriteOperation *op,
+    cls::rbd::MigrationState state,
+    const std::string &description);
+void migration_get_start(librados::ObjectReadOperation *op);
+int migration_get_finish(ceph::buffer::list::const_iterator *it,
+    cls::rbd::MigrationSpec *migration_spec);
+int migration_get(librados::IoCtx *ioctx, const std::string &oid,
+    cls::rbd::MigrationSpec *migration_spec);
+int migration_remove(librados::IoCtx *ioctx, const std::string &oid);
+void migration_remove(librados::ObjectWriteOperation *op);
+
+// operations on rbd_id objects
+void get_id_start(librados::ObjectReadOperation *op);
+int get_id_finish(ceph::buffer::list::const_iterator *it, std::string *id);
+int get_id(librados::IoCtx *ioctx, const std::string &oid, std::string *id);
+
+void set_id(librados::ObjectWriteOperation *op, const std::string &id);
+int set_id(librados::IoCtx *ioctx, const std::string &oid, const std::string &id);
+
+// operations on rbd_directory objects
+int dir_get_id(librados::IoCtx *ioctx, const std::string &oid,
+    const std::string &name, std::string *id);
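+
+// Illustrative usage sketch: resolving an image name to its id via the pool's
+// directory object, assuming `ioctx` is an open librados::IoCtx and
+// RBD_DIRECTORY is the usual "rbd_directory" object name constant:
+//
+//   std::string image_id;
+//   int r = dir_get_id(&ioctx, RBD_DIRECTORY, "myimage", &image_id);
+//   if (r == -ENOENT) {
+//     // image is not registered in this pool
+//   }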
+void dir_get_id_start(librados::ObjectReadOperation *op,
+    const std::string &image_name);
+int dir_get_id_finish(ceph::buffer::list::const_iterator *iter, std::string *image_id);
+void dir_get_name_start(librados::ObjectReadOperation *op,
+    const std::string &id);
+int dir_get_name_finish(ceph::buffer::list::const_iterator *it, std::string *name);
+int dir_get_name(librados::IoCtx *ioctx, const std::string &oid,
+    const std::string &id, std::string *name);
+void dir_list_start(librados::ObjectReadOperation *op,
+    const std::string &start, uint64_t max_return);
+int dir_list_finish(ceph::buffer::list::const_iterator *it, std::map<std::string, std::string> *images);
+int dir_list(librados::IoCtx *ioctx, const std::string &oid,
+    const std::string &start, uint64_t max_return,
+    std::map<std::string, std::string> *images);
+void dir_add_image(librados::ObjectWriteOperation *op,
+    const std::string &name, const std::string &id);
+int dir_add_image(librados::IoCtx *ioctx, const std::string &oid,
+    const std::string &name, const std::string &id);
+int dir_remove_image(librados::IoCtx *ioctx, const std::string &oid,
+    const std::string &name, const std::string &id);
+void dir_remove_image(librados::ObjectWriteOperation *op,
+    const std::string &name, const std::string &id);
+// atomic remove and add
+void dir_rename_image(librados::ObjectWriteOperation *op,
+    const std::string &src, const std::string &dest,
+    const std::string &id);
+void dir_state_assert(librados::ObjectOperation *op,
+    cls::rbd::DirectoryState directory_state);
+int dir_state_assert(librados::IoCtx *ioctx, const std::string &oid,
+    cls::rbd::DirectoryState directory_state);
+void dir_state_set(librados::ObjectWriteOperation *op,
+    cls::rbd::DirectoryState directory_state);
+int dir_state_set(librados::IoCtx *ioctx, const std::string &oid,
+    cls::rbd::DirectoryState directory_state);
+
+// operations on the rbd_object_map.$image_id object
+void object_map_load_start(librados::ObjectReadOperation *op);
+int object_map_load_finish(ceph::buffer::list::const_iterator *it,
+    ceph::BitVector<2> *object_map);
+int object_map_load(librados::IoCtx *ioctx, const std::string &oid,
+    ceph::BitVector<2> *object_map);
+void object_map_save(librados::ObjectWriteOperation *rados_op,
+    const ceph::BitVector<2> &object_map);
+void object_map_resize(librados::ObjectWriteOperation *rados_op,
+    uint64_t object_count, uint8_t default_state);
+void object_map_update(librados::ObjectWriteOperation *rados_op,
+    uint64_t start_object_no, uint64_t end_object_no,
+    uint8_t new_object_state,
+    const boost::optional<uint8_t> &current_object_state);
+void object_map_snap_add(librados::ObjectWriteOperation *rados_op);
+void object_map_snap_remove(librados::ObjectWriteOperation *rados_op,
+    const ceph::BitVector<2> &object_map);
+
+// class operations on the old format, kept for
+// backwards compatibility
+void old_snapshot_add(librados::ObjectWriteOperation *rados_op,
+    snapid_t snap_id, const std::string &snap_name);
+void old_snapshot_remove(librados::ObjectWriteOperation *rados_op,
+    const std::string &snap_name);
+void old_snapshot_rename(librados::ObjectWriteOperation *rados_op,
+    snapid_t src_snap_id, const std::string &dst_name);
+
+void old_snapshot_list_start(librados::ObjectReadOperation *op);
+int old_snapshot_list_finish(ceph::buffer::list::const_iterator *it,
+    std::vector<std::string> *names,
+    std::vector<uint64_t> *sizes,
+    ::SnapContext *snapc);
+int old_snapshot_list(librados::IoCtx *ioctx, const std::string &oid,
std::vector<std::string> *names, + std::vector<uint64_t> *sizes, + ::SnapContext *snapc); + +// operations on the rbd_mirroring object +void mirror_uuid_get_start(librados::ObjectReadOperation *op); +int mirror_uuid_get_finish(ceph::buffer::list::const_iterator *it, + std::string *uuid); +int mirror_uuid_get(librados::IoCtx *ioctx, std::string *uuid); +int mirror_uuid_set(librados::IoCtx *ioctx, const std::string &uuid); +void mirror_mode_get_start(librados::ObjectReadOperation *op); +int mirror_mode_get_finish(ceph::buffer::list::const_iterator *it, + cls::rbd::MirrorMode *mirror_mode); +int mirror_mode_get(librados::IoCtx *ioctx, + cls::rbd::MirrorMode *mirror_mode); +int mirror_mode_set(librados::IoCtx *ioctx, + cls::rbd::MirrorMode mirror_mode); + +int mirror_peer_ping(librados::IoCtx *ioctx, + const std::string& site_name, + const std::string& fsid); +void mirror_peer_ping(librados::ObjectWriteOperation *op, + const std::string& site_name, + const std::string& fsid); +void mirror_peer_list_start(librados::ObjectReadOperation *op); +int mirror_peer_list_finish(ceph::buffer::list::const_iterator *it, + std::vector<cls::rbd::MirrorPeer> *peers); +int mirror_peer_list(librados::IoCtx *ioctx, + std::vector<cls::rbd::MirrorPeer> *peers); +int mirror_peer_add(librados::IoCtx *ioctx, + const cls::rbd::MirrorPeer& mirror_peer); +void mirror_peer_add(librados::ObjectWriteOperation *op, + const cls::rbd::MirrorPeer& mirror_peer); +int mirror_peer_remove(librados::IoCtx *ioctx, + const std::string &uuid); +int mirror_peer_set_client(librados::IoCtx *ioctx, + const std::string &uuid, + const std::string &client_name); +int mirror_peer_set_cluster(librados::IoCtx *ioctx, + const std::string &uuid, + const std::string &cluster_name); +int mirror_peer_set_direction( + librados::IoCtx *ioctx, const std::string &uuid, + cls::rbd::MirrorPeerDirection mirror_peer_direction); + +void mirror_image_list_start(librados::ObjectReadOperation *op, + const std::string &start, uint64_t max_return); +int mirror_image_list_finish(ceph::buffer::list::const_iterator *it, + std::map<std::string, std::string> *mirror_image_ids); +int mirror_image_list(librados::IoCtx *ioctx, + const std::string &start, uint64_t max_return, + std::map<std::string, std::string> *mirror_image_ids); +void mirror_image_get_image_id_start(librados::ObjectReadOperation *op, + const std::string &global_image_id); +int mirror_image_get_image_id_finish(ceph::buffer::list::const_iterator *it, + std::string *image_id); +int mirror_image_get_image_id(librados::IoCtx *ioctx, + const std::string &global_image_id, + std::string *image_id); +int mirror_image_get(librados::IoCtx *ioctx, const std::string &image_id, + cls::rbd::MirrorImage *mirror_image); +void mirror_image_get_start(librados::ObjectReadOperation *op, + const std::string &image_id); +int mirror_image_get_finish(ceph::buffer::list::const_iterator *iter, + cls::rbd::MirrorImage *mirror_image); +void mirror_image_set(librados::ObjectWriteOperation *op, + const std::string &image_id, + const cls::rbd::MirrorImage &mirror_image); +int mirror_image_set(librados::IoCtx *ioctx, const std::string &image_id, + const cls::rbd::MirrorImage &mirror_image); +void mirror_image_remove(librados::ObjectWriteOperation *op, + const std::string &image_id); +int mirror_image_remove(librados::IoCtx *ioctx, + const std::string &image_id); +int mirror_image_status_set(librados::IoCtx *ioctx, + const std::string &global_image_id, + const cls::rbd::MirrorImageSiteStatus &status); +void 
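mirror_image_status_set(librados::ObjectWriteOperation *op,
+    const std::string &global_image_id,
+    const cls::rbd::MirrorImageSiteStatus &status);
+
+// Illustrative usage sketch: the IoCtx overloads are one-shot helpers, while
+// the ObjectWriteOperation overloads let a caller batch several class methods
+// into one atomic update. Assumes `ioctx` is an open librados::IoCtx,
+// `global_image_id` is a std::string in scope, and RBD_MIRRORING is the usual
+// "rbd_mirroring" object name constant:
+//
+//   cls::rbd::MirrorImageSiteStatus status;
+//   status.state = cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING;
+//   status.description = "replaying";
+//   librados::ObjectWriteOperation op;
+//   mirror_image_status_set(&op, global_image_id, status);
+//   int r = ioctx.operate(RBD_MIRRORING, &op);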
+int mirror_image_status_get(librados::IoCtx *ioctx,
+    const std::string &global_image_id,
+    cls::rbd::MirrorImageStatus *status);
+void mirror_image_status_get_start(librados::ObjectReadOperation *op,
+    const std::string &global_image_id);
+int mirror_image_status_get_finish(ceph::buffer::list::const_iterator *iter,
+    cls::rbd::MirrorImageStatus *status);
+int mirror_image_status_list(librados::IoCtx *ioctx,
+    const std::string &start, uint64_t max_return,
+    std::map<std::string, cls::rbd::MirrorImage> *images,
+    std::map<std::string, cls::rbd::MirrorImageStatus> *statuses);
+void mirror_image_status_list_start(librados::ObjectReadOperation *op,
+    const std::string &start,
+    uint64_t max_return);
+int mirror_image_status_list_finish(ceph::buffer::list::const_iterator *iter,
+    std::map<std::string, cls::rbd::MirrorImage> *images,
+    std::map<std::string, cls::rbd::MirrorImageStatus> *statuses);
+int mirror_image_status_get_summary(
+    librados::IoCtx *ioctx,
+    const std::vector<cls::rbd::MirrorPeer>& mirror_peer_sites,
+    std::map<cls::rbd::MirrorImageStatusState, int32_t> *states);
+void mirror_image_status_get_summary_start(
+    librados::ObjectReadOperation *op,
+    const std::vector<cls::rbd::MirrorPeer>& mirror_peer_sites);
+int mirror_image_status_get_summary_finish(
+    ceph::buffer::list::const_iterator *iter,
+    std::map<cls::rbd::MirrorImageStatusState, int32_t> *states);
+int mirror_image_status_remove(librados::IoCtx *ioctx,
+    const std::string &global_image_id);
+void mirror_image_status_remove(librados::ObjectWriteOperation *op,
+    const std::string &global_image_id);
+int mirror_image_status_remove_down(librados::IoCtx *ioctx);
+void mirror_image_status_remove_down(librados::ObjectWriteOperation *op);
+
+int mirror_image_instance_get(librados::IoCtx *ioctx,
+    const std::string &global_image_id,
+    entity_inst_t *instance);
+void mirror_image_instance_get_start(librados::ObjectReadOperation *op,
+    const std::string &global_image_id);
+int mirror_image_instance_get_finish(ceph::buffer::list::const_iterator *iter,
+    entity_inst_t *instance);
+int mirror_image_instance_list(librados::IoCtx *ioctx,
+    const std::string &start, uint64_t max_return,
+    std::map<std::string, entity_inst_t> *instances);
+void mirror_image_instance_list_start(librados::ObjectReadOperation *op,
+    const std::string &start,
+    uint64_t max_return);
+int mirror_image_instance_list_finish(ceph::buffer::list::const_iterator *iter,
+    std::map<std::string, entity_inst_t> *instances);
+
+void mirror_instances_list_start(librados::ObjectReadOperation *op);
+int mirror_instances_list_finish(ceph::buffer::list::const_iterator *iter,
+    std::vector<std::string> *instance_ids);
+int mirror_instances_list(librados::IoCtx *ioctx,
+    std::vector<std::string> *instance_ids);
+void mirror_instances_add(librados::ObjectWriteOperation *op,
+    const std::string &instance_id);
+int mirror_instances_add(librados::IoCtx *ioctx,
+    const std::string &instance_id);
+void mirror_instances_remove(librados::ObjectWriteOperation *op,
+    const std::string &instance_id);
+int mirror_instances_remove(librados::IoCtx *ioctx,
+    const std::string &instance_id);
+
+// image mapping related routines
+void mirror_image_map_list_start(librados::ObjectReadOperation *op,
+    const std::string &start_after,
+    uint64_t max_read);
+int mirror_image_map_list_finish(ceph::buffer::list::const_iterator *iter,
std::map<std::string, cls::rbd::MirrorImageMap> *image_mapping); +int mirror_image_map_list(librados::IoCtx *ioctx, + const std::string &start_after, uint64_t max_read, + std::map<std::string, cls::rbd::MirrorImageMap> *image_mapping); +void mirror_image_map_update(librados::ObjectWriteOperation *op, + const std::string &global_image_id, + const cls::rbd::MirrorImageMap &image_map); +void mirror_image_map_remove(librados::ObjectWriteOperation *op, + const std::string &global_image_id); + +void mirror_image_snapshot_unlink_peer(librados::ObjectWriteOperation *op, + snapid_t snap_id, + const std::string &mirror_peer_uuid); +int mirror_image_snapshot_unlink_peer(librados::IoCtx *ioctx, + const std::string &oid, + snapid_t snap_id, + const std::string &mirror_peer_uuid); +void mirror_image_snapshot_set_copy_progress(librados::ObjectWriteOperation *op, + snapid_t snap_id, bool complete, + uint64_t copy_progress); +int mirror_image_snapshot_set_copy_progress(librados::IoCtx *ioctx, + const std::string &oid, + snapid_t snap_id, bool complete, + uint64_t copy_progress); + +// Groups functions +int group_dir_list(librados::IoCtx *ioctx, const std::string &oid, + const std::string &start, uint64_t max_return, + std::map<std::string, std::string> *groups); +int group_dir_add(librados::IoCtx *ioctx, const std::string &oid, + const std::string &name, const std::string &id); +int group_dir_rename(librados::IoCtx *ioctx, const std::string &oid, + const std::string &src, const std::string &dest, + const std::string &id); +int group_dir_remove(librados::IoCtx *ioctx, const std::string &oid, + const std::string &name, const std::string &id); +int group_image_remove(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::GroupImageSpec &spec); +int group_image_list(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::GroupImageSpec &start, + uint64_t max_return, + std::vector<cls::rbd::GroupImageStatus> *images); +int group_image_set(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::GroupImageStatus &st); +int image_group_add(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::GroupSpec &group_spec); +int image_group_remove(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::GroupSpec &group_spec); +void image_group_get_start(librados::ObjectReadOperation *op); +int image_group_get_finish(ceph::buffer::list::const_iterator *iter, + cls::rbd::GroupSpec *group_spec); +int image_group_get(librados::IoCtx *ioctx, const std::string &oid, + cls::rbd::GroupSpec *group_spec); +int group_snap_set(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::GroupSnapshot &snapshot); +int group_snap_remove(librados::IoCtx *ioctx, const std::string &oid, + const std::string &snap_id); +int group_snap_get_by_id(librados::IoCtx *ioctx, const std::string &oid, + const std::string &snap_id, + cls::rbd::GroupSnapshot *snapshot); +int group_snap_list(librados::IoCtx *ioctx, const std::string &oid, + const cls::rbd::GroupSnapshot &start, + uint64_t max_return, + std::vector<cls::rbd::GroupSnapshot> *snapshots); + +// operations on rbd_trash object +void trash_add(librados::ObjectWriteOperation *op, + const std::string &id, + const cls::rbd::TrashImageSpec &trash_spec); +int trash_add(librados::IoCtx *ioctx, const std::string &id, + const cls::rbd::TrashImageSpec &trash_spec); +void trash_remove(librados::ObjectWriteOperation *op, + const std::string &id); +int trash_remove(librados::IoCtx *ioctx, const std::string &id); +void 
trash_list_start(librados::ObjectReadOperation *op, + const std::string &start, uint64_t max_return); +int trash_list_finish(ceph::buffer::list::const_iterator *it, + std::map<std::string, cls::rbd::TrashImageSpec> *entries); +int trash_list(librados::IoCtx *ioctx, + const std::string &start, uint64_t max_return, + std::map<std::string, cls::rbd::TrashImageSpec> *entries); +void trash_get_start(librados::ObjectReadOperation *op, + const std::string &id); +int trash_get_finish(ceph::buffer::list::const_iterator *it, + cls::rbd::TrashImageSpec *trash_spec); +int trash_get(librados::IoCtx *ioctx, const std::string &id, + cls::rbd::TrashImageSpec *trash_spec); +void trash_state_set(librados::ObjectWriteOperation *op, + const std::string &id, + const cls::rbd::TrashImageState &trash_state, + const cls::rbd::TrashImageState &expect_state); +int trash_state_set(librados::IoCtx *ioctx, const std::string &id, + const cls::rbd::TrashImageState &trash_state, + const cls::rbd::TrashImageState &expect_state); + +// operations on rbd_namespace object +void namespace_add(librados::ObjectWriteOperation *op, + const std::string &name); +int namespace_add(librados::IoCtx *ioctx, const std::string &name); +void namespace_remove(librados::ObjectWriteOperation *op, + const std::string &name); +int namespace_remove(librados::IoCtx *ioctx, const std::string &name); +void namespace_list_start(librados::ObjectReadOperation *op, + const std::string &start, uint64_t max_return); +int namespace_list_finish(ceph::buffer::list::const_iterator *it, + std::list<std::string> *entries); +int namespace_list(librados::IoCtx *ioctx, + const std::string &start, uint64_t max_return, + std::list<std::string> *entries); + +// operations on data objects +void assert_snapc_seq(neorados::WriteOp* op, + uint64_t snapc_seq, + cls::rbd::AssertSnapcSeqState state); +void assert_snapc_seq(librados::ObjectWriteOperation *op, + uint64_t snapc_seq, + cls::rbd::AssertSnapcSeqState state); +int assert_snapc_seq(librados::IoCtx *ioctx, const std::string &oid, + uint64_t snapc_seq, + cls::rbd::AssertSnapcSeqState state); + +void copyup(neorados::WriteOp* op, ceph::buffer::list data); +void copyup(librados::ObjectWriteOperation *op, ceph::buffer::list data); +int copyup(librados::IoCtx *ioctx, const std::string &oid, + ceph::buffer::list data); + +void sparse_copyup(neorados::WriteOp* op, + const std::vector<std::pair<uint64_t, uint64_t>>& extent_map, + ceph::buffer::list data); +void sparse_copyup(librados::ObjectWriteOperation *op, + const std::map<uint64_t, uint64_t> &extent_map, + ceph::buffer::list data); +int sparse_copyup(librados::IoCtx *ioctx, const std::string &oid, + const std::map<uint64_t, uint64_t> &extent_map, + ceph::buffer::list data); + +void sparsify(librados::ObjectWriteOperation *op, uint64_t sparse_size, + bool remove_empty); +int sparsify(librados::IoCtx *ioctx, const std::string &oid, uint64_t sparse_size, + bool remove_empty); + +} // namespace cls_client +} // namespace librbd + +#endif // CEPH_LIBRBD_CLS_RBD_CLIENT_H diff --git a/src/cls/rbd/cls_rbd_types.cc b/src/cls/rbd/cls_rbd_types.cc new file mode 100644 index 000000000..cdaf751d7 --- /dev/null +++ b/src/cls/rbd/cls_rbd_types.cc @@ -0,0 +1,1363 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include <boost/variant.hpp> +#include "cls/rbd/cls_rbd_types.h" +#include "common/Formatter.h" + +namespace cls { +namespace rbd { + +using std::istringstream; +using std::ostringstream; +using std::string; + +using 
ceph::bufferlist; +using ceph::Formatter; + +std::ostream& operator<<(std::ostream& os, + MirrorPeerDirection mirror_peer_direction) { + switch (mirror_peer_direction) { + case MIRROR_PEER_DIRECTION_RX: + os << "RX"; + break; + case MIRROR_PEER_DIRECTION_TX: + os << "TX"; + break; + case MIRROR_PEER_DIRECTION_RX_TX: + os << "RX/TX"; + break; + default: + os << "unknown"; + break; + } + return os; +} + +void MirrorPeer::encode(bufferlist &bl) const { + ENCODE_START(2, 1, bl); + encode(uuid, bl); + encode(site_name, bl); + encode(client_name, bl); + int64_t pool_id = -1; + encode(pool_id, bl); + + // v2 + encode(static_cast<uint8_t>(mirror_peer_direction), bl); + encode(mirror_uuid, bl); + encode(last_seen, bl); + ENCODE_FINISH(bl); +} + +void MirrorPeer::decode(bufferlist::const_iterator &it) { + DECODE_START(2, it); + decode(uuid, it); + decode(site_name, it); + decode(client_name, it); + int64_t pool_id; + decode(pool_id, it); + + if (struct_v >= 2) { + uint8_t mpd; + decode(mpd, it); + mirror_peer_direction = static_cast<MirrorPeerDirection>(mpd); + decode(mirror_uuid, it); + decode(last_seen, it); + } + + DECODE_FINISH(it); +} + +void MirrorPeer::dump(Formatter *f) const { + f->dump_string("uuid", uuid); + f->dump_stream("direction") << mirror_peer_direction; + f->dump_string("site_name", site_name); + f->dump_string("mirror_uuid", mirror_uuid); + f->dump_string("client_name", client_name); + f->dump_stream("last_seen") << last_seen; +} + +void MirrorPeer::generate_test_instances(std::list<MirrorPeer*> &o) { + o.push_back(new MirrorPeer()); + o.push_back(new MirrorPeer("uuid-123", MIRROR_PEER_DIRECTION_RX, "site A", + "client name", "")); + o.push_back(new MirrorPeer("uuid-234", MIRROR_PEER_DIRECTION_TX, "site B", + "", "mirror_uuid")); + o.push_back(new MirrorPeer("uuid-345", MIRROR_PEER_DIRECTION_RX_TX, "site C", + "client name", "mirror_uuid")); +} + +bool MirrorPeer::operator==(const MirrorPeer &rhs) const { + return (uuid == rhs.uuid && + mirror_peer_direction == rhs.mirror_peer_direction && + site_name == rhs.site_name && + client_name == rhs.client_name && + mirror_uuid == rhs.mirror_uuid && + last_seen == rhs.last_seen); +} + +std::ostream& operator<<(std::ostream& os, const MirrorMode& mirror_mode) { + switch (mirror_mode) { + case MIRROR_MODE_DISABLED: + os << "disabled"; + break; + case MIRROR_MODE_IMAGE: + os << "image"; + break; + case MIRROR_MODE_POOL: + os << "pool"; + break; + default: + os << "unknown (" << static_cast<uint32_t>(mirror_mode) << ")"; + break; + } + return os; +} + +std::ostream& operator<<(std::ostream& os, const MirrorPeer& peer) { + os << "[" + << "uuid=" << peer.uuid << ", " + << "direction=" << peer.mirror_peer_direction << ", " + << "site_name=" << peer.site_name << ", " + << "client_name=" << peer.client_name << ", " + << "mirror_uuid=" << peer.mirror_uuid << ", " + << "last_seen=" << peer.last_seen + << "]"; + return os; +} + +void MirrorImage::encode(bufferlist &bl) const { + ENCODE_START(2, 1, bl); + encode(global_image_id, bl); + encode(static_cast<uint8_t>(state), bl); + encode(static_cast<uint8_t>(mode), bl); + ENCODE_FINISH(bl); +} + +void MirrorImage::decode(bufferlist::const_iterator &it) { + uint8_t int_state; + DECODE_START(2, it); + decode(global_image_id, it); + decode(int_state, it); + state = static_cast<MirrorImageState>(int_state); + if (struct_v >= 2) { + uint8_t int_mode; + decode(int_mode, it); + mode = static_cast<MirrorImageMode>(int_mode); + } + DECODE_FINISH(it); +} + +void MirrorImage::dump(Formatter *f) const { + 
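f->dump_stream("mode") << mode;
+  f->dump_string("global_image_id", global_image_id);
+  f->dump_stream("state") << state;
+}
+
+// Illustrative round trip: MirrorImage is encoded with ENCODE_START(2, 1, ...),
+// so a version 1 decoder still accepts the blob (keeping its default journal
+// mode), while a current decoder recovers every field:
+//
+//   MirrorImage in(MIRROR_IMAGE_MODE_SNAPSHOT, "global-id",
+//                  MIRROR_IMAGE_STATE_ENABLED);
+//   bufferlist bl;
+//   in.encode(bl);
+//   MirrorImage out;
+//   auto it = bl.cbegin();
+//   out.decode(it);
+//   // out == in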
+
+void MirrorImage::generate_test_instances(std::list<MirrorImage*> &o) {
+  o.push_back(new MirrorImage());
+  o.push_back(new MirrorImage(MIRROR_IMAGE_MODE_JOURNAL, "uuid-123",
+      MIRROR_IMAGE_STATE_ENABLED));
+  o.push_back(new MirrorImage(MIRROR_IMAGE_MODE_SNAPSHOT, "uuid-abc",
+      MIRROR_IMAGE_STATE_DISABLING));
+}
+
+bool MirrorImage::operator==(const MirrorImage &rhs) const {
+  return mode == rhs.mode && global_image_id == rhs.global_image_id &&
+         state == rhs.state;
+}
+
+bool MirrorImage::operator<(const MirrorImage &rhs) const {
+  if (mode != rhs.mode) {
+    return mode < rhs.mode;
+  }
+  if (global_image_id != rhs.global_image_id) {
+    return global_image_id < rhs.global_image_id;
+  }
+  return state < rhs.state;
+}
+
+std::ostream& operator<<(std::ostream& os, const MirrorImageMode& mirror_mode) {
+  switch (mirror_mode) {
+  case MIRROR_IMAGE_MODE_JOURNAL:
+    os << "journal";
+    break;
+  case MIRROR_IMAGE_MODE_SNAPSHOT:
+    os << "snapshot";
+    break;
+  default:
+    os << "unknown (" << static_cast<uint32_t>(mirror_mode) << ")";
+    break;
+  }
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const MirrorImageState& mirror_state) {
+  switch (mirror_state) {
+  case MIRROR_IMAGE_STATE_DISABLING:
+    os << "disabling";
+    break;
+  case MIRROR_IMAGE_STATE_ENABLED:
+    os << "enabled";
+    break;
+  case MIRROR_IMAGE_STATE_DISABLED:
+    os << "disabled";
+    break;
+  default:
+    os << "unknown (" << static_cast<uint32_t>(mirror_state) << ")";
+    break;
+  }
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const MirrorImage& mirror_image) {
+  os << "["
+     << "mode=" << mirror_image.mode << ", "
+     << "global_image_id=" << mirror_image.global_image_id << ", "
+     << "state=" << mirror_image.state << "]";
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         const MirrorImageStatusState& state) {
+  switch (state) {
+  case MIRROR_IMAGE_STATUS_STATE_UNKNOWN:
+    os << "unknown";
+    break;
+  case MIRROR_IMAGE_STATUS_STATE_ERROR:
+    os << "error";
+    break;
+  case MIRROR_IMAGE_STATUS_STATE_SYNCING:
+    os << "syncing";
+    break;
+  case MIRROR_IMAGE_STATUS_STATE_STARTING_REPLAY:
+    os << "starting_replay";
+    break;
+  case MIRROR_IMAGE_STATUS_STATE_REPLAYING:
+    os << "replaying";
+    break;
+  case MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY:
+    os << "stopping_replay";
+    break;
+  case MIRROR_IMAGE_STATUS_STATE_STOPPED:
+    os << "stopped";
+    break;
+  default:
+    os << "unknown (" << static_cast<uint32_t>(state) << ")";
+    break;
+  }
+  return os;
+}
+
+const std::string MirrorImageSiteStatus::LOCAL_MIRROR_UUID(""); // empty mirror uuid
+
+void MirrorImageSiteStatus::encode_meta(uint8_t version, bufferlist &bl) const {
+  if (version >= 2) {
+    ceph::encode(mirror_uuid, bl);
+  }
+  cls::rbd::encode(state, bl);
+  ceph::encode(description, bl);
+  ceph::encode(last_update, bl);
+  ceph::encode(up, bl);
+}
+
+void MirrorImageSiteStatus::decode_meta(uint8_t version,
+                                        bufferlist::const_iterator &it) {
+  if (version < 2) {
+    mirror_uuid = LOCAL_MIRROR_UUID;
+  } else {
+    ceph::decode(mirror_uuid, it);
+  }
+
+  cls::rbd::decode(state, it);
+  ceph::decode(description, it);
+  ceph::decode(last_update, it);
+  ceph::decode(up, it);
+}
+
+void MirrorImageSiteStatus::encode(bufferlist &bl) const {
+  // break compatibility when site-name is provided
+  uint8_t version = (mirror_uuid == LOCAL_MIRROR_UUID ?
1 : 2); + ENCODE_START(version, version, bl); + encode_meta(version, bl); + ENCODE_FINISH(bl); +} + +void MirrorImageSiteStatus::decode(bufferlist::const_iterator &it) { + DECODE_START(2, it); + decode_meta(struct_v, it); + DECODE_FINISH(it); +} + +void MirrorImageSiteStatus::dump(Formatter *f) const { + f->dump_string("state", state_to_string()); + f->dump_string("description", description); + f->dump_stream("last_update") << last_update; +} + +std::string MirrorImageSiteStatus::state_to_string() const { + std::stringstream ss; + ss << (up ? "up+" : "down+") << state; + return ss.str(); +} + +void MirrorImageSiteStatus::generate_test_instances( + std::list<MirrorImageSiteStatus*> &o) { + o.push_back(new MirrorImageSiteStatus()); + o.push_back(new MirrorImageSiteStatus("", MIRROR_IMAGE_STATUS_STATE_REPLAYING, + "")); + o.push_back(new MirrorImageSiteStatus("", MIRROR_IMAGE_STATUS_STATE_ERROR, + "error")); + o.push_back(new MirrorImageSiteStatus("2fb68ca9-1ba0-43b3-8cdf-8c5a9db71e65", + MIRROR_IMAGE_STATUS_STATE_STOPPED, "")); +} + +bool MirrorImageSiteStatus::operator==(const MirrorImageSiteStatus &rhs) const { + return state == rhs.state && description == rhs.description && up == rhs.up; +} + +std::ostream& operator<<(std::ostream& os, + const MirrorImageSiteStatus& status) { + os << "{" + << "state=" << status.state_to_string() << ", " + << "description=" << status.description << ", " + << "last_update=" << status.last_update << "]}"; + return os; +} + +void MirrorImageSiteStatusOnDisk::encode_meta(bufferlist &bl, + uint64_t features) const { + ENCODE_START(1, 1, bl); + auto sanitized_origin = origin; + sanitize_entity_inst(&sanitized_origin); + encode(sanitized_origin, bl, features); + ENCODE_FINISH(bl); +} + +void MirrorImageSiteStatusOnDisk::encode(bufferlist &bl, + uint64_t features) const { + encode_meta(bl, features); + cls::rbd::MirrorImageSiteStatus::encode(bl); +} + +void MirrorImageSiteStatusOnDisk::decode_meta(bufferlist::const_iterator &it) { + DECODE_START(1, it); + decode(origin, it); + sanitize_entity_inst(&origin); + DECODE_FINISH(it); +} + +void MirrorImageSiteStatusOnDisk::decode(bufferlist::const_iterator &it) { + decode_meta(it); + cls::rbd::MirrorImageSiteStatus::decode(it); +} + +void MirrorImageSiteStatusOnDisk::generate_test_instances( + std::list<MirrorImageSiteStatusOnDisk*> &o) { + o.push_back(new MirrorImageSiteStatusOnDisk()); + o.push_back(new MirrorImageSiteStatusOnDisk( + {"", MIRROR_IMAGE_STATUS_STATE_ERROR, "error"})); + o.push_back(new MirrorImageSiteStatusOnDisk( + {"siteA", MIRROR_IMAGE_STATUS_STATE_STOPPED, ""})); +} + +int MirrorImageStatus::get_local_mirror_image_site_status( + MirrorImageSiteStatus* status) const { + auto it = std::find_if( + mirror_image_site_statuses.begin(), + mirror_image_site_statuses.end(), + [](const MirrorImageSiteStatus& status) { + return status.mirror_uuid == MirrorImageSiteStatus::LOCAL_MIRROR_UUID; + }); + if (it == mirror_image_site_statuses.end()) { + return -ENOENT; + } + + *status = *it; + return 0; +} + +void MirrorImageStatus::encode(bufferlist &bl) const { + // don't break compatibility for extra site statuses + ENCODE_START(2, 1, bl); + + // local site status + MirrorImageSiteStatus local_status; + int r = get_local_mirror_image_site_status(&local_status); + local_status.encode_meta(1, bl); + + bool local_status_valid = (r >= 0); + encode(local_status_valid, bl); + + // remote site statuses + __u32 n = mirror_image_site_statuses.size(); + if (local_status_valid) { + --n; + } + encode(n, bl); + + for (auto& 
status : mirror_image_site_statuses) { + if (status.mirror_uuid == MirrorImageSiteStatus::LOCAL_MIRROR_UUID) { + continue; + } + status.encode_meta(2, bl); + } + ENCODE_FINISH(bl); +} + +void MirrorImageStatus::decode(bufferlist::const_iterator &it) { + DECODE_START(2, it); + + // local site status + MirrorImageSiteStatus local_status; + local_status.decode_meta(1, it); + + if (struct_v < 2) { + mirror_image_site_statuses.push_back(local_status); + } else { + bool local_status_valid; + decode(local_status_valid, it); + + __u32 n; + decode(n, it); + if (local_status_valid) { + ++n; + } + + mirror_image_site_statuses.resize(n); + for (auto status_it = mirror_image_site_statuses.begin(); + status_it != mirror_image_site_statuses.end(); ++status_it) { + if (local_status_valid && + status_it == mirror_image_site_statuses.begin()) { + *status_it = local_status; + continue; + } + + // remote site status + status_it->decode_meta(struct_v, it); + } + } + DECODE_FINISH(it); +} + +void MirrorImageStatus::dump(Formatter *f) const { + MirrorImageSiteStatus local_status; + int r = get_local_mirror_image_site_status(&local_status); + if (r >= 0) { + local_status.dump(f); + } + + f->open_array_section("remotes"); + for (auto& status : mirror_image_site_statuses) { + if (status.mirror_uuid == MirrorImageSiteStatus::LOCAL_MIRROR_UUID) { + continue; + } + + f->open_object_section("remote"); + status.dump(f); + f->close_section(); + } + f->close_section(); +} + +bool MirrorImageStatus::operator==(const MirrorImageStatus &rhs) const { + return (mirror_image_site_statuses == rhs.mirror_image_site_statuses); +} + +void MirrorImageStatus::generate_test_instances( + std::list<MirrorImageStatus*> &o) { + o.push_back(new MirrorImageStatus()); + o.push_back(new MirrorImageStatus({{"", MIRROR_IMAGE_STATUS_STATE_ERROR, ""}})); + o.push_back(new MirrorImageStatus({{"", MIRROR_IMAGE_STATUS_STATE_STOPPED, ""}, + {"siteA", MIRROR_IMAGE_STATUS_STATE_REPLAYING, ""}})); +} + +std::ostream& operator<<(std::ostream& os, + const MirrorImageStatus& status) { + os << "{"; + MirrorImageSiteStatus local_status; + int r = status.get_local_mirror_image_site_status(&local_status); + if (r >= 0) { + os << "state=" << local_status.state_to_string() << ", " + << "description=" << local_status.description << ", " + << "last_update=" << local_status.last_update << ", "; + } + + os << "remotes=["; + for (auto& remote_status : status.mirror_image_site_statuses) { + if (remote_status.mirror_uuid == MirrorImageSiteStatus::LOCAL_MIRROR_UUID) { + continue; + } + + os << "{" + << "mirror_uuid=" << remote_status.mirror_uuid<< ", " + << "state=" << remote_status.state_to_string() << ", " + << "description=" << remote_status.description << ", " + << "last_update=" << remote_status.last_update + << "}"; + } + os << "]}"; + return os; +} + +void ParentImageSpec::encode(bufferlist& bl) const { + ENCODE_START(1, 1, bl); + encode(pool_id, bl); + encode(pool_namespace, bl); + encode(image_id, bl); + encode(snap_id, bl); + ENCODE_FINISH(bl); +} + +void ParentImageSpec::decode(bufferlist::const_iterator& bl) { + DECODE_START(1, bl); + decode(pool_id, bl); + decode(pool_namespace, bl); + decode(image_id, bl); + decode(snap_id, bl); + DECODE_FINISH(bl); +} + +void ParentImageSpec::dump(Formatter *f) const { + f->dump_int("pool_id", pool_id); + f->dump_string("pool_namespace", pool_namespace); + f->dump_string("image_id", image_id); + f->dump_unsigned("snap_id", snap_id); +} + +void ParentImageSpec::generate_test_instances(std::list<ParentImageSpec*>& o) { + 
o.push_back(new ParentImageSpec{}); + o.push_back(new ParentImageSpec{1, "", "foo", 3}); + o.push_back(new ParentImageSpec{1, "ns", "foo", 3}); +} + +std::ostream& operator<<(std::ostream& os, const ParentImageSpec& rhs) { + os << "[" + << "pool_id=" << rhs.pool_id << ", " + << "pool_namespace=" << rhs.pool_namespace << ", " + << "image_id=" << rhs.image_id << ", " + << "snap_id=" << rhs.snap_id + << "]"; + return os; +} + +void ChildImageSpec::encode(bufferlist &bl) const { + ENCODE_START(2, 1, bl); + encode(pool_id, bl); + encode(image_id, bl); + encode(pool_namespace, bl); + ENCODE_FINISH(bl); +} + +void ChildImageSpec::decode(bufferlist::const_iterator &it) { + DECODE_START(2, it); + decode(pool_id, it); + decode(image_id, it); + if (struct_v >= 2) { + decode(pool_namespace, it); + } + DECODE_FINISH(it); +} + +void ChildImageSpec::dump(Formatter *f) const { + f->dump_int("pool_id", pool_id); + f->dump_string("pool_namespace", pool_namespace); + f->dump_string("image_id", image_id); +} + +void ChildImageSpec::generate_test_instances(std::list<ChildImageSpec*> &o) { + o.push_back(new ChildImageSpec()); + o.push_back(new ChildImageSpec(123, "", "abc")); + o.push_back(new ChildImageSpec(123, "ns", "abc")); +} + +std::ostream& operator<<(std::ostream& os, const ChildImageSpec& rhs) { + os << "[" + << "pool_id=" << rhs.pool_id << ", " + << "pool_namespace=" << rhs.pool_namespace << ", " + << "image_id=" << rhs.image_id + << "]"; + return os; +} + +void GroupImageSpec::encode(bufferlist &bl) const { + ENCODE_START(1, 1, bl); + encode(image_id, bl); + encode(pool_id, bl); + ENCODE_FINISH(bl); +} + +void GroupImageSpec::decode(bufferlist::const_iterator &it) { + DECODE_START(1, it); + decode(image_id, it); + decode(pool_id, it); + DECODE_FINISH(it); +} + +void GroupImageSpec::dump(Formatter *f) const { + f->dump_string("image_id", image_id); + f->dump_int("pool_id", pool_id); +} + +int GroupImageSpec::from_key(const std::string &image_key, + GroupImageSpec *spec) { + if (nullptr == spec) return -EINVAL; + int prefix_len = cls::rbd::RBD_GROUP_IMAGE_KEY_PREFIX.size(); + std::string data_string = image_key.substr(prefix_len, + image_key.size() - prefix_len); + size_t p = data_string.find("_"); + if (std::string::npos == p) { + return -EIO; + } + data_string[p] = ' '; + + istringstream iss(data_string); + uint64_t pool_id; + string image_id; + iss >> std::hex >> pool_id >> image_id; + + spec->image_id = image_id; + spec->pool_id = pool_id; + return 0; +} + +std::string GroupImageSpec::image_key() { + if (-1 == pool_id) + return ""; + else { + ostringstream oss; + oss << RBD_GROUP_IMAGE_KEY_PREFIX << std::setw(16) + << std::setfill('0') << std::hex << pool_id << "_" << image_id; + return oss.str(); + } +} + +void GroupImageSpec::generate_test_instances(std::list<GroupImageSpec*> &o) { + o.push_back(new GroupImageSpec("10152ae8944a", 0)); + o.push_back(new GroupImageSpec("1018643c9869", 3)); +} + +void GroupImageStatus::encode(bufferlist &bl) const { + ENCODE_START(1, 1, bl); + encode(spec, bl); + encode(state, bl); + ENCODE_FINISH(bl); +} + +void GroupImageStatus::decode(bufferlist::const_iterator &it) { + DECODE_START(1, it); + decode(spec, it); + decode(state, it); + DECODE_FINISH(it); +} + +std::string GroupImageStatus::state_to_string() const { + std::stringstream ss; + if (state == GROUP_IMAGE_LINK_STATE_INCOMPLETE) { + ss << "incomplete"; + } + if (state == GROUP_IMAGE_LINK_STATE_ATTACHED) { + ss << "attached"; + } + return ss.str(); +} + +void GroupImageStatus::dump(Formatter *f) const { + 
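spec.dump(f);
+  f->dump_string("state", state_to_string());
+}
+
+// Worked example: image_key() above builds the directory omap key as "image_"
+// plus the zero-padded 16-digit hex pool id, "_", and the image id, which
+// from_key() parses back:
+//
+//   GroupImageSpec spec("1018643c9869", 3);
+//   spec.image_key();  // "image_0000000000000003_1018643c9869"
+//
+//   GroupImageSpec parsed;
+//   GroupImageSpec::from_key(spec.image_key(), &parsed);
+//   // parsed.pool_id == 3, parsed.image_id == "1018643c9869"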
+
+void GroupImageStatus::generate_test_instances(std::list<GroupImageStatus*> &o) {
+  o.push_back(new GroupImageStatus(GroupImageSpec("10152ae8944a", 0), GROUP_IMAGE_LINK_STATE_ATTACHED));
+  o.push_back(new GroupImageStatus(GroupImageSpec("1018643c9869", 3), GROUP_IMAGE_LINK_STATE_ATTACHED));
+  o.push_back(new GroupImageStatus(GroupImageSpec("10152ae8944a", 0), GROUP_IMAGE_LINK_STATE_INCOMPLETE));
+  o.push_back(new GroupImageStatus(GroupImageSpec("1018643c9869", 3), GROUP_IMAGE_LINK_STATE_INCOMPLETE));
+}
+
+
+void GroupSpec::encode(bufferlist &bl) const {
+  ENCODE_START(1, 1, bl);
+  encode(pool_id, bl);
+  encode(group_id, bl);
+  ENCODE_FINISH(bl);
+}
+
+void GroupSpec::decode(bufferlist::const_iterator &it) {
+  DECODE_START(1, it);
+  decode(pool_id, it);
+  decode(group_id, it);
+  DECODE_FINISH(it);
+}
+
+void GroupSpec::dump(Formatter *f) const {
+  f->dump_string("group_id", group_id);
+  f->dump_int("pool_id", pool_id);
+}
+
+bool GroupSpec::is_valid() const {
+  return (!group_id.empty()) && (pool_id != -1);
+}
+
+void GroupSpec::generate_test_instances(std::list<GroupSpec *> &o) {
+  o.push_back(new GroupSpec("10152ae8944a", 0));
+  o.push_back(new GroupSpec("1018643c9869", 3));
+}
+
+void GroupSnapshotNamespace::encode(bufferlist& bl) const {
+  using ceph::encode;
+  encode(group_pool, bl);
+  encode(group_id, bl);
+  encode(group_snapshot_id, bl);
+}
+
+void GroupSnapshotNamespace::decode(bufferlist::const_iterator& it) {
+  using ceph::decode;
+  decode(group_pool, it);
+  decode(group_id, it);
+  decode(group_snapshot_id, it);
+}
+
+void GroupSnapshotNamespace::dump(Formatter *f) const {
+  f->dump_int("group_pool", group_pool);
+  f->dump_string("group_id", group_id);
+  f->dump_string("group_snapshot_id", group_snapshot_id);
+}
+
+void TrashSnapshotNamespace::encode(bufferlist& bl) const {
+  using ceph::encode;
+  encode(original_name, bl);
+  encode(static_cast<uint32_t>(original_snapshot_namespace_type), bl);
+}
+
+void TrashSnapshotNamespace::decode(bufferlist::const_iterator& it) {
+  using ceph::decode;
+  decode(original_name, it);
+  uint32_t snap_type;
+  decode(snap_type, it);
+  original_snapshot_namespace_type = static_cast<SnapshotNamespaceType>(
+      snap_type);
+}
+
+void TrashSnapshotNamespace::dump(Formatter *f) const {
+  f->dump_string("original_name", original_name);
+  f->dump_stream("original_snapshot_namespace")
+    << original_snapshot_namespace_type;
+}
+
+void MirrorSnapshotNamespace::encode(bufferlist& bl) const {
+  using ceph::encode;
+  encode(state, bl);
+  encode(complete, bl);
+  encode(mirror_peer_uuids, bl);
+  encode(primary_mirror_uuid, bl);
+  encode(primary_snap_id, bl);
+  encode(last_copied_object_number, bl);
+  encode(snap_seqs, bl);
+}
+
+void MirrorSnapshotNamespace::decode(bufferlist::const_iterator& it) {
+  using ceph::decode;
+  decode(state, it);
+  decode(complete, it);
+  decode(mirror_peer_uuids, it);
+  decode(primary_mirror_uuid, it);
+  decode(primary_snap_id, it);
+  decode(last_copied_object_number, it);
+  decode(snap_seqs, it);
+}
+
+void MirrorSnapshotNamespace::dump(Formatter *f) const {
+  f->dump_stream("state") << state;
+  f->dump_bool("complete", complete);
+  f->open_array_section("mirror_peer_uuids");
+  for (auto &peer : mirror_peer_uuids) {
+    f->dump_string("mirror_peer_uuid", peer);
+  }
+  f->close_section();
+  if (is_primary()) {
+    f->dump_unsigned("clean_since_snap_id", clean_since_snap_id);
+  } else {
+    f->dump_string("primary_mirror_uuid", primary_mirror_uuid);
f->dump_unsigned("primary_snap_id", primary_snap_id); + f->dump_unsigned("last_copied_object_number", last_copied_object_number); + f->dump_stream("snap_seqs") << snap_seqs; + } +} + +class EncodeSnapshotNamespaceVisitor { +public: + explicit EncodeSnapshotNamespaceVisitor(bufferlist &bl) : m_bl(bl) { + } + + template <typename T> + inline void operator()(const T& t) const { + using ceph::encode; + encode(static_cast<uint32_t>(T::SNAPSHOT_NAMESPACE_TYPE), m_bl); + t.encode(m_bl); + } + +private: + bufferlist &m_bl; +}; + +class DecodeSnapshotNamespaceVisitor { +public: + DecodeSnapshotNamespaceVisitor(bufferlist::const_iterator &iter) + : m_iter(iter) { + } + + template <typename T> + inline void operator()(T& t) const { + t.decode(m_iter); + } +private: + bufferlist::const_iterator &m_iter; +}; + +class DumpSnapshotNamespaceVisitor { +public: + explicit DumpSnapshotNamespaceVisitor(Formatter *formatter, const std::string &key) + : m_formatter(formatter), m_key(key) {} + + template <typename T> + inline void operator()(const T& t) const { + auto type = T::SNAPSHOT_NAMESPACE_TYPE; + m_formatter->dump_string(m_key.c_str(), stringify(type)); + t.dump(m_formatter); + } +private: + ceph::Formatter *m_formatter; + std::string m_key; +}; + +class GetTypeVisitor { +public: + template <typename T> + inline SnapshotNamespaceType operator()(const T&) const { + return static_cast<SnapshotNamespaceType>(T::SNAPSHOT_NAMESPACE_TYPE); + } +}; + +SnapshotNamespaceType get_snap_namespace_type( + const SnapshotNamespace& snapshot_namespace) { + return static_cast<SnapshotNamespaceType>(snapshot_namespace.visit( + GetTypeVisitor())); +} + +void SnapshotInfo::encode(bufferlist& bl) const { + ENCODE_START(1, 1, bl); + encode(id, bl); + encode(snapshot_namespace, bl); + encode(name, bl); + encode(image_size, bl); + encode(timestamp, bl); + encode(child_count, bl); + ENCODE_FINISH(bl); +} + +void SnapshotInfo::decode(bufferlist::const_iterator& it) { + DECODE_START(1, it); + decode(id, it); + decode(snapshot_namespace, it); + decode(name, it); + decode(image_size, it); + decode(timestamp, it); + decode(child_count, it); + DECODE_FINISH(it); +} + +void SnapshotInfo::dump(Formatter *f) const { + f->dump_unsigned("id", id); + f->open_object_section("namespace"); + snapshot_namespace.visit(DumpSnapshotNamespaceVisitor(f, "type")); + f->close_section(); + f->dump_string("name", name); + f->dump_unsigned("image_size", image_size); + f->dump_stream("timestamp") << timestamp; +} + +void SnapshotInfo::generate_test_instances(std::list<SnapshotInfo*> &o) { + o.push_back(new SnapshotInfo(1ULL, UserSnapshotNamespace{}, "snap1", 123, + {123456, 0}, 12)); + o.push_back(new SnapshotInfo(2ULL, + GroupSnapshotNamespace{567, "group1", "snap1"}, + "snap1", 123, {123456, 0}, 987)); + o.push_back(new SnapshotInfo(3ULL, + TrashSnapshotNamespace{ + SNAPSHOT_NAMESPACE_TYPE_USER, "snap1"}, + "12345", 123, {123456, 0}, 429)); + o.push_back(new SnapshotInfo(1ULL, + MirrorSnapshotNamespace{MIRROR_SNAPSHOT_STATE_PRIMARY, + {"1", "2"}, "", CEPH_NOSNAP}, + "snap1", 123, {123456, 0}, 12)); + o.push_back(new SnapshotInfo(1ULL, + MirrorSnapshotNamespace{MIRROR_SNAPSHOT_STATE_NON_PRIMARY, + {"1", "2"}, "uuid", 123}, + "snap1", 123, {123456, 0}, 12)); +} + +void SnapshotNamespace::encode(bufferlist& bl) const { + ENCODE_START(1, 1, bl); + visit(EncodeSnapshotNamespaceVisitor(bl)); + ENCODE_FINISH(bl); +} + +void SnapshotNamespace::decode(bufferlist::const_iterator &p) +{ + DECODE_START(1, p); + uint32_t snap_type; + decode(snap_type, p); + switch 
(snap_type) { + case cls::rbd::SNAPSHOT_NAMESPACE_TYPE_USER: + *this = UserSnapshotNamespace(); + break; + case cls::rbd::SNAPSHOT_NAMESPACE_TYPE_GROUP: + *this = GroupSnapshotNamespace(); + break; + case cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH: + *this = TrashSnapshotNamespace(); + break; + case cls::rbd::SNAPSHOT_NAMESPACE_TYPE_MIRROR: + *this = MirrorSnapshotNamespace(); + break; + default: + *this = UnknownSnapshotNamespace(); + break; + } + visit(DecodeSnapshotNamespaceVisitor(p)); + DECODE_FINISH(p); +} + +void SnapshotNamespace::dump(Formatter *f) const { + visit(DumpSnapshotNamespaceVisitor(f, "snapshot_namespace_type")); +} + +void SnapshotNamespace::generate_test_instances(std::list<SnapshotNamespace*> &o) { + o.push_back(new SnapshotNamespace(UserSnapshotNamespace())); + o.push_back(new SnapshotNamespace(GroupSnapshotNamespace(0, "10152ae8944a", + "2118643c9732"))); + o.push_back(new SnapshotNamespace(GroupSnapshotNamespace(5, "1018643c9869", + "33352be8933c"))); + o.push_back(new SnapshotNamespace(TrashSnapshotNamespace())); + o.push_back(new SnapshotNamespace(MirrorSnapshotNamespace(MIRROR_SNAPSHOT_STATE_PRIMARY, + {"peer uuid"}, + "", CEPH_NOSNAP))); + o.push_back(new SnapshotNamespace(MirrorSnapshotNamespace(MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED, + {"peer uuid"}, + "", CEPH_NOSNAP))); + o.push_back(new SnapshotNamespace(MirrorSnapshotNamespace(MIRROR_SNAPSHOT_STATE_NON_PRIMARY, + {"peer uuid"}, + "uuid", 123))); + o.push_back(new SnapshotNamespace(MirrorSnapshotNamespace(MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED, + {"peer uuid"}, + "uuid", 123))); +} + +std::ostream& operator<<(std::ostream& os, const SnapshotNamespace& ns) { + return ns.visit([&os](const auto& val) -> std::ostream& { + return os << val; + }); +} + +std::ostream& operator<<(std::ostream& os, const SnapshotNamespaceType& type) { + switch (type) { + case SNAPSHOT_NAMESPACE_TYPE_USER: + os << "user"; + break; + case SNAPSHOT_NAMESPACE_TYPE_GROUP: + os << "group"; + break; + case SNAPSHOT_NAMESPACE_TYPE_TRASH: + os << "trash"; + break; + case SNAPSHOT_NAMESPACE_TYPE_MIRROR: + os << "mirror"; + break; + default: + os << "unknown"; + break; + } + return os; +} + +std::ostream& operator<<(std::ostream& os, const UserSnapshotNamespace& ns) { + os << "[" << SNAPSHOT_NAMESPACE_TYPE_USER << "]"; + return os; +} + +std::ostream& operator<<(std::ostream& os, const GroupSnapshotNamespace& ns) { + os << "[" << SNAPSHOT_NAMESPACE_TYPE_GROUP << " " + << "group_pool=" << ns.group_pool << ", " + << "group_id=" << ns.group_id << ", " + << "group_snapshot_id=" << ns.group_snapshot_id << "]"; + return os; +} + +std::ostream& operator<<(std::ostream& os, const TrashSnapshotNamespace& ns) { + os << "[" << SNAPSHOT_NAMESPACE_TYPE_TRASH << " " + << "original_name=" << ns.original_name << ", " + << "original_snapshot_namespace=" << ns.original_snapshot_namespace_type + << "]"; + return os; +} + +std::ostream& operator<<(std::ostream& os, const MirrorSnapshotNamespace& ns) { + os << "[" << SNAPSHOT_NAMESPACE_TYPE_MIRROR << " " + << "state=" << ns.state << ", " + << "complete=" << ns.complete << ", " + << "mirror_peer_uuids=" << ns.mirror_peer_uuids << ", "; + if (ns.is_primary()) { + os << "clean_since_snap_id=" << ns.clean_since_snap_id; + } else { + os << "primary_mirror_uuid=" << ns.primary_mirror_uuid << ", " + << "primary_snap_id=" << ns.primary_snap_id << ", " + << "last_copied_object_number=" << ns.last_copied_object_number << ", " + << "snap_seqs=" << ns.snap_seqs; + } + os << "]"; + return os; +} + +std::ostream& 
operator<<(std::ostream& os, const UnknownSnapshotNamespace& ns) { + os << "[unknown]"; + return os; +} + +std::ostream& operator<<(std::ostream& os, MirrorSnapshotState type) { + switch (type) { + case MIRROR_SNAPSHOT_STATE_PRIMARY: + os << "primary"; + break; + case MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED: + os << "primary (demoted)"; + break; + case MIRROR_SNAPSHOT_STATE_NON_PRIMARY: + os << "non-primary"; + break; + case MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED: + os << "non-primary (demoted)"; + break; + default: + os << "unknown"; + break; + } + return os; +} + +void ImageSnapshotSpec::encode(bufferlist& bl) const { + using ceph::encode; + ENCODE_START(1, 1, bl); + encode(pool, bl); + encode(image_id, bl); + encode(snap_id, bl); + ENCODE_FINISH(bl); +} + +void ImageSnapshotSpec::decode(bufferlist::const_iterator& it) { + using ceph::decode; + DECODE_START(1, it); + decode(pool, it); + decode(image_id, it); + decode(snap_id, it); + DECODE_FINISH(it); +} + +void ImageSnapshotSpec::dump(Formatter *f) const { + f->dump_int("pool", pool); + f->dump_string("image_id", image_id); + f->dump_int("snap_id", snap_id); +} + +void ImageSnapshotSpec::generate_test_instances(std::list<ImageSnapshotSpec *> &o) { + o.push_back(new ImageSnapshotSpec(0, "myimage", 2)); + o.push_back(new ImageSnapshotSpec(1, "testimage", 7)); +} + +void GroupSnapshot::encode(bufferlist& bl) const { + using ceph::encode; + ENCODE_START(1, 1, bl); + encode(id, bl); + encode(name, bl); + encode(state, bl); + encode(snaps, bl); + ENCODE_FINISH(bl); +} + +void GroupSnapshot::decode(bufferlist::const_iterator& it) { + using ceph::decode; + DECODE_START(1, it); + decode(id, it); + decode(name, it); + decode(state, it); + decode(snaps, it); + DECODE_FINISH(it); +} + +void GroupSnapshot::dump(Formatter *f) const { + f->dump_string("id", id); + f->dump_string("name", name); + f->dump_int("state", state); +} + +void GroupSnapshot::generate_test_instances(std::list<GroupSnapshot *> &o) { + o.push_back(new GroupSnapshot("10152ae8944a", "groupsnapshot1", GROUP_SNAPSHOT_STATE_INCOMPLETE)); + o.push_back(new GroupSnapshot("1018643c9869", "groupsnapshot2", GROUP_SNAPSHOT_STATE_COMPLETE)); +} +void TrashImageSpec::encode(bufferlist& bl) const { + ENCODE_START(2, 1, bl); + encode(source, bl); + encode(name, bl); + encode(deletion_time, bl); + encode(deferment_end_time, bl); + encode(state, bl); + ENCODE_FINISH(bl); +} + +void TrashImageSpec::decode(bufferlist::const_iterator &it) { + DECODE_START(2, it); + decode(source, it); + decode(name, it); + decode(deletion_time, it); + decode(deferment_end_time, it); + if (struct_v >= 2) { + decode(state, it); + } + DECODE_FINISH(it); +} + +void TrashImageSpec::dump(Formatter *f) const { + f->dump_stream("source") << source; + f->dump_string("name", name); + f->dump_unsigned("deletion_time", deletion_time); + f->dump_unsigned("deferment_end_time", deferment_end_time); +} + +void MirrorImageMap::encode(bufferlist &bl) const { + ENCODE_START(1, 1, bl); + encode(instance_id, bl); + encode(mapped_time, bl); + encode(data, bl); + ENCODE_FINISH(bl); +} + +void MirrorImageMap::decode(bufferlist::const_iterator &it) { + DECODE_START(1, it); + decode(instance_id, it); + decode(mapped_time, it); + decode(data, it); + DECODE_FINISH(it); +} + +void MirrorImageMap::dump(Formatter *f) const { + f->dump_string("instance_id", instance_id); + f->dump_stream("mapped_time") << mapped_time; + + std::stringstream data_ss; + data.hexdump(data_ss); + f->dump_string("data", data_ss.str()); +} + +void 
MirrorImageMap::generate_test_instances( + std::list<MirrorImageMap*> &o) { + bufferlist data; + data.append(std::string(128, '1')); + + o.push_back(new MirrorImageMap("uuid-123", utime_t(), data)); + o.push_back(new MirrorImageMap("uuid-abc", utime_t(), data)); +} + +bool MirrorImageMap::operator==(const MirrorImageMap &rhs) const { + return instance_id == rhs.instance_id && mapped_time == rhs.mapped_time && + data.contents_equal(rhs.data); +} + +bool MirrorImageMap::operator<(const MirrorImageMap &rhs) const { + return instance_id < rhs.instance_id || + (instance_id == rhs.instance_id && mapped_time < rhs.mapped_time); +} + +std::ostream& operator<<(std::ostream& os, + const MirrorImageMap &image_map) { + return os << "[" << "instance_id=" << image_map.instance_id << ", mapped_time=" + << image_map.mapped_time << "]"; +} + +std::ostream& operator<<(std::ostream& os, + const MigrationHeaderType& type) { + switch (type) { + case MIGRATION_HEADER_TYPE_SRC: + os << "source"; + break; + case MIGRATION_HEADER_TYPE_DST: + os << "destination"; + break; + default: + os << "unknown (" << static_cast<uint32_t>(type) << ")"; + break; + } + return os; +} + +std::ostream& operator<<(std::ostream& os, + const MigrationState& migration_state) { + switch (migration_state) { + case MIGRATION_STATE_ERROR: + os << "error"; + break; + case MIGRATION_STATE_PREPARING: + os << "preparing"; + break; + case MIGRATION_STATE_PREPARED: + os << "prepared"; + break; + case MIGRATION_STATE_EXECUTING: + os << "executing"; + break; + case MIGRATION_STATE_EXECUTED: + os << "executed"; + break; + case MIGRATION_STATE_ABORTING: + os << "aborting"; + break; + default: + os << "unknown (" << static_cast<uint32_t>(migration_state) << ")"; + break; + } + return os; +} + +void MigrationSpec::encode(bufferlist& bl) const { + uint8_t min_version = 1; + if (!source_spec.empty()) { + min_version = 3; + } + + ENCODE_START(3, min_version, bl); + encode(header_type, bl); + encode(pool_id, bl); + encode(pool_namespace, bl); + encode(image_name, bl); + encode(image_id, bl); + encode(snap_seqs, bl); + encode(overlap, bl); + encode(flatten, bl); + encode(mirroring, bl); + encode(state, bl); + encode(state_description, bl); + encode(static_cast<uint8_t>(mirror_image_mode), bl); + encode(source_spec, bl); + ENCODE_FINISH(bl); +} + +void MigrationSpec::decode(bufferlist::const_iterator& bl) { + DECODE_START(3, bl); + decode(header_type, bl); + decode(pool_id, bl); + decode(pool_namespace, bl); + decode(image_name, bl); + decode(image_id, bl); + decode(snap_seqs, bl); + decode(overlap, bl); + decode(flatten, bl); + decode(mirroring, bl); + decode(state, bl); + decode(state_description, bl); + if (struct_v >= 2) { + uint8_t int_mode; + decode(int_mode, bl); + mirror_image_mode = static_cast<MirrorImageMode>(int_mode); + } + if (struct_v >= 3) { + decode(source_spec, bl); + } + DECODE_FINISH(bl); +} + +std::ostream& operator<<(std::ostream& os, + const std::map<uint64_t, uint64_t>& snap_seqs) { + os << "{"; + size_t count = 0; + for (auto &it : snap_seqs) { + os << (count++ > 0 ? 
", " : "") << "(" << it.first << ", " << it.second + << ")"; + } + os << "}"; + return os; +} + +void MigrationSpec::dump(Formatter *f) const { + f->dump_stream("header_type") << header_type; + if (header_type == MIGRATION_HEADER_TYPE_SRC || + source_spec.empty()) { + f->dump_int("pool_id", pool_id); + f->dump_string("pool_namespace", pool_namespace); + f->dump_string("image_name", image_name); + f->dump_string("image_id", image_id); + } else { + f->dump_string("source_spec", source_spec); + } + f->dump_stream("snap_seqs") << snap_seqs; + f->dump_unsigned("overlap", overlap); + f->dump_bool("mirroring", mirroring); + f->dump_stream("mirror_image_mode") << mirror_image_mode; +} + +void MigrationSpec::generate_test_instances(std::list<MigrationSpec*> &o) { + o.push_back(new MigrationSpec()); + o.push_back(new MigrationSpec(MIGRATION_HEADER_TYPE_SRC, 1, "ns", + "image_name", "image_id", "", {{1, 2}}, 123, + true, MIRROR_IMAGE_MODE_SNAPSHOT, true, + MIGRATION_STATE_PREPARED, "description")); + o.push_back(new MigrationSpec(MIGRATION_HEADER_TYPE_DST, -1, "", "", "", + "{\"format\": \"raw\"}", {{1, 2}}, 123, + true, MIRROR_IMAGE_MODE_SNAPSHOT, true, + MIGRATION_STATE_PREPARED, "description")); +} + +std::ostream& operator<<(std::ostream& os, + const MigrationSpec& migration_spec) { + os << "[" + << "header_type=" << migration_spec.header_type << ", "; + if (migration_spec.header_type == MIGRATION_HEADER_TYPE_SRC || + migration_spec.source_spec.empty()) { + os << "pool_id=" << migration_spec.pool_id << ", " + << "pool_namespace=" << migration_spec.pool_namespace << ", " + << "image_name=" << migration_spec.image_name << ", " + << "image_id=" << migration_spec.image_id << ", "; + } else { + os << "source_spec=" << migration_spec.source_spec << ", "; + } + os << "snap_seqs=" << migration_spec.snap_seqs << ", " + << "overlap=" << migration_spec.overlap << ", " + << "flatten=" << migration_spec.flatten << ", " + << "mirroring=" << migration_spec.mirroring << ", " + << "mirror_image_mode=" << migration_spec.mirror_image_mode << ", " + << "state=" << migration_spec.state << ", " + << "state_description=" << migration_spec.state_description << "]"; + return os; +} + +std::ostream& operator<<(std::ostream& os, const AssertSnapcSeqState& state) { + switch (state) { + case ASSERT_SNAPC_SEQ_GT_SNAPSET_SEQ: + os << "gt"; + break; + case ASSERT_SNAPC_SEQ_LE_SNAPSET_SEQ: + os << "le"; + break; + default: + os << "unknown (" << static_cast<uint32_t>(state) << ")"; + break; + } + return os; +} + +void sanitize_entity_inst(entity_inst_t* entity_inst) { + // make all addrs of type ANY because the type isn't what uniquely + // identifies them and clients and on-disk formats can be encoded + // with different backwards compatibility settings. 
+ entity_inst->addr.set_type(entity_addr_t::TYPE_ANY); +} + +} // namespace rbd +} // namespace cls diff --git a/src/cls/rbd/cls_rbd_types.h b/src/cls/rbd/cls_rbd_types.h new file mode 100644 index 000000000..c8d2cb871 --- /dev/null +++ b/src/cls/rbd/cls_rbd_types.h @@ -0,0 +1,1038 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_RBD_TYPES_H +#define CEPH_CLS_RBD_TYPES_H + +#include "include/int_types.h" +#include "include/buffer.h" +#include "include/encoding.h" +#include "include/stringify.h" +#include "include/utime.h" +#include "msg/msg_types.h" +#include <iosfwd> +#include <string> +#include <set> +#include <variant> + +#define RBD_GROUP_REF "rbd_group_ref" + +namespace ceph { class Formatter; } + +namespace cls { +namespace rbd { + +static const uint32_t MAX_OBJECT_MAP_OBJECT_COUNT = 256000000; +static const std::string RBD_GROUP_IMAGE_KEY_PREFIX = "image_"; + +enum DirectoryState { + DIRECTORY_STATE_READY = 0, + DIRECTORY_STATE_ADD_DISABLED = 1 +}; + +inline void encode(DirectoryState state, ceph::buffer::list& bl, + uint64_t features=0) +{ + ceph::encode(static_cast<uint8_t>(state), bl); +} + +inline void decode(DirectoryState &state, ceph::buffer::list::const_iterator& it) +{ + uint8_t int_state; + ceph::decode(int_state, it); + state = static_cast<DirectoryState>(int_state); +} + +enum MirrorMode { + MIRROR_MODE_DISABLED = 0, + MIRROR_MODE_IMAGE = 1, + MIRROR_MODE_POOL = 2 +}; + +enum GroupImageLinkState { + GROUP_IMAGE_LINK_STATE_ATTACHED, + GROUP_IMAGE_LINK_STATE_INCOMPLETE +}; + +inline void encode(const GroupImageLinkState &state, ceph::buffer::list& bl, + uint64_t features=0) +{ + using ceph::encode; + encode(static_cast<uint8_t>(state), bl); +} + +inline void decode(GroupImageLinkState &state, ceph::buffer::list::const_iterator& it) +{ + uint8_t int_state; + using ceph::decode; + decode(int_state, it); + state = static_cast<GroupImageLinkState>(int_state); +} + +enum MirrorPeerDirection { + MIRROR_PEER_DIRECTION_RX = 0, + MIRROR_PEER_DIRECTION_TX = 1, + MIRROR_PEER_DIRECTION_RX_TX = 2 +}; + +std::ostream& operator<<(std::ostream& os, + MirrorPeerDirection mirror_peer_direction); + +struct MirrorPeer { + MirrorPeer() { + } + MirrorPeer(const std::string &uuid, + MirrorPeerDirection mirror_peer_direction, + const std::string& site_name, + const std::string& client_name, + const std::string& mirror_uuid) + : uuid(uuid), mirror_peer_direction(mirror_peer_direction), + site_name(site_name), client_name(client_name), + mirror_uuid(mirror_uuid) { + } + + std::string uuid; + + MirrorPeerDirection mirror_peer_direction = MIRROR_PEER_DIRECTION_RX_TX; + std::string site_name; + std::string client_name; // RX property + std::string mirror_uuid; + utime_t last_seen; + + inline bool is_valid() const { + switch (mirror_peer_direction) { + case MIRROR_PEER_DIRECTION_TX: + break; + case MIRROR_PEER_DIRECTION_RX: + case MIRROR_PEER_DIRECTION_RX_TX: + if (client_name.empty()) { + return false; + } + break; + default: + return false; + } + return (!uuid.empty() && !site_name.empty()); + } + + void encode(ceph::buffer::list &bl) const; + void decode(ceph::buffer::list::const_iterator &it); + void dump(ceph::Formatter *f) const; + + static void generate_test_instances(std::list<MirrorPeer*> &o); + + bool operator==(const MirrorPeer &rhs) const; + bool operator!=(const MirrorPeer &rhs) const { + return (!(*this == rhs)); + } +}; + +std::ostream& operator<<(std::ostream& os, const MirrorMode& mirror_mode); +std::ostream& 
operator<<(std::ostream& os, const MirrorPeer& peer); + +WRITE_CLASS_ENCODER(MirrorPeer); + +enum MirrorImageMode { + MIRROR_IMAGE_MODE_JOURNAL = 0, + MIRROR_IMAGE_MODE_SNAPSHOT = 1, +}; + +enum MirrorImageState { + MIRROR_IMAGE_STATE_DISABLING = 0, + MIRROR_IMAGE_STATE_ENABLED = 1, + MIRROR_IMAGE_STATE_DISABLED = 2, + MIRROR_IMAGE_STATE_CREATING = 3, +}; + +struct MirrorImage { + MirrorImage() { + } + MirrorImage(MirrorImageMode mode, const std::string &global_image_id, + MirrorImageState state) + : mode(mode), global_image_id(global_image_id), state(state) { + } + + MirrorImageMode mode = MIRROR_IMAGE_MODE_JOURNAL; + std::string global_image_id; + MirrorImageState state = MIRROR_IMAGE_STATE_DISABLING; + + void encode(ceph::buffer::list &bl) const; + void decode(ceph::buffer::list::const_iterator &it); + void dump(ceph::Formatter *f) const; + + static void generate_test_instances(std::list<MirrorImage*> &o); + + bool operator==(const MirrorImage &rhs) const; + bool operator<(const MirrorImage &rhs) const; +}; + +std::ostream& operator<<(std::ostream& os, const MirrorImageMode& mirror_mode); +std::ostream& operator<<(std::ostream& os, const MirrorImageState& mirror_state); +std::ostream& operator<<(std::ostream& os, const MirrorImage& mirror_image); + +WRITE_CLASS_ENCODER(MirrorImage); + +enum MirrorImageStatusState { + MIRROR_IMAGE_STATUS_STATE_UNKNOWN = 0, + MIRROR_IMAGE_STATUS_STATE_ERROR = 1, + MIRROR_IMAGE_STATUS_STATE_SYNCING = 2, + MIRROR_IMAGE_STATUS_STATE_STARTING_REPLAY = 3, + MIRROR_IMAGE_STATUS_STATE_REPLAYING = 4, + MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY = 5, + MIRROR_IMAGE_STATUS_STATE_STOPPED = 6, +}; + +inline void encode(const MirrorImageStatusState &state, ceph::buffer::list& bl, + uint64_t features=0) +{ + using ceph::encode; + encode(static_cast<uint8_t>(state), bl); +} + +inline void decode(MirrorImageStatusState &state, + ceph::buffer::list::const_iterator& it) +{ + uint8_t int_state; + using ceph::decode; + decode(int_state, it); + state = static_cast<MirrorImageStatusState>(int_state); +} + +std::ostream& operator<<(std::ostream& os, const MirrorImageStatusState& state); + +struct MirrorImageSiteStatus { + static const std::string LOCAL_MIRROR_UUID; + + MirrorImageSiteStatus() {} + MirrorImageSiteStatus(const std::string& mirror_uuid, + MirrorImageStatusState state, + const std::string &description) + : mirror_uuid(mirror_uuid), state(state), description(description) { + } + + std::string mirror_uuid = LOCAL_MIRROR_UUID; + MirrorImageStatusState state = MIRROR_IMAGE_STATUS_STATE_UNKNOWN; + std::string description; + utime_t last_update; + bool up = false; + + void encode_meta(uint8_t version, ceph::buffer::list &bl) const; + void decode_meta(uint8_t version, ceph::buffer::list::const_iterator &it); + + void encode(ceph::buffer::list &bl) const; + void decode(ceph::buffer::list::const_iterator &it); + void dump(ceph::Formatter *f) const; + + std::string state_to_string() const; + + bool operator==(const MirrorImageSiteStatus &rhs) const; + + static void generate_test_instances(std::list<MirrorImageSiteStatus*> &o); +}; +WRITE_CLASS_ENCODER(MirrorImageSiteStatus); + +std::ostream& operator<<(std::ostream& os, const MirrorImageSiteStatus& status); + +struct MirrorImageSiteStatusOnDisk : cls::rbd::MirrorImageSiteStatus { + entity_inst_t origin; + + MirrorImageSiteStatusOnDisk() { + } + MirrorImageSiteStatusOnDisk(const cls::rbd::MirrorImageSiteStatus &status) : + cls::rbd::MirrorImageSiteStatus(status) { + } + + void encode_meta(ceph::buffer::list &bl, uint64_t 
features) const; + void decode_meta(ceph::buffer::list::const_iterator &it); + + void encode(ceph::buffer::list &bl, uint64_t features) const; + void decode(ceph::buffer::list::const_iterator &it); + + static void generate_test_instances( + std::list<MirrorImageSiteStatusOnDisk*> &o); +}; +WRITE_CLASS_ENCODER_FEATURES(MirrorImageSiteStatusOnDisk) + +struct MirrorImageStatus { + typedef std::list<MirrorImageSiteStatus> MirrorImageSiteStatuses; + + MirrorImageStatus() {} + MirrorImageStatus(const MirrorImageSiteStatuses& statuses) + : mirror_image_site_statuses(statuses) { + } + + MirrorImageSiteStatuses mirror_image_site_statuses; + + int get_local_mirror_image_site_status(MirrorImageSiteStatus* status) const; + + void encode(ceph::buffer::list &bl) const; + void decode(ceph::buffer::list::const_iterator &it); + void dump(ceph::Formatter *f) const; + + bool operator==(const MirrorImageStatus& rhs) const; + + static void generate_test_instances(std::list<MirrorImageStatus*> &o); +}; +WRITE_CLASS_ENCODER(MirrorImageStatus); + +std::ostream& operator<<(std::ostream& os, const MirrorImageStatus& status); + +struct ParentImageSpec { + int64_t pool_id = -1; + std::string pool_namespace; + std::string image_id; + snapid_t snap_id = CEPH_NOSNAP; + + ParentImageSpec() { + } + ParentImageSpec(int64_t pool_id, const std::string& pool_namespace, + const std::string& image_id, snapid_t snap_id) + : pool_id(pool_id), pool_namespace(pool_namespace), image_id(image_id), + snap_id(snap_id) { + } + + bool exists() const { + return (pool_id >= 0 && !image_id.empty() && snap_id != CEPH_NOSNAP); + } + + bool operator==(const ParentImageSpec& rhs) const { + return ((pool_id == rhs.pool_id) && + (pool_namespace == rhs.pool_namespace) && + (image_id == rhs.image_id) && + (snap_id == rhs.snap_id)); + } + + bool operator!=(const ParentImageSpec& rhs) const { + return !(*this == rhs); + } + + void encode(ceph::buffer::list &bl) const; + void decode(ceph::buffer::list::const_iterator &it); + void dump(ceph::Formatter *f) const; + + static void generate_test_instances(std::list<ParentImageSpec*> &o); +}; + +WRITE_CLASS_ENCODER(ParentImageSpec); + +std::ostream& operator<<(std::ostream& os, const ParentImageSpec& rhs); + +struct ChildImageSpec { + int64_t pool_id = -1; + std::string pool_namespace; + std::string image_id; + + ChildImageSpec() {} + ChildImageSpec(int64_t pool_id, const std::string& pool_namespace, + const std::string& image_id) + : pool_id(pool_id), pool_namespace(pool_namespace), image_id(image_id) { + } + + void encode(ceph::buffer::list &bl) const; + void decode(ceph::buffer::list::const_iterator &it); + void dump(ceph::Formatter *f) const; + + static void generate_test_instances(std::list<ChildImageSpec*> &o); + + inline bool operator==(const ChildImageSpec& rhs) const { + return (pool_id == rhs.pool_id && + pool_namespace == rhs.pool_namespace && + image_id == rhs.image_id); + } + inline bool operator<(const ChildImageSpec& rhs) const { + if (pool_id != rhs.pool_id) { + return pool_id < rhs.pool_id; + } + if (pool_namespace != rhs.pool_namespace) { + return pool_namespace < rhs.pool_namespace; + } + return image_id < rhs.image_id; + } +}; +WRITE_CLASS_ENCODER(ChildImageSpec); + +std::ostream& operator<<(std::ostream& os, const ChildImageSpec& rhs); + +typedef std::set<ChildImageSpec> ChildImageSpecs; + +struct GroupImageSpec { + GroupImageSpec() {} + + GroupImageSpec(const std::string &image_id, int64_t pool_id) + : image_id(image_id), pool_id(pool_id) {} + + static int from_key(const std::string 
&image_key, GroupImageSpec *spec); + + std::string image_id; + int64_t pool_id = -1; + + void encode(ceph::buffer::list &bl) const; + void decode(ceph::buffer::list::const_iterator &it); + void dump(ceph::Formatter *f) const; + + static void generate_test_instances(std::list<GroupImageSpec*> &o); + + std::string image_key(); + +}; +WRITE_CLASS_ENCODER(GroupImageSpec); + +struct GroupImageStatus { + GroupImageStatus() {} + GroupImageStatus(const std::string &image_id, + int64_t pool_id, + GroupImageLinkState state) + : spec(image_id, pool_id), state(state) {} + + GroupImageStatus(GroupImageSpec spec, + GroupImageLinkState state) + : spec(spec), state(state) {} + + GroupImageSpec spec; + GroupImageLinkState state = GROUP_IMAGE_LINK_STATE_INCOMPLETE; + + void encode(ceph::buffer::list &bl) const; + void decode(ceph::buffer::list::const_iterator &it); + void dump(ceph::Formatter *f) const; + + static void generate_test_instances(std::list<GroupImageStatus*> &o); + + std::string state_to_string() const; +}; + +WRITE_CLASS_ENCODER(GroupImageStatus); + +struct GroupSpec { + GroupSpec() {} + GroupSpec(const std::string &group_id, int64_t pool_id) + : group_id(group_id), pool_id(pool_id) {} + + std::string group_id; + int64_t pool_id = -1; + + void encode(ceph::buffer::list &bl) const; + void decode(ceph::buffer::list::const_iterator &it); + void dump(ceph::Formatter *f) const; + bool is_valid() const; + + static void generate_test_instances(std::list<GroupSpec *> &o); +}; + +WRITE_CLASS_ENCODER(GroupSpec); + +enum SnapshotNamespaceType { + SNAPSHOT_NAMESPACE_TYPE_USER = 0, + SNAPSHOT_NAMESPACE_TYPE_GROUP = 1, + SNAPSHOT_NAMESPACE_TYPE_TRASH = 2, + SNAPSHOT_NAMESPACE_TYPE_MIRROR = 3, +}; + +struct UserSnapshotNamespace { + static const SnapshotNamespaceType SNAPSHOT_NAMESPACE_TYPE = + SNAPSHOT_NAMESPACE_TYPE_USER; + + UserSnapshotNamespace() {} + + void encode(ceph::buffer::list& bl) const {} + void decode(ceph::buffer::list::const_iterator& it) {} + + void dump(ceph::Formatter *f) const {} + + inline bool operator==(const UserSnapshotNamespace& usn) const { + return true; + } + + inline bool operator!=(const UserSnapshotNamespace& usn) const { + return false; + } + + inline bool operator<(const UserSnapshotNamespace& usn) const { + return false; + } +}; + +struct GroupSnapshotNamespace { + static const SnapshotNamespaceType SNAPSHOT_NAMESPACE_TYPE = + SNAPSHOT_NAMESPACE_TYPE_GROUP; + + GroupSnapshotNamespace() {} + + GroupSnapshotNamespace(int64_t _group_pool, + const std::string &_group_id, + const std::string &_group_snapshot_id) + : group_id(_group_id), group_pool(_group_pool), + group_snapshot_id(_group_snapshot_id) {} + + std::string group_id; + int64_t group_pool = 0; + std::string group_snapshot_id; + + void encode(ceph::buffer::list& bl) const; + void decode(ceph::buffer::list::const_iterator& it); + + void dump(ceph::Formatter *f) const; + + inline bool operator==(const GroupSnapshotNamespace& gsn) const { + return group_pool == gsn.group_pool && + group_id == gsn.group_id && + group_snapshot_id == gsn.group_snapshot_id; + } + + inline bool operator!=(const GroupSnapshotNamespace& gsn) const { + return !operator==(gsn); + } + + inline bool operator<(const GroupSnapshotNamespace& gsn) const { + if (group_pool != gsn.group_pool) { + return group_pool < gsn.group_pool; + } + if (group_id != gsn.group_id) { + return group_id < gsn.group_id; + } + return group_snapshot_id < gsn.group_snapshot_id; + } +}; + +struct TrashSnapshotNamespace { + static const SnapshotNamespaceType 
SNAPSHOT_NAMESPACE_TYPE = + SNAPSHOT_NAMESPACE_TYPE_TRASH; + + std::string original_name; + SnapshotNamespaceType original_snapshot_namespace_type = + SNAPSHOT_NAMESPACE_TYPE_USER; + + TrashSnapshotNamespace() {} + TrashSnapshotNamespace(SnapshotNamespaceType original_snapshot_namespace_type, + const std::string& original_name) + : original_name(original_name), + original_snapshot_namespace_type(original_snapshot_namespace_type) {} + + void encode(ceph::buffer::list& bl) const; + void decode(ceph::buffer::list::const_iterator& it); + void dump(ceph::Formatter *f) const; + + inline bool operator==(const TrashSnapshotNamespace& usn) const { + return true; + } + inline bool operator!=(const TrashSnapshotNamespace& usn) const { + return false; + } + inline bool operator<(const TrashSnapshotNamespace& usn) const { + return false; + } +}; + +enum MirrorSnapshotState { + MIRROR_SNAPSHOT_STATE_PRIMARY = 0, + MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED = 1, + MIRROR_SNAPSHOT_STATE_NON_PRIMARY = 2, + MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED = 3, +}; + +inline void encode(const MirrorSnapshotState &state, ceph::buffer::list& bl, + uint64_t features=0) { + using ceph::encode; + encode(static_cast<uint8_t>(state), bl); +} + +inline void decode(MirrorSnapshotState &state, ceph::buffer::list::const_iterator& it) { + using ceph::decode; + uint8_t int_state; + decode(int_state, it); + state = static_cast<MirrorSnapshotState>(int_state); +} + +std::ostream& operator<<(std::ostream& os, MirrorSnapshotState type); + +typedef std::map<uint64_t, uint64_t> SnapSeqs; + +struct MirrorSnapshotNamespace { + static const SnapshotNamespaceType SNAPSHOT_NAMESPACE_TYPE = + SNAPSHOT_NAMESPACE_TYPE_MIRROR; + + MirrorSnapshotState state = MIRROR_SNAPSHOT_STATE_NON_PRIMARY; + bool complete = false; + std::set<std::string> mirror_peer_uuids; + + std::string primary_mirror_uuid; + union { + snapid_t primary_snap_id = CEPH_NOSNAP; + snapid_t clean_since_snap_id; + }; + uint64_t last_copied_object_number = 0; + SnapSeqs snap_seqs; + + MirrorSnapshotNamespace() { + } + MirrorSnapshotNamespace(MirrorSnapshotState state, + const std::set<std::string> &mirror_peer_uuids, + const std::string& primary_mirror_uuid, + snapid_t primary_snap_id) + : state(state), mirror_peer_uuids(mirror_peer_uuids), + primary_mirror_uuid(primary_mirror_uuid), + primary_snap_id(primary_snap_id) { + } + MirrorSnapshotNamespace(MirrorSnapshotState state, + const std::set<std::string> &mirror_peer_uuids, + const std::string& primary_mirror_uuid, + snapid_t primary_snap_id, + bool complete, + uint64_t last_copied_object_number, + const SnapSeqs& snap_seqs) + : state(state), complete(complete), mirror_peer_uuids(mirror_peer_uuids), + primary_mirror_uuid(primary_mirror_uuid), + primary_snap_id(primary_snap_id), + last_copied_object_number(last_copied_object_number), + snap_seqs(snap_seqs) { + } + + inline bool is_primary() const { + return (state == MIRROR_SNAPSHOT_STATE_PRIMARY || + state == MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED); + } + + inline bool is_non_primary() const { + return (state == MIRROR_SNAPSHOT_STATE_NON_PRIMARY || + state == MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED); + } + + inline bool is_demoted() const { + return (state == MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED || + state == MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED); + } + + inline bool is_orphan() const { + return (is_non_primary() && + primary_mirror_uuid.empty() && + primary_snap_id == CEPH_NOSNAP); + } + + void encode(ceph::buffer::list& bl) const; + void 
decode(ceph::buffer::list::const_iterator& it); + + void dump(ceph::Formatter *f) const; + + inline bool operator==(const MirrorSnapshotNamespace& rhs) const { + return state == rhs.state && + complete == rhs.complete && + mirror_peer_uuids == rhs.mirror_peer_uuids && + primary_mirror_uuid == rhs.primary_mirror_uuid && + primary_snap_id == rhs.primary_snap_id && + last_copied_object_number == rhs.last_copied_object_number && + snap_seqs == rhs.snap_seqs; + } + + inline bool operator!=(const MirrorSnapshotNamespace& rhs) const { + return !operator==(rhs); + } + + inline bool operator<(const MirrorSnapshotNamespace& rhs) const { + if (state != rhs.state) { + return state < rhs.state; + } else if (complete != rhs.complete) { + return complete < rhs.complete; + } else if (mirror_peer_uuids != rhs.mirror_peer_uuids) { + return mirror_peer_uuids < rhs.mirror_peer_uuids; + } else if (primary_mirror_uuid != rhs.primary_mirror_uuid) { + return primary_mirror_uuid < rhs.primary_mirror_uuid; + } else if (primary_snap_id != rhs.primary_snap_id) { + return primary_snap_id < rhs.primary_snap_id; + } else if (last_copied_object_number != rhs.last_copied_object_number) { + return last_copied_object_number < rhs.last_copied_object_number; + } else { + return snap_seqs < rhs.snap_seqs; + } + } +}; + +struct UnknownSnapshotNamespace { + static const SnapshotNamespaceType SNAPSHOT_NAMESPACE_TYPE = + static_cast<SnapshotNamespaceType>(-1); + + UnknownSnapshotNamespace() {} + + void encode(ceph::buffer::list& bl) const {} + void decode(ceph::buffer::list::const_iterator& it) {} + void dump(ceph::Formatter *f) const {} + + inline bool operator==(const UnknownSnapshotNamespace& gsn) const { + return true; + } + + inline bool operator!=(const UnknownSnapshotNamespace& gsn) const { + return false; + } + + inline bool operator<(const UnknownSnapshotNamespace& gsn) const { + return false; + } +}; + +std::ostream& operator<<(std::ostream& os, const SnapshotNamespaceType& type); +std::ostream& operator<<(std::ostream& os, const UserSnapshotNamespace& ns); +std::ostream& operator<<(std::ostream& os, const GroupSnapshotNamespace& ns); +std::ostream& operator<<(std::ostream& os, const TrashSnapshotNamespace& ns); +std::ostream& operator<<(std::ostream& os, const MirrorSnapshotNamespace& ns); +std::ostream& operator<<(std::ostream& os, const UnknownSnapshotNamespace& ns); + +typedef std::variant<UserSnapshotNamespace, + GroupSnapshotNamespace, + TrashSnapshotNamespace, + MirrorSnapshotNamespace, + UnknownSnapshotNamespace> SnapshotNamespaceVariant; + +struct SnapshotNamespace : public SnapshotNamespaceVariant { + using SnapshotNamespaceVariant::SnapshotNamespaceVariant; + + void encode(ceph::buffer::list& bl) const; + void decode(ceph::buffer::list::const_iterator& it); + void dump(ceph::Formatter *f) const; + + template <typename F> + decltype(auto) visit(F&& f) const & { + return std::visit(std::forward<F>(f), static_cast<const SnapshotNamespaceVariant&>(*this)); + } + template <typename F> + decltype(auto) visit(F&& f) & { + return std::visit(std::forward<F>(f), static_cast<SnapshotNamespaceVariant&>(*this)); + } + static void generate_test_instances(std::list<SnapshotNamespace*> &o); +}; +WRITE_CLASS_ENCODER(SnapshotNamespace); + +std::ostream& operator<<(std::ostream& os, const SnapshotNamespace& ns); + +SnapshotNamespaceType get_snap_namespace_type( + const SnapshotNamespace& snapshot_namespace); + +struct SnapshotInfo { + snapid_t id = CEPH_NOSNAP; + cls::rbd::SnapshotNamespace snapshot_namespace = 
{UserSnapshotNamespace{}}; + std::string name; + uint64_t image_size = 0; + utime_t timestamp; + uint32_t child_count = 0; + + SnapshotInfo() { + } + SnapshotInfo(snapid_t id, + const cls::rbd::SnapshotNamespace& snapshot_namespace, + const std::string& name, uint64_t image_size, + const utime_t& timestamp, uint32_t child_count) + : id(id), snapshot_namespace(snapshot_namespace), + name(name), image_size(image_size), timestamp(timestamp), + child_count(child_count) { + } + + void encode(ceph::buffer::list& bl) const; + void decode(ceph::buffer::list::const_iterator& it); + void dump(ceph::Formatter *f) const; + + static void generate_test_instances(std::list<SnapshotInfo*> &o); +}; +WRITE_CLASS_ENCODER(SnapshotInfo); + +enum GroupSnapshotState { + GROUP_SNAPSHOT_STATE_INCOMPLETE = 0, + GROUP_SNAPSHOT_STATE_COMPLETE = 1, +}; + +inline void encode(const GroupSnapshotState &state, ceph::buffer::list& bl, uint64_t features=0) +{ + using ceph::encode; + encode(static_cast<uint8_t>(state), bl); +} + +inline void decode(GroupSnapshotState &state, ceph::buffer::list::const_iterator& it) +{ + using ceph::decode; + uint8_t int_state; + decode(int_state, it); + state = static_cast<GroupSnapshotState>(int_state); +} + +struct ImageSnapshotSpec { + int64_t pool; + std::string image_id; + snapid_t snap_id; + + ImageSnapshotSpec() {} + ImageSnapshotSpec(int64_t _pool, + std::string _image_id, + snapid_t _snap_id) : pool(_pool), + image_id(_image_id), + snap_id(_snap_id) {} + + void encode(ceph::buffer::list& bl) const; + void decode(ceph::buffer::list::const_iterator& it); + + void dump(ceph::Formatter *f) const; + + static void generate_test_instances(std::list<ImageSnapshotSpec *> &o); +}; +WRITE_CLASS_ENCODER(ImageSnapshotSpec); + +struct GroupSnapshot { + std::string id; + std::string name; + GroupSnapshotState state = GROUP_SNAPSHOT_STATE_INCOMPLETE; + + GroupSnapshot() {} + GroupSnapshot(std::string _id, + std::string _name, + GroupSnapshotState _state) : id(_id), + name(_name), + state(_state) {} + + std::vector<ImageSnapshotSpec> snaps; + + void encode(ceph::buffer::list& bl) const; + void decode(ceph::buffer::list::const_iterator& it); + void dump(ceph::Formatter *f) const; + + static void generate_test_instances(std::list<GroupSnapshot *> &o); +}; +WRITE_CLASS_ENCODER(GroupSnapshot); +enum TrashImageSource { + TRASH_IMAGE_SOURCE_USER = 0, + TRASH_IMAGE_SOURCE_MIRRORING = 1, + TRASH_IMAGE_SOURCE_MIGRATION = 2, + TRASH_IMAGE_SOURCE_REMOVING = 3, + TRASH_IMAGE_SOURCE_USER_PARENT= 4, +}; + +inline std::ostream& operator<<(std::ostream& os, + const TrashImageSource& source) { + switch (source) { + case TRASH_IMAGE_SOURCE_USER: + os << "user"; + break; + case TRASH_IMAGE_SOURCE_MIRRORING: + os << "mirroring"; + break; + case TRASH_IMAGE_SOURCE_MIGRATION: + os << "migration"; + break; + case TRASH_IMAGE_SOURCE_REMOVING: + os << "removing"; + break; + default: + os << "unknown (" << static_cast<uint32_t>(source) << ")"; + break; + } + return os; +} + +inline void encode(const TrashImageSource &source, ceph::buffer::list& bl, + uint64_t features=0) +{ + using ceph::encode; + encode(static_cast<uint8_t>(source), bl); +} + +inline void decode(TrashImageSource &source, ceph::buffer::list::const_iterator& it) +{ + uint8_t int_source; + using ceph::decode; + decode(int_source, it); + source = static_cast<TrashImageSource>(int_source); +} + +enum TrashImageState { + TRASH_IMAGE_STATE_NORMAL = 0, + TRASH_IMAGE_STATE_MOVING = 1, + TRASH_IMAGE_STATE_REMOVING = 2, + TRASH_IMAGE_STATE_RESTORING = 3 +}; + +inline 
void encode(const TrashImageState &state, ceph::buffer::list &bl) +{ + using ceph::encode; + encode(static_cast<uint8_t>(state), bl); +} + +inline void decode(TrashImageState &state, ceph::buffer::list::const_iterator &it) +{ + uint8_t int_state; + using ceph::decode; + decode(int_state, it); + state = static_cast<TrashImageState>(int_state); +} + +struct TrashImageSpec { + TrashImageSource source = TRASH_IMAGE_SOURCE_USER; + std::string name; + utime_t deletion_time; // time of deletion + utime_t deferment_end_time; + TrashImageState state = TRASH_IMAGE_STATE_NORMAL; + + TrashImageSpec() {} + TrashImageSpec(TrashImageSource source, const std::string &name, + const utime_t& deletion_time, + const utime_t& deferment_end_time) + : source(source), name(name), deletion_time(deletion_time), + deferment_end_time(deferment_end_time) { + } + + void encode(ceph::buffer::list &bl) const; + void decode(ceph::buffer::list::const_iterator& it); + void dump(ceph::Formatter *f) const; + + inline bool operator==(const TrashImageSpec& rhs) const { + return (source == rhs.source && + name == rhs.name && + deletion_time == rhs.deletion_time && + deferment_end_time == rhs.deferment_end_time); + } +}; + +WRITE_CLASS_ENCODER(TrashImageSpec); + +struct MirrorImageMap { + MirrorImageMap() { + } + + MirrorImageMap(const std::string &instance_id, utime_t mapped_time, + const ceph::buffer::list &data) + : instance_id(instance_id), + mapped_time(mapped_time), + data(data) { + } + + std::string instance_id; + utime_t mapped_time; + ceph::buffer::list data; + + void encode(ceph::buffer::list &bl) const; + void decode(ceph::buffer::list::const_iterator &it); + void dump(ceph::Formatter *f) const; + + static void generate_test_instances(std::list<MirrorImageMap*> &o); + + bool operator==(const MirrorImageMap &rhs) const; + bool operator<(const MirrorImageMap &rhs) const; +}; + +std::ostream& operator<<(std::ostream& os, const MirrorImageMap &image_map); + +WRITE_CLASS_ENCODER(MirrorImageMap); + +enum MigrationHeaderType { + MIGRATION_HEADER_TYPE_SRC = 1, + MIGRATION_HEADER_TYPE_DST = 2, +}; + +inline void encode(const MigrationHeaderType &type, ceph::buffer::list& bl) { + using ceph::encode; + encode(static_cast<uint8_t>(type), bl); +} + +inline void decode(MigrationHeaderType &type, ceph::buffer::list::const_iterator& it) { + uint8_t int_type; + using ceph::decode; + decode(int_type, it); + type = static_cast<MigrationHeaderType>(int_type); +} + +enum MigrationState { + MIGRATION_STATE_ERROR = 0, + MIGRATION_STATE_PREPARING = 1, + MIGRATION_STATE_PREPARED = 2, + MIGRATION_STATE_EXECUTING = 3, + MIGRATION_STATE_EXECUTED = 4, + MIGRATION_STATE_ABORTING = 5, +}; + +inline void encode(const MigrationState &state, ceph::buffer::list& bl) { + using ceph::encode; + encode(static_cast<uint8_t>(state), bl); +} + +inline void decode(MigrationState &state, ceph::buffer::list::const_iterator& it) { + uint8_t int_state; + using ceph::decode; + decode(int_state, it); + state = static_cast<MigrationState>(int_state); +} + +std::ostream& operator<<(std::ostream& os, + const MigrationState& migration_state); + +struct MigrationSpec { + MigrationHeaderType header_type = MIGRATION_HEADER_TYPE_SRC; + int64_t pool_id = -1; + std::string pool_namespace; + std::string image_name; + std::string image_id; + std::string source_spec; + std::map<uint64_t, uint64_t> snap_seqs; + uint64_t overlap = 0; + bool flatten = false; + bool mirroring = false; + MirrorImageMode mirror_image_mode = MIRROR_IMAGE_MODE_JOURNAL; + MigrationState state = 
MIGRATION_STATE_ERROR; + std::string state_description; + + MigrationSpec() { + } + MigrationSpec(MigrationHeaderType header_type, int64_t pool_id, + const std::string& pool_namespace, + const std::string& image_name, const std::string &image_id, + const std::string& source_spec, + const std::map<uint64_t, uint64_t> &snap_seqs, uint64_t overlap, + bool mirroring, MirrorImageMode mirror_image_mode, bool flatten, + MigrationState state, const std::string &state_description) + : header_type(header_type), pool_id(pool_id), + pool_namespace(pool_namespace), image_name(image_name), + image_id(image_id), source_spec(source_spec), snap_seqs(snap_seqs), + overlap(overlap), flatten(flatten), mirroring(mirroring), + mirror_image_mode(mirror_image_mode), state(state), + state_description(state_description) { + } + + void encode(ceph::buffer::list &bl) const; + void decode(ceph::buffer::list::const_iterator& it); + void dump(ceph::Formatter *f) const; + + static void generate_test_instances(std::list<MigrationSpec*> &o); + + inline bool operator==(const MigrationSpec& ms) const { + return header_type == ms.header_type && pool_id == ms.pool_id && + pool_namespace == ms.pool_namespace && image_name == ms.image_name && + image_id == ms.image_id && source_spec == ms.source_spec && + snap_seqs == ms.snap_seqs && overlap == ms.overlap && + flatten == ms.flatten && mirroring == ms.mirroring && + mirror_image_mode == ms.mirror_image_mode && state == ms.state && + state_description == ms.state_description; + } +}; + +std::ostream& operator<<(std::ostream& os, const MigrationSpec& migration_spec); + +WRITE_CLASS_ENCODER(MigrationSpec); + +enum AssertSnapcSeqState { + ASSERT_SNAPC_SEQ_GT_SNAPSET_SEQ = 0, + ASSERT_SNAPC_SEQ_LE_SNAPSET_SEQ = 1, +}; + +inline void encode(const AssertSnapcSeqState &state, ceph::buffer::list& bl) { + using ceph::encode; + encode(static_cast<uint8_t>(state), bl); +} + +inline void decode(AssertSnapcSeqState &state, ceph::buffer::list::const_iterator& it) { + uint8_t int_state; + using ceph::decode; + decode(int_state, it); + state = static_cast<AssertSnapcSeqState>(int_state); +} + +std::ostream& operator<<(std::ostream& os, const AssertSnapcSeqState& state); + +void sanitize_entity_inst(entity_inst_t* entity_inst); + +} // namespace rbd +} // namespace cls + +#endif // CEPH_CLS_RBD_TYPES_H diff --git a/src/cls/refcount/cls_refcount.cc b/src/cls/refcount/cls_refcount.cc new file mode 100644 index 000000000..781d967f9 --- /dev/null +++ b/src/cls/refcount/cls_refcount.cc @@ -0,0 +1,217 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include <errno.h> + +#include "objclass/objclass.h" +#include "cls/refcount/cls_refcount_ops.h" + +#include "include/compat.h" + +using std::string; + +using ceph::bufferlist; + +CLS_VER(1,0) +CLS_NAME(refcount) + +#define REFCOUNT_ATTR "refcount" + +static string wildcard_tag; + +static int read_refcount(cls_method_context_t hctx, bool implicit_ref, obj_refcount *objr) +{ + bufferlist bl; + objr->refs.clear(); + int ret = cls_cxx_getxattr(hctx, REFCOUNT_ATTR, &bl); + if (ret == -ENODATA) { + if (implicit_ref) { + objr->refs[wildcard_tag] = true; + } + return 0; + } + if (ret < 0) + return ret; + + try { + auto iter = bl.cbegin(); + decode(*objr, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: read_refcount(): failed to decode refcount entry\n"); + return -EIO; + } + + return 0; +} + +static int set_refcount(cls_method_context_t hctx, const struct obj_refcount& objr) +{ + bufferlist 
bl; + + encode(objr, bl); + + int ret = cls_cxx_setxattr(hctx, REFCOUNT_ATTR, &bl); + if (ret < 0) + return ret; + + return 0; +} + +static int cls_rc_refcount_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + + cls_refcount_get_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_rc_refcount_get(): failed to decode entry\n"); + return -EINVAL; + } + + obj_refcount objr; + int ret = read_refcount(hctx, op.implicit_ref, &objr); + if (ret < 0) + return ret; + + CLS_LOG(10, "cls_rc_refcount_get() tag=%s\n", op.tag.c_str()); + + objr.refs[op.tag] = true; + + ret = set_refcount(hctx, objr); + if (ret < 0) + return ret; + + return 0; +} + +static int cls_rc_refcount_put(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + + cls_refcount_put_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_rc_refcount_put(): failed to decode entry\n"); + return -EINVAL; + } + + obj_refcount objr; + int ret = read_refcount(hctx, op.implicit_ref, &objr); + if (ret < 0) + return ret; + + if (objr.refs.empty()) {// shouldn't happen! + CLS_LOG(0, "ERROR: cls_rc_refcount_put() was called without any references!\n"); + return -EINVAL; + } + + CLS_LOG(10, "cls_rc_refcount_put() tag=%s\n", op.tag.c_str()); + + bool found = false; + auto iter = objr.refs.find(op.tag); + if (iter != objr.refs.end()) { + found = true; + } else if (op.implicit_ref) { + iter = objr.refs.find(wildcard_tag); + if (iter != objr.refs.end()) { + found = true; + } + } + + if (!found || + objr.retired_refs.find(op.tag) != objr.retired_refs.end()) + return 0; + + objr.retired_refs.insert(op.tag); + objr.refs.erase(iter); + + if (objr.refs.empty()) { + return cls_cxx_remove(hctx); + } + + ret = set_refcount(hctx, objr); + if (ret < 0) + return ret; + + return 0; +} + +static int cls_rc_refcount_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + + cls_refcount_set_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_refcount_set(): failed to decode entry\n"); + return -EINVAL; + } + + if (!op.refs.size()) { + return cls_cxx_remove(hctx); + } + + obj_refcount objr; + for (auto iter = op.refs.begin(); iter != op.refs.end(); ++iter) { + objr.refs[*iter] = true; + } + + int ret = set_refcount(hctx, objr); + if (ret < 0) + return ret; + + return 0; +} + +static int cls_rc_refcount_read(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + + cls_refcount_read_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_rc_refcount_read(): failed to decode entry\n"); + return -EINVAL; + } + + obj_refcount objr; + + cls_refcount_read_ret read_ret; + int ret = read_refcount(hctx, op.implicit_ref, &objr); + if (ret < 0) + return ret; + + for (auto iter = objr.refs.begin(); iter != objr.refs.end(); ++iter) { + read_ret.refs.push_back(iter->first); + } + + encode(read_ret, *out); + + return 0; +} + +CLS_INIT(refcount) +{ + CLS_LOG(1, "Loaded refcount class!"); + + cls_handle_t h_class; + cls_method_handle_t h_refcount_get; + cls_method_handle_t h_refcount_put; + cls_method_handle_t h_refcount_set; + cls_method_handle_t h_refcount_read; + + cls_register("refcount", &h_class); + + /* refcount */ + cls_register_cxx_method(h_class, "get", CLS_METHOD_RD | CLS_METHOD_WR, cls_rc_refcount_get, 
&h_refcount_get); + cls_register_cxx_method(h_class, "put", CLS_METHOD_RD | CLS_METHOD_WR, cls_rc_refcount_put, &h_refcount_put); + cls_register_cxx_method(h_class, "set", CLS_METHOD_RD | CLS_METHOD_WR, cls_rc_refcount_set, &h_refcount_set); + cls_register_cxx_method(h_class, "read", CLS_METHOD_RD, cls_rc_refcount_read, &h_refcount_read); + + return; +} + diff --git a/src/cls/refcount/cls_refcount_client.cc b/src/cls/refcount/cls_refcount_client.cc new file mode 100644 index 000000000..f65a0fe32 --- /dev/null +++ b/src/cls/refcount/cls_refcount_client.cc @@ -0,0 +1,65 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include <errno.h> + +#include "cls/refcount/cls_refcount_client.h" +#include "cls/refcount/cls_refcount_ops.h" +#include "include/rados/librados.hpp" + +using std::list; +using std::string; + +using ceph::bufferlist; + +void cls_refcount_get(librados::ObjectWriteOperation& op, const string& tag, bool implicit_ref) +{ + bufferlist in; + cls_refcount_get_op call; + call.tag = tag; + call.implicit_ref = implicit_ref; + encode(call, in); + op.exec("refcount", "get", in); +} + +void cls_refcount_put(librados::ObjectWriteOperation& op, const string& tag, bool implicit_ref) +{ + bufferlist in; + cls_refcount_put_op call; + call.tag = tag; + call.implicit_ref = implicit_ref; + encode(call, in); + op.exec("refcount", "put", in); +} + +void cls_refcount_set(librados::ObjectWriteOperation& op, list<string>& refs) +{ + bufferlist in; + cls_refcount_set_op call; + call.refs = refs; + encode(call, in); + op.exec("refcount", "set", in); +} + +int cls_refcount_read(librados::IoCtx& io_ctx, string& oid, list<string> *refs, bool implicit_ref) +{ + bufferlist in, out; + cls_refcount_read_op call; + call.implicit_ref = implicit_ref; + encode(call, in); + int r = io_ctx.exec(oid, "refcount", "read", in, out); + if (r < 0) + return r; + + cls_refcount_read_ret ret; + try { + auto iter = out.cbegin(); + decode(ret, iter); + } catch (ceph::buffer::error& err) { + return -EIO; + } + + *refs = ret.refs; + + return r; +} diff --git a/src/cls/refcount/cls_refcount_client.h b/src/cls/refcount/cls_refcount_client.h new file mode 100644 index 000000000..73a23a7ee --- /dev/null +++ b/src/cls/refcount/cls_refcount_client.h @@ -0,0 +1,41 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_REFCOUNT_CLIENT_H +#define CEPH_CLS_REFCOUNT_CLIENT_H + +#include "include/rados/librados_fwd.hpp" +#include "include/types.h" + +/* + * refcount objclass + * + * The refcount objclass implements a refcounting scheme that allows having multiple references + * to a single rados object. The canonical way to use it is to add a reference and to remove a + * reference using a specific tag. This way we ensure that refcounting operations are idempotent, + * that is, a single client can only increase/decrease the refcount once using a single tag, so + * any replay of operations (implicit or explicit) is safe. + * + * So, the regular usage would be to create an object and increase the refcount. Then, when + * wanting to have another reference to it, increase the refcount using a different tag. When + * removing a reference it is required to drop the refcount (using the same tag that was used + * for that reference). When the refcount drops to zero, the object is removed automatically. 
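The usage pattern described above, as a minimal client-side sketch (illustrative only: it assumes an open librados::IoCtx and an existing object; add_then_drop_ref and the tag string are invented for the example):

```cpp
#include "cls/refcount/cls_refcount_client.h"
#include "include/rados/librados.hpp"

// Take a second reference on an object and later drop it again, using the
// refcount class methods registered above. Each logical copy of the object
// uses its own tag, so a replay of either operation is a no-op.
int add_then_drop_ref(librados::IoCtx& ioctx, const std::string& oid) {
  librados::ObjectWriteOperation get_op;
  cls_refcount_get(get_op, "copy-2");   // idempotent per tag
  int r = ioctx.operate(oid, &get_op);
  if (r < 0)
    return r;

  librados::ObjectWriteOperation put_op;
  cls_refcount_put(put_op, "copy-2");   // same tag as the matching get
  return ioctx.operate(oid, &put_op);   // object removed when last ref drops
}
```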
+ * + * In order to maintain backwards compatibility with objects that were created without having + * their refcount increased, the implicit_ref was added. Any object that was created without + * having its refcount increased (explicitly) has an implicit refcount of 1. Since + * we don't have a tag for this refcount, we treat it as a wildcard. So if the refcount + * is being decreased by an unknown tag and we still have one wildcard tag, we'll accept it + * as the relevant tag, and the refcount will be decreased. + */ + +void cls_refcount_get(librados::ObjectWriteOperation& op, const std::string& tag, bool implicit_ref = false); +void cls_refcount_put(librados::ObjectWriteOperation& op, const std::string& tag, bool implicit_ref = false); +void cls_refcount_set(librados::ObjectWriteOperation& op, std::list<std::string>& refs); +// these overloads which call io_ctx.operate() or io_ctx.exec() should not be called in the rgw. +// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()/exec() +#ifndef CLS_CLIENT_HIDE_IOCTX +int cls_refcount_read(librados::IoCtx& io_ctx, std::string& oid, std::list<std::string> *refs, bool implicit_ref = false); +#endif + +#endif diff --git a/src/cls/refcount/cls_refcount_ops.cc b/src/cls/refcount/cls_refcount_ops.cc new file mode 100644 index 000000000..3731269a9 --- /dev/null +++ b/src/cls/refcount/cls_refcount_ops.cc @@ -0,0 +1,106 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "cls_refcount_ops.h" +#include "common/Formatter.h" +#include "common/ceph_json.h" + +using std::list; + +void cls_refcount_get_op::dump(ceph::Formatter *f) const +{ + f->dump_string("tag", tag); + f->dump_int("implicit_ref", (int)implicit_ref); +} + +void cls_refcount_get_op::generate_test_instances(list<cls_refcount_get_op*>& ls) +{ + ls.push_back(new cls_refcount_get_op); + ls.push_back(new cls_refcount_get_op); + ls.back()->tag = "foo"; + ls.back()->implicit_ref = true; +} + + +void cls_refcount_put_op::dump(ceph::Formatter *f) const +{ + f->dump_string("tag", tag); + f->dump_int("implicit_ref", (int)implicit_ref); +} + +void cls_refcount_put_op::generate_test_instances(list<cls_refcount_put_op*>& ls) +{ + ls.push_back(new cls_refcount_put_op); + ls.push_back(new cls_refcount_put_op); + ls.back()->tag = "foo"; + ls.back()->implicit_ref = true; +} + + + +void cls_refcount_set_op::dump(ceph::Formatter *f) const +{ + encode_json("refs", refs, f); +} + +void cls_refcount_set_op::generate_test_instances(list<cls_refcount_set_op*>& ls) +{ + ls.push_back(new cls_refcount_set_op); + ls.push_back(new cls_refcount_set_op); + ls.back()->refs.push_back("foo"); + ls.back()->refs.push_back("bar"); +} + + +void cls_refcount_read_op::dump(ceph::Formatter *f) const +{ + f->dump_int("implicit_ref", (int)implicit_ref); +} + +void cls_refcount_read_op::generate_test_instances(list<cls_refcount_read_op*>& ls) +{ + ls.push_back(new cls_refcount_read_op); + ls.push_back(new cls_refcount_read_op); + ls.back()->implicit_ref = true; +} + + +void cls_refcount_read_ret::dump(ceph::Formatter *f) const +{ + f->open_array_section("refs"); + for (auto p = refs.begin(); p != refs.end(); ++p) + f->dump_string("ref", *p); + f->close_section(); +} + +void cls_refcount_read_ret::generate_test_instances(list<cls_refcount_read_ret*>& ls) +{ + ls.push_back(new cls_refcount_read_ret); + ls.push_back(new cls_refcount_read_ret); + ls.back()->refs.push_back("foo"); + ls.back()->refs.push_back("bar"); +} + +void 
obj_refcount::dump(ceph::Formatter *f) const +{ + f->open_array_section("refs"); + for (const auto &kv: refs) { + f->open_object_section("ref"); + f->dump_string("oid", kv.first.c_str()); + f->dump_bool("active",kv.second); + f->close_section(); + } + f->close_section(); + + f->open_array_section("retired_refs"); + for (const auto& it: retired_refs) + f->dump_string("ref", it.c_str()); + f->close_section(); +} + +void obj_refcount::generate_test_instances(list<obj_refcount*>& ls) +{ + ls.push_back(new obj_refcount); + ls.back()->refs.emplace("foo",true); + ls.back()->retired_refs.emplace("bar"); +} diff --git a/src/cls/refcount/cls_refcount_ops.h b/src/cls/refcount/cls_refcount_ops.h new file mode 100644 index 000000000..5d60b161f --- /dev/null +++ b/src/cls/refcount/cls_refcount_ops.h @@ -0,0 +1,154 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_REFCOUNT_OPS_H +#define CEPH_CLS_REFCOUNT_OPS_H + +#include "include/types.h" +#include "common/hobject.h" + +struct cls_refcount_get_op { + std::string tag; + bool implicit_ref; + + cls_refcount_get_op() : implicit_ref(false) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(tag, bl); + encode(implicit_ref, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(tag, bl); + decode(implicit_ref, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_refcount_get_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_refcount_get_op) + +struct cls_refcount_put_op { + std::string tag; + bool implicit_ref; // assume wildcard reference for + // objects without a std::set ref + + cls_refcount_put_op() : implicit_ref(false) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(tag, bl); + encode(implicit_ref, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(tag, bl); + decode(implicit_ref, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_refcount_put_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_refcount_put_op) + +struct cls_refcount_set_op { + std::list<std::string> refs; + + cls_refcount_set_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(refs, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(refs, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_refcount_set_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_refcount_set_op) + +struct cls_refcount_read_op { + bool implicit_ref; // assume wildcard reference for + // objects without a std::set ref + + cls_refcount_read_op() : implicit_ref(false) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(implicit_ref, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(implicit_ref, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_refcount_read_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_refcount_read_op) + +struct cls_refcount_read_ret { + std::list<std::string> refs; + + cls_refcount_read_ret() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + 
encode(refs, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(refs, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_refcount_read_ret*>& ls); +}; +WRITE_CLASS_ENCODER(cls_refcount_read_ret) + +struct obj_refcount { + std::map<std::string, bool> refs; + std::set<std::string> retired_refs; + + obj_refcount() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 1, bl); + encode(refs, bl); + encode(retired_refs, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + decode(refs, bl); + if (struct_v >= 2) { + decode(retired_refs, bl); + } + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<obj_refcount*>& ls); +}; +WRITE_CLASS_ENCODER(obj_refcount) + +#endif diff --git a/src/cls/rgw/cls_rgw.cc b/src/cls/rgw/cls_rgw.cc new file mode 100644 index 000000000..a7e1b65e8 --- /dev/null +++ b/src/cls/rgw/cls_rgw.cc @@ -0,0 +1,4781 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "include/types.h" + +#include <errno.h> + +#include <boost/algorithm/string.hpp> + +#include "objclass/objclass.h" +#include "cls/rgw/cls_rgw_ops.h" +#include "cls/rgw/cls_rgw_const.h" +#include "common/Clock.h" +#include "common/strtol.h" +#include "common/escape.h" +#include "common/config_proxy.h" +#include "osd/osd_types.h" + +#include "include/compat.h" +#include <boost/lexical_cast.hpp> + +using std::pair; +using std::list; +using std::map; +using std::string; +using std::vector; + +using ceph::bufferlist; +using ceph::decode; +using ceph::encode; +using ceph::make_timespan; +using ceph::real_clock; +using ceph::real_time; +using ceph::timespan; + +CLS_VER(1,0) +CLS_NAME(rgw) + +// special logging for bucket index transaction instrumentation; if +// instrumenting, log at level 0 and include string "BITX" in log +// message to make entries easier to find +#define CLS_LOG_BITX(is_bitx, level, fmt, ...) \ + if (is_bitx) \ + { CLS_LOG(0, "BITX: " fmt, ##__VA_ARGS__); } \ + else { CLS_LOG(level, fmt, ##__VA_ARGS__); } + +// No UTF-8 character can begin with 0x80, so this is a safe indicator +// of a special bucket-index entry for the first byte. Note: although +// it has no impact, the 2nd, 3rd, or 4th byte of a UTF-8 character +// may be 0x80. +#define BI_PREFIX_CHAR 0x80 + +#define BI_BUCKET_OBJS_INDEX 0 +#define BI_BUCKET_LOG_INDEX 1 +#define BI_BUCKET_OBJ_INSTANCE_INDEX 2 +#define BI_BUCKET_OLH_DATA_INDEX 3 + +#define BI_BUCKET_LAST_INDEX 4 + +static std::string bucket_index_prefixes[] = { "", /* special handling for the objs list index */ + "0_", /* bucket log index */ + "1000_", /* obj instance index */ + "1001_", /* olh data index */ + + /* this must be the last index */ + "9999_",}; + +// this string is greater than all ascii plain entries and less than +// all special entries +static const std::string BI_PREFIX_BEGIN = string(1, BI_PREFIX_CHAR); + +// this string is greater than all special entries and less than all +// non-ascii plain entries +static const std::string BI_PREFIX_END = string(1, BI_PREFIX_CHAR) + + bucket_index_prefixes[BI_BUCKET_LAST_INDEX]; + +/* Returns whether parameter is not a key for a special entry. Empty + * strings are considered plain also, so, for example, an empty marker + * is also considered plain. 
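For illustration, a small self-contained sketch of how plain keys and the special-namespace sentinels defined above order relative to one another (bi_key_order_example and the key values are invented):

```cpp
#include <cassert>
#include <string>

static void bi_key_order_example() {
  const std::string plain = "hello.jpg";                // plain listing entry
  const std::string begin(1, (char)0x80);               // BI_PREFIX_BEGIN
  const std::string log = begin + "0_00000000123.4.5";  // bi log entry
  const std::string end = begin + "9999_";              // BI_PREFIX_END

  assert(plain < begin);  // ASCII object names sort before the special range
  assert(begin < log);    // every special entry falls inside [begin, end)
  assert(log < end);      // so one key range covers the whole "ugly namespace"
}
```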
TODO: check to make sure all callers are + * using it appropriately. + */ +static bool bi_is_plain_entry(const std::string& s) { + return (s.empty() || (unsigned char)s[0] != BI_PREFIX_CHAR); +} + +static int bi_entry_type(const string& s) +{ + if (bi_is_plain_entry(s)) { + return BI_BUCKET_OBJS_INDEX; + } + + for (size_t i = 1; + i < sizeof(bucket_index_prefixes) / sizeof(bucket_index_prefixes[0]); + ++i) { + const string& t = bucket_index_prefixes[i]; + + if (s.compare(1, t.size(), t) == 0) { + return i; + } + } + + return -EINVAL; +} + +static bool bi_entry_gt(const string& first, const string& second) +{ + int fi = bi_entry_type(first); + int si = bi_entry_type(second); + + if (fi > si) { + return true; + } else if (fi < si) { + return false; + } + + return first > second; +} + +static void get_time_key(real_time& ut, string *key) +{ + char buf[32]; + ceph_timespec ts = ceph::real_clock::to_ceph_timespec(ut); + snprintf(buf, 32, "%011llu.%09u", (unsigned long long)ts.tv_sec, (unsigned int)ts.tv_nsec); + *key = buf; +} + +static void get_index_ver_key(cls_method_context_t hctx, uint64_t index_ver, string *key) +{ + char buf[48]; + snprintf(buf, sizeof(buf), "%011llu.%llu.%d", (unsigned long long)index_ver, + (unsigned long long)cls_current_version(hctx), + cls_current_subop_num(hctx)); + *key = buf; +} + +static void bi_log_prefix(string& key) +{ + key = BI_PREFIX_CHAR; + key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]); +} + +static void bi_log_index_key(cls_method_context_t hctx, string& key, string& id, uint64_t index_ver) +{ + bi_log_prefix(key); + get_index_ver_key(hctx, index_ver, &id); + key.append(id); +} + +static int log_index_operation(cls_method_context_t hctx, const cls_rgw_obj_key& obj_key, + RGWModifyOp op, const string& tag, real_time timestamp, + const rgw_bucket_entry_ver& ver, RGWPendingState state, uint64_t index_ver, + string& max_marker, uint16_t bilog_flags, string *owner, string *owner_display_name, rgw_zone_set *zones_trace) +{ + bufferlist bl; + + rgw_bi_log_entry entry; + + entry.object = obj_key.name; + entry.instance = obj_key.instance; + entry.timestamp = timestamp; + entry.op = op; + entry.ver = ver; + entry.state = state; + entry.index_ver = index_ver; + entry.tag = tag; + entry.bilog_flags = bilog_flags; + if (owner) { + entry.owner = *owner; + } + if (owner_display_name) { + entry.owner_display_name = *owner_display_name; + } + if (zones_trace) { + entry.zones_trace = std::move(*zones_trace); + } + + string key; + bi_log_index_key(hctx, key, entry.id, index_ver); + + encode(entry, bl); + + if (entry.id > max_marker) + max_marker = entry.id; + + return cls_cxx_map_set_val(hctx, key, &bl); +} + +/* + * Read list of objects, skipping objects in the "ugly namespace". The + * "ugly namespace" entries begin with BI_PREFIX_CHAR (0x80). Valid + * UTF-8 object names can *both* precede and follow the "ugly + * namespace". 
+ */ +static int get_obj_vals(cls_method_context_t hctx, + const std::string& start, + const std::string& filter_prefix, + int num_entries, + std::map<std::string, bufferlist> *pkeys, + bool *pmore) +{ + int ret = cls_cxx_map_get_vals(hctx, start, filter_prefix, + num_entries, pkeys, pmore); + if (ret < 0) { + return ret; + } + + if (pkeys->empty()) { + return 0; + } + + auto last_element = pkeys->crbegin(); + if ((unsigned char)last_element->first[0] < BI_PREFIX_CHAR) { + /* if the first character of the last entry is less than the + * prefix then all entries must precede the "ugly namespace" and + * we're done + */ + return 0; + } + + auto first_element = pkeys->cbegin(); + if ((unsigned char)first_element->first[0] > BI_PREFIX_CHAR) { + /* if the first character of the first entry is after the "ugly + * namespace" then all entries must follow the "ugly namespace" + * and we're done + */ + return 0; + } + + /* at this point we know we have entries that could precede the + * "ugly namespace", be in the "ugly namespace", and follow the + * "ugly namespace", so let's rebuild the list, only keeping entries + * outside the "ugly namespace" + */ + + auto comp = [](const pair<std::string, bufferlist>& l, const std::string &r) { + return l.first < r; + }; + std::string new_start = {static_cast<char>(BI_PREFIX_CHAR + 1)}; + + auto lower = pkeys->lower_bound(string{static_cast<char>(BI_PREFIX_CHAR)}); + auto upper = std::lower_bound(lower, pkeys->end(), new_start, comp); + pkeys->erase(lower, upper); + + if (num_entries == (int)pkeys->size() || !(*pmore)) { + return 0; + } + + if (pkeys->size() && new_start < pkeys->crbegin()->first) { + new_start = pkeys->rbegin()->first; + } + + std::map<std::string, bufferlist> new_keys; + + /* now get some more keys */ + ret = cls_cxx_map_get_vals(hctx, new_start, filter_prefix, + num_entries - pkeys->size(), &new_keys, pmore); + if (ret < 0) { + return ret; + } + + pkeys->insert(std::make_move_iterator(new_keys.begin()), + std::make_move_iterator(new_keys.end())); + + return 0; +} + +/* + * get a monotonically decreasing string representation. + * For num = x, num = y, where x > y, str(x) < str(y) + * Another property is that string size starts short and grows as num increases + */ +static void decreasing_str(uint64_t num, string *str) +{ + char buf[32]; + if (num < 0x10) { /* 16 */ + snprintf(buf, sizeof(buf), "9%02lld", 15 - (long long)num); + } else if (num < 0x100) { /* 256 */ + snprintf(buf, sizeof(buf), "8%03lld", 255 - (long long)num); + } else if (num < 0x1000) /* 4096 */ { + snprintf(buf, sizeof(buf), "7%04lld", 4095 - (long long)num); + } else if (num < 0x10000) /* 65536 */ { + snprintf(buf, sizeof(buf), "6%05lld", 65535 - (long long)num); + } else if (num < 0x100000000) /* 4G */ { + snprintf(buf, sizeof(buf), "5%010lld", 0xFFFFFFFF - (long long)num); + } else { + snprintf(buf, sizeof(buf), "4%020lld", (long long)-num); + } + + *str = buf; +} + +/* + * We hold two different indexes for objects. The first one holds the + * list of objects in the order that we want them to be listed. The + * second one only holds the object instances (for versioned + * objects), and they're not arranged in any particular order. When + * listing objects we'll use the first index; when doing operations on + * the objects themselves we'll use the second index. 
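A worked example for decreasing_str() above, with values computed from its snprintf formats (decreasing_str_example is a hypothetical helper for illustration):

```cpp
#include <cassert>
#include <string>

// Larger version numbers must produce lexicographically *smaller* keys, so
// the newest version of an object sorts (and therefore lists) first.
static void decreasing_str_example() {
  std::string a, b, c;
  decreasing_str(5, &a);     // < 0x10:    "9" + (15 - 5)       -> "910"
  decreasing_str(200, &b);   // < 0x100:   "8" + (255 - 200)    -> "8055"
  decreasing_str(4096, &c);  // < 0x10000: "6" + (65535 - 4096) -> "661439"
  assert(a > b && b > c);    // 5 < 200 < 4096, string order reversed
}
```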
Note that + * regular objects only map to the first index anyway + */ + +static void get_list_index_key(rgw_bucket_dir_entry& entry, string *index_key) +{ + *index_key = entry.key.name; + + string ver_str; + decreasing_str(entry.versioned_epoch, &ver_str); + string instance_delim("\0i", 2); + string ver_delim("\0v", 2); + + index_key->append(ver_delim); + index_key->append(ver_str); + index_key->append(instance_delim); + index_key->append(entry.key.instance); +} + +static void encode_obj_versioned_data_key(const cls_rgw_obj_key& key, string *index_key, bool append_delete_marker_suffix = false) +{ + *index_key = BI_PREFIX_CHAR; + index_key->append(bucket_index_prefixes[BI_BUCKET_OBJ_INSTANCE_INDEX]); + index_key->append(key.name); + string delim("\0i", 2); + index_key->append(delim); + index_key->append(key.instance); + if (append_delete_marker_suffix) { + string dm("\0d", 2); + index_key->append(dm); + } +} + +static void encode_obj_index_key(const cls_rgw_obj_key& key, string *index_key) +{ + if (key.instance.empty()) { + *index_key = key.name; + } else { + encode_obj_versioned_data_key(key, index_key); + } +} + +static void encode_olh_data_key(const cls_rgw_obj_key& key, string *index_key) +{ + *index_key = BI_PREFIX_CHAR; + index_key->append(bucket_index_prefixes[BI_BUCKET_OLH_DATA_INDEX]); + index_key->append(key.name); +} + +template <class T> +static int read_index_entry(cls_method_context_t hctx, string& name, T *entry); + +static int encode_list_index_key(cls_method_context_t hctx, const cls_rgw_obj_key& key, string *index_key) +{ + if (key.instance.empty()) { + *index_key = key.name; + return 0; + } + + string obj_index_key; + cls_rgw_obj_key tmp_key(key); + if (tmp_key.instance == "null") { + tmp_key.instance.clear(); + } + encode_obj_versioned_data_key(tmp_key, &obj_index_key); + + rgw_bucket_dir_entry entry; + + int ret = read_index_entry(hctx, obj_index_key, &entry); + if (ret == -ENOENT) { + /* couldn't find the entry, set key value after the current object */ + char buf[2] = { 0x1, 0 }; + string s(buf); + *index_key = key.name + s; + return 0; + } + if (ret < 0) { + CLS_LOG(1, "ERROR: encode_list_index_key(): cls_cxx_map_get_val returned %d", ret); + return ret; + } + + get_list_index_key(entry, index_key); + + return 0; +} + +static void split_key(const string& key, list<string>& vals) +{ + size_t pos = 0; + const char *p = key.c_str(); + while (pos < key.size()) { + size_t len = strlen(p); + vals.push_back(p); + pos += len + 1; + p += len + 1; + } +} + +static std::string escape_str(const std::string& s) +{ + int len = escape_json_attr_len(s.c_str(), s.size()); + std::string escaped(len, 0); + escape_json_attr(s.c_str(), s.size(), escaped.data()); + return escaped; +} + +/* + * list index key structure: + * + * <obj name>\0[v<ver>\0i<instance id>] + */ +static int decode_list_index_key(const string& index_key, cls_rgw_obj_key *key, uint64_t *ver) +{ + size_t len = strlen(index_key.c_str()); + + key->instance.clear(); + *ver = 0; + + if (len == index_key.size()) { + key->name = index_key; + return 0; + } + + list<string> vals; + split_key(index_key, vals); + + if (vals.empty()) { + CLS_LOG(0, "ERROR: %s: bad index_key (%s): split_key() returned empty vals", __func__, escape_str(index_key).c_str()); + return -EIO; + } + + auto iter = vals.begin(); + key->name = *iter; + ++iter; + + if (iter == vals.end()) { + CLS_LOG(0, "ERROR: %s: bad index_key (%s): no vals", __func__, escape_str(index_key).c_str()); + return -EIO; + } + + for (; iter != vals.end(); ++iter) { + string& val 
= *iter; + if (val[0] == 'i') { + key->instance = val.substr(1); + } else if (val[0] == 'v') { + string err; + const char *s = val.c_str() + 1; + *ver = strict_strtoll(s, 10, &err); + if (!err.empty()) { + CLS_LOG(0, "ERROR: %s: bad index_key (%s): could not parse val (v=%s)", __func__, escape_str(index_key).c_str(), s); + return -EIO; + } + } + } + + return 0; +} + +static int read_bucket_header(cls_method_context_t hctx, + rgw_bucket_dir_header *header) +{ + bufferlist bl; + int rc = cls_cxx_map_read_header(hctx, &bl); + if (rc < 0) + return rc; + + if (bl.length() == 0) { + *header = rgw_bucket_dir_header(); + return 0; + } + auto iter = bl.cbegin(); + try { + decode(*header, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: read_bucket_header(): failed to decode header\n"); + return -EIO; + } + + return 0; +} + +int rgw_bucket_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + + // maximum number of calls to get_obj_vals we'll try; a compromise + // between wanting to return the requested # of entries and not + // wanting to slow down this op with too many omap reads + constexpr int max_attempts = 8; + + auto iter = in->cbegin(); + + rgw_cls_list_op op; + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: %s: failed to decode request", __func__); + return -EINVAL; + } + + rgw_cls_list_ret ret; + rgw_bucket_dir& new_dir = ret.dir; + auto& name_entry_map = new_dir.m; // map of keys to entries + + int rc = read_bucket_header(hctx, &new_dir.header); + if (rc < 0) { + CLS_LOG(1, "ERROR: %s: failed to read header", __func__); + return rc; + } + + // some calls just want the header and request 0 entries + if (op.num_entries <= 0) { + ret.is_truncated = false; + encode(ret, *out); + return 0; + } + + // key that we can start listing at, one of a) sent in by caller, b) + // last item visited, or c) when delimiter present, a key that will + // move past the subdirectory + std::string start_after_omap_key; + encode_list_index_key(hctx, op.start_obj, &start_after_omap_key); + + // this is set whenever start_after_omap_key is set to keep them in + // sync since this will be the returned marker when a marker is + // returned + cls_rgw_obj_key start_after_entry_key; + + // last key stored in result, so if we have to call get_obj_vals + // multiple times, we do not add the overlap to result + std::string prev_omap_key; + + // last prefix_key stored in result, so we can skip over entries + // with the same prefix_key + std::string prev_prefix_omap_key; + + bool done = false; // set once we no longer need to call get_obj_vals + bool more = true; // output parameter of get_obj_vals + bool has_delimiter = !op.delimiter.empty(); + + if (has_delimiter && + start_after_omap_key > op.filter_prefix && + boost::algorithm::ends_with(start_after_omap_key, op.delimiter)) { + // advance past all subdirectory entries if we start after a + // subdirectory + start_after_omap_key = cls_rgw_after_delim(start_after_omap_key); + } + + for (int attempt = 0; + attempt < max_attempts && + more && + !done && + name_entry_map.size() < op.num_entries; + ++attempt) { + std::map<std::string, bufferlist> keys; + + // note: get_obj_vals skips past the "ugly namespace" (i.e., + // entries that start with the BI_PREFIX_CHAR), so no need to + // check for such entries + rc = get_obj_vals(hctx, start_after_omap_key, op.filter_prefix, + op.num_entries - name_entry_map.size(), + &keys, &more); + if (rc < 0) { + return rc; + } + 
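/* [editorial sketch, not part of the original patch] To illustrate the + * delimiter handling in the loop below: with op.filter_prefix empty and + * op.delimiter == "/", the raw index keys + * + * photos/a.jpg, photos/b.jpg, readme.txt + * + * produce a single FLAG_COMMON_PREFIX proxy entry for "photos/" plus the + * regular entry for "readme.txt"; once the proxy entry is added, the loop + * jumps ahead via cls_rgw_after_delim("photos/"), so the remaining + * "photos/..." keys are skipped rather than examined one by one. */ + 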
CLS_LOG(20, "%s: on attempt %d get_obj_vls returned %ld entries, more=%d", + __func__, attempt, keys.size(), more); + + done = keys.empty(); + + for (auto kiter = keys.cbegin(); kiter != keys.cend(); ++kiter) { + rgw_bucket_dir_entry entry; + try { + const bufferlist& entrybl = kiter->second; + auto eiter = entrybl.cbegin(); + decode(entry, eiter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: %s: failed to decode entry, key=%s", + __func__, kiter->first.c_str()); + return -EINVAL; + } + + start_after_omap_key = kiter->first; + start_after_entry_key = entry.key; + CLS_LOG(20, "%s: working on key=%s len=%zu", + __func__, kiter->first.c_str(), kiter->first.size()); + + cls_rgw_obj_key key; + uint64_t ver; + int ret = decode_list_index_key(kiter->first, &key, &ver); + if (ret < 0) { + CLS_LOG(0, "ERROR: %s: failed to decode list index key (%s)", + __func__, escape_str(kiter->first).c_str()); + continue; + } + + if (!entry.is_valid()) { + CLS_LOG(20, "%s: entry %s[%s] is not valid", + __func__, key.name.c_str(), key.instance.c_str()); + continue; + } + + // filter out noncurrent versions, delete markers, and initial marker + if (!op.list_versions && + (!entry.is_visible() || op.start_obj.name == key.name)) { + CLS_LOG(20, "%s: entry %s[%s] is not visible", + __func__, key.name.c_str(), key.instance.c_str()); + continue; + } + + if (has_delimiter) { + int delim_pos = key.name.find(op.delimiter, op.filter_prefix.size()); + + if (delim_pos >= 0) { + /* extract key with trailing delimiter */ + string prefix_key = + key.name.substr(0, delim_pos + op.delimiter.length()); + + if (prefix_key == prev_prefix_omap_key) { + continue; // we've already added this; + } else { + prev_prefix_omap_key = prefix_key; + } + + if (name_entry_map.size() < op.num_entries) { + rgw_bucket_dir_entry proxy_entry; + cls_rgw_obj_key proxy_key(prefix_key); + proxy_entry.key = cls_rgw_obj_key(proxy_key); + proxy_entry.flags = rgw_bucket_dir_entry::FLAG_COMMON_PREFIX; + name_entry_map[prefix_key] = proxy_entry; + + CLS_LOG(20, "%s: got common prefix entry %s[%s] num entries=%lu", + __func__, proxy_key.name.c_str(), proxy_key.instance.c_str(), + name_entry_map.size()); + } + + // make sure that if this is the last item added to the + // result from this call to get_obj_vals, the next call will + // skip past rest of "subdirectory" + start_after_omap_key = cls_rgw_after_delim(prefix_key); + start_after_entry_key.set(start_after_omap_key); + + // advance past this subdirectory, but then back up one, + // so the loop increment will put us in the right place + kiter = keys.lower_bound(start_after_omap_key); + --kiter; + + continue; + } + + // no delimiter after prefix found, so this is a "top-level" + // item and we can just fall through + } + + if (name_entry_map.size() < op.num_entries && + kiter->first != prev_omap_key) { + name_entry_map[kiter->first] = entry; + prev_omap_key = kiter->first; + CLS_LOG(20, "%s: got object entry %s[%s] num entries=%d", + __func__, key.name.c_str(), key.instance.c_str(), + int(name_entry_map.size())); + } + } // for (auto kiter... + } // for (int attempt... 
+ + ret.is_truncated = more && !done; + if (ret.is_truncated) { + ret.marker = start_after_entry_key; + } + CLS_LOG(20, "%s: normal exit returning %ld entries, is_truncated=%d", + __func__, ret.dir.m.size(), ret.is_truncated); + encode(ret, *out); + + if (ret.is_truncated && name_entry_map.size() == 0) { + CLS_LOG(5, "%s: returning value RGWBIAdvanceAndRetryError", __func__); + return RGWBIAdvanceAndRetryError; + } else { + return 0; + } +} // rgw_bucket_list + +static int write_bucket_header(cls_method_context_t hctx, rgw_bucket_dir_header *header) +{ + header->ver++; + + bufferlist header_bl; + encode(*header, header_bl); + return cls_cxx_map_write_header(hctx, &header_bl); +} + + +int rgw_bucket_update_stats(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + // decode request + rgw_cls_bucket_update_stats_op op; + auto iter = in->cbegin(); + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: %s: failed to decode request", __func__); + return -EINVAL; + } + + rgw_bucket_dir_header header; + int rc = read_bucket_header(hctx, &header); + if (rc < 0) { + CLS_LOG(1, "ERROR: %s: failed to read header", __func__); + return rc; + } + + for (auto& s : op.stats) { + auto& dest = header.stats[s.first]; + if (op.absolute) { + dest = s.second; + } else { + dest.total_size += s.second.total_size; + dest.total_size_rounded += s.second.total_size_rounded; + dest.num_entries += s.second.num_entries; + dest.actual_size += s.second.actual_size; + } + } + + return write_bucket_header(hctx, &header); +} + +int rgw_bucket_init_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + bufferlist header_bl; + int rc = cls_cxx_map_read_header(hctx, &header_bl); + if (rc < 0) { + switch (rc) { + case -ENODATA: + case -ENOENT: + break; + default: + return rc; + } + } + + if (header_bl.length() != 0) { + CLS_LOG(1, "ERROR: index already initialized\n"); + return -EINVAL; + } + + rgw_bucket_dir dir; + + return write_bucket_header(hctx, &dir.header); +} + +int rgw_bucket_set_tag_timeout(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + // decode request + rgw_cls_tag_timeout_op op; + auto iter = in->cbegin(); + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: rgw_bucket_set_tag_timeout(): failed to decode request\n"); + return -EINVAL; + } + + rgw_bucket_dir_header header; + int rc = read_bucket_header(hctx, &header); + if (rc < 0) { + CLS_LOG(1, "ERROR: rgw_bucket_set_tag_timeout(): failed to read header\n"); + return rc; + } + + header.tag_timeout = op.tag_timeout; + + return write_bucket_header(hctx, &header); +} + +static int read_key_entry(cls_method_context_t hctx, const cls_rgw_obj_key& key, + string *idx, rgw_bucket_dir_entry *entry, + bool special_delete_marker_name = false); + +static std::string modify_op_str(RGWModifyOp op) { + return std::string(to_string(op)); +} + +static std::string modify_op_str(uint8_t op) { + return modify_op_str((RGWModifyOp) op); +} + +int rgw_bucket_prepare_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + const ConfigProxy& conf = cls_get_config(hctx); + const object_info_t& oi = cls_get_object_info(hctx); + + // bucket index transaction instrumentation + const bool bitx_inst = + conf->rgw_bucket_index_transaction_instrumentation; + + CLS_LOG_BITX(bitx_inst, 10, "ENTERING %s for object oid=%s key=%s", + __func__, 
oi.soid.oid.name.c_str(), oi.soid.get_key().c_str()); + + // decode request + rgw_cls_obj_prepare_op op; + auto iter = in->cbegin(); + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG_BITX(bitx_inst, 1, + "ERROR: %s: failed to decode request", __func__); + return -EINVAL; + } + + if (op.tag.empty()) { + CLS_LOG_BITX(bitx_inst, 1, "ERROR: %s: tag is empty", __func__); + return -EINVAL; + } + + CLS_LOG_BITX(bitx_inst, 1, + "INFO: %s: request: op=%s name=%s tag=%s", __func__, + modify_op_str(op.op).c_str(), op.key.to_string().c_str(), op.tag.c_str()); + + // get on-disk state + std::string idx; + + rgw_bucket_dir_entry entry; + int rc = read_key_entry(hctx, op.key, &idx, &entry); + if (rc < 0 && rc != -ENOENT) { + CLS_LOG_BITX(bitx_inst, 1, + "ERROR: %s could not read key entry, key=%s, rc=%d", + __func__, op.key.to_string().c_str(), rc); + return rc; + } + + bool noent = (rc == -ENOENT); + + rc = 0; + + if (noent) { // no entry, initialize fields + entry.key = op.key; + entry.ver = rgw_bucket_entry_ver(); + entry.exists = false; + entry.locator = op.locator; + } + + // fill in proper state + rgw_bucket_pending_info info; + info.timestamp = real_clock::now(); + info.state = CLS_RGW_STATE_PENDING_MODIFY; + info.op = op.op; + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: inserting tag %s op %s into pending map for entry %s", + __func__, op.tag.c_str(), modify_op_str(info.op).c_str(), + entry.key.to_string().c_str()); + entry.pending_map.insert(pair<string, rgw_bucket_pending_info>(op.tag, info)); + + // write out new key to disk + bufferlist info_bl; + encode(entry, info_bl); + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: setting map entry at key=%s", + __func__, escape_str(idx).c_str()); + rc = cls_cxx_map_set_val(hctx, idx, &info_bl); + if (rc < 0) { + CLS_LOG_BITX(bitx_inst, 1, + "ERROR: %s could not set value for key, key=%s, rc=%d", + __func__, escape_str(idx).c_str(), rc); + return rc; + } + + CLS_LOG_BITX(bitx_inst, 10, "EXITING %s, returning 0", __func__); + return 0; +} // rgw_bucket_prepare_op + +static void unaccount_entry(rgw_bucket_dir_header& header, + rgw_bucket_dir_entry& entry) +{ + if (entry.exists) { + rgw_bucket_category_stats& stats = header.stats[entry.meta.category]; + stats.num_entries--; + stats.total_size -= entry.meta.accounted_size; + stats.total_size_rounded -= + cls_rgw_get_rounded_size(entry.meta.accounted_size); + stats.actual_size -= entry.meta.size; + } +} + +static void log_entry(const char *func, const char *str, rgw_bucket_dir_entry *entry) +{ + CLS_LOG(1, "%s: %s: ver=%ld:%llu name=%s instance=%s locator=%s", func, str, + (long)entry->ver.pool, (unsigned long long)entry->ver.epoch, + entry->key.name.c_str(), entry->key.instance.c_str(), entry->locator.c_str()); +} + +static void log_entry(const char *func, const char *str, rgw_bucket_olh_entry *entry) +{ + CLS_LOG(1, "%s: %s: epoch=%llu name=%s instance=%s tag=%s", func, str, + (unsigned long long)entry->epoch, entry->key.name.c_str(), entry->key.instance.c_str(), + entry->tag.c_str()); +} + +template <class T> +static int read_omap_entry(cls_method_context_t hctx, const std::string& name, + T* entry) +{ + bufferlist current_entry; + int rc = cls_cxx_map_get_val(hctx, name, &current_entry); + if (rc < 0) { + return rc; + } + + auto cur_iter = current_entry.cbegin(); + try { + decode(*entry, cur_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: %s: failed to decode entry", __func__); + return -EIO; + } + return 0; +} + +template <class T> +static int 
read_index_entry(cls_method_context_t hctx, string& name, T* entry) +{ + int ret = read_omap_entry(hctx, name, entry); + if (ret < 0) { + return ret; + } + + log_entry(__func__, "existing entry", entry); + return 0; +} + +static int read_key_entry(cls_method_context_t hctx, const cls_rgw_obj_key& key, + string *idx, rgw_bucket_dir_entry *entry, + bool special_delete_marker_name) +{ + encode_obj_index_key(key, idx); + int rc = read_index_entry(hctx, *idx, entry); + if (rc < 0) { + return rc; + } + + if (key.instance.empty() && + entry->flags & rgw_bucket_dir_entry::FLAG_VER_MARKER) { + /* we only do it where key.instance is empty. In this case the + * delete marker will have a separate entry in the index to avoid + * collisions with the actual object, as it's mutable + */ + if (special_delete_marker_name) { + encode_obj_versioned_data_key(key, idx, true); + rc = read_index_entry(hctx, *idx, entry); + if (rc == 0) { + return 0; + } + } + encode_obj_versioned_data_key(key, idx); + rc = read_index_entry(hctx, *idx, entry); + if (rc < 0) { + *entry = rgw_bucket_dir_entry(); /* need to reset entry because we initialized it earlier */ + return rc; + } + } + + return 0; +} + +// called by rgw_bucket_complete_op() for each item in op.remove_objs +static int complete_remove_obj(cls_method_context_t hctx, + rgw_bucket_dir_header& header, + const cls_rgw_obj_key& key, bool log_op) +{ + rgw_bucket_dir_entry entry; + string idx; + int ret = read_key_entry(hctx, key, &idx, &entry); + if (ret < 0) { + CLS_LOG(1, "%s: read_key_entry name=%s instance=%s failed with %d", + __func__, key.name.c_str(), key.instance.c_str(), ret); + return ret; + } + CLS_LOG(10, "%s: read entry name=%s instance=%s category=%d", __func__, + entry.key.name.c_str(), entry.key.instance.c_str(), + int(entry.meta.category)); + unaccount_entry(header, entry); + + if (log_op) { + ++header.ver; // increment index version, or we'll overwrite keys previously written + const std::string tag; + ret = log_index_operation(hctx, key, CLS_RGW_OP_DEL, tag, entry.meta.mtime, + entry.ver, CLS_RGW_STATE_COMPLETE, header.ver, + header.max_marker, 0, nullptr, nullptr, nullptr); + if (ret < 0) { + return ret; + } + } + + ret = cls_cxx_map_remove_key(hctx, idx); + if (ret < 0) { + CLS_LOG(1, "%s: cls_cxx_map_remove_key failed with %d", __func__, ret); + return ret; + } + return ret; +} + +int rgw_bucket_complete_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + const ConfigProxy& conf = cls_get_config(hctx); + const object_info_t& oi = cls_get_object_info(hctx); + + // bucket index transaction instrumentation + const bool bitx_inst = + conf->rgw_bucket_index_transaction_instrumentation; + + CLS_LOG_BITX(bitx_inst, 10, "ENTERING %s for object oid=%s key=%s", + __func__, oi.soid.oid.name.c_str(), oi.soid.get_key().c_str()); + + // decode request + rgw_cls_obj_complete_op op; + auto iter = in->cbegin(); + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG_BITX(bitx_inst, 1, "ERROR: %s: failed to decode request", __func__); + return -EINVAL; + } + + CLS_LOG_BITX(bitx_inst, 1, + "INFO: %s: request: op=%s name=%s ver=%lu:%llu tag=%s", + __func__, + modify_op_str(op.op).c_str(), op.key.to_string().c_str(), + (unsigned long)op.ver.pool, (unsigned long long)op.ver.epoch, + op.tag.c_str()); + + rgw_bucket_dir_header header; + int rc = read_bucket_header(hctx, &header); + if (rc < 0) { + CLS_LOG_BITX(bitx_inst, 1, "ERROR: %s: failed to read header, rc=%d", + __func__, rc); + return -EINVAL; + } + + rgw_bucket_dir_entry 
entry; + bool ondisk = true; + + std::string idx; + rc = read_key_entry(hctx, op.key, &idx, &entry); + if (rc == -ENOENT) { + entry.key = op.key; + entry.ver = op.ver; + entry.meta = op.meta; + entry.locator = op.locator; + ondisk = false; + } else if (rc < 0) { + CLS_LOG_BITX(bitx_inst, 1, + "ERROR: %s: read key entry failed, key=%s, rc=%d", + __func__, op.key.to_string().c_str(), rc); + return rc; + } + + entry.index_ver = header.ver; + /* resetting entry flags, entry might have been previously a delete + * marker */ + entry.flags &= rgw_bucket_dir_entry::FLAG_VER; + + if (op.tag.size()) { + auto pinter = entry.pending_map.find(op.tag); + if (pinter == entry.pending_map.end()) { + CLS_LOG_BITX(bitx_inst, 1, + "ERROR: %s: couldn't find tag for pending operation with tag %s", + __func__, op.tag.c_str()); + return -EINVAL; + } + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: removing tag %s from pending map", + __func__, op.tag.c_str()); + entry.pending_map.erase(pinter); + } + + if (op.tag.size() && op.op == CLS_RGW_OP_CANCEL) { + CLS_LOG_BITX(bitx_inst, 20, "INFO: %s: op is cancel", __func__); + } else if (op.ver.pool == entry.ver.pool && + op.ver.epoch && op.ver.epoch <= entry.ver.epoch) { + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: skipping request, old epoch", __func__); + op.op = CLS_RGW_OP_CANCEL; + } + + // controls whether remove_objs deletions are logged + const bool default_log_op = op.log_op && !header.syncstopped; + // controls whether this operation is logged (depends on op.op and ondisk) + bool log_op = default_log_op; + + entry.ver = op.ver; + if (op.op == CLS_RGW_OP_CANCEL) { + log_op = false; // don't log cancelation + if (op.tag.size()) { + if (!entry.exists && entry.pending_map.empty()) { + // a racing delete succeeded, and we canceled the last pending op + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: removing map entry with key=%s", + __func__, escape_str(idx).c_str()); + rc = cls_cxx_map_remove_key(hctx, idx); + if (rc < 0) { + CLS_LOG_BITX(bitx_inst, 1, + "ERROR: %s: unable to remove map key, key=%s, rc=%d", + __func__, escape_str(idx).c_str(), rc); + return rc; + } + } else { + // we removed this tag from pending_map so need to write the changes + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: setting map entry at key=%s", + __func__, escape_str(idx).c_str()); + bufferlist new_key_bl; + encode(entry, new_key_bl); + rc = cls_cxx_map_set_val(hctx, idx, &new_key_bl); + if (rc < 0) { + CLS_LOG_BITX(bitx_inst, 1, + "ERROR: %s: unable to set map val, key=%s, rc=%d", + __func__, escape_str(idx).c_str(), rc); + return rc; + } + } + } + } // CLS_RGW_OP_CANCEL + else if (op.op == CLS_RGW_OP_DEL) { + // unaccount deleted entry + unaccount_entry(header, entry); + + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: delete op, key=%s", + __func__, escape_str(idx).c_str()); + entry.meta = op.meta; + if (!ondisk) { + // no entry to erase + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: key=%s not on disk, no action", + __func__, escape_str(idx).c_str()); + log_op = false; + } else if (!entry.pending_map.size()) { + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: removing map entry with key=%s", + __func__, escape_str(idx).c_str()); + rc = cls_cxx_map_remove_key(hctx, idx); + if (rc < 0) { + CLS_LOG_BITX(bitx_inst, 1, + "ERROR: %s: unable to remove map key, key=%s, rc=%d", + __func__, escape_str(idx).c_str(), rc); + return rc; + } + } else { + entry.exists = false; + bufferlist new_key_bl; + encode(entry, new_key_bl); + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: setting map entry at key=%s", + __func__, 
escape_str(idx).c_str()); + rc = cls_cxx_map_set_val(hctx, idx, &new_key_bl); + if (rc < 0) { + CLS_LOG_BITX(bitx_inst, 1, + "ERROR: %s: unable to set map val, key=%s, rc=%d", + __func__, escape_str(idx).c_str(), rc); + return rc; + } + } + } // CLS_RGW_OP_DEL + else if (op.op == CLS_RGW_OP_ADD) { + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: add op, key=%s", + __func__, escape_str(idx).c_str()); + // unaccount overwritten entry + unaccount_entry(header, entry); + + rgw_bucket_dir_entry_meta& meta = op.meta; + rgw_bucket_category_stats& stats = header.stats[meta.category]; + entry.meta = meta; + entry.key = op.key; + entry.exists = true; + entry.tag = op.tag; + // account for new entry + stats.num_entries++; + stats.total_size += meta.accounted_size; + stats.total_size_rounded += cls_rgw_get_rounded_size(meta.accounted_size); + stats.actual_size += meta.size; + bufferlist new_key_bl; + encode(entry, new_key_bl); + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: setting map entry at key=%s", + __func__, escape_str(idx).c_str()); + rc = cls_cxx_map_set_val(hctx, idx, &new_key_bl); + if (rc < 0) { + CLS_LOG_BITX(bitx_inst, 1, + "ERROR: %s: unable to set map value at key=%s, rc=%d", + __func__, escape_str(idx).c_str(), rc); + return rc; + } + } // CLS_RGW_OP_ADD + + if (log_op) { + rc = log_index_operation(hctx, op.key, op.op, op.tag, entry.meta.mtime, + entry.ver, CLS_RGW_STATE_COMPLETE, header.ver, + header.max_marker, op.bilog_flags, NULL, NULL, + &op.zones_trace); + if (rc < 0) { + CLS_LOG_BITX(bitx_inst, 0, + "ERROR: %s: log_index_operation failed with rc=%d", + __func__, rc); + return rc; + } + } + + CLS_LOG_BITX(bitx_inst, 20, "INFO: %s: remove_objs.size()=%d", + __func__, (int)op.remove_objs.size()); + for (const auto& remove_key : op.remove_objs) { + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: completing object remove key=%s", + __func__, escape_str(remove_key.to_string()).c_str()); + rc = complete_remove_obj(hctx, header, remove_key, default_log_op); + if (rc < 0) { + CLS_LOG_BITX(bitx_inst, 1, + "WARNING: %s: complete_remove_obj, failed to remove entry, " + "name=%s read_index_entry ret=%d, continuing", + __func__, escape_str(remove_key.to_string()).c_str(), rc); + continue; // part cleanup errors are not fatal + } + } // remove loop + + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: writing bucket header", __func__); + rc = write_bucket_header(hctx, &header); + if (rc < 0) { + CLS_LOG_BITX(bitx_inst, 0, + "ERROR: %s: failed to write bucket header ret=%d", + __func__, rc); + } + + CLS_LOG_BITX(bitx_inst, 10, + "EXITING %s: returning %d", __func__, rc); + return rc; +} // rgw_bucket_complete_op + +template <class T> +static int write_entry(cls_method_context_t hctx, T& entry, const string& key) +{ + bufferlist bl; + encode(entry, bl); + return cls_cxx_map_set_val(hctx, key, &bl); +} + +static int read_olh(cls_method_context_t hctx,cls_rgw_obj_key& obj_key, rgw_bucket_olh_entry *olh_data_entry, string *index_key, bool *found) +{ + cls_rgw_obj_key olh_key; + olh_key.name = obj_key.name; + + encode_olh_data_key(olh_key, index_key); + int ret = read_index_entry(hctx, *index_key, olh_data_entry); + if (ret < 0 && ret != -ENOENT) { + CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_key.name.c_str(), ret); + return ret; + } + if (found) { + *found = (ret != -ENOENT); + } + return 0; +} + +static void update_olh_log(rgw_bucket_olh_entry& olh_data_entry, OLHLogOp op, const string& op_tag, + cls_rgw_obj_key& key, bool delete_marker, uint64_t epoch) +{ + vector<rgw_bucket_olh_log_entry>& log = 
olh_data_entry.pending_log[olh_data_entry.epoch]; + rgw_bucket_olh_log_entry log_entry; + log_entry.epoch = epoch; + log_entry.op = op; + log_entry.op_tag = op_tag; + log_entry.key = key; + log_entry.delete_marker = delete_marker; + log.push_back(log_entry); +} + +static int write_obj_instance_entry(cls_method_context_t hctx, rgw_bucket_dir_entry& instance_entry, const string& instance_idx) +{ + CLS_LOG(20, "write_entry() instance=%s idx=%s flags=%d", escape_str(instance_entry.key.instance).c_str(), instance_idx.c_str(), instance_entry.flags); + /* write the instance entry */ + int ret = write_entry(hctx, instance_entry, instance_idx); + if (ret < 0) { + CLS_LOG(0, "ERROR: write_entry() instance_key=%s ret=%d", escape_str(instance_idx).c_str(), ret); + return ret; + } + return 0; +} + +/* + * write object instance entry, and if needed also the list entry + */ +static int write_obj_entries(cls_method_context_t hctx, rgw_bucket_dir_entry& instance_entry, const string& instance_idx) +{ + int ret = write_obj_instance_entry(hctx, instance_entry, instance_idx); + if (ret < 0) { + return ret; + } + string instance_list_idx; + get_list_index_key(instance_entry, &instance_list_idx); + + if (instance_idx != instance_list_idx) { + CLS_LOG(20, "write_entry() idx=%s flags=%d", escape_str(instance_list_idx).c_str(), instance_entry.flags); + /* write a new list entry for the object instance */ + ret = write_entry(hctx, instance_entry, instance_list_idx); + if (ret < 0) { + CLS_LOG(0, "ERROR: write_entry() instance=%s instance_list_idx=%s ret=%d", instance_entry.key.instance.c_str(), instance_list_idx.c_str(), ret); + return ret; + } + } + return 0; +} + + +class BIVerObjEntry { + cls_method_context_t hctx; + cls_rgw_obj_key key; + string instance_idx; + + rgw_bucket_dir_entry instance_entry; + + bool initialized; + +public: + BIVerObjEntry(cls_method_context_t& _hctx, const cls_rgw_obj_key& _key) : hctx(_hctx), key(_key), initialized(false) { + // empty + } + + int init(bool check_delete_marker = true) { + int ret = read_key_entry(hctx, key, &instance_idx, &instance_entry, + check_delete_marker && key.instance.empty()); /* this is potentially a delete marker, for null objects we + keep separate instance entry for the delete markers */ + + if (ret < 0) { + CLS_LOG(0, "ERROR: read_key_entry() idx=%s ret=%d", instance_idx.c_str(), ret); + return ret; + } + initialized = true; + CLS_LOG(20, "read instance_entry key.name=%s key.instance=%s flags=%d", instance_entry.key.name.c_str(), instance_entry.key.instance.c_str(), instance_entry.flags); + return 0; + } + + rgw_bucket_dir_entry& get_dir_entry() { + return instance_entry; + } + + void init_as_delete_marker(rgw_bucket_dir_entry_meta& meta) { + /* a deletion marker, need to initialize it, there's no instance entry for it yet */ + instance_entry.key = key; + instance_entry.flags = rgw_bucket_dir_entry::FLAG_DELETE_MARKER; + instance_entry.meta = meta; + instance_entry.tag = "delete-marker"; + + initialized = true; + } + + void set_epoch(uint64_t epoch) { + instance_entry.versioned_epoch = epoch; + } + + int unlink_list_entry() { + string list_idx; + /* this instance has a previous list entry, remove that entry */ + get_list_index_key(instance_entry, &list_idx); + CLS_LOG(20, "unlink_list_entry() list_idx=%s", escape_str(list_idx).c_str()); + int ret = cls_cxx_map_remove_key(hctx, list_idx); + if (ret < 0) { + CLS_LOG(0, "ERROR: cls_cxx_map_remove_key() list_idx=%s ret=%d", list_idx.c_str(), ret); + return ret; + } + return 0; + } + + int unlink() { + /* 
remove the instance entry */ + CLS_LOG(20, "unlink() idx=%s", escape_str(instance_idx).c_str()); + int ret = cls_cxx_map_remove_key(hctx, instance_idx); + if (ret < 0) { + CLS_LOG(0, "ERROR: cls_cxx_map_remove_key() instance_idx=%s ret=%d", instance_idx.c_str(), ret); + return ret; + } + return 0; + } + + int write_entries(uint64_t flags_set, uint64_t flags_reset) { + if (!initialized) { + int ret = init(); + if (ret < 0) { + return ret; + } + } + instance_entry.flags &= ~flags_reset; + instance_entry.flags |= flags_set; + + /* write the instance and list entries */ + bool special_delete_marker_key = (instance_entry.is_delete_marker() && instance_entry.key.instance.empty()); + encode_obj_versioned_data_key(key, &instance_idx, special_delete_marker_key); + int ret = write_obj_entries(hctx, instance_entry, instance_idx); + if (ret < 0) { + CLS_LOG(0, "ERROR: write_obj_entries() instance_idx=%s ret=%d", instance_idx.c_str(), ret); + return ret; + } + + return 0; + } + + int write(uint64_t epoch, bool current) { + if (instance_entry.versioned_epoch > 0) { + CLS_LOG(20, "%s: instance_entry.versioned_epoch=%d epoch=%d", __func__, (int)instance_entry.versioned_epoch, (int)epoch); + /* this instance has a previous list entry, remove that entry */ + int ret = unlink_list_entry(); + if (ret < 0) { + return ret; + } + } + + uint64_t flags = rgw_bucket_dir_entry::FLAG_VER; + if (current) { + flags |= rgw_bucket_dir_entry::FLAG_CURRENT; + } + + instance_entry.versioned_epoch = epoch; + return write_entries(flags, 0); + } + + int demote_current() { + return write_entries(0, rgw_bucket_dir_entry::FLAG_CURRENT); + } + + bool is_delete_marker() { + return instance_entry.is_delete_marker(); + } + + int find_next_key(cls_rgw_obj_key *next_key, bool *found) { + string list_idx; + /* compute this instance's list index key; entries for other versions + of the same object sort directly after it */ + get_list_index_key(instance_entry, &list_idx); + 
map<string, bufferlist> keys; + bool more; + string filter = key.name; /* list key starts with key name, filter it to avoid a case where we cross to + a different namespace */ + int ret = cls_cxx_map_get_vals(hctx, list_idx, filter, 1, &keys, &more); + if (ret < 0) { + return ret; + } + + if (keys.size() < 1) { + *found = false; + return 0; + } + + rgw_bucket_dir_entry next_entry; + + auto last = keys.rbegin(); + try { + auto iter = last->second.cbegin(); + decode(next_entry, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: failed to decode entry: %s", last->first.c_str()); + return -EIO; + } + + *found = (key.name == next_entry.key.name); + if (*found) { + *next_key = next_entry.key; + } + + return 0; + } + + real_time mtime() { + return instance_entry.meta.mtime; + } +}; // class BIVerObjEntry + + +class BIOLHEntry { + cls_method_context_t hctx; + cls_rgw_obj_key key; + + string olh_data_idx; + rgw_bucket_olh_entry olh_data_entry; + + bool initialized; +public: + BIOLHEntry(cls_method_context_t& _hctx, const cls_rgw_obj_key& _key) : hctx(_hctx), key(_key), initialized(false) { } + + int init(bool *exists) { + /* read olh */ + int ret = read_olh(hctx, key, &olh_data_entry, &olh_data_idx, exists); + if (ret < 0) { + return ret; + } + + initialized = true; + return 0; + } + + bool start_modify(uint64_t candidate_epoch) { + if (candidate_epoch) { + if (candidate_epoch < olh_data_entry.epoch) { + return false; /* olh cannot be modified, old epoch */ + } + olh_data_entry.epoch = candidate_epoch; + } else { + if (olh_data_entry.epoch == 0) { + olh_data_entry.epoch = 2; /* versioned epoch should start with 2, 1 is reserved for converted plain entries */ + } else { + olh_data_entry.epoch++; + } + } + return true; + } + + uint64_t get_epoch() { + return olh_data_entry.epoch; + } + + rgw_bucket_olh_entry& get_entry() { + return olh_data_entry; + } + + void update(cls_rgw_obj_key& key, bool delete_marker) { + olh_data_entry.delete_marker = delete_marker; + olh_data_entry.key = key; + } + + int write() { + /* write the olh data entry */ + int ret = write_entry(hctx, olh_data_entry, olh_data_idx); + if (ret < 0) { + CLS_LOG(0, "ERROR: write_entry() olh_key=%s ret=%d", olh_data_idx.c_str(), ret); + return ret; + } + + return 0; + } + + void update_log(OLHLogOp op, const string& op_tag, cls_rgw_obj_key& key, bool delete_marker, uint64_t epoch = 0) { + if (epoch == 0) { + epoch = olh_data_entry.epoch; + } + update_olh_log(olh_data_entry, op, op_tag, key, delete_marker, epoch); + } + + bool exists() { return olh_data_entry.exists; } + + void set_exists(bool exists) { + olh_data_entry.exists = exists; + } + + bool pending_removal() { return olh_data_entry.pending_removal; } + + void set_pending_removal(bool pending_removal) { + olh_data_entry.pending_removal = pending_removal; + } + + const string& get_tag() { return olh_data_entry.tag; } + void set_tag(const string& tag) { + olh_data_entry.tag = tag; + } +}; + +static int write_version_marker(cls_method_context_t hctx, cls_rgw_obj_key& key) +{ + rgw_bucket_dir_entry entry; + entry.key = key; + entry.flags = rgw_bucket_dir_entry::FLAG_VER_MARKER; + int ret = write_entry(hctx, entry, key.name); + if (ret < 0) { + CLS_LOG(0, "ERROR: write_entry returned ret=%d", ret); + return ret; + } + return 0; +} + +/* + * plain entries are the ones that were created when the bucket was not + * versioned; if we overwrite these objects, we need to convert them + * to versioned entries -- ones that have both a data entry and a listing + * key. 
Their version is going to be empty though + */ +static int convert_plain_entry_to_versioned(cls_method_context_t hctx, + cls_rgw_obj_key& key, + bool demote_current, + bool instance_only) +{ + if (!key.instance.empty()) { + return -EINVAL; + } + + rgw_bucket_dir_entry entry; + + string orig_idx; + int ret = read_key_entry(hctx, key, &orig_idx, &entry); + if (ret != -ENOENT) { + if (ret < 0) { + CLS_LOG(0, "ERROR: read_key_entry() returned ret=%d", ret); + return ret; + } + + entry.versioned_epoch = 1; /* converted entries are always 1 */ + entry.flags |= rgw_bucket_dir_entry::FLAG_VER; + + if (demote_current) { + entry.flags &= ~rgw_bucket_dir_entry::FLAG_CURRENT; + } + + string new_idx; + encode_obj_versioned_data_key(key, &new_idx); + + if (instance_only) { + ret = write_obj_instance_entry(hctx, entry, new_idx); + } else { + ret = write_obj_entries(hctx, entry, new_idx); + } + if (ret < 0) { + CLS_LOG(0, "ERROR: write_obj_entries new_idx=%s returned %d", + new_idx.c_str(), ret); + return ret; + } + } + + ret = write_version_marker(hctx, key); + if (ret < 0) { + return ret; + } + + return 0; +} + +/* + * Link an object version to an olh, update the relevant index + * entries. It will also handle the deletion marker case. We have a + * few entries that we need to take care of. For object 'foo', + * instance BAR, we'd update the following (not actual encoding): + * + * - olh data: [BI_BUCKET_OLH_DATA_INDEX]foo + * - object instance data: [BI_BUCKET_OBJ_INSTANCE_INDEX]foo,BAR + * - object instance list entry: foo,123,BAR + * + * The instance list entry needs to be ordered by newer to older, so + * we generate an appropriate number string that follows the name. + * The top instance for each object is marked appropriately. We + * generate instance entry for deletion markers here, as they are not + * created prior. + */ +static int rgw_bucket_link_olh(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + string olh_data_idx; + string instance_idx; + + // decode request + rgw_cls_link_olh_op op; + auto iter = in->cbegin(); + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: rgw_bucket_link_olh_op(): failed to decode request\n"); + return -EINVAL; + } + + /* read instance entry */ + BIVerObjEntry obj(hctx, op.key); + int ret = obj.init(op.delete_marker); + + /* NOTE: When a delete is issued, a key instance is always provided, + * either the one for which the delete is requested or a new random + * one when no instance is specified. So we need to see which of + * these two cases we're dealing with. The variable `existed` will + * be true if the instance was specified and false if it was + * randomly generated. It might have been cleaner if the instance + * were empty and randomly generated here and returned in the reply, + * as that would better allow a typo in the instance id. This code + * should be audited and possibly cleaned up. 
*/ + + bool existed = (ret == 0); + if (ret == -ENOENT && op.delete_marker) { + ret = 0; + } + if (ret < 0) { + return ret; + } + + BIOLHEntry olh(hctx, op.key); + bool olh_read_attempt = false; + bool olh_found = false; + if (!existed && op.delete_marker) { + /* read olh */ + ret = olh.init(&olh_found); + if (ret < 0) { + return ret; + } + olh_read_attempt = true; + + // if we're deleting (i.e., adding a delete marker) and the OLH + // already refers to a delete marker, error out + if (olh_found && olh.get_entry().delete_marker) { + CLS_LOG(10, + "%s: delete marker received for \"%s\" although OLH" + " already refers to a delete marker", + __func__, escape_str(op.key.to_string()).c_str()); + return -ENOENT; + } + } + + if (existed && !real_clock::is_zero(op.unmod_since)) { + timespec mtime = ceph::real_clock::to_timespec(obj.mtime()); + timespec unmod = ceph::real_clock::to_timespec(op.unmod_since); + if (!op.high_precision_time) { + mtime.tv_nsec = 0; + unmod.tv_nsec = 0; + } + if (mtime >= unmod) { + return 0; /* no need to set error, we just return 0 and avoid + * writing to the bi log */ + } + } + + bool removing; + + /* + * Special handling for null instance object / delete-marker. For + * these objects we're going to have separate instances for a data + * object vs. delete-marker to avoid collisions. We now check if we + * got to overwrite a previous entry, and in that case we'll remove + * its list entry. + */ + if (op.key.instance.empty()) { + BIVerObjEntry other_obj(hctx, op.key); + ret = other_obj.init(!op.delete_marker); /* try reading the other + * null versioned + * entry */ + existed = (ret >= 0 && !other_obj.is_delete_marker()); + if (ret >= 0 && other_obj.is_delete_marker() != op.delete_marker) { + ret = other_obj.unlink_list_entry(); + if (ret < 0) { + return ret; + } + } + + removing = existed && op.delete_marker; + if (!removing) { + ret = other_obj.unlink(); + if (ret < 0) { + return ret; + } + } + } else { + removing = (existed && !obj.is_delete_marker() && op.delete_marker); + } + + if (op.delete_marker) { + /* a deletion marker, need to initialize entry as such */ + obj.init_as_delete_marker(op.meta); + } + + /* read olh */ + if (!olh_read_attempt) { // only read if we didn't attempt earlier + ret = olh.init(&olh_found); + if (ret < 0) { + return ret; + } + olh_read_attempt = true; + } + + const uint64_t prev_epoch = olh.get_epoch(); + + if (!olh.start_modify(op.olh_epoch)) { + ret = obj.write(op.olh_epoch, false); + if (ret < 0) { + return ret; + } + if (removing) { + olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false, op.olh_epoch); + } + return 0; + } + + // promote this version to current if it's a newer epoch, or if it matches the + // current epoch and sorts after the current instance + const bool promote = (olh.get_epoch() > prev_epoch) || + (olh.get_epoch() == prev_epoch && + olh.get_entry().key.instance >= op.key.instance); + + if (olh_found) { + const string& olh_tag = olh.get_tag(); + if (op.olh_tag != olh_tag) { + if (!olh.pending_removal()) { + CLS_LOG(5, "NOTICE: op.olh_tag (%s) != olh.tag (%s)", op.olh_tag.c_str(), olh_tag.c_str()); + return -ECANCELED; + } + /* if pending removal, this is a new olh instance */ + olh.set_tag(op.olh_tag); + } + if (promote && olh.exists()) { + rgw_bucket_olh_entry& olh_entry = olh.get_entry(); + /* found olh, previous instance is no longer the latest, need to update */ + if (!(olh_entry.key == op.key)) { + BIVerObjEntry old_obj(hctx, olh_entry.key); + + ret = 
old_obj.demote_current(); + if (ret < 0) { + CLS_LOG(0, "ERROR: could not demote current on previous key ret=%d", ret); + return ret; + } + } + } + olh.set_pending_removal(false); + } else { + bool instance_only = (op.key.instance.empty() && op.delete_marker); + cls_rgw_obj_key key(op.key.name); + ret = convert_plain_entry_to_versioned(hctx, key, promote, instance_only); + if (ret < 0) { + CLS_LOG(0, "ERROR: convert_plain_entry_to_versioned ret=%d", ret); + return ret; + } + olh.set_tag(op.olh_tag); + if (op.key.instance.empty()){ + obj.set_epoch(1); + } + } + + /* update the olh log */ + olh.update_log(CLS_RGW_OLH_OP_LINK_OLH, op.op_tag, op.key, op.delete_marker); + if (removing) { + olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false); + } + + if (promote) { + olh.update(op.key, op.delete_marker); + } + olh.set_exists(true); + + ret = olh.write(); + if (ret < 0) { + CLS_LOG(0, "ERROR: failed to update olh ret=%d", ret); + return ret; + } + + /* write the instance and list entries */ + ret = obj.write(olh.get_epoch(), promote); + if (ret < 0) { + return ret; + } + + if (!op.log_op) { + return 0; + } + + rgw_bucket_dir_header header; + ret = read_bucket_header(hctx, &header); + if (ret < 0) { + CLS_LOG(1, "ERROR: rgw_bucket_link_olh(): failed to read header\n"); + return ret; + } + if (header.syncstopped) { + return 0; + } + + rgw_bucket_dir_entry& entry = obj.get_dir_entry(); + + rgw_bucket_entry_ver ver; + ver.epoch = (op.olh_epoch ? op.olh_epoch : olh.get_epoch()); + + string *powner = NULL; + string *powner_display_name = NULL; + + if (op.delete_marker) { + powner = &entry.meta.owner; + powner_display_name = &entry.meta.owner_display_name; + } + + RGWModifyOp operation = (op.delete_marker ? CLS_RGW_OP_LINK_OLH_DM : CLS_RGW_OP_LINK_OLH); + ret = log_index_operation(hctx, op.key, operation, op.op_tag, + entry.meta.mtime, ver, + CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, op.bilog_flags | RGW_BILOG_FLAG_VERSIONED_OP, + powner, powner_display_name, &op.zones_trace); + if (ret < 0) + return ret; + + return write_bucket_header(hctx, &header); /* updates header version */ +} + +static int rgw_bucket_unlink_instance(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + string olh_data_idx; + string instance_idx; + + // decode request + rgw_cls_unlink_instance_op op; + auto iter = in->cbegin(); + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: rgw_bucket_rm_obj_instance_op(): failed to decode request\n"); + return -EINVAL; + } + + cls_rgw_obj_key dest_key = op.key; + if (dest_key.instance == "null") { + dest_key.instance.clear(); + } + + BIVerObjEntry obj(hctx, dest_key); + BIOLHEntry olh(hctx, dest_key); + + int ret = obj.init(); + if (ret == -ENOENT) { + return 0; /* already removed */ + } + if (ret < 0) { + CLS_LOG(0, "ERROR: obj.init() returned ret=%d", ret); + return ret; + } + + bool olh_found; + ret = olh.init(&olh_found); + if (ret < 0) { + CLS_LOG(0, "ERROR: olh.init() returned ret=%d", ret); + return ret; + } + + if (!olh_found) { + bool instance_only = false; + cls_rgw_obj_key key(dest_key.name); + ret = convert_plain_entry_to_versioned(hctx, key, true, instance_only); + if (ret < 0) { + CLS_LOG(0, "ERROR: convert_plain_entry_to_versioned ret=%d", ret); + return ret; + } + olh.update(dest_key, false); + olh.set_tag(op.olh_tag); + + obj.set_epoch(1); + } + + if (!olh.start_modify(op.olh_epoch)) { + ret = obj.unlink_list_entry(); + if (ret < 0) { + return ret; + } + + 
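/* [editorial note, illustrative] start_modify() rejected this op as stale: + * e.g. with the OLH at epoch 5, a replayed unlink carrying op.olh_epoch == 3 + * only unlinks the list entry above and, below, records the removal in the + * OLH log at the caller's epoch, leaving the current head untouched. */ + 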
if (obj.is_delete_marker()) { + return 0; + } + + olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false, op.olh_epoch); + return olh.write(); + } + + rgw_bucket_olh_entry& olh_entry = olh.get_entry(); + cls_rgw_obj_key& olh_key = olh_entry.key; + CLS_LOG(20, "%s: updating olh log: existing olh entry: %s[%s] (delete_marker=%d)", __func__, + olh_key.name.c_str(), olh_key.instance.c_str(), olh_entry.delete_marker); + + if (olh_key == dest_key) { + /* this is the current head, need to update! */ + cls_rgw_obj_key next_key; + bool found = false; + ret = obj.find_next_key(&next_key, &found); + if (ret < 0) { + CLS_LOG(0, "ERROR: obj.find_next_key() returned ret=%d", ret); + return ret; + } + + if (found) { + BIVerObjEntry next(hctx, next_key); + ret = next.write(olh.get_epoch(), true); + if (ret < 0) { + CLS_LOG(0, "ERROR: next.write() returned ret=%d", ret); + return ret; + } + + CLS_LOG(20, "%s: updating olh log: link olh -> %s[%s] (is_delete=%d)", __func__, + next_key.name.c_str(), next_key.instance.c_str(), (int)next.is_delete_marker()); + + olh.update(next_key, next.is_delete_marker()); + olh.update_log(CLS_RGW_OLH_OP_LINK_OLH, op.op_tag, next_key, next.is_delete_marker()); + } else { + // next_key is empty, but we need to preserve its name in case this entry + // gets resharded, because this key is used for hash placement + next_key.name = dest_key.name; + olh.update(next_key, false); + olh.update_log(CLS_RGW_OLH_OP_UNLINK_OLH, op.op_tag, next_key, false); + olh.set_exists(false); + olh.set_pending_removal(true); + } + } + + if (!obj.is_delete_marker()) { + olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false); + } else { + /* this is a delete marker, it's our responsibility to remove its + * instance entry */ + ret = obj.unlink(); + if (ret < 0) { + return ret; + } + } + + ret = obj.unlink_list_entry(); + if (ret < 0) { + return ret; + } + + ret = olh.write(); + if (ret < 0) { + return ret; + } + + if (!op.log_op) { + return 0; + } + + rgw_bucket_dir_header header; + ret = read_bucket_header(hctx, &header); + if (ret < 0) { + CLS_LOG(1, "ERROR: rgw_bucket_unlink_instance(): failed to read header\n"); + return ret; + } + if (header.syncstopped) { + return 0; + } + + rgw_bucket_entry_ver ver; + ver.epoch = (op.olh_epoch ? 
op.olh_epoch : olh.get_epoch()); + + real_time mtime = obj.mtime(); /* mtime has no real meaning in + * instance removal context */ + ret = log_index_operation(hctx, op.key, CLS_RGW_OP_UNLINK_INSTANCE, op.op_tag, + mtime, ver, + CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, + op.bilog_flags | RGW_BILOG_FLAG_VERSIONED_OP, NULL, NULL, &op.zones_trace); + if (ret < 0) + return ret; + + return write_bucket_header(hctx, &header); /* updates header version */ +} + +static int rgw_bucket_read_olh_log(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + // decode request + rgw_cls_read_olh_log_op op; + auto iter = in->cbegin(); + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: rgw_bucket_read_olh_log(): failed to decode request\n"); + return -EINVAL; + } + + if (!op.olh.instance.empty()) { + CLS_LOG(1, "bad key passed in (non empty instance)"); + return -EINVAL; + } + + rgw_bucket_olh_entry olh_data_entry; + string olh_data_key; + encode_olh_data_key(op.olh, &olh_data_key); + int ret = read_index_entry(hctx, olh_data_key, &olh_data_entry); + if (ret < 0 && ret != -ENOENT) { + CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret); + return ret; + } + + if (olh_data_entry.tag != op.olh_tag) { + CLS_LOG(1, "NOTICE: %s: olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__, olh_data_entry.tag.c_str(), op.olh_tag.c_str()); + return -ECANCELED; + } + + rgw_cls_read_olh_log_ret op_ret; + +#define MAX_OLH_LOG_ENTRIES 1000 + map<uint64_t, vector<rgw_bucket_olh_log_entry> >& log = olh_data_entry.pending_log; + + if (log.begin()->first > op.ver_marker && log.size() <= MAX_OLH_LOG_ENTRIES) { + op_ret.log = log; + op_ret.is_truncated = false; + } else { + auto iter = log.upper_bound(op.ver_marker); + + for (int i = 0; i < MAX_OLH_LOG_ENTRIES && iter != log.end(); ++i, ++iter) { + op_ret.log[iter->first] = iter->second; + } + op_ret.is_truncated = (iter != log.end()); + } + + encode(op_ret, *out); + + return 0; +} + +static int rgw_bucket_trim_olh_log(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + // decode request + rgw_cls_trim_olh_log_op op; + auto iter = in->cbegin(); + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: rgw_bucket_trim_olh_log(): failed to decode request\n"); + return -EINVAL; + } + + if (!op.olh.instance.empty()) { + CLS_LOG(1, "bad key passed in (non empty instance)"); + return -EINVAL; + } + + /* read olh entry */ + rgw_bucket_olh_entry olh_data_entry; + string olh_data_key; + encode_olh_data_key(op.olh, &olh_data_key); + int ret = read_index_entry(hctx, olh_data_key, &olh_data_entry); + if (ret < 0 && ret != -ENOENT) { + CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret); + return ret; + } + + if (olh_data_entry.tag != op.olh_tag) { + CLS_LOG(1, "NOTICE: %s: olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__, olh_data_entry.tag.c_str(), op.olh_tag.c_str()); + return -ECANCELED; + } + + /* remove all versions up to and including ver from the pending map */ + auto& log = olh_data_entry.pending_log; + auto liter = log.begin(); + while (liter != log.end() && liter->first <= op.ver) { + auto rm_iter = liter; + ++liter; + log.erase(rm_iter); + } + + /* write the olh data entry */ + ret = write_entry(hctx, olh_data_entry, olh_data_key); + if (ret < 0) { + CLS_LOG(0, "ERROR: write_entry() olh_key=%s ret=%d", 
olh_data_key.c_str(), ret); + return ret; + } + + return 0; +} + +static int rgw_bucket_clear_olh(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + // decode request + rgw_cls_bucket_clear_olh_op op; + auto iter = in->cbegin(); + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: rgw_bucket_clear_olh(): failed to decode request\n"); + return -EINVAL; + } + + if (!op.key.instance.empty()) { + CLS_LOG(1, "bad key passed in (non empty instance)"); + return -EINVAL; + } + + /* read olh entry */ + rgw_bucket_olh_entry olh_data_entry; + string olh_data_key; + encode_olh_data_key(op.key, &olh_data_key); + int ret = read_index_entry(hctx, olh_data_key, &olh_data_entry); + if (ret < 0 && ret != -ENOENT) { + CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret); + return ret; + } + + if (olh_data_entry.tag != op.olh_tag) { + CLS_LOG(1, "NOTICE: %s: olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__, olh_data_entry.tag.c_str(), op.olh_tag.c_str()); + return -ECANCELED; + } + + ret = cls_cxx_map_remove_key(hctx, olh_data_key); + if (ret < 0) { + CLS_LOG(1, "NOTICE: %s: can't remove key %s ret=%d", __func__, olh_data_key.c_str(), ret); + return ret; + } + + rgw_bucket_dir_entry plain_entry; + + /* read plain entry, make sure it's a versioned place holder */ + ret = read_index_entry(hctx, op.key.name, &plain_entry); + if (ret == -ENOENT) { + /* we're done, no entry existing */ + return 0; + } + if (ret < 0) { + CLS_LOG(0, "ERROR: read_index_entry key=%s ret=%d", op.key.name.c_str(), ret); + return ret; + } + + if ((plain_entry.flags & rgw_bucket_dir_entry::FLAG_VER_MARKER) == 0) { + /* it's not a version marker, don't remove it */ + return 0; + } + + ret = cls_cxx_map_remove_key(hctx, op.key.name); + if (ret < 0) { + CLS_LOG(1, "NOTICE: %s: can't remove key %s ret=%d", __func__, op.key.name.c_str(), ret); + return ret; + } + + return 0; +} + +int rgw_dir_suggest_changes(cls_method_context_t hctx, + bufferlist *in, bufferlist *out) +{ + const ConfigProxy& conf = cls_get_config(hctx); + const object_info_t& oi = cls_get_object_info(hctx); + + // bucket index transaction instrumentation + const bool bitx_inst = + conf->rgw_bucket_index_transaction_instrumentation; + + CLS_LOG_BITX(bitx_inst, 10, "ENTERING %s for object oid=%s key=%s", + __func__, oi.soid.oid.name.c_str(), oi.soid.get_key().c_str()); + + bufferlist header_bl; + rgw_bucket_dir_header header; + bool header_changed = false; + + int rc = read_bucket_header(hctx, &header); + if (rc < 0) { + CLS_LOG_BITX(bitx_inst, 1, "ERROR: %s: failed to read header", __func__); + return rc; + } + + const uint64_t config_op_expiration = + conf->rgw_pending_bucket_index_op_expiration; + + // priority order -- 1) bucket header, 2) global config, 3) DEFAULT; + // a value of zero indicates go down the list + timespan tag_timeout( + std::chrono::seconds( + header.tag_timeout ? + header.tag_timeout : + (config_op_expiration ? 
+ config_op_expiration : + CEPH_RGW_DEFAULT_TAG_TIMEOUT))); + CLS_LOG_BITX(bitx_inst, 10, "INFO: %s: tag_timeout=%ld", __func__, tag_timeout.count()); + + auto in_iter = in->cbegin(); + + while (!in_iter.end()) { + __u8 op; + rgw_bucket_dir_entry cur_change; + rgw_bucket_dir_entry cur_disk; + try { + decode(op, in_iter); + decode(cur_change, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG_BITX(bitx_inst, 1, + "ERROR: %s: failed to decode request", __func__); + return -EINVAL; + } + + bufferlist cur_disk_bl; + // check if the log op flag is set and strip it from the op + bool log_op = (op & CEPH_RGW_DIR_SUGGEST_LOG_OP) != 0; + op &= CEPH_RGW_DIR_SUGGEST_OP_MASK; + + string cur_change_key; + encode_obj_index_key(cur_change.key, &cur_change_key); + + CLS_LOG_BITX(bitx_inst, 10, + "INFO: %s: op=%c, cur_change_key=%s, cur_change.exists=%d", + __func__, op, escape_str(cur_change_key).c_str(), cur_change.exists); + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: reading map entry at key=%s", + __func__, escape_str(cur_change_key).c_str()); + int ret = cls_cxx_map_get_val(hctx, cur_change_key, &cur_disk_bl); + if (ret < 0 && ret != -ENOENT) { + CLS_LOG_BITX(bitx_inst, 20, + "ERROR: %s: accessing map, key=%s error=%d", __func__, + escape_str(cur_change_key).c_str(), ret); + return -EINVAL; + } + + if (ret == -ENOENT) { + CLS_LOG_BITX(bitx_inst, 20, + "WARNING: %s: accessing map, key not found key=%s, continuing", + __func__, escape_str(cur_change_key).c_str()); + continue; + } + + if (cur_disk_bl.length()) { + auto cur_disk_iter = cur_disk_bl.cbegin(); + try { + decode(cur_disk, cur_disk_iter); + } catch (ceph::buffer::error& error) { + CLS_LOG_BITX(bitx_inst, 1, "ERROR: %s: failed to decode cur_disk", + __func__); + return -EINVAL; + } + + // remove any pending entries whose tag timeout has expired. until expiry, + // these pending entries will prevent us from applying suggested changes + real_time cur_time = real_clock::now(); + auto iter = cur_disk.pending_map.begin(); + while (iter != cur_disk.pending_map.end()) { + auto cur_iter = iter++; // IMPORTANT: cur_iter is invalidated by the erase below + if (cur_time > (cur_iter->second.timestamp + timespan(tag_timeout))) { + CLS_LOG_BITX(bitx_inst, 0, + "WARNING: %s: pending map entry for \"%s\" " + "(pending_state=%d, op=%s) expired and was removed", + __func__, + cur_iter->first.c_str(), + cur_iter->second.state, + modify_op_str(cur_iter->second.op).c_str()); + cur_disk.pending_map.erase(cur_iter); + } + } // while + } // if + + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: op=%c cur_disk.pending_map.empty()=%d cur_disk.exists=%d " + "cur_disk.index_ver=%d cur_change.exists=%d cur_change.index_ver=%d", + __func__, op, cur_disk.pending_map.empty(), cur_disk.exists, + (int)cur_disk.index_ver, cur_change.exists, + (int)cur_change.index_ver); + + if (cur_change.index_ver < cur_disk.index_ver) { + // a pending on-disk entry was completed since this suggestion was made, + // don't apply it yet. 
if the index really is inconsistent, the next + // listing will get the latest version and resend the suggestion + continue; + } + + if (cur_disk.pending_map.empty()) { + CLS_LOG_BITX(bitx_inst, 10, "INFO: %s: cur_disk.pending_map is empty", __func__); + if (cur_disk.exists) { + rgw_bucket_category_stats& old_stats = header.stats[cur_disk.meta.category]; + CLS_LOG_BITX(bitx_inst, 10, "INFO: %s: stats.num_entries: %ld -> %ld", + __func__, old_stats.num_entries, old_stats.num_entries - 1); + old_stats.num_entries--; + old_stats.total_size -= cur_disk.meta.accounted_size; + old_stats.total_size_rounded -= cls_rgw_get_rounded_size(cur_disk.meta.accounted_size); + old_stats.actual_size -= cur_disk.meta.size; + header_changed = true; + } + rgw_bucket_category_stats& stats = header.stats[cur_change.meta.category]; + + switch(op) { + case CEPH_RGW_REMOVE: + CLS_LOG_BITX(bitx_inst, 10, + "INFO: %s: CEPH_RGW_REMOVE name=%s encoded=%s", + __func__, escape_str(cur_change.key.to_string()).c_str(), + escape_str(cur_change_key).c_str()); + + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: removing map entry with key=%s", + __func__, escape_str(cur_change_key).c_str()); + ret = cls_cxx_map_remove_key(hctx, cur_change_key); + if (ret < 0) { + CLS_LOG_BITX(bitx_inst, 0, "ERROR: %s: unable to remove key, key=%s, error=%d", + __func__, escape_str(cur_change_key).c_str(), ret); + return ret; + } + if (log_op && cur_disk.exists && !header.syncstopped) { + ret = log_index_operation(hctx, cur_disk.key, CLS_RGW_OP_DEL, cur_disk.tag, cur_disk.meta.mtime, + cur_disk.ver, CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, 0, NULL, NULL, NULL); + if (ret < 0) { + CLS_LOG_BITX(bitx_inst, 0, "ERROR: %s: failed to log operation ret=%d", + __func__, ret); + return ret; + } + } + break; + case CEPH_RGW_UPDATE: + CLS_LOG_BITX(bitx_inst, 10, + "INFO: %s: CEPH_RGW_UPDATE name=%s stats.num_entries: %ld -> %ld", + __func__, escape_str(cur_change.key.to_string()).c_str(), + stats.num_entries, stats.num_entries + 1); + + stats.num_entries++; + stats.total_size += cur_change.meta.accounted_size; + stats.total_size_rounded += cls_rgw_get_rounded_size(cur_change.meta.accounted_size); + stats.actual_size += cur_change.meta.size; + header_changed = true; + cur_change.index_ver = header.ver; + bufferlist cur_state_bl; + encode(cur_change, cur_state_bl); + + CLS_LOG_BITX(bitx_inst, 20, + "INFO: %s: setting map entry at key=%s", + __func__, escape_str(cur_change.key.to_string()).c_str()); + ret = cls_cxx_map_set_val(hctx, cur_change_key, &cur_state_bl); + if (ret < 0) { + CLS_LOG_BITX(bitx_inst, 0, "ERROR: %s: unable to set value for key, key=%s, error=%d", + __func__, escape_str(cur_change_key).c_str(), ret); + return ret; + } + if (log_op && !header.syncstopped) { + ret = log_index_operation(hctx, cur_change.key, CLS_RGW_OP_ADD, cur_change.tag, cur_change.meta.mtime, + cur_change.ver, CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, 0, NULL, NULL, NULL); + if (ret < 0) { + CLS_LOG_BITX(bitx_inst, 0, "ERROR: %s: failed to log operation ret=%d", __func__, ret); + return ret; + } + } + break; + } // switch(op) + } // if (cur_disk.pending_map.empty()) + } // while (!in_iter.end()) + + if (header_changed) { + CLS_LOG_BITX(bitx_inst, 10, "INFO: %s: bucket header changed, writing", __func__); + int ret = write_bucket_header(hctx, &header); + if (ret < 0) { + CLS_LOG_BITX(bitx_inst, 0, + "ERROR: %s: failed to write bucket header ret=%d", + __func__, ret); + } else { + CLS_LOG_BITX(bitx_inst, 10, "EXITING %s, returning %d", __func__, ret); + 
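/* [editorial sketch, not part of the original patch] For reference, the + * request decoded by the while-loop above is a plain concatenation of + * (op byte, entry) pairs; a client suggesting a logged removal might + * build it roughly as: + * + * bufferlist in; + * __u8 op = CEPH_RGW_REMOVE | CEPH_RGW_DIR_SUGGEST_LOG_OP; + * encode(op, in); + * encode(stale_entry, in); // a stale rgw_bucket_dir_entry (hypothetical variable) + */ + 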
} + return ret; + } + + CLS_LOG_BITX(bitx_inst, 10, "EXITING %s, returning 0", __func__); + return 0; +} // rgw_dir_suggest_changes + +static int rgw_obj_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + // decode request + rgw_cls_obj_remove_op op; + auto iter = in->cbegin(); + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: %s: failed to decode request", __func__); + return -EINVAL; + } + + if (op.keep_attr_prefixes.empty()) { + return cls_cxx_remove(hctx); + } + + map<string, bufferlist> attrset; + int ret = cls_cxx_getxattrs(hctx, &attrset); + if (ret < 0 && ret != -ENOENT) { + CLS_LOG(0, "ERROR: %s: cls_cxx_getxattrs() returned %d", __func__, ret); + return ret; + } + + map<string, bufferlist> new_attrs; + for (auto iter = op.keep_attr_prefixes.begin(); + iter != op.keep_attr_prefixes.end(); ++iter) { + auto& check_prefix = *iter; + + for (auto aiter = attrset.lower_bound(check_prefix); + aiter != attrset.end(); ++aiter) { + const string& attr = aiter->first; + + if (attr.substr(0, check_prefix.size()) > check_prefix) { + break; + } + + new_attrs[attr] = aiter->second; + } + } + + CLS_LOG(20, "%s: removing object", __func__); + ret = cls_cxx_remove(hctx); + if (ret < 0) { + CLS_LOG(0, "ERROR: %s: cls_cxx_remove returned %d", __func__, ret); + return ret; + } + + if (new_attrs.empty()) { + /* no data to keep */ + return 0; + } + + ret = cls_cxx_create(hctx, false); + if (ret < 0) { + CLS_LOG(0, "ERROR: %s: cls_cxx_create returned %d", __func__, ret); + return ret; + } + + for (auto aiter = new_attrs.begin(); + aiter != new_attrs.end(); ++aiter) { + const auto& attr = aiter->first; + + ret = cls_cxx_setxattr(hctx, attr.c_str(), &aiter->second); + CLS_LOG(20, "%s: setting attr: %s", __func__, attr.c_str()); + if (ret < 0) { + CLS_LOG(0, "ERROR: %s: cls_cxx_setxattr (attr=%s) returned %d", __func__, attr.c_str(), ret); + return ret; + } + } + + return 0; +} + +static int rgw_obj_store_pg_ver(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + // decode request + rgw_cls_obj_store_pg_ver_op op; + auto iter = in->cbegin(); + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: %s: failed to decode request", __func__); + return -EINVAL; + } + + bufferlist bl; + uint64_t ver = cls_current_version(hctx); + encode(ver, bl); + int ret = cls_cxx_setxattr(hctx, op.attr.c_str(), &bl); + if (ret < 0) { + CLS_LOG(0, "ERROR: %s: cls_cxx_setxattr (attr=%s) returned %d", __func__, op.attr.c_str(), ret); + return ret; + } + + return 0; +} + +static int rgw_obj_check_attrs_prefix(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + // decode request + rgw_cls_obj_check_attrs_prefix op; + auto iter = in->cbegin(); + try { + decode(op, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: %s: failed to decode request", __func__); + return -EINVAL; + } + + if (op.check_prefix.empty()) { + return -EINVAL; + } + + map<string, bufferlist> attrset; + int ret = cls_cxx_getxattrs(hctx, &attrset); + if (ret < 0 && ret != -ENOENT) { + CLS_LOG(0, "ERROR: %s: cls_cxx_getxattrs() returned %d", __func__, ret); + return ret; + } + + bool exist = false; + + for (auto aiter = attrset.lower_bound(op.check_prefix); + aiter != attrset.end(); ++aiter) { + const auto& attr = aiter->first; + + if (attr.substr(0, op.check_prefix.size()) > op.check_prefix) { + break; + } + + exist = 
true;
+ }
+
+ if (exist == op.fail_if_exist) {
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static int rgw_obj_check_mtime(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ // decode request
+ rgw_cls_obj_check_mtime op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ real_time obj_ut;
+ int ret = cls_cxx_stat2(hctx, NULL, &obj_ut);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: %s: cls_cxx_stat2() returned %d", __func__, ret);
+ return ret;
+ }
+ if (ret == -ENOENT) {
+ CLS_LOG(10, "object does not exist, skipping check");
+ }
+
+ ceph_timespec obj_ts = ceph::real_clock::to_ceph_timespec(obj_ut);
+ ceph_timespec op_ts = ceph::real_clock::to_ceph_timespec(op.mtime);
+
+ if (!op.high_precision_time) {
+ obj_ts.tv_nsec = 0;
+ op_ts.tv_nsec = 0;
+ }
+
+ CLS_LOG(10, "%s: obj_ut=%lld.%06lld op.mtime=%lld.%06lld", __func__,
+ (long long)obj_ts.tv_sec, (long long)obj_ts.tv_nsec,
+ (long long)op_ts.tv_sec, (long long)op_ts.tv_nsec);
+
+ bool check;
+
+ switch (op.type) {
+ case CLS_RGW_CHECK_TIME_MTIME_EQ:
+ check = (obj_ts == op_ts);
+ break;
+ case CLS_RGW_CHECK_TIME_MTIME_LT:
+ check = (obj_ts < op_ts);
+ break;
+ case CLS_RGW_CHECK_TIME_MTIME_LE:
+ check = (obj_ts <= op_ts);
+ break;
+ case CLS_RGW_CHECK_TIME_MTIME_GT:
+ check = (obj_ts > op_ts);
+ break;
+ case CLS_RGW_CHECK_TIME_MTIME_GE:
+ check = (obj_ts >= op_ts);
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (!check) {
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static int rgw_bi_get_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ // decode request
+ rgw_cls_bi_get_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ string idx;
+
+ switch (op.type) {
+ case BIIndexType::Plain:
+ idx = op.key.name;
+ break;
+ case BIIndexType::Instance:
+ encode_obj_index_key(op.key, &idx);
+ break;
+ case BIIndexType::OLH:
+ encode_olh_data_key(op.key, &idx);
+ break;
+ default:
+ CLS_LOG(10, "%s: invalid key type encoding: %d",
+ __func__, int(op.type));
+ return -EINVAL;
+ }
+
+ rgw_cls_bi_get_ret op_ret;
+
+ rgw_cls_bi_entry& entry = op_ret.entry;
+
+ entry.type = op.type;
+ entry.idx = idx;
+
+ int r = cls_cxx_map_get_val(hctx, idx, &entry.data);
+ if (r < 0) {
+ CLS_LOG(10, "%s: cls_cxx_map_get_val() returned %d", __func__, r);
+ return r;
+ }
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+static int rgw_bi_put_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ // decode request
+ rgw_cls_bi_put_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ rgw_cls_bi_entry& entry = op.entry;
+
+ int r = cls_cxx_map_set_val(hctx, entry.idx, &entry.data);
+ if (r < 0) {
+ CLS_LOG(0, "ERROR: %s: cls_cxx_map_set_val() returned r=%d", __func__, r);
+ }
+
+ return 0;
+}
+
+
+/* The plain entries in the bucket index are divided into two regions
+ * separated by the special entries that begin with 0x80. Those below
+ * ("Low") are ASCII entries; those above ("High") are entries that
+ * begin with non-ASCII (unicode) characters. This enum allows either
+ * or both regions to be listed in
+ * list_plain_entries().
It's convenient that "Both" be in between the + * others so we can use "<= Both" or ">= Both" logic. + */ +enum class PlainEntriesRegion { + Low, Both, High +}; + + +/* Queries the omap for plain entries in the range of start_after_key + * to end_key, non-inclusive. Both of those values must either be + * before the "ugly namespace" or after it. + * + * Negative return values indicate errors. Non-negative return values + * indicate number of entries retrieved. */ +static int list_plain_entries_help(cls_method_context_t hctx, + const std::string& name_filter, + const std::string& start_after_key, // exclusive + const std::string& end_key, // exclusive + uint32_t max, + std::list<rgw_cls_bi_entry>* entries, + bool& end_key_reached, + bool& more) +{ + CLS_LOG(10, "Entered %s: name_filter=\"%s\", start_after_key=\"%s\", end_key=\"%s\", max=%d", + __func__, escape_str(name_filter).c_str(), escape_str(start_after_key).c_str(), + escape_str(end_key).c_str(), max); + int count = 0; + std::map<std::string, bufferlist> raw_entries; + int ret = cls_cxx_map_get_vals(hctx, start_after_key, name_filter, max, + &raw_entries, &more); + CLS_LOG(20, "%s: cls_cxx_map_get_vals ret=%d, raw_entries.size()=%lu, more=%d", + __func__, ret, raw_entries.size(), more); + if (ret < 0) { + return ret; + } + + end_key_reached = false; + for (auto iter : raw_entries) { + if (!end_key.empty() && iter.first >= end_key) { + CLS_LOG(20, "%s: end key reached at \"%s\"", + __func__, escape_str(iter.first).c_str()); + end_key_reached = true; + more = false; + return count; + } + + rgw_bucket_dir_entry e; + auto biter = iter.second.cbegin(); + try { + decode(e, biter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: %s: failed to decode buffer for plain bucket index entry \"%s\"", + __func__, escape_str(iter.first).c_str()); + return -EIO; + } + + if (!name_filter.empty() && e.key.name > name_filter) { + CLS_LOG(20, "%s: due to filter \"%s\", skipping entry.idx=\"%s\" e.key.name=\"%s\"", + __func__, + escape_str(name_filter).c_str(), + escape_str(iter.first).c_str(), + escape_str(e.key.name).c_str()); + // skip the rest of the entries + more = false; + end_key_reached = true; + return count; + } + + rgw_cls_bi_entry entry; + entry.type = BIIndexType::Plain; + entry.idx = iter.first; + entry.data = iter.second; + + entries->push_back(entry); + count++; + + CLS_LOG(20, "%s: adding entry %d entry.idx=\"%s\" e.key.name=\"%s\"", + __func__, + count, + escape_str(entry.idx).c_str(), + escape_str(e.key.name).c_str()); + + if (count >= int(max)) { + // NB: this looks redundant, but leave in for time being + return count; + } + } // iter for loop + + return count; +} // list_plain_entries_help + +/* + * Lists plain entries in either or both regions, the region of those + * beginning with an ASCII character or a non-ASCII character, which + * surround the "ugly" namespace used by special entries for versioned + * buckets. + * + * The entries parameter is not cleared and additional entries are + * appended to it. 
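+ *
+ * As an illustration (key values hypothetical): a plain entry keyed
+ * "photo.jpg" sorts below the special-entry range and is returned by a
+ * Low-region listing, while an entry whose key begins with a multi-byte
+ * UTF-8 character sorts above that range and is returned by a
+ * High-region listing.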
+ */ +static int list_plain_entries(cls_method_context_t hctx, + const std::string& name_filter, + const std::string& marker, + uint32_t max, + std::list<rgw_cls_bi_entry>* entries, + bool* pmore, + const PlainEntriesRegion region = PlainEntriesRegion::Both) +{ + CLS_LOG(10, "entered %s: name_filter=\"%s\", marker=\"%s\", max=%d, region=%d", + __func__, escape_str(name_filter).c_str(), escape_str(marker).c_str(), max, static_cast<int>(region)); + int r = 0; + bool end_key_reached = false; + bool more = false; + const size_t start_size = entries->size(); + + if (region <= PlainEntriesRegion::Both && marker < BI_PREFIX_BEGIN) { + // listing ascii plain namespace + int r = list_plain_entries_help(hctx, name_filter, marker, BI_PREFIX_BEGIN, max, + entries, end_key_reached, more); + CLS_LOG(20, "%s: first list_plain_entries_help r=%d, end_key_reached=%d, more=%d", + __func__, r, end_key_reached, more); + if (r < 0) { + return r; + } + + // see if we're done for this call (there may be more for a later call) + if (r >= int(max) || !end_key_reached || (!more && region == PlainEntriesRegion::Low)) { + if (pmore) { + *pmore = more; + } + + return int(entries->size() - start_size); + } + + max = max - r; + } + + if (region >= PlainEntriesRegion::Both) { + const std::string start_after_key = std::max(marker, BI_PREFIX_END); + + // listing non-ascii plain namespace + r = list_plain_entries_help(hctx, name_filter, start_after_key, {}, max, + entries, end_key_reached, more); + CLS_LOG(20, "%s: second list_plain_entries_help r=%d, end_key_reached=%d, more=%d", + __func__, r, end_key_reached, more); + if (r < 0) { + return r; + } + } + + if (pmore) { + *pmore = more; + } + + return int(entries->size() - start_size); +} + +static int list_instance_entries(cls_method_context_t hctx, + const string& name, + const string& marker, + uint32_t max, + list<rgw_cls_bi_entry> *entries, + bool *pmore) +{ + cls_rgw_obj_key key(name); + string first_instance_idx; + encode_obj_versioned_data_key(key, &first_instance_idx); + string start_after_key; + + if (!name.empty()) { + start_after_key = first_instance_idx; + } else { + start_after_key = BI_PREFIX_CHAR; + start_after_key.append(bucket_index_prefixes[BI_BUCKET_OBJ_INSTANCE_INDEX]); + } + string filter = start_after_key; + if (bi_entry_gt(marker, start_after_key)) { + start_after_key = marker; + } + int count = 0; + map<string, bufferlist> keys; + bufferlist k; + int ret = cls_cxx_map_get_val(hctx, start_after_key, &k); + if (ret < 0 && ret != -ENOENT) { + return ret; + } + // we need to include the exact match if a filter (name) is + // specified and the marker has not yet advanced (i.e., been set) + bool found_first = (ret == 0) && (start_after_key != marker); + if (found_first) { + --max; + } + if (max > 0) { + ret = cls_cxx_map_get_vals(hctx, start_after_key, string(), max, + &keys, pmore); + CLS_LOG(20, "%s: start_after_key=\"%s\" first_instance_idx=\"%s\" keys.size()=%d", + __func__, escape_str(start_after_key).c_str(), + escape_str(first_instance_idx).c_str(), (int)keys.size()); + if (ret < 0) { + return ret; + } + } + if (found_first) { + keys[start_after_key] = std::move(k); + } + + for (auto iter = keys.begin(); iter != keys.end(); ++iter) { + rgw_cls_bi_entry entry; + entry.type = BIIndexType::Instance; + entry.idx = iter->first; + entry.data = iter->second; + + if (!filter.empty() && entry.idx.compare(0, filter.size(), filter) != 0) { + /* we are skipping the rest of the entries */ + if (pmore) { + *pmore = false; + } + return count; + } + + CLS_LOG(20, 
"%s: entry.idx=\"%s\"", __func__, escape_str(entry.idx).c_str()); + + auto biter = entry.data.cbegin(); + + rgw_bucket_dir_entry e; + try { + decode(e, biter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: %s: failed to decode buffer (size=%d)", __func__, entry.data.length()); + return -EIO; + } + + if (!name.empty() && e.key.name != name) { + /* we are skipping the rest of the entries */ + if (pmore) { + *pmore = false; + } + return count; + } + + entries->push_back(entry); + count++; + start_after_key = entry.idx; + } + + return count; +} + +static int list_olh_entries(cls_method_context_t hctx, + const string& name, + const string& marker, + uint32_t max, + list<rgw_cls_bi_entry> *entries, + bool *pmore) +{ + cls_rgw_obj_key key(name); + string first_instance_idx; + encode_olh_data_key(key, &first_instance_idx); + string start_after_key; + + if (!name.empty()) { + start_after_key = first_instance_idx; + } else { + start_after_key = BI_PREFIX_CHAR; + start_after_key.append(bucket_index_prefixes[BI_BUCKET_OLH_DATA_INDEX]); + } + string filter = start_after_key; + if (bi_entry_gt(marker, start_after_key)) { + start_after_key = marker; + } + int count = 0; + map<string, bufferlist> keys; + int ret; + bufferlist k; + ret = cls_cxx_map_get_val(hctx, start_after_key, &k); + if (ret < 0 && ret != -ENOENT) { + return ret; + } + // we need to include the exact match if a filter (name) is + // specified and the marker has not yet advanced (i.e., been set) + bool found_first = (ret == 0) && (start_after_key != marker); + if (found_first) { + --max; + } + if (max > 0) { + ret = cls_cxx_map_get_vals(hctx, start_after_key, string(), max, + &keys, pmore); + CLS_LOG(20, "%s: start_after_key=\"%s\", first_instance_idx=\"%s\", keys.size()=%d", + __func__, escape_str(start_after_key).c_str(), + escape_str(first_instance_idx).c_str(), (int)keys.size()); + if (ret < 0) { + return ret; + } + } + + if (found_first) { + keys[start_after_key] = std::move(k); + } + + for (auto iter = keys.begin(); iter != keys.end(); ++iter) { + rgw_cls_bi_entry entry; + entry.type = BIIndexType::OLH; + entry.idx = iter->first; + entry.data = iter->second; + + if (!filter.empty() && entry.idx.compare(0, filter.size(), filter) != 0) { + /* we are skipping the rest of the entries */ + if (pmore) { + *pmore = false; + } + return count; + } + + CLS_LOG(20, "%s: entry.idx=\"%s\"", __func__, escape_str(entry.idx).c_str()); + + auto biter = entry.data.cbegin(); + + rgw_bucket_olh_entry e; + try { + decode(e, biter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: %s: failed to decode buffer (size=%d)", __func__, entry.data.length()); + return -EIO; + } + + if (!name.empty() && e.key.name != name) { + /* we are skipping the rest of the entries */ + if (pmore) { + *pmore = false; + } + return count; + } + + entries->push_back(entry); + count++; + start_after_key = entry.idx; + } + + return count; +} + +static int check_index(cls_method_context_t hctx, + rgw_bucket_dir_header *existing_header, + rgw_bucket_dir_header *calc_header) +{ + int rc = read_bucket_header(hctx, existing_header); + if (rc < 0) { + CLS_LOG(1, "ERROR: check_index(): failed to read header\n"); + return rc; + } + + calc_header->tag_timeout = existing_header->tag_timeout; + calc_header->ver = existing_header->ver; + calc_header->syncstopped = existing_header->syncstopped; + + std::list<rgw_cls_bi_entry> entries; + string start_obj; + string filter_prefix; + +#define CHECK_CHUNK_SIZE 1000 + bool more; + + do { + rc = list_plain_entries(hctx, 
filter_prefix, start_obj, CHECK_CHUNK_SIZE, &entries, &more);
+ if (rc < 0) {
+ return rc;
+ }
+
+ for (const auto & bientry : entries) {
+ rgw_bucket_dir_entry entry;
+ auto diter = bientry.data.cbegin();
+ try {
+ decode(entry, diter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: check_index(): failed to decode entry, key=%s", bientry.idx.c_str());
+ return -EIO;
+ }
+
+ if (entry.exists && entry.flags == 0) {
+ rgw_bucket_category_stats& stats = calc_header->stats[entry.meta.category];
+ stats.num_entries++;
+ stats.total_size += entry.meta.accounted_size;
+ stats.total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size);
+ stats.actual_size += entry.meta.size;
+ }
+ start_obj = bientry.idx;
+ }
+ entries.clear();
+ } while (more);
+
+ start_obj = "";
+ do {
+ rc = list_instance_entries(hctx, filter_prefix, start_obj, CHECK_CHUNK_SIZE, &entries, &more);
+ if (rc < 0) {
+ return rc;
+ }
+
+ for (const auto & bientry : entries) {
+ rgw_bucket_dir_entry entry;
+ auto diter = bientry.data.cbegin();
+ try {
+ decode(entry, diter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: check_index(): failed to decode entry, key=%s", bientry.idx.c_str());
+ return -EIO;
+ }
+
+ if (entry.exists) {
+ rgw_bucket_category_stats& stats = calc_header->stats[entry.meta.category];
+ stats.num_entries++;
+ stats.total_size += entry.meta.accounted_size;
+ stats.total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size);
+ stats.actual_size += entry.meta.size;
+ }
+ start_obj = bientry.idx;
+ }
+ entries.clear();
+ } while (more);
+
+ return 0;
+}
+
+int rgw_bucket_rebuild_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ rgw_bucket_dir_header existing_header;
+ rgw_bucket_dir_header calc_header;
+ int rc = check_index(hctx, &existing_header, &calc_header);
+ if (rc < 0)
+ return rc;
+
+ return write_bucket_header(hctx, &calc_header);
+}
+
+
+int rgw_bucket_check_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ rgw_cls_check_index_ret ret;
+
+ int rc = check_index(hctx, &ret.existing_header, &ret.calculated_header);
+ if (rc < 0)
+ return rc;
+
+ encode(ret, *out);
+
+ return 0;
+}
+
+
+/* Lists all the entries that appear in a bucket index listing.
+ *
+ * It may not be obvious why this function calls three other "segment"
+ * functions (list_plain_entries (twice), list_instance_entries,
+ * list_olh_entries) that each list segments of the index space rather
+ * than just move a marker through the space from start to end. The
+ * reason is that a name filter may be provided in the op, and in that
+ * case most entries will be skipped over, and small segments within
+ * each larger segment will be listed.
+ *
+ * Ideally, each of the three segment functions should be able to
+ * handle a marker and filter, if either/both is provided,
+ * efficiently. So, for example, if the marker is after the segment,
+ * ideally return quickly rather than iterating through entries in the
+ * segment.
+ *
+ * Additionally, each of the three segment functions, if successful,
+ * is expected to return the number of entries added to the output
+ * list as a non-negative value. As per usual, negative return values
+ * indicate error conditions.
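+ *
+ * For orientation, the listing order implemented below is: plain
+ * entries in the Low (ASCII) region, then instance entries, then OLH
+ * entries, then plain entries in the High (non-ASCII) region; each
+ * segment receives whatever portion of the caller's max remains after
+ * the segments before it.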
+ */
+static int rgw_bi_list_op(cls_method_context_t hctx,
+ bufferlist *in,
+ bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ // decode request
+ rgw_cls_bi_list_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ constexpr uint32_t MAX_BI_LIST_ENTRIES = 1000;
+ const uint32_t max = std::min(op.max, MAX_BI_LIST_ENTRIES);
+
+ CLS_LOG(20, "%s: op.marker=\"%s\", op.name_filter=\"%s\", op.max=%u max=%u",
+ __func__, escape_str(op.marker).c_str(), escape_str(op.name_filter).c_str(),
+ op.max, max);
+
+ int ret;
+ uint32_t count = 0;
+ bool more = false;
+ rgw_cls_bi_list_ret op_ret;
+
+ ret = list_plain_entries(hctx, op.name_filter, op.marker, max,
+ &op_ret.entries, &more, PlainEntriesRegion::Low);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: list_plain_entries (low) returned ret=%d, marker=\"%s\", filter=\"%s\", max=%d",
+ __func__, ret, escape_str(op.marker).c_str(), escape_str(op.name_filter).c_str(), max);
+ return ret;
+ }
+
+ count = ret;
+ CLS_LOG(20, "%s: found %d plain ascii (low) entries, count=%u", __func__, ret, count);
+
+ if (!more) {
+ ret = list_instance_entries(hctx, op.name_filter, op.marker, max - count, &op_ret.entries, &more);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: list_instance_entries returned ret=%d", __func__, ret);
+ return ret;
+ }
+
+ count += ret;
+ CLS_LOG(20, "%s: found %d instance entries, count=%u", __func__, ret, count);
+ }
+
+ if (!more) {
+ ret = list_olh_entries(hctx, op.name_filter, op.marker, max - count, &op_ret.entries, &more);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: list_olh_entries returned ret=%d", __func__, ret);
+ return ret;
+ }
+
+ count += ret;
+ CLS_LOG(20, "%s: found %d olh entries, count=%u", __func__, ret, count);
+ }
+
+ if (!more) {
+ ret = list_plain_entries(hctx, op.name_filter, op.marker, max - count,
+ &op_ret.entries, &more, PlainEntriesRegion::High);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: list_plain_entries (high) returned ret=%d, marker=\"%s\", filter=\"%s\", max=%d",
+ __func__, ret, escape_str(op.marker).c_str(), escape_str(op.name_filter).c_str(), max);
+ return ret;
+ }
+
+ count += ret;
+ CLS_LOG(20, "%s: found %d non-ascii (high) plain entries, count=%u", __func__, ret, count);
+ }
+
+ op_ret.is_truncated = (count > max) || more;
+ while (count > max) {
+ op_ret.entries.pop_back();
+ count--;
+ }
+
+ CLS_LOG(20, "%s: returning %lu entries, is_truncated=%d", __func__, op_ret.entries.size(), op_ret.is_truncated);
+ encode(op_ret, *out);
+
+ return 0;
+} // rgw_bi_list_op
+
+
+int bi_log_record_decode(bufferlist& bl, rgw_bi_log_entry& e)
+{
+ auto iter = bl.cbegin();
+ try {
+ decode(e, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: failed to decode rgw_bi_log_entry");
+ return -EIO;
+ }
+ return 0;
+}
+
+
+static int bi_log_iterate_entries(cls_method_context_t hctx,
+ const string& marker,
+ const string& end_marker,
+ string& key_iter,
+ uint32_t max_entries,
+ bool *truncated,
+ int (*cb)(cls_method_context_t, const string&, rgw_bi_log_entry&, void *),
+ void *param)
+{
+ CLS_LOG(10, "bi_log_iterate_entries");
+
+ map<string, bufferlist> keys;
+ string filter_prefix, end_key;
+ uint32_t i = 0;
+ string key;
+
+ if (truncated)
+ *truncated = false;
+
+ string start_after_key;
+ if (key_iter.empty()) {
+ key = BI_PREFIX_CHAR;
+ key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
+ key.append(marker);
+
+ start_after_key = key;
+ } else {
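+ // not the first call: resume from the marker key recorded by the
+ // previous iteration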
start_after_key = key_iter;
+ }
+
+ if (end_marker.empty()) {
+ end_key = BI_PREFIX_CHAR;
+ end_key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX + 1]);
+ } else {
+ end_key = BI_PREFIX_CHAR;
+ end_key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
+ end_key.append(end_marker);
+ }
+
+ CLS_LOG(10, "bi_log_iterate_entries start_after_key=%s end_key=%s",
+ start_after_key.c_str(), end_key.c_str());
+
+ string filter;
+
+ int ret = cls_cxx_map_get_vals(hctx, start_after_key, filter, max_entries,
+ &keys, truncated);
+ if (ret < 0)
+ return ret;
+
+ auto iter = keys.begin();
+ if (iter == keys.end())
+ return 0;
+
+ uint32_t num_keys = keys.size();
+
+ for (; iter != keys.end(); ++iter,++i) {
+ const string& key = iter->first;
+ rgw_bi_log_entry e;
+
+ CLS_LOG(10, "bi_log_iterate_entries key=%s bl.length=%d", key.c_str(), (int)iter->second.length());
+
+ if (key.compare(end_key) > 0) {
+ key_iter = key;
+ if (truncated) {
+ *truncated = false;
+ }
+ return 0;
+ }
+
+ ret = bi_log_record_decode(iter->second, e);
+ if (ret < 0)
+ return ret;
+
+ ret = cb(hctx, key, e, param);
+ if (ret < 0)
+ return ret;
+
+ if (i == num_keys - 1) {
+ key_iter = key;
+ }
+ }
+
+ return 0;
+}
+
+static int bi_log_list_cb(cls_method_context_t hctx, const string& key, rgw_bi_log_entry& info, void *param)
+{
+ list<rgw_bi_log_entry> *l = (list<rgw_bi_log_entry> *)param;
+ l->push_back(info);
+ return 0;
+}
+
+static int bi_log_list_entries(cls_method_context_t hctx, const string& marker,
+ uint32_t max, list<rgw_bi_log_entry>& entries, bool *truncated)
+{
+ string key_iter;
+ string end_marker;
+ int ret = bi_log_iterate_entries(hctx, marker, end_marker,
+ key_iter, max, truncated,
+ bi_log_list_cb, &entries);
+ return ret;
+}
+
+static int rgw_bi_log_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_bi_log_list_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bi_log_list(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ cls_rgw_bi_log_list_ret op_ret;
+ int ret = bi_log_list_entries(hctx, op.marker, op.max, op_ret.entries, &op_ret.truncated);
+ if (ret < 0)
+ return ret;
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+static int rgw_bi_log_trim(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_bi_log_trim_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bi_log_trim(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ string key_begin(1, BI_PREFIX_CHAR);
+ key_begin.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
+ key_begin.append(op.start_marker);
+
+ string key_end;
+ if (op.end_marker.empty()) {
+ key_end = BI_PREFIX_CHAR;
+ key_end.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX + 1]);
+ } else {
+ key_end = BI_PREFIX_CHAR;
+ key_end.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
+ key_end.append(op.end_marker);
+ // cls_cxx_map_remove_range() expects one-past-end
+ key_end.append(1, '\0');
+ }
+
+ // list a single key to detect whether the range is empty
+ const size_t max_entries = 1;
+ std::set<std::string> keys;
+ bool more = false;
+
+ int rc = cls_cxx_map_get_keys(hctx, key_begin, max_entries, &keys, &more);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: cls_cxx_map_get_keys failed rc=%d", rc);
+ return rc;
+ }
+
+ if (keys.empty()) {
+ CLS_LOG(20, "range is empty key_begin=%s", key_begin.c_str());
+ return -ENODATA;
+ }
+
+ const std::string& first_key = *keys.begin();
+ if (key_end < first_key) {
+ CLS_LOG(20, "listed key %s past key_end=%s", first_key.c_str(), key_end.c_str());
+ return -ENODATA;
+ }
+
+ CLS_LOG(20, "listed key %s, removing through %s",
+ first_key.c_str(), key_end.c_str());
+
+ rc = cls_cxx_map_remove_range(hctx, first_key, key_end);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: cls_cxx_map_remove_range failed rc=%d", rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int rgw_bi_log_resync(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_bi_log_resync(): failed to read header\n");
+ return rc;
+ }
+
+ bufferlist bl;
+
+ rgw_bi_log_entry entry;
+
+ entry.timestamp = real_clock::now();
+ entry.op = RGWModifyOp::CLS_RGW_OP_RESYNC;
+ entry.state = RGWPendingState::CLS_RGW_STATE_COMPLETE;
+
+ string key;
+ bi_log_index_key(hctx, key, entry.id, header.ver);
+
+ encode(entry, bl);
+
+ if (entry.id > header.max_marker)
+ header.max_marker = entry.id;
+
+ header.syncstopped = false;
+
+ rc = cls_cxx_map_set_val(hctx, key, &bl);
+ if (rc < 0)
+ return rc;
+
+ return write_bucket_header(hctx, &header);
+}
+
+static int rgw_bi_log_stop(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_bi_log_stop(): failed to read header\n");
+ return rc;
+ }
+
+ bufferlist bl;
+
+ rgw_bi_log_entry entry;
+
+ entry.timestamp = real_clock::now();
+ entry.op = RGWModifyOp::CLS_RGW_OP_SYNCSTOP;
+ entry.state = RGWPendingState::CLS_RGW_STATE_COMPLETE;
+
+ string key;
+ bi_log_index_key(hctx, key, entry.id, header.ver);
+
+ encode(entry, bl);
+
+ if (entry.id > header.max_marker)
+ header.max_marker = entry.id;
+ header.syncstopped = true;
+
+ rc = cls_cxx_map_set_val(hctx, key, &bl);
+ if (rc < 0)
+ return rc;
+
+ return write_bucket_header(hctx, &header);
+}
+
+
+static void usage_record_prefix_by_time(uint64_t epoch, string& key)
+{
+ char buf[32];
+ snprintf(buf, sizeof(buf), "%011llu", (long long unsigned)epoch);
+ key = buf;
+}
+
+static void usage_record_prefix_by_user(const string& user, uint64_t epoch, string& key)
+{
+ char buf[user.size() + 32];
+ snprintf(buf, sizeof(buf), "%s_%011llu_", user.c_str(), (long long unsigned)epoch);
+ key = buf;
+}
+
+static void usage_record_name_by_time(uint64_t epoch, const string& user, const string& bucket, string& key)
+{
+ char buf[32 + user.size() + bucket.size()];
+ snprintf(buf, sizeof(buf), "%011llu_%s_%s", (long long unsigned)epoch, user.c_str(), bucket.c_str());
+ key = buf;
+}
+
+static void usage_record_name_by_user(const string& user, uint64_t epoch, const string& bucket, string& key)
+{
+ char buf[32 + user.size() + bucket.size()];
+ snprintf(buf, sizeof(buf), "%s_%011llu_%s", user.c_str(), (long long unsigned)epoch, bucket.c_str());
+ key = buf;
+}
+
+static int usage_record_decode(bufferlist& record_bl, rgw_usage_log_entry& e)
+{
+ auto kiter = record_bl.cbegin();
+ try {
+ decode(e, kiter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: usage_record_decode(): failed to decode record_bl\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rgw_user_usage_log_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+
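+ // Note: each usage record is written under two omap keys, one ordered
+ // by time ("<epoch>_<user>_<bucket>") and one ordered by user
+ // ("<user>_<epoch>_<bucket>"); see the usage_record_name_* helpers
+ // above. Reads and trims can therefore scan by either dimension.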
+ auto in_iter = in->cbegin();
+ rgw_cls_usage_log_add_op op;
+
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_user_usage_log_add(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ rgw_usage_log_info& info = op.info;
+
+ for (auto iter = info.entries.begin(); iter != info.entries.end(); ++iter) {
+ rgw_usage_log_entry& entry = *iter;
+ string key_by_time;
+
+ rgw_user *puser = (entry.payer.empty() ? &entry.owner : &entry.payer);
+
+ usage_record_name_by_time(entry.epoch, puser->to_str(), entry.bucket, key_by_time);
+
+ CLS_LOG(10, "rgw_user_usage_log_add user=%s bucket=%s", puser->to_str().c_str(), entry.bucket.c_str());
+
+ bufferlist record_bl;
+ int ret = cls_cxx_map_get_val(hctx, key_by_time, &record_bl);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(1, "ERROR: rgw_user_usage_log_add(): cls_cxx_map_get_val returned %d", ret);
+ return -EINVAL;
+ }
+ if (ret >= 0) {
+ rgw_usage_log_entry e;
+ ret = usage_record_decode(record_bl, e);
+ if (ret < 0)
+ return ret;
+ CLS_LOG(10, "rgw_user_usage_log_add aggregating existing bucket\n");
+ entry.aggregate(e);
+ }
+
+ bufferlist new_record_bl;
+ encode(entry, new_record_bl);
+ ret = cls_cxx_map_set_val(hctx, key_by_time, &new_record_bl);
+ if (ret < 0)
+ return ret;
+
+ string key_by_user;
+ usage_record_name_by_user(puser->to_str(), entry.epoch, entry.bucket, key_by_user);
+ ret = cls_cxx_map_set_val(hctx, key_by_user, &new_record_bl);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int usage_iterate_range(cls_method_context_t hctx, uint64_t start, uint64_t end, const string& user,
+ const string& bucket, string& key_iter, uint32_t max_entries, bool *truncated,
+ int (*cb)(cls_method_context_t, const string&, rgw_usage_log_entry&, void *),
+ void *param)
+{
+ CLS_LOG(10, "entered %s", __func__);
+
+ map<string, bufferlist> keys;
+ string filter_prefix;
+ string start_key, end_key;
+ bool by_user = !user.empty();
+ string user_key;
+ bool truncated_status = false;
+
+ ceph_assert(truncated != nullptr);
+
+ if (!by_user) {
+ usage_record_prefix_by_time(end, end_key);
+ } else {
+ user_key = user;
+ user_key.append("_");
+ }
+
+ if (key_iter.empty()) {
+ if (by_user) {
+ usage_record_prefix_by_user(user, start, start_key);
+ } else {
+ usage_record_prefix_by_time(start, start_key);
+ }
+ } else {
+ start_key = key_iter;
+ }
+
+ CLS_LOG(20, "usage_iterate_range start_key=%s", start_key.c_str());
+ int ret = cls_cxx_map_get_vals(hctx, start_key, filter_prefix, max_entries, &keys, &truncated_status);
+ if (ret < 0)
+ return ret;
+
+ *truncated = truncated_status;
+
+ auto iter = keys.begin();
+ if (iter == keys.end())
+ return 0;
+
+ for (; iter != keys.end(); ++iter) {
+ const string& key = iter->first;
+ rgw_usage_log_entry e;
+
+ key_iter = key;
+ if (!by_user && key.compare(end_key) >= 0) {
+ CLS_LOG(20, "usage_iterate_range reached key=%s, done", key.c_str());
+ *truncated = false;
+ key_iter = key;
+ return 0;
+ }
+
+ if (by_user && key.compare(0, user_key.size(), user_key) != 0) {
+ CLS_LOG(20, "usage_iterate_range reached key=%s, done", key.c_str());
+ *truncated = false;
+ key_iter = key;
+ return 0;
+ }
+
+ ret = usage_record_decode(iter->second, e);
+ if (ret < 0)
+ return ret;
+
+ if (!bucket.empty() && bucket.compare(e.bucket))
+ continue;
+
+ if (e.epoch < start)
+ continue;
+
+ /* keys are sorted by epoch, so once we're past end we're done */
+ if (e.epoch >= end) {
+ *truncated = false;
+ return 0;
+ }
+
+ ret = cb(hctx, key, e, param);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int usage_log_read_cb(cls_method_context_t hctx, const string& key, rgw_usage_log_entry& entry, void *param)
+{
+ map<rgw_user_bucket, rgw_usage_log_entry> *usage = (map<rgw_user_bucket, rgw_usage_log_entry> *)param;
+ rgw_user *puser;
+ if (!entry.payer.empty()) {
+ puser = &entry.payer;
+ } else {
+ puser = &entry.owner;
+ }
+ rgw_user_bucket ub(puser->to_str(), entry.bucket);
+ rgw_usage_log_entry& le = (*usage)[ub];
+ le.aggregate(entry);
+
+ return 0;
+}
+
+int rgw_user_usage_log_read(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+
+ auto in_iter = in->cbegin();
+ rgw_cls_usage_log_read_op op;
+
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_user_usage_log_read(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ rgw_cls_usage_log_read_ret ret_info;
+ map<rgw_user_bucket, rgw_usage_log_entry> *usage = &ret_info.usage;
+ string iter = op.iter;
+#define MAX_ENTRIES 1000
+ uint32_t max_entries = (op.max_entries ? op.max_entries : MAX_ENTRIES);
+ int ret = usage_iterate_range(hctx, op.start_epoch, op.end_epoch, op.owner, op.bucket, iter, max_entries, &ret_info.truncated, usage_log_read_cb, (void *)usage);
+ if (ret < 0)
+ return ret;
+
+ if (ret_info.truncated)
+ ret_info.next_iter = iter;
+
+ encode(ret_info, *out);
+ return 0;
+}
+
+static int usage_log_trim_cb(cls_method_context_t hctx, const string& key, rgw_usage_log_entry& entry, void *param)
+{
+ bool *found = (bool *)param;
+ if (found) {
+ *found = true;
+ }
+ string key_by_time;
+ string key_by_user;
+
+ string o = entry.owner.to_str();
+ usage_record_name_by_time(entry.epoch, o, entry.bucket, key_by_time);
+ usage_record_name_by_user(o, entry.epoch, entry.bucket, key_by_user);
+
+ int ret = cls_cxx_map_remove_key(hctx, key_by_time);
+ if (ret < 0)
+ return ret;
+
+ return cls_cxx_map_remove_key(hctx, key_by_user);
+}
+
+int rgw_user_usage_log_trim(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+
+ /* only continue if object exists! */
+ int ret = cls_cxx_stat(hctx, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ auto in_iter = in->cbegin();
+ rgw_cls_usage_log_trim_op op;
+
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_user_usage_log_trim(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ string iter;
+ bool more;
+ bool found = false;
+#define MAX_USAGE_TRIM_ENTRIES 1000
+ ret = usage_iterate_range(hctx, op.start_epoch, op.end_epoch, op.user, op.bucket, iter, MAX_USAGE_TRIM_ENTRIES, &more, usage_log_trim_cb, (void *)&found);
+ if (ret < 0)
+ return ret;
+
+ if (!more && !found)
+ return -ENODATA;
+
+ return 0;
+}
+
+int rgw_usage_log_clear(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+
+ int ret = cls_cxx_map_clear(hctx);
+ /* if object doesn't exist all the logs are cleared anyway */
+ if (ret == -ENOENT)
+ ret = 0;
+
+ return ret;
+}
+
+/*
+ * We hold the garbage collection chain data under two different
+ * indexes: the first 'name' index keeps them under a unique tag that
+ * represents the chains, and a second 'time' index keeps them by
+ * their expiration timestamp. Each is prefixed differently (see
+ * gc_index_prefixes below).
+ *
+ * Since key-value data is listed in lexical order by keys, generally
+ * the name entries are retrieved first and then the time entries.
+ * When listing the entries via `gc_iterate_entries` one parameter is
+ * a marker, and if we were to pass "1_" (i.e.,
+ * gc_index_prefixes[GC_OBJ_TIME_INDEX]), the listing would skip over
+ * the 'name' entries and begin with the 'time' entries.
+ *
+ * Furthermore, the times are converted to strings such that lexical
+ * order correlates with chronological order, so the entries are
+ * returned chronologically from the earliest expiring to the latest
+ * expiring. This allows for starting at "1_" and to keep retrieving
+ * chunks of entries, and as long as they are prior to the current
+ * time, they're expired and processing can continue.
+ */
+#define GC_OBJ_NAME_INDEX 0
+#define GC_OBJ_TIME_INDEX 1
+
+static string gc_index_prefixes[] = { "0_",
+ "1_" };
+
+static void prepend_index_prefix(const string& src, int index, string *dest)
+{
+ *dest = gc_index_prefixes[index];
+ dest->append(src);
+}
+
+static int gc_omap_get(cls_method_context_t hctx, int type, const string& key, cls_rgw_gc_obj_info *info)
+{
+ string index;
+ prepend_index_prefix(key, type, &index);
+
+ int ret = read_omap_entry(hctx, index, info);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int gc_omap_set(cls_method_context_t hctx, int type, const string& key, const cls_rgw_gc_obj_info *info)
+{
+ bufferlist bl;
+ encode(*info, bl);
+
+ string index = gc_index_prefixes[type];
+ index.append(key);
+
+ int ret = cls_cxx_map_set_val(hctx, index, &bl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int gc_omap_remove(cls_method_context_t hctx, int type, const string& key)
+{
+ string index = gc_index_prefixes[type];
+ index.append(key);
+
+ int ret = cls_cxx_map_remove_key(hctx, index);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static bool key_in_index(const string& key, int index_type)
+{
+ const string& prefix = gc_index_prefixes[index_type];
+ return (key.compare(0, prefix.size(), prefix) == 0);
+}
+
+
+static int gc_update_entry(cls_method_context_t hctx, uint32_t expiration_secs,
+ cls_rgw_gc_obj_info& info)
+{
+ cls_rgw_gc_obj_info old_info;
+ int ret = gc_omap_get(hctx, GC_OBJ_NAME_INDEX, info.tag, &old_info);
+ if (ret == 0) {
+ string key;
+ get_time_key(old_info.time, &key);
+ ret = gc_omap_remove(hctx, GC_OBJ_TIME_INDEX, key);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: failed to remove key=%s", key.c_str());
+ return ret;
+ }
+ }
+
+ // calculate time and time key
+ info.time = ceph::real_clock::now();
+ info.time += make_timespan(expiration_secs);
+ string time_key;
+ get_time_key(info.time, &time_key);
+
+ if (info.chain.objs.empty()) {
+ CLS_LOG(0,
+ "WARNING: %s setting GC log entry with zero-length chain, "
+ "tag='%s', timekey='%s'",
+ __func__, info.tag.c_str(), time_key.c_str());
+ }
+
+ ret = gc_omap_set(hctx, GC_OBJ_NAME_INDEX, info.tag, &info);
+ if (ret < 0)
+ return ret;
+
+ ret = gc_omap_set(hctx, GC_OBJ_TIME_INDEX, time_key, &info);
+ if (ret < 0)
+ goto done_err;
+
+ return 0;
+
+done_err:
+
+ CLS_LOG(0, "ERROR: gc_update_entry error info.tag=%s, ret=%d",
+ info.tag.c_str(), ret);
+ gc_omap_remove(hctx, GC_OBJ_NAME_INDEX, info.tag);
+
+ return ret;
+}
+
+static int gc_defer_entry(cls_method_context_t hctx, const string& tag, uint32_t expiration_secs)
+{
+ cls_rgw_gc_obj_info info;
+ int ret = gc_omap_get(hctx, GC_OBJ_NAME_INDEX, tag, &info);
+ if (ret < 0)
+ return ret;
+ return gc_update_entry(hctx, expiration_secs, info);
+}
+
+int gc_record_decode(bufferlist& bl, cls_rgw_gc_obj_info& e)
+{
+ auto iter = bl.cbegin();
+ try {
+ decode(e, iter);
+ } catch
(ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: failed to decode cls_rgw_gc_obj_info"); + return -EIO; + } + return 0; +} + +static int rgw_cls_gc_set_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + auto in_iter = in->cbegin(); + + cls_rgw_gc_set_entry_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: rgw_cls_gc_set_entry(): failed to decode entry\n"); + return -EINVAL; + } + + return gc_update_entry(hctx, op.expiration_secs, op.info); +} + +static int rgw_cls_gc_defer_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + auto in_iter = in->cbegin(); + + cls_rgw_gc_defer_entry_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: rgw_cls_gc_defer_entry(): failed to decode entry\n"); + return -EINVAL; + } + + return gc_defer_entry(hctx, op.tag, op.expiration_secs); +} + +static int gc_iterate_entries(cls_method_context_t hctx, + const string& marker, + bool expired_only, + string& out_marker, + uint32_t max_entries, + bool *truncated, + int (*cb)(cls_method_context_t, + const string&, + cls_rgw_gc_obj_info&, + void *), + void *param) +{ + CLS_LOG(10, "gc_iterate_entries"); + + map<string, bufferlist> keys; + string filter_prefix, end_key; + string key; + + if (truncated) + *truncated = false; + + string start_key; + if (marker.empty()) { + prepend_index_prefix(marker, GC_OBJ_TIME_INDEX, &start_key); + } else { + start_key = marker; + } + + if (expired_only) { + real_time now = ceph::real_clock::now(); + string now_str; + get_time_key(now, &now_str); + prepend_index_prefix(now_str, GC_OBJ_TIME_INDEX, &end_key); + + CLS_LOG(10, "gc_iterate_entries end_key=%s", end_key.c_str()); + } + + string filter; + + int ret = cls_cxx_map_get_vals(hctx, start_key, filter, max_entries, + &keys, truncated); + if (ret < 0) + return ret; + + auto iter = keys.begin(); + if (iter == keys.end()) { + // if keys empty must not come back as truncated + ceph_assert(!truncated || !(*truncated)); + return 0; + } + + const string* last_key = nullptr; // last key processed, for end-marker + for (; iter != keys.end(); ++iter) { + const string& key = iter->first; + cls_rgw_gc_obj_info e; + + CLS_LOG(10, "gc_iterate_entries key=%s", key.c_str()); + + if (!end_key.empty() && key.compare(end_key) >= 0) { + if (truncated) + *truncated = false; + return 0; + } + + if (!key_in_index(key, GC_OBJ_TIME_INDEX)) { + if (truncated) + *truncated = false; + return 0; + } + + ret = gc_record_decode(iter->second, e); + if (ret < 0) + return ret; + + ret = cb(hctx, key, e, param); + if (ret < 0) + return ret; + last_key = &(iter->first); // update when callback successful + } + + // set the out marker if either caller does not capture truncated or + // if they do capture and we are truncated + if (!truncated || *truncated) { + assert(last_key); + out_marker = *last_key; + } + + return 0; +} + +static int gc_list_cb(cls_method_context_t hctx, const string& key, cls_rgw_gc_obj_info& info, void *param) +{ + list<cls_rgw_gc_obj_info> *l = (list<cls_rgw_gc_obj_info> *)param; + l->push_back(info); + return 0; +} + +static int gc_list_entries(cls_method_context_t hctx, const string& marker, + uint32_t max, bool expired_only, + list<cls_rgw_gc_obj_info>& entries, bool *truncated, string& next_marker) +{ + int ret = gc_iterate_entries(hctx, marker, expired_only, + next_marker, max, truncated, + gc_list_cb, &entries); + return ret; +} + 
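+/* Illustrative note (tag value hypothetical): a chain added with tag
+ * "tag123" is stored under two omap keys, "0_tag123" in the name index
+ * and "1_" + get_time_key(info.time) in the time index. Because
+ * gc_iterate_entries starts at the "1_" prefix and, when expired_only
+ * is set, stops at the key derived from the current time, a listing
+ * visits only chains whose expiration time has already passed, in
+ * chronological order. */
+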
+static int rgw_cls_gc_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_gc_list_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_gc_list(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ cls_rgw_gc_list_ret op_ret;
+#define GC_LIST_ENTRIES_DEFAULT 128
+ int ret = gc_list_entries(hctx, op.marker, (op.max ? op.max : GC_LIST_ENTRIES_DEFAULT), op.expired_only,
+ op_ret.entries, &op_ret.truncated, op_ret.next_marker);
+ if (ret < 0)
+ return ret;
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+static int gc_remove(cls_method_context_t hctx, vector<string>& tags)
+{
+ for (auto iter = tags.begin(); iter != tags.end(); ++iter) {
+ string& tag = *iter;
+ cls_rgw_gc_obj_info info;
+ int ret = gc_omap_get(hctx, GC_OBJ_NAME_INDEX, tag, &info);
+ if (ret == -ENOENT) {
+ CLS_LOG(0, "couldn't find tag in name index tag=%s", tag.c_str());
+ continue;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ string time_key;
+ get_time_key(info.time, &time_key);
+ ret = gc_omap_remove(hctx, GC_OBJ_TIME_INDEX, time_key);
+ if (ret < 0 && ret != -ENOENT)
+ return ret;
+ if (ret == -ENOENT) {
+ CLS_LOG(0, "couldn't find key in time index key=%s", time_key.c_str());
+ }
+
+ ret = gc_omap_remove(hctx, GC_OBJ_NAME_INDEX, tag);
+ if (ret < 0 && ret != -ENOENT)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rgw_cls_gc_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_gc_remove_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_gc_remove(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ return gc_remove(hctx, op.tags);
+}
+
+static int rgw_cls_lc_get_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_lc_get_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_get_entry(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ cls_rgw_lc_entry lc_entry;
+ int ret = read_omap_entry(hctx, op.marker, &lc_entry);
+ if (ret < 0)
+ return ret;
+
+ cls_rgw_lc_get_entry_ret op_ret(std::move(lc_entry));
+ encode(op_ret, *out);
+ return 0;
+}
+
+
+static int rgw_cls_lc_set_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_lc_set_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_set_entry(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ bufferlist bl;
+ encode(op.entry, bl);
+
+ int ret = cls_cxx_map_set_val(hctx, op.entry.bucket, &bl);
+ return ret;
+}
+
+static int rgw_cls_lc_rm_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_lc_rm_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_rm_entry(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ int ret = cls_cxx_map_remove_key(hctx, op.entry.bucket);
+ return ret;
+}
+
+static int rgw_cls_lc_get_next_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ auto in_iter = in->cbegin();
+ cls_rgw_lc_get_next_entry_ret op_ret;
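+ // returns the first entry whose key sorts after op.marker, or a
+ // default-constructed (empty) entry when the log is exhausted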
+ cls_rgw_lc_get_next_entry_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: rgw_cls_lc_get_next_entry: failed to decode op\n"); + return -EINVAL; + } + + map<string, bufferlist> vals; + string filter_prefix; + bool more; + int ret = cls_cxx_map_get_vals(hctx, op.marker, filter_prefix, 1, &vals, &more); + if (ret < 0) + return ret; + cls_rgw_lc_entry entry; + if (!vals.empty()) { + auto it = vals.begin(); + in_iter = it->second.begin(); + try { + decode(entry, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: rgw_cls_lc_get_next_entry(): failed to decode entry\n"); + return -EIO; + } + } + op_ret.entry = entry; + encode(op_ret, *out); + return 0; +} + +static int rgw_cls_lc_list_entries(cls_method_context_t hctx, bufferlist *in, + bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + cls_rgw_lc_list_entries_op op; + auto in_iter = in->cbegin(); + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: rgw_cls_lc_list_entries(): failed to decode op\n"); + return -EINVAL; + } + + cls_rgw_lc_list_entries_ret op_ret(op.compat_v); + map<string, bufferlist> vals; + string filter_prefix; + int ret = cls_cxx_map_get_vals(hctx, op.marker, filter_prefix, op.max_entries, + &vals, &op_ret.is_truncated); + if (ret < 0) + return ret; + for (auto it = vals.begin(); it != vals.end(); ++it) { + cls_rgw_lc_entry entry; + auto iter = it->second.cbegin(); + try { + decode(entry, iter); + } catch (buffer::error& err) { + /* try backward compat */ + pair<string, int> oe; + try { + iter = it->second.begin(); + decode(oe, iter); + entry = {oe.first, 0 /* start */, uint32_t(oe.second)}; + } catch(buffer::error& err) { + CLS_LOG( + 1, "ERROR: rgw_cls_lc_list_entries(): failed to decode entry\n"); + return -EIO; + } + } + op_ret.entries.push_back(entry); + } + encode(op_ret, *out); + return 0; +} + +static int rgw_cls_lc_put_head(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + auto in_iter = in->cbegin(); + + cls_rgw_lc_put_head_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: rgw_cls_lc_put_head(): failed to decode entry\n"); + return -EINVAL; + } + + bufferlist bl; + encode(op.head, bl); + int ret = cls_cxx_map_write_header(hctx,&bl); + return ret; +} + +static int rgw_cls_lc_get_head(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + bufferlist bl; + int ret = cls_cxx_map_read_header(hctx, &bl); + if (ret < 0) + return ret; + cls_rgw_lc_obj_head head; + if (bl.length() != 0) { + auto iter = bl.cbegin(); + try { + decode(head, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: rgw_cls_lc_get_head(): failed to decode entry %s",err.what()); + return -EINVAL; + } + } else { + head.start_date = 0; + head.marker.clear(); + } + cls_rgw_lc_get_head_ret op_ret; + op_ret.head = head; + encode(op_ret, *out); + return 0; +} + +static int rgw_mp_upload_part_info_update(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + cls_rgw_mp_upload_part_info_update_op op; + auto in_iter = in->cbegin(); + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: rgw_cls_mp_upload_part_info_update(): failed to decode op\n"); + return -EINVAL; + } + + RGWUploadPartInfo stored_info; + + int ret = read_omap_entry(hctx, op.part_key, &stored_info); + if (ret < 0 && ret != -ENOENT) { + 
return ret;
+ }
+
+ /* merge all the prior (stored) manifest prefixes to carry forward */
+ if (!stored_info.manifest.empty()) {
+ op.info.past_prefixes.insert(stored_info.manifest.get_prefix());
+ }
+ op.info.past_prefixes.merge(stored_info.past_prefixes);
+
+ if (op.info.past_prefixes.contains(op.info.manifest.get_prefix())) {
+ // Somehow the currently chosen prefix collides with one of the previous
+ // ones. Fail this part upload so that a different prefix can be picked
+ // on the next attempt.
+ const object_info_t& oi = cls_get_object_info(hctx);
+ CLS_LOG(1, "ERROR: oid [%s]: Current prefix %s is also a past prefix for part %s",
+ oi.soid.oid.name.c_str(),
+ op.info.manifest.get_prefix().c_str(),
+ op.part_key.c_str());
+ return -EEXIST;
+ }
+
+ bufferlist bl;
+ encode(op.info, bl);
+ ret = cls_cxx_map_set_val(hctx, op.part_key, &bl);
+ CLS_LOG(10, "part info update on key [%s]: %zu past prefixes, ret %d", op.part_key.c_str(), op.info.past_prefixes.size(), ret);
+ return ret;
+}
+
+static int rgw_reshard_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_reshard_add_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_reshard_add: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ string key;
+ op.entry.get_key(&key);
+
+ bufferlist bl;
+ encode(op.entry, bl);
+ int ret = cls_cxx_map_set_val(hctx, key, &bl);
+ if (ret < 0) {
+ CLS_ERR("error adding reshard job for bucket %s with key %s", op.entry.bucket_name.c_str(), key.c_str());
+ return ret;
+ }
+
+ return ret;
+}
+
+static int rgw_reshard_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ cls_rgw_reshard_list_op op;
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_reshard_list(): failed to decode entry\n");
+ return -EINVAL;
+ }
+ cls_rgw_reshard_list_ret op_ret;
+ map<string, bufferlist> vals;
+ string filter_prefix;
+#define MAX_RESHARD_LIST_ENTRIES 1000
+ /* one extra entry for identifying truncation */
+ int32_t max = (op.max && (op.max < MAX_RESHARD_LIST_ENTRIES) ? op.max : MAX_RESHARD_LIST_ENTRIES);
+ int ret = cls_cxx_map_get_vals(hctx, op.marker, filter_prefix, max, &vals, &op_ret.is_truncated);
+ if (ret < 0)
+ return ret;
+ cls_rgw_reshard_entry entry;
+ int i = 0;
+ for (auto it = vals.begin(); i < (int)op.max && it != vals.end(); ++it, ++i) {
+ auto iter = it->second.cbegin();
+ try {
+ decode(entry, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_reshard_list(): failed to decode entry\n");
+ return -EIO;
+ }
+ op_ret.entries.push_back(entry);
+ }
+ encode(op_ret, *out);
+ return 0;
+}
+
+static int rgw_reshard_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_reshard_get_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_reshard_get: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ string key;
+ cls_rgw_reshard_entry entry;
+ op.entry.get_key(&key);
+ int ret = read_omap_entry(hctx, key, &entry);
+ if (ret < 0) {
+ return ret;
+ }
+
+ cls_rgw_reshard_get_ret op_ret;
+ op_ret.entry = entry;
+ encode(op_ret, *out);
+ return 0;
+}
+
+static int rgw_reshard_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_reshard_remove_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_reshard_remove: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ string key;
+ cls_rgw_reshard_entry entry;
+ cls_rgw_reshard_entry::generate_key(op.tenant, op.bucket_name, &key);
+ int ret = read_omap_entry(hctx, key, &entry);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (!op.bucket_id.empty() &&
+ entry.bucket_id != op.bucket_id) {
+ return 0;
+ }
+
+ ret = cls_cxx_map_remove_key(hctx, key);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: failed to remove key: key=%s ret=%d", key.c_str(), ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int rgw_set_bucket_resharding(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ cls_rgw_set_bucket_resharding_op op;
+
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rgw_set_bucket_resharding: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
+ return rc;
+ }
+
+ header.new_instance.set_status(op.entry.reshard_status);
+
+ return write_bucket_header(hctx, &header);
+}
+
+static int rgw_clear_bucket_resharding(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ cls_rgw_clear_bucket_resharding_op op;
+
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rgw_clear_bucket_resharding: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
+ return rc;
+ }
+ header.new_instance.clear();
+
+ return write_bucket_header(hctx, &header);
+}
+
+static int rgw_guard_bucket_resharding(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ cls_rgw_guard_bucket_resharding_op op;
+
+ auto in_iter = in->cbegin();
+ try {
+ decode(op,
+    decode(op, in_iter);
+  } catch (ceph::buffer::error& err) {
+    CLS_LOG(1, "ERROR: %s: failed to decode entry", __func__);
+    return -EINVAL;
+  }
+
+  rgw_bucket_dir_header header;
+  int rc = read_bucket_header(hctx, &header);
+  if (rc < 0) {
+    CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
+    return rc;
+  }
+
+  if (header.resharding()) {
+    return op.ret_err;
+  }
+
+  return 0;
+}
+
+static int rgw_get_bucket_resharding(cls_method_context_t hctx,
+                                     bufferlist *in, bufferlist *out)
+{
+  CLS_LOG(10, "entered %s", __func__);
+  cls_rgw_get_bucket_resharding_op op;
+
+  auto in_iter = in->cbegin();
+  try {
+    decode(op, in_iter);
+  } catch (ceph::buffer::error& err) {
+    CLS_LOG(1, "ERROR: %s: failed to decode entry", __func__);
+    return -EINVAL;
+  }
+
+  rgw_bucket_dir_header header;
+  int rc = read_bucket_header(hctx, &header);
+  if (rc < 0) {
+    CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
+    return rc;
+  }
+
+  cls_rgw_get_bucket_resharding_ret op_ret;
+  op_ret.new_instance = header.new_instance;
+
+  encode(op_ret, *out);
+
+  return 0;
+}
+
+CLS_INIT(rgw)
+{
+  CLS_LOG(1, "Loaded rgw class!");
+
+  cls_handle_t h_class;
+  cls_method_handle_t h_rgw_bucket_init_index;
+  cls_method_handle_t h_rgw_bucket_set_tag_timeout;
+  cls_method_handle_t h_rgw_bucket_list;
+  cls_method_handle_t h_rgw_bucket_check_index;
+  cls_method_handle_t h_rgw_bucket_rebuild_index;
+  cls_method_handle_t h_rgw_bucket_update_stats;
+  cls_method_handle_t h_rgw_bucket_prepare_op;
+  cls_method_handle_t h_rgw_bucket_complete_op;
+  cls_method_handle_t h_rgw_bucket_link_olh;
+  cls_method_handle_t h_rgw_bucket_unlink_instance_op;
+  cls_method_handle_t h_rgw_bucket_read_olh_log;
+  cls_method_handle_t h_rgw_bucket_trim_olh_log;
+  cls_method_handle_t h_rgw_bucket_clear_olh;
+  cls_method_handle_t h_rgw_obj_remove;
+  cls_method_handle_t h_rgw_obj_store_pg_ver;
+  cls_method_handle_t h_rgw_obj_check_attrs_prefix;
+  cls_method_handle_t h_rgw_obj_check_mtime;
+  cls_method_handle_t h_rgw_bi_get_op;
+  cls_method_handle_t h_rgw_bi_put_op;
+  cls_method_handle_t h_rgw_bi_list_op;
+  cls_method_handle_t h_rgw_bi_log_list_op;
+  cls_method_handle_t h_rgw_bi_log_resync_op;
+  cls_method_handle_t h_rgw_bi_log_stop_op;
+  cls_method_handle_t h_rgw_dir_suggest_changes;
+  cls_method_handle_t h_rgw_user_usage_log_add;
+  cls_method_handle_t h_rgw_user_usage_log_read;
+  cls_method_handle_t h_rgw_user_usage_log_trim;
+  cls_method_handle_t h_rgw_usage_log_clear;
+  cls_method_handle_t h_rgw_gc_set_entry;
+  cls_method_handle_t h_rgw_gc_list;
+  cls_method_handle_t h_rgw_gc_remove;
+  cls_method_handle_t h_rgw_lc_get_entry;
+  cls_method_handle_t h_rgw_lc_set_entry;
+  cls_method_handle_t h_rgw_lc_rm_entry;
+  cls_method_handle_t h_rgw_lc_get_next_entry;
+  cls_method_handle_t h_rgw_lc_put_head;
+  cls_method_handle_t h_rgw_lc_get_head;
+  cls_method_handle_t h_rgw_lc_list_entries;
+  cls_method_handle_t h_rgw_mp_upload_part_info_update;
+  cls_method_handle_t h_rgw_reshard_add;
+  cls_method_handle_t h_rgw_reshard_list;
+  cls_method_handle_t h_rgw_reshard_get;
+  cls_method_handle_t h_rgw_reshard_remove;
+  cls_method_handle_t h_rgw_set_bucket_resharding;
+  cls_method_handle_t h_rgw_clear_bucket_resharding;
+  cls_method_handle_t h_rgw_guard_bucket_resharding;
+  cls_method_handle_t h_rgw_get_bucket_resharding;
+
+  cls_register(RGW_CLASS, &h_class);
+
+  /* bucket index */
+  cls_register_cxx_method(h_class, RGW_BUCKET_INIT_INDEX, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_init_index, &h_rgw_bucket_init_index);
+  cls_register_cxx_method(h_class, RGW_BUCKET_SET_TAG_TIMEOUT, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_set_tag_timeout, &h_rgw_bucket_set_tag_timeout);
+  cls_register_cxx_method(h_class, RGW_BUCKET_LIST, CLS_METHOD_RD, rgw_bucket_list, &h_rgw_bucket_list);
+  cls_register_cxx_method(h_class, RGW_BUCKET_CHECK_INDEX, CLS_METHOD_RD, rgw_bucket_check_index, &h_rgw_bucket_check_index);
+  cls_register_cxx_method(h_class, RGW_BUCKET_REBUILD_INDEX, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_rebuild_index, &h_rgw_bucket_rebuild_index);
+  cls_register_cxx_method(h_class, RGW_BUCKET_UPDATE_STATS, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_update_stats, &h_rgw_bucket_update_stats);
+  cls_register_cxx_method(h_class, RGW_BUCKET_PREPARE_OP, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_prepare_op, &h_rgw_bucket_prepare_op);
+  cls_register_cxx_method(h_class, RGW_BUCKET_COMPLETE_OP, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_complete_op, &h_rgw_bucket_complete_op);
+  cls_register_cxx_method(h_class, RGW_BUCKET_LINK_OLH, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_link_olh, &h_rgw_bucket_link_olh);
+  cls_register_cxx_method(h_class, RGW_BUCKET_UNLINK_INSTANCE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_unlink_instance, &h_rgw_bucket_unlink_instance_op);
+  cls_register_cxx_method(h_class, RGW_BUCKET_READ_OLH_LOG, CLS_METHOD_RD, rgw_bucket_read_olh_log, &h_rgw_bucket_read_olh_log);
+  cls_register_cxx_method(h_class, RGW_BUCKET_TRIM_OLH_LOG, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_trim_olh_log, &h_rgw_bucket_trim_olh_log);
+  cls_register_cxx_method(h_class, RGW_BUCKET_CLEAR_OLH, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_clear_olh, &h_rgw_bucket_clear_olh);
+
+  cls_register_cxx_method(h_class, RGW_OBJ_REMOVE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_obj_remove, &h_rgw_obj_remove);
+  cls_register_cxx_method(h_class, RGW_OBJ_STORE_PG_VER, CLS_METHOD_WR, rgw_obj_store_pg_ver, &h_rgw_obj_store_pg_ver);
+  cls_register_cxx_method(h_class, RGW_OBJ_CHECK_ATTRS_PREFIX, CLS_METHOD_RD, rgw_obj_check_attrs_prefix, &h_rgw_obj_check_attrs_prefix);
+  cls_register_cxx_method(h_class, RGW_OBJ_CHECK_MTIME, CLS_METHOD_RD, rgw_obj_check_mtime, &h_rgw_obj_check_mtime);
+
+  cls_register_cxx_method(h_class, RGW_BI_GET, CLS_METHOD_RD, rgw_bi_get_op, &h_rgw_bi_get_op);
+  cls_register_cxx_method(h_class, RGW_BI_PUT, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_put_op, &h_rgw_bi_put_op);
+  cls_register_cxx_method(h_class, RGW_BI_LIST, CLS_METHOD_RD, rgw_bi_list_op, &h_rgw_bi_list_op);
+
+  cls_register_cxx_method(h_class, RGW_BI_LOG_LIST, CLS_METHOD_RD, rgw_bi_log_list, &h_rgw_bi_log_list_op);
+  cls_register_cxx_method(h_class, RGW_BI_LOG_TRIM, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_log_trim, &h_rgw_bi_log_list_op);
+  cls_register_cxx_method(h_class, RGW_DIR_SUGGEST_CHANGES, CLS_METHOD_RD | CLS_METHOD_WR, rgw_dir_suggest_changes, &h_rgw_dir_suggest_changes);
+
+  cls_register_cxx_method(h_class, RGW_BI_LOG_RESYNC, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_log_resync, &h_rgw_bi_log_resync_op);
+  cls_register_cxx_method(h_class, RGW_BI_LOG_STOP, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_log_stop, &h_rgw_bi_log_stop_op);
+
+  /* usage logging */
+  cls_register_cxx_method(h_class, RGW_USER_USAGE_LOG_ADD, CLS_METHOD_RD | CLS_METHOD_WR, rgw_user_usage_log_add, &h_rgw_user_usage_log_add);
+  cls_register_cxx_method(h_class, RGW_USER_USAGE_LOG_READ, CLS_METHOD_RD, rgw_user_usage_log_read, &h_rgw_user_usage_log_read);
+  cls_register_cxx_method(h_class, RGW_USER_USAGE_LOG_TRIM, CLS_METHOD_RD | CLS_METHOD_WR, rgw_user_usage_log_trim, &h_rgw_user_usage_log_trim);
+  cls_register_cxx_method(h_class, RGW_USAGE_LOG_CLEAR, CLS_METHOD_WR, rgw_usage_log_clear, &h_rgw_usage_log_clear);
+
+  /* garbage collection */
+  cls_register_cxx_method(h_class, RGW_GC_SET_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_gc_set_entry, &h_rgw_gc_set_entry);
+  cls_register_cxx_method(h_class, RGW_GC_DEFER_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_gc_defer_entry, &h_rgw_gc_set_entry);
+  cls_register_cxx_method(h_class, RGW_GC_LIST, CLS_METHOD_RD, rgw_cls_gc_list, &h_rgw_gc_list);
+  cls_register_cxx_method(h_class, RGW_GC_REMOVE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_gc_remove, &h_rgw_gc_remove);
+
+  /* lifecycle bucket list */
+  cls_register_cxx_method(h_class, RGW_LC_GET_ENTRY, CLS_METHOD_RD, rgw_cls_lc_get_entry, &h_rgw_lc_get_entry);
+  cls_register_cxx_method(h_class, RGW_LC_SET_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_lc_set_entry, &h_rgw_lc_set_entry);
+  cls_register_cxx_method(h_class, RGW_LC_RM_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_lc_rm_entry, &h_rgw_lc_rm_entry);
+  cls_register_cxx_method(h_class, RGW_LC_GET_NEXT_ENTRY, CLS_METHOD_RD, rgw_cls_lc_get_next_entry, &h_rgw_lc_get_next_entry);
+  cls_register_cxx_method(h_class, RGW_LC_PUT_HEAD, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_lc_put_head, &h_rgw_lc_put_head);
+  cls_register_cxx_method(h_class, RGW_LC_GET_HEAD, CLS_METHOD_RD, rgw_cls_lc_get_head, &h_rgw_lc_get_head);
+  cls_register_cxx_method(h_class, RGW_LC_LIST_ENTRIES, CLS_METHOD_RD, rgw_cls_lc_list_entries, &h_rgw_lc_list_entries);
+
+  /* multipart */
+  cls_register_cxx_method(h_class, RGW_MP_UPLOAD_PART_INFO_UPDATE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_mp_upload_part_info_update, &h_rgw_mp_upload_part_info_update);
+
+  /* resharding */
+  cls_register_cxx_method(h_class, RGW_RESHARD_ADD, CLS_METHOD_RD | CLS_METHOD_WR, rgw_reshard_add, &h_rgw_reshard_add);
+  cls_register_cxx_method(h_class, RGW_RESHARD_LIST, CLS_METHOD_RD, rgw_reshard_list, &h_rgw_reshard_list);
+  cls_register_cxx_method(h_class, RGW_RESHARD_GET, CLS_METHOD_RD, rgw_reshard_get, &h_rgw_reshard_get);
+  cls_register_cxx_method(h_class, RGW_RESHARD_REMOVE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_reshard_remove, &h_rgw_reshard_remove);
+
+  /* resharding attribute */
+  cls_register_cxx_method(h_class, RGW_SET_BUCKET_RESHARDING, CLS_METHOD_RD | CLS_METHOD_WR,
+                          rgw_set_bucket_resharding, &h_rgw_set_bucket_resharding);
+  cls_register_cxx_method(h_class, RGW_CLEAR_BUCKET_RESHARDING, CLS_METHOD_RD | CLS_METHOD_WR,
+                          rgw_clear_bucket_resharding, &h_rgw_clear_bucket_resharding);
+  cls_register_cxx_method(h_class, RGW_GUARD_BUCKET_RESHARDING, CLS_METHOD_RD,
+                          rgw_guard_bucket_resharding, &h_rgw_guard_bucket_resharding);
+  cls_register_cxx_method(h_class, RGW_GET_BUCKET_RESHARDING, CLS_METHOD_RD,
+                          rgw_get_bucket_resharding, &h_rgw_get_bucket_resharding);
+
+  return;
+}
diff --git a/src/cls/rgw/cls_rgw_client.cc b/src/cls/rgw/cls_rgw_client.cc
new file mode 100644
index 000000000..73a79490a
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_client.cc
@@ -0,0 +1,1221 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <errno.h>
+
+#include "cls/rgw/cls_rgw_const.h"
+#include "cls/rgw/cls_rgw_client.h"
+
+#include "common/debug.h"
+
+using std::list;
+using std::map;
+using std::pair;
+using std::string;
+using std::vector;
+
+using ceph::real_time;
+
+using namespace librados;
+
+const string BucketIndexShardsManager::KEY_VALUE_SEPARATOR = "#";
+const string BucketIndexShardsManager::SHARDS_SEPARATOR = ",";
+
+
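+// The operator() below implements bounded-concurrency dispatch: up to
+// max_aio requests are issued up front, then one new request is issued per
+// drained completion until the shard container (and any retry rounds) is
+// exhausted. A minimal, hypothetical caller sketch (io_ctx and the shard
+// map names are illustrative, not part of this file):
+//
+//   std::map<int, std::string> bucket_objs = {{0, ".dir.abc.0"}, {1, ".dir.abc.1"}};
+//   CLSRGWIssueBucketIndexInit init_io(io_ctx, bucket_objs, 8 /* max_aio */);
+//   int r = init_io();  // 0 on success, or the first fatal error seen
+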
+int CLSRGWConcurrentIO::operator()() {
+  int ret = 0;
+  iter = objs_container.begin();
+  for (; iter != objs_container.end() && max_aio-- > 0; ++iter) {
+    ret = issue_op(iter->first, iter->second);
+    if (ret < 0)
+      break;
+  }
+
+  int num_completions = 0, r = 0;
+  std::map<int, std::string> completed_objs;
+  std::map<int, std::string> retry_objs;
+  while (manager.wait_for_completions(valid_ret_code(), &num_completions, &r,
+                                      need_multiple_rounds() ? &completed_objs : nullptr,
+                                      !need_multiple_rounds() ? &retry_objs : nullptr)) {
+    if (r >= 0 && ret >= 0) {
+      for (; num_completions && iter != objs_container.end(); --num_completions, ++iter) {
+        int issue_ret = issue_op(iter->first, iter->second);
+        if (issue_ret < 0) {
+          ret = issue_ret;
+          break;
+        }
+      }
+    } else if (ret >= 0) {
+      ret = r;
+    }
+
+    // if we're at the end with this round, see if another round is needed
+    if (iter == objs_container.end()) {
+      if (need_multiple_rounds() && !completed_objs.empty()) {
+        // For those objects which need another round, use them to reset
+        // the container
+        reset_container(completed_objs);
+        iter = objs_container.begin();
+      } else if (! need_multiple_rounds() && !retry_objs.empty()) {
+        reset_container(retry_objs);
+        iter = objs_container.begin();
+      }
+
+      // re-issue ops if container was reset above (i.e., iter !=
+      // objs_container.end()); if it was not reset above (i.e., iter
+      // == objs_container.end()) the loop will exit immediately
+      // without iterating
+      for (; num_completions && iter != objs_container.end(); --num_completions, ++iter) {
+        int issue_ret = issue_op(iter->first, iter->second);
+        if (issue_ret < 0) {
+          ret = issue_ret;
+          break;
+        }
+      }
+    }
+  }
+
+  if (ret < 0) {
+    cleanup();
+  }
+  return ret;
+} // CLSRGWConcurrentIO::operator()()
+
+
+/**
+ * This class represents the bucket index object operation callback context.
+ */
+template <typename T>
+class ClsBucketIndexOpCtx : public ObjectOperationCompletion {
+private:
+  T *data;
+  int *ret_code;
+public:
+  ClsBucketIndexOpCtx(T* _data, int *_ret_code) : data(_data), ret_code(_ret_code) { ceph_assert(data); }
+  ~ClsBucketIndexOpCtx() override {}
+  void handle_completion(int r, bufferlist& outbl) override {
+    // if successful, or we're asked for a retry, copy result into
+    // destination (*data)
+    if (r >= 0 || r == RGWBIAdvanceAndRetryError) {
+      try {
+        auto iter = outbl.cbegin();
+        decode((*data), iter);
+      } catch (ceph::buffer::error& err) {
+        r = -EIO;
+      }
+    }
+    if (ret_code) {
+      *ret_code = r;
+    }
+  }
+};
+
+void BucketIndexAioManager::do_completion(const int request_id) {
+  std::lock_guard l{lock};
+
+  auto iter = pendings.find(request_id);
+  ceph_assert(iter != pendings.end());
+  completions[request_id] = iter->second;
+  pendings.erase(iter);
+
+  // If the caller needs a list of finished objects, store them
+  // for further processing
+  auto miter = pending_objs.find(request_id);
+  if (miter != pending_objs.end()) {
+    completion_objs.emplace(request_id, miter->second);
+    pending_objs.erase(miter);
+  }
+
+  cond.notify_all();
+}
+
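+// wait_for_completions() below feeds two distinct follow-up paths in
+// CLSRGWConcurrentIO::operator()(): completed_objs collects shards for
+// multi-round operations (e.g., bilog trim, re-issued until -ENODATA),
+// while retry_objs collects shards whose call returned
+// RGWBIAdvanceAndRetryError and should be re-issued with an advanced marker.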
+bool BucketIndexAioManager::wait_for_completions(int valid_ret_code,
+                                                 int *num_completions,
+                                                 int *ret_code,
+                                                 std::map<int, std::string> *completed_objs,
+                                                 std::map<int, std::string> *retry_objs)
+{
+  std::unique_lock locker{lock};
+  if (pendings.empty() && completions.empty()) {
+    return false;
+  }
+
+  if (completions.empty()) {
+    // Wait for AIO completion
+    cond.wait(locker);
+  }
+
+  // Clear the completed AIOs
+  auto iter = completions.begin();
+  for (; iter != completions.end(); ++iter) {
+    int r = iter->second->get_return_value();
+
+    // see if we may need to copy completions or retries
+    if (completed_objs || retry_objs) {
+      auto liter = completion_objs.find(iter->first);
+      if (liter != completion_objs.end()) {
+        if (completed_objs && r == 0) { /* update list of successfully completed objs */
+          (*completed_objs)[liter->second.shard_id] = liter->second.oid;
+        }
+
+        if (r == RGWBIAdvanceAndRetryError) {
+          r = 0;
+          if (retry_objs) {
+            (*retry_objs)[liter->second.shard_id] = liter->second.oid;
+          }
+        }
+      } else {
+        // NB: should we log an error here? currently there is no
+        // logging context to use
+      }
+    }
+
+    if (ret_code && (r < 0 && r != valid_ret_code)) {
+      (*ret_code) = r;
+    }
+
+    iter->second->release();
+  }
+
+  if (num_completions) {
+    (*num_completions) = completions.size();
+  }
+
+  completions.clear();
+
+  return true;
+}
+
+// note: currently only called by testing code
+void cls_rgw_bucket_init_index(ObjectWriteOperation& o)
+{
+  bufferlist in;
+  o.exec(RGW_CLASS, RGW_BUCKET_INIT_INDEX, in);
+}
+
+static bool issue_bucket_index_init_op(librados::IoCtx& io_ctx,
+                                       const int shard_id,
+                                       const string& oid,
+                                       BucketIndexAioManager *manager) {
+  bufferlist in;
+  librados::ObjectWriteOperation op;
+  op.create(true);
+  op.exec(RGW_CLASS, RGW_BUCKET_INIT_INDEX, in);
+  return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+static bool issue_bucket_index_clean_op(librados::IoCtx& io_ctx,
+                                        const int shard_id,
+                                        const string& oid,
+                                        BucketIndexAioManager *manager) {
+  bufferlist in;
+  librados::ObjectWriteOperation op;
+  op.remove();
+  return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+static bool issue_bucket_set_tag_timeout_op(librados::IoCtx& io_ctx,
+                                            const int shard_id,
+                                            const string& oid,
+                                            uint64_t timeout,
+                                            BucketIndexAioManager *manager) {
+  bufferlist in;
+  rgw_cls_tag_timeout_op call;
+  call.tag_timeout = timeout;
+  encode(call, in);
+  ObjectWriteOperation op;
+  op.exec(RGW_CLASS, RGW_BUCKET_SET_TAG_TIMEOUT, in);
+  return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBucketIndexInit::issue_op(const int shard_id, const string& oid)
+{
+  return issue_bucket_index_init_op(io_ctx, shard_id, oid, &manager);
+}
+
+void CLSRGWIssueBucketIndexInit::cleanup()
+{
+  // Do best effort removal
+  for (auto citer = objs_container.begin(); citer != iter; ++citer) {
+    io_ctx.remove(citer->second);
+  }
+}
+
+int CLSRGWIssueBucketIndexClean::issue_op(const int shard_id, const string& oid)
+{
+  return issue_bucket_index_clean_op(io_ctx, shard_id, oid, &manager);
+}
+
+int CLSRGWIssueSetTagTimeout::issue_op(const int shard_id, const string& oid)
+{
+  return issue_bucket_set_tag_timeout_op(io_ctx, shard_id, oid, tag_timeout, &manager);
+}
+
+void cls_rgw_bucket_update_stats(librados::ObjectWriteOperation& o,
+                                 bool absolute,
+                                 const map<RGWObjCategory, rgw_bucket_category_stats>& stats)
+{
+  rgw_cls_bucket_update_stats_op call;
+  call.absolute = absolute;
+  call.stats = stats;
+  bufferlist in;
+  encode(call, in);
+  o.exec(RGW_CLASS, RGW_BUCKET_UPDATE_STATS, in);
+}
+
+void cls_rgw_bucket_prepare_op(ObjectWriteOperation& o, RGWModifyOp op, const string& tag,
+                               const cls_rgw_obj_key& key, const string& locator, bool log_op,
+                               uint16_t bilog_flags, const rgw_zone_set& zones_trace)
+{
+  rgw_cls_obj_prepare_op call;
+  call.op = op;
+  call.tag = tag;
+  call.key = key;
+  call.locator = locator;
+  call.log_op = log_op;
+  call.bilog_flags = bilog_flags;
+  call.zones_trace = zones_trace;
+  bufferlist in;
+  encode(call, in);
+  o.exec(RGW_CLASS, RGW_BUCKET_PREPARE_OP, in);
+}
+
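+// prepare_op/complete_op form the two-phase bucket index update: the index
+// entry is first marked pending under a caller-chosen tag, the object write
+// is performed, and the same tag is then completed (or cancelled). A minimal
+// sketch under assumed surrounding state (index_oid, tag, key, locator, ver,
+// meta and zones_trace are illustrative names; the RGWModifyOp enumerator
+// CLS_RGW_OP_ADD is assumed from cls_rgw_types):
+//
+//   librados::ObjectWriteOperation prep;
+//   cls_rgw_bucket_prepare_op(prep, CLS_RGW_OP_ADD, tag, key, locator,
+//                             true /* log_op */, 0 /* bilog flags */, zones_trace);
+//   io_ctx.operate(index_oid, &prep);
+//   // ... write the object data ...
+//   librados::ObjectWriteOperation comp;
+//   cls_rgw_bucket_complete_op(comp, CLS_RGW_OP_ADD, tag, ver, key, meta,
+//                              nullptr, true, 0, &zones_trace);
+//   io_ctx.operate(index_oid, &comp);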
+void cls_rgw_bucket_complete_op(ObjectWriteOperation& o, RGWModifyOp op, const string& tag,
+                                const rgw_bucket_entry_ver& ver,
+                                const cls_rgw_obj_key& key,
+                                const rgw_bucket_dir_entry_meta& dir_meta,
+                                const list<cls_rgw_obj_key> *remove_objs, bool log_op,
+                                uint16_t bilog_flags,
+                                const rgw_zone_set *zones_trace)
+{
+  bufferlist in;
+  rgw_cls_obj_complete_op call;
+  call.op = op;
+  call.tag = tag;
+  call.key = key;
+  call.ver = ver;
+  call.meta = dir_meta;
+  call.log_op = log_op;
+  call.bilog_flags = bilog_flags;
+  if (remove_objs)
+    call.remove_objs = *remove_objs;
+  if (zones_trace) {
+    call.zones_trace = *zones_trace;
+  }
+  encode(call, in);
+  o.exec(RGW_CLASS, RGW_BUCKET_COMPLETE_OP, in);
+}
+
+void cls_rgw_bucket_list_op(librados::ObjectReadOperation& op,
+                            const cls_rgw_obj_key& start_obj,
+                            const std::string& filter_prefix,
+                            const std::string& delimiter,
+                            uint32_t num_entries,
+                            bool list_versions,
+                            rgw_cls_list_ret* result)
+{
+  bufferlist in;
+  rgw_cls_list_op call;
+  call.start_obj = start_obj;
+  call.filter_prefix = filter_prefix;
+  call.delimiter = delimiter;
+  call.num_entries = num_entries;
+  call.list_versions = list_versions;
+  encode(call, in);
+
+  op.exec(RGW_CLASS, RGW_BUCKET_LIST, in,
+          new ClsBucketIndexOpCtx<rgw_cls_list_ret>(result, NULL));
+}
+
+static bool issue_bucket_list_op(librados::IoCtx& io_ctx,
+                                 const int shard_id,
+                                 const std::string& oid,
+                                 const cls_rgw_obj_key& start_obj,
+                                 const std::string& filter_prefix,
+                                 const std::string& delimiter,
+                                 uint32_t num_entries,
+                                 bool list_versions,
+                                 BucketIndexAioManager *manager,
+                                 rgw_cls_list_ret *pdata)
+{
+  librados::ObjectReadOperation op;
+  cls_rgw_bucket_list_op(op,
+                         start_obj, filter_prefix, delimiter,
+                         num_entries, list_versions, pdata);
+  return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBucketList::issue_op(const int shard_id, const string& oid)
+{
+  // set the marker depending on whether we've already queried this
+  // shard and gotten an RGWBIAdvanceAndRetryError (a defined
+  // constant) return value; if we have, use the marker in the return
+  // to advance the search, otherwise use the marker passed in by the
+  // caller
+  cls_rgw_obj_key marker;
+  auto iter = result.find(shard_id);
+  if (iter != result.end()) {
+    marker = iter->second.marker;
+  } else {
+    marker = start_obj;
+  }
+
+  return issue_bucket_list_op(io_ctx, shard_id, oid,
+                              marker, filter_prefix, delimiter,
+                              num_entries, list_versions, &manager,
+                              &result[shard_id]);
+}
+
+
+void CLSRGWIssueBucketList::reset_container(std::map<int, std::string>& objs)
+{
+  objs_container.swap(objs);
+  iter = objs_container.begin();
+  objs.clear();
+}
+
+
+void cls_rgw_remove_obj(librados::ObjectWriteOperation& o, list<string>& keep_attr_prefixes)
+{
+  bufferlist in;
+  rgw_cls_obj_remove_op call;
+  call.keep_attr_prefixes = keep_attr_prefixes;
+  encode(call, in);
+  o.exec(RGW_CLASS, RGW_OBJ_REMOVE, in);
+}
+
+void cls_rgw_obj_store_pg_ver(librados::ObjectWriteOperation& o, const string& attr)
+{
+  bufferlist in;
+  rgw_cls_obj_store_pg_ver_op call;
+  call.attr = attr;
+  encode(call, in);
+  o.exec(RGW_CLASS, RGW_OBJ_STORE_PG_VER, in);
+}
+
+void cls_rgw_obj_check_attrs_prefix(librados::ObjectOperation& o, const string& prefix, bool fail_if_exist)
+{
+  bufferlist in;
+  rgw_cls_obj_check_attrs_prefix call;
+  call.check_prefix = prefix;
+  call.fail_if_exist = fail_if_exist;
+  encode(call, in);
+  o.exec(RGW_CLASS, RGW_OBJ_CHECK_ATTRS_PREFIX, in);
+}
+
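+// cls_rgw_obj_check_mtime (below) appends a server-side mtime comparison to
+// a compound operation; if the check does not hold, the remainder of the
+// operation is not applied.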
+void cls_rgw_obj_check_mtime(librados::ObjectOperation& o, const real_time& mtime, bool high_precision_time, RGWCheckMTimeType type)
+{
+  bufferlist in;
+  rgw_cls_obj_check_mtime call;
+  call.mtime = mtime;
+  call.high_precision_time = high_precision_time;
+  call.type = type;
+  encode(call, in);
+  o.exec(RGW_CLASS, RGW_OBJ_CHECK_MTIME, in);
+}
+
+int cls_rgw_bi_get(librados::IoCtx& io_ctx, const string oid,
+                   BIIndexType index_type, const cls_rgw_obj_key& key,
+                   rgw_cls_bi_entry *entry)
+{
+  bufferlist in, out;
+  rgw_cls_bi_get_op call;
+  call.key = key;
+  call.type = index_type;
+  encode(call, in);
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_BI_GET, in, out);
+  if (r < 0)
+    return r;
+
+  rgw_cls_bi_get_ret op_ret;
+  auto iter = out.cbegin();
+  try {
+    decode(op_ret, iter);
+  } catch (ceph::buffer::error& err) {
+    return -EIO;
+  }
+
+  *entry = op_ret.entry;
+
+  return 0;
+}
+
+int cls_rgw_bi_put(librados::IoCtx& io_ctx, const string oid, const rgw_cls_bi_entry& entry)
+{
+  bufferlist in, out;
+  rgw_cls_bi_put_op call;
+  call.entry = entry;
+  encode(call, in);
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_BI_PUT, in, out);
+  if (r < 0)
+    return r;
+
+  return 0;
+}
+
+void cls_rgw_bi_put(ObjectWriteOperation& op, const string oid, const rgw_cls_bi_entry& entry)
+{
+  bufferlist in, out;
+  rgw_cls_bi_put_op call;
+  call.entry = entry;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_BI_PUT, in);
+}
+
+/* nb: any entries passed in are replaced with the results of the cls
+ * call, so the caller does not need to clear entries between calls
+ */
+int cls_rgw_bi_list(librados::IoCtx& io_ctx, const std::string& oid,
+                    const std::string& name_filter, const std::string& marker, uint32_t max,
+                    std::list<rgw_cls_bi_entry> *entries, bool *is_truncated)
+{
+  bufferlist in, out;
+  rgw_cls_bi_list_op call;
+  call.name_filter = name_filter;
+  call.marker = marker;
+  call.max = max;
+  encode(call, in);
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_BI_LIST, in, out);
+  if (r < 0)
+    return r;
+
+  rgw_cls_bi_list_ret op_ret;
+  auto iter = out.cbegin();
+  try {
+    decode(op_ret, iter);
+  } catch (ceph::buffer::error& err) {
+    return -EIO;
+  }
+
+  entries->swap(op_ret.entries);
+  *is_truncated = op_ret.is_truncated;
+
+  return 0;
+}
+
+int cls_rgw_bucket_link_olh(librados::IoCtx& io_ctx, const string& oid,
+                            const cls_rgw_obj_key& key, const bufferlist& olh_tag,
+                            bool delete_marker, const string& op_tag, const rgw_bucket_dir_entry_meta *meta,
+                            uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, const rgw_zone_set& zones_trace)
+{
+  librados::ObjectWriteOperation op;
+  cls_rgw_bucket_link_olh(op, key, olh_tag, delete_marker, op_tag, meta,
+                          olh_epoch, unmod_since, high_precision_time, log_op,
+                          zones_trace);
+
+  return io_ctx.operate(oid, &op);
+}
+
+
+void cls_rgw_bucket_link_olh(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& key,
+                             const bufferlist& olh_tag, bool delete_marker,
+                             const string& op_tag, const rgw_bucket_dir_entry_meta *meta,
+                             uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, const rgw_zone_set& zones_trace)
+{
+  bufferlist in, out;
+  rgw_cls_link_olh_op call;
+  call.key = key;
+  call.olh_tag = olh_tag.to_str();
+  call.op_tag = op_tag;
+  call.delete_marker = delete_marker;
+  if (meta) {
+    call.meta = *meta;
+  }
+  call.olh_epoch = olh_epoch;
+  call.log_op = log_op;
+  call.unmod_since = unmod_since;
+  call.high_precision_time = high_precision_time;
+  call.zones_trace = zones_trace;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_BUCKET_LINK_OLH, in);
+}
+
+int cls_rgw_bucket_unlink_instance(librados::IoCtx& io_ctx, const string& oid,
+                                   const cls_rgw_obj_key& key, const string& op_tag,
+                                   const string& olh_tag, uint64_t olh_epoch, bool log_op, const rgw_zone_set& zones_trace)
+{
+  librados::ObjectWriteOperation op;
+  cls_rgw_bucket_unlink_instance(op, key, op_tag, olh_tag, olh_epoch, log_op, zones_trace);
+  int r = io_ctx.operate(oid, &op);
+  if (r < 0)
+    return r;
+
+  return 0;
+}
+
+void cls_rgw_bucket_unlink_instance(librados::ObjectWriteOperation& op,
+                                    const cls_rgw_obj_key& key, const string& op_tag,
+                                    const string& olh_tag, uint64_t olh_epoch, bool log_op, const rgw_zone_set& zones_trace)
+{
+  bufferlist in, out;
+  rgw_cls_unlink_instance_op call;
+  call.key = key;
+  call.op_tag = op_tag;
+  call.olh_epoch = olh_epoch;
+  call.olh_tag = olh_tag;
+  call.log_op = log_op;
+  call.zones_trace = zones_trace;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_BUCKET_UNLINK_INSTANCE, in);
+}
+
+void cls_rgw_get_olh_log(librados::ObjectReadOperation& op, const cls_rgw_obj_key& olh, uint64_t ver_marker, const string& olh_tag, rgw_cls_read_olh_log_ret& log_ret, int& op_ret)
+{
+  bufferlist in;
+  rgw_cls_read_olh_log_op call;
+  call.olh = olh;
+  call.ver_marker = ver_marker;
+  call.olh_tag = olh_tag;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_BUCKET_READ_OLH_LOG, in, new ClsBucketIndexOpCtx<rgw_cls_read_olh_log_ret>(&log_ret, &op_ret));
+}
+
+int cls_rgw_get_olh_log(IoCtx& io_ctx, string& oid, const cls_rgw_obj_key& olh, uint64_t ver_marker,
+                        const string& olh_tag,
+                        rgw_cls_read_olh_log_ret& log_ret)
+{
+  int op_ret = 0;
+  librados::ObjectReadOperation op;
+  cls_rgw_get_olh_log(op, olh, ver_marker, olh_tag, log_ret, op_ret);
+  int r = io_ctx.operate(oid, &op, NULL);
+  if (r < 0) {
+    return r;
+  }
+  if (op_ret < 0) {
+    return op_ret;
+  }
+
+  return r;
+}
+
+void cls_rgw_trim_olh_log(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, uint64_t ver, const string& olh_tag)
+{
+  bufferlist in;
+  rgw_cls_trim_olh_log_op call;
+  call.olh = olh;
+  call.ver = ver;
+  call.olh_tag = olh_tag;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_BUCKET_TRIM_OLH_LOG, in);
+}
+
+int cls_rgw_clear_olh(IoCtx& io_ctx, string& oid, const cls_rgw_obj_key& olh, const string& olh_tag)
+{
+  librados::ObjectWriteOperation op;
+  cls_rgw_clear_olh(op, olh, olh_tag);
+
+  return io_ctx.operate(oid, &op);
+}
+
+void cls_rgw_clear_olh(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, const string& olh_tag)
+{
+  bufferlist in;
+  rgw_cls_bucket_clear_olh_op call;
+  call.key = olh;
+  call.olh_tag = olh_tag;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_BUCKET_CLEAR_OLH, in);
+}
+
+void cls_rgw_bilog_list(librados::ObjectReadOperation& op,
+                        const std::string& marker, uint32_t max,
+                        cls_rgw_bi_log_list_ret *pdata, int *ret)
+{
+  cls_rgw_bi_log_list_op call;
+  call.marker = marker;
+  call.max = max;
+
+  bufferlist in;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_BI_LOG_LIST, in, new ClsBucketIndexOpCtx<cls_rgw_bi_log_list_ret>(pdata, ret));
+}
+
+static bool issue_bi_log_list_op(librados::IoCtx& io_ctx, const string& oid, const int shard_id,
+                                 BucketIndexShardsManager& marker_mgr, uint32_t max,
+                                 BucketIndexAioManager *manager,
+                                 cls_rgw_bi_log_list_ret *pdata)
+{
+  librados::ObjectReadOperation op;
+  cls_rgw_bilog_list(op, marker_mgr.get(shard_id, ""), max, pdata, nullptr);
+  return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBILogList::issue_op(const int shard_id, const string& oid)
+{
+  return issue_bi_log_list_op(io_ctx, oid, shard_id, marker_mgr, max, &manager, &result[shard_id]);
+}
+
+void cls_rgw_bilog_trim(librados::ObjectWriteOperation& op,
+                        const std::string& start_marker,
+                        const std::string& end_marker)
+{
+  cls_rgw_bi_log_trim_op call;
+  call.start_marker = start_marker;
+  call.end_marker = end_marker;
+
+  bufferlist in;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_BI_LOG_TRIM, in);
+}
+
+static bool issue_bi_log_trim(librados::IoCtx& io_ctx, const string& oid, const int shard_id,
+                              BucketIndexShardsManager& start_marker_mgr,
+                              BucketIndexShardsManager& end_marker_mgr, BucketIndexAioManager *manager) {
+  librados::ObjectWriteOperation op;
+  cls_rgw_bilog_trim(op, start_marker_mgr.get(shard_id, ""),
+                     end_marker_mgr.get(shard_id, ""));
+  return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBILogTrim::issue_op(const int shard_id, const string& oid)
+{
+  return issue_bi_log_trim(io_ctx, oid, shard_id, start_marker_mgr, end_marker_mgr, &manager);
+}
+
+static bool issue_bucket_check_index_op(IoCtx& io_ctx, const int shard_id, const string& oid, BucketIndexAioManager *manager,
+                                        rgw_cls_check_index_ret *pdata) {
+  bufferlist in;
+  librados::ObjectReadOperation op;
+  op.exec(RGW_CLASS, RGW_BUCKET_CHECK_INDEX, in, new ClsBucketIndexOpCtx<rgw_cls_check_index_ret>(
+          pdata, NULL));
+  return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBucketCheck::issue_op(int shard_id, const string& oid)
+{
+  return issue_bucket_check_index_op(io_ctx, shard_id, oid, &manager, &result[shard_id]);
+}
+
+static bool issue_bucket_rebuild_index_op(IoCtx& io_ctx, const int shard_id, const string& oid,
+                                          BucketIndexAioManager *manager) {
+  bufferlist in;
+  librados::ObjectWriteOperation op;
+  op.exec(RGW_CLASS, RGW_BUCKET_REBUILD_INDEX, in);
+  return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBucketRebuild::issue_op(const int shard_id, const string& oid)
+{
+  return issue_bucket_rebuild_index_op(io_ctx, shard_id, oid, &manager);
+}
+
+void cls_rgw_encode_suggestion(char op, rgw_bucket_dir_entry& dirent, bufferlist& updates)
+{
+  updates.append(op);
+  encode(dirent, updates);
+}
+
+void cls_rgw_suggest_changes(ObjectWriteOperation& o, bufferlist& updates)
+{
+  o.exec(RGW_CLASS, RGW_DIR_SUGGEST_CHANGES, updates);
+}
+
+int CLSRGWIssueGetDirHeader::issue_op(const int shard_id, const string& oid)
+{
+  cls_rgw_obj_key empty_key;
+  string empty_prefix;
+  string empty_delimiter;
+  return issue_bucket_list_op(io_ctx, shard_id, oid,
+                              empty_key, empty_prefix, empty_delimiter,
+                              0, false, &manager, &result[shard_id]);
+}
+
+static bool issue_resync_bi_log(librados::IoCtx& io_ctx, const int shard_id, const string& oid, BucketIndexAioManager *manager)
+{
+  bufferlist in;
+  librados::ObjectWriteOperation op;
+  op.exec(RGW_CLASS, RGW_BI_LOG_RESYNC, in);
+  return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueResyncBucketBILog::issue_op(const int shard_id, const string& oid)
+{
+  return issue_resync_bi_log(io_ctx, shard_id, oid, &manager);
+}
+
+static bool issue_bi_log_stop(librados::IoCtx& io_ctx, const int shard_id, const string& oid, BucketIndexAioManager *manager)
+{
+  bufferlist in;
+  librados::ObjectWriteOperation op;
+  op.exec(RGW_CLASS, RGW_BI_LOG_STOP, in);
+  return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBucketBILogStop::issue_op(const int shard_id, const string& oid)
+{
+  return issue_bi_log_stop(io_ctx, shard_id, oid, &manager);
+}
+
+class GetDirHeaderCompletion : public ObjectOperationCompletion {
+  RGWGetDirHeader_CB *ret_ctx;
+public:
+  explicit GetDirHeaderCompletion(RGWGetDirHeader_CB *_ctx) : ret_ctx(_ctx) {}
+  ~GetDirHeaderCompletion() override {
+    ret_ctx->put();
+  }
+  void handle_completion(int r, bufferlist& outbl) override {
+    rgw_cls_list_ret ret;
+    try {
+      auto iter = outbl.cbegin();
+      decode(ret, iter);
+    } catch (ceph::buffer::error& err) {
+      r = -EIO;
+    }
+
+    ret_ctx->handle_response(r, ret.dir.header);
+  }
+};
+
+int cls_rgw_get_dir_header_async(IoCtx& io_ctx, string& oid, RGWGetDirHeader_CB *ctx)
+{
+  bufferlist in, out;
+  rgw_cls_list_op call;
+  call.num_entries = 0;
+  encode(call, in);
+  ObjectReadOperation op;
+  GetDirHeaderCompletion *cb = new GetDirHeaderCompletion(ctx);
+  op.exec(RGW_CLASS, RGW_BUCKET_LIST, in, cb);
+  AioCompletion *c = librados::Rados::aio_create_completion(nullptr, nullptr);
+  int r = io_ctx.aio_operate(oid, c, &op, NULL);
+  c->release();
+  if (r < 0)
+    return r;
+
+  return 0;
+}
+
+int cls_rgw_usage_log_read(IoCtx& io_ctx, const string& oid, const string& user, const string& bucket,
+                           uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
+                           string& read_iter, map<rgw_user_bucket, rgw_usage_log_entry>& usage,
+                           bool *is_truncated)
+{
+  if (is_truncated)
+    *is_truncated = false;
+
+  bufferlist in, out;
+  rgw_cls_usage_log_read_op call;
+  call.start_epoch = start_epoch;
+  call.end_epoch = end_epoch;
+  call.owner = user;
+  call.max_entries = max_entries;
+  call.bucket = bucket;
+  call.iter = read_iter;
+  encode(call, in);
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_USER_USAGE_LOG_READ, in, out);
+  if (r < 0)
+    return r;
+
+  try {
+    rgw_cls_usage_log_read_ret result;
+    auto iter = out.cbegin();
+    decode(result, iter);
+    read_iter = result.next_iter;
+    if (is_truncated)
+      *is_truncated = result.truncated;
+
+    usage = result.usage;
+  } catch (ceph::buffer::error& e) {
+    return -EINVAL;
+  }
+
+  return 0;
+}
+
+int cls_rgw_usage_log_trim(IoCtx& io_ctx, const string& oid, const string& user, const string& bucket,
+                           uint64_t start_epoch, uint64_t end_epoch)
+{
+  bufferlist in;
+  rgw_cls_usage_log_trim_op call;
+  call.start_epoch = start_epoch;
+  call.end_epoch = end_epoch;
+  call.user = user;
+  call.bucket = bucket;
+  encode(call, in);
+
+  bool done = false;
+  do {
+    ObjectWriteOperation op;
+    op.exec(RGW_CLASS, RGW_USER_USAGE_LOG_TRIM, in);
+    int r = io_ctx.operate(oid, &op);
+    if (r == -ENODATA)
+      done = true;
+    else if (r < 0)
+      return r;
+  } while (!done);
+
+  return 0;
+}
+
+void cls_rgw_usage_log_trim(librados::ObjectWriteOperation& op, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch)
+{
+  bufferlist in;
+  rgw_cls_usage_log_trim_op call;
+  call.start_epoch = start_epoch;
+  call.end_epoch = end_epoch;
+  call.user = user;
+  call.bucket = bucket;
+  encode(call, in);
+
+  op.exec(RGW_CLASS, RGW_USER_USAGE_LOG_TRIM, in);
+}
+
+void cls_rgw_usage_log_clear(ObjectWriteOperation& op)
+{
+  bufferlist in;
+  op.exec(RGW_CLASS, RGW_USAGE_LOG_CLEAR, in);
+}
+
+void cls_rgw_usage_log_add(ObjectWriteOperation& op, rgw_usage_log_info& info)
+{
+  bufferlist in;
+  rgw_cls_usage_log_add_op call;
+  call.info = info;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_USER_USAGE_LOG_ADD, in);
+}
+
+/* garbage collection */
+
+void cls_rgw_gc_set_entry(ObjectWriteOperation& op, uint32_t expiration_secs, cls_rgw_gc_obj_info& info)
+{
+  bufferlist in;
+  cls_rgw_gc_set_entry_op call;
+  call.expiration_secs = expiration_secs;
+  call.info = info;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_GC_SET_ENTRY, in);
+}
+
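+// cls_rgw_gc_defer_entry (below) pushes the expiration of an existing tagged
+// GC entry further into the future, keeping the underlying data alive while
+// it is still being read.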
+void cls_rgw_gc_defer_entry(ObjectWriteOperation& op, uint32_t expiration_secs, const string& tag)
+{
+  bufferlist in;
+  cls_rgw_gc_defer_entry_op call;
+  call.expiration_secs = expiration_secs;
+  call.tag = tag;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_GC_DEFER_ENTRY, in);
+}
+
+int cls_rgw_gc_list(IoCtx& io_ctx, string& oid, string& marker, uint32_t max, bool expired_only,
+                    list<cls_rgw_gc_obj_info>& entries, bool *truncated, string& next_marker)
+{
+  bufferlist in, out;
+  cls_rgw_gc_list_op call;
+  call.marker = marker;
+  call.max = max;
+  call.expired_only = expired_only;
+  encode(call, in);
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_GC_LIST, in, out);
+  if (r < 0)
+    return r;
+
+  cls_rgw_gc_list_ret ret;
+  try {
+    auto iter = out.cbegin();
+    decode(ret, iter);
+  } catch (ceph::buffer::error& err) {
+    return -EIO;
+  }
+
+  entries.swap(ret.entries);
+
+  if (truncated)
+    *truncated = ret.truncated;
+  next_marker = std::move(ret.next_marker);
+  return r;
+}
+
+void cls_rgw_gc_remove(librados::ObjectWriteOperation& op, const vector<string>& tags)
+{
+  bufferlist in;
+  cls_rgw_gc_remove_op call;
+  call.tags = tags;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_GC_REMOVE, in);
+}
+
+int cls_rgw_lc_get_head(IoCtx& io_ctx, const string& oid, cls_rgw_lc_obj_head& head)
+{
+  bufferlist in, out;
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_GET_HEAD, in, out);
+  if (r < 0)
+    return r;
+
+  cls_rgw_lc_get_head_ret ret;
+  try {
+    auto iter = out.cbegin();
+    decode(ret, iter);
+  } catch (ceph::buffer::error& err) {
+    return -EIO;
+  }
+  head = ret.head;
+
+  return r;
+}
+
+int cls_rgw_lc_put_head(IoCtx& io_ctx, const string& oid, cls_rgw_lc_obj_head& head)
+{
+  bufferlist in, out;
+  cls_rgw_lc_put_head_op call;
+  call.head = head;
+  encode(call, in);
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_PUT_HEAD, in, out);
+  return r;
+}
+
+int cls_rgw_lc_get_next_entry(IoCtx& io_ctx, const string& oid, const string& marker,
+                              cls_rgw_lc_entry& entry)
+{
+  bufferlist in, out;
+  cls_rgw_lc_get_next_entry_op call;
+  call.marker = marker;
+  encode(call, in);
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_GET_NEXT_ENTRY, in, out);
+  if (r < 0)
+    return r;
+
+  cls_rgw_lc_get_next_entry_ret ret;
+  try {
+    auto iter = out.cbegin();
+    decode(ret, iter);
+  } catch (ceph::buffer::error& err) {
+    return -EIO;
+  }
+  entry = ret.entry;
+
+  return r;
+}
+
+int cls_rgw_lc_rm_entry(IoCtx& io_ctx, const string& oid,
+                        const cls_rgw_lc_entry& entry)
+{
+  bufferlist in, out;
+  cls_rgw_lc_rm_entry_op call;
+  call.entry = entry;
+  encode(call, in);
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_RM_ENTRY, in, out);
+  return r;
+}
+
+int cls_rgw_lc_set_entry(IoCtx& io_ctx, const string& oid,
+                         const cls_rgw_lc_entry& entry)
+{
+  bufferlist in, out;
+  cls_rgw_lc_set_entry_op call;
+  call.entry = entry;
+  encode(call, in);
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_SET_ENTRY, in, out);
+  return r;
+}
+
+int cls_rgw_lc_get_entry(IoCtx& io_ctx, const string& oid,
+                         const std::string& marker, cls_rgw_lc_entry& entry)
+{
+  bufferlist in, out;
+  cls_rgw_lc_get_entry_op call{marker};
+  encode(call, in);
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_GET_ENTRY, in, out);
+
+  if (r < 0) {
+    return r;
+  }
+
+  cls_rgw_lc_get_entry_ret ret;
+  try {
+    auto iter = out.cbegin();
+    decode(ret, iter);
+  } catch (ceph::buffer::error& err) {
+    return -EIO;
+  }
+
+  entry = std::move(ret.entry);
+  return r;
+}
+
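+// cls_rgw_lc_list (below) returns up to max_entries lifecycle entries,
+// sorted by bucket name on the client side. A minimal sketch (io_ctx and
+// oid are assumed to exist; the marker handling mirrors the other list
+// calls in this file):
+//
+//   std::vector<cls_rgw_lc_entry> entries;
+//   std::string marker;  // empty: start from the beginning of the shard
+//   int r = cls_rgw_lc_list(io_ctx, oid, marker, 100, entries);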
+int cls_rgw_lc_list(IoCtx& io_ctx, const string& oid,
+                    const string& marker,
+                    uint32_t max_entries,
+                    vector<cls_rgw_lc_entry>& entries)
+{
+  bufferlist in, out;
+  cls_rgw_lc_list_entries_op op;
+
+  entries.clear();
+
+  op.marker = marker;
+  op.max_entries = max_entries;
+
+  encode(op, in);
+
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_LIST_ENTRIES, in, out);
+  if (r < 0)
+    return r;
+
+  cls_rgw_lc_list_entries_ret ret;
+  try {
+    auto iter = out.cbegin();
+    decode(ret, iter);
+  } catch (ceph::buffer::error& err) {
+    return -EIO;
+  }
+
+  std::sort(std::begin(ret.entries), std::end(ret.entries),
+            [](const cls_rgw_lc_entry& a, const cls_rgw_lc_entry& b)
+            { return a.bucket < b.bucket; });
+  entries = std::move(ret.entries);
+  return r;
+}
+
+void cls_rgw_mp_upload_part_info_update(librados::ObjectWriteOperation& op,
+                                        const std::string& part_key,
+                                        const RGWUploadPartInfo& info)
+{
+  cls_rgw_mp_upload_part_info_update_op call;
+  call.part_key = part_key;
+  call.info = info;
+
+  buffer::list in;
+  encode(call, in);
+
+  op.exec(RGW_CLASS, RGW_MP_UPLOAD_PART_INFO_UPDATE, in);
+}
+
+void cls_rgw_reshard_add(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry)
+{
+  bufferlist in;
+  cls_rgw_reshard_add_op call;
+  call.entry = entry;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_RESHARD_ADD, in);
+}
+
+int cls_rgw_reshard_list(librados::IoCtx& io_ctx, const string& oid, string& marker, uint32_t max,
+                         list<cls_rgw_reshard_entry>& entries, bool* is_truncated)
+{
+  bufferlist in, out;
+  cls_rgw_reshard_list_op call;
+  call.marker = marker;
+  call.max = max;
+  encode(call, in);
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_RESHARD_LIST, in, out);
+  if (r < 0)
+    return r;
+
+  cls_rgw_reshard_list_ret op_ret;
+  auto iter = out.cbegin();
+  try {
+    decode(op_ret, iter);
+  } catch (ceph::buffer::error& err) {
+    return -EIO;
+  }
+
+  entries.swap(op_ret.entries);
+  *is_truncated = op_ret.is_truncated;
+
+  return 0;
+}
+
+int cls_rgw_reshard_get(librados::IoCtx& io_ctx, const string& oid, cls_rgw_reshard_entry& entry)
+{
+  bufferlist in, out;
+  cls_rgw_reshard_get_op call;
+  call.entry = entry;
+  encode(call, in);
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_RESHARD_GET, in, out);
+  if (r < 0)
+    return r;
+
+  cls_rgw_reshard_get_ret op_ret;
+  auto iter = out.cbegin();
+  try {
+    decode(op_ret, iter);
+  } catch (ceph::buffer::error& err) {
+    return -EIO;
+  }
+
+  entry = op_ret.entry;
+
+  return 0;
+}
+
+void cls_rgw_reshard_remove(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry)
+{
+  bufferlist in;
+  cls_rgw_reshard_remove_op call;
+  call.tenant = entry.tenant;
+  call.bucket_name = entry.bucket_name;
+  call.bucket_id = entry.bucket_id;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_RESHARD_REMOVE, in);
+}
+
+int cls_rgw_set_bucket_resharding(librados::IoCtx& io_ctx, const string& oid,
+                                  const cls_rgw_bucket_instance_entry& entry)
+{
+  bufferlist in, out;
+  cls_rgw_set_bucket_resharding_op call;
+  call.entry = entry;
+  encode(call, in);
+  return io_ctx.exec(oid, RGW_CLASS, RGW_SET_BUCKET_RESHARDING, in, out);
+}
+
+int cls_rgw_clear_bucket_resharding(librados::IoCtx& io_ctx, const string& oid)
+{
+  bufferlist in, out;
+  cls_rgw_clear_bucket_resharding_op call;
+  encode(call, in);
+  return io_ctx.exec(oid, RGW_CLASS, RGW_CLEAR_BUCKET_RESHARDING, in, out);
+}
+
+int cls_rgw_get_bucket_resharding(librados::IoCtx& io_ctx, const string& oid,
+                                  cls_rgw_bucket_instance_entry *entry)
+{
+  bufferlist in, out;
+  cls_rgw_get_bucket_resharding_op call;
+  encode(call, in);
+  int r = io_ctx.exec(oid, RGW_CLASS, RGW_GET_BUCKET_RESHARDING, in, out);
+  if (r < 0)
+    return r;
+
+  cls_rgw_get_bucket_resharding_ret op_ret;
+  auto iter = out.cbegin();
+  try {
+    decode(op_ret, iter);
+  } catch (ceph::buffer::error& err) {
+    return -EIO;
+  }
+
+  *entry = op_ret.new_instance;
+
+  return 0;
+}
+
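+// The guard below is meant to be bundled into bucket index write operations
+// so they fail early with the caller-chosen error while the bucket is
+// resharding. A hypothetical sketch (-EBUSY is an illustrative choice of
+// error code, not the one RGW itself passes):
+//
+//   librados::ObjectWriteOperation op;
+//   cls_rgw_guard_bucket_resharding(op, -EBUSY);
+//   // ... append the guarded index update to the same op ...
+//   io_ctx.operate(index_oid, &op);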
+void cls_rgw_guard_bucket_resharding(librados::ObjectOperation& op, int ret_err)
+{
+  bufferlist in, out;
+  cls_rgw_guard_bucket_resharding_op call;
+  call.ret_err = ret_err;
+  encode(call, in);
+  op.exec(RGW_CLASS, RGW_GUARD_BUCKET_RESHARDING, in);
+}
+
+static bool issue_set_bucket_resharding(librados::IoCtx& io_ctx,
+                                        const int shard_id, const string& oid,
+                                        const cls_rgw_bucket_instance_entry& entry,
+                                        BucketIndexAioManager *manager) {
+  bufferlist in;
+  cls_rgw_set_bucket_resharding_op call;
+  call.entry = entry;
+  encode(call, in);
+  librados::ObjectWriteOperation op;
+  op.assert_exists(); // the shard must exist; if not fail rather than recreate
+  op.exec(RGW_CLASS, RGW_SET_BUCKET_RESHARDING, in);
+  return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueSetBucketResharding::issue_op(const int shard_id, const string& oid)
+{
+  return issue_set_bucket_resharding(io_ctx, shard_id, oid, entry, &manager);
+}
diff --git a/src/cls/rgw/cls_rgw_client.h b/src/cls/rgw/cls_rgw_client.h
new file mode 100644
index 000000000..139dbdb19
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_client.h
@@ -0,0 +1,645 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#include "include/str_list.h"
+#include "include/rados/librados.hpp"
+#include "cls_rgw_ops.h"
+#include "cls_rgw_const.h"
+#include "common/RefCountedObj.h"
+#include "common/strtol.h"
+#include "include/compat.h"
+#include "common/ceph_time.h"
+#include "common/ceph_mutex.h"
+
+
+// Forward declaration
+class BucketIndexAioManager;
+/*
+ * Bucket index AIO request argument; this is used to pass an argument
+ * to the callback.
+ */
+struct BucketIndexAioArg : public RefCountedObject {
+  BucketIndexAioArg(int _id, BucketIndexAioManager* _manager) :
+    id(_id), manager(_manager) {}
+  int id;
+  BucketIndexAioManager* manager;
+};
+
+/*
+ * This class manages AIO completions. This class is not completely
+ * thread-safe; methods like *get_next_request_id* are not thread-safe
+ * and are expected to be called from within one thread.
+ */
+class BucketIndexAioManager {
+public:
+
+  // allows us to reaccess the shard id and shard's oid during and
+  // after the asynchronous call is made
+  struct RequestObj {
+    int shard_id;
+    std::string oid;
+
+    RequestObj(int _shard_id, const std::string& _oid) :
+      shard_id(_shard_id), oid(_oid)
+    {/* empty */}
+  };
+
+
+private:
+  // NB: the following 4 maps use the request_id as the key; this
+  // is not the same as the shard_id!
+  std::map<int, librados::AioCompletion*> pendings;
+  std::map<int, librados::AioCompletion*> completions;
+  std::map<int, const RequestObj> pending_objs;
+  std::map<int, const RequestObj> completion_objs;
+
+  int next = 0;
+  ceph::mutex lock = ceph::make_mutex("BucketIndexAioManager::lock");
+  ceph::condition_variable cond;
+  /*
+   * Callback implementation for AIO request.
+   */
+  static void bucket_index_op_completion_cb(void* cb, void* arg) {
+    BucketIndexAioArg* cb_arg = (BucketIndexAioArg*) arg;
+    cb_arg->manager->do_completion(cb_arg->id);
+    cb_arg->put();
+  }
+
+  /*
+   * Get next request ID. This method is not thread-safe.
+   *
+   * Return next request ID.
+   */
+  int get_next_request_id() { return next++; }
+
+  /*
+   * Add a new pending AIO completion instance.
+   *
+   * @param request_id - the request ID.
+   * @param completion - the AIO completion instance.
+   * @param shard_id - the shard id the request was issued against.
+   * @param oid - the object id associated with the request.
+   */
+  void add_pending(int request_id, librados::AioCompletion* completion, const int shard_id, const std::string& oid) {
+    pendings[request_id] = completion;
+    pending_objs.emplace(request_id, RequestObj(shard_id, oid));
+  }
+
+public:
+  /*
+   * Create a new instance.
+   */
+  BucketIndexAioManager() = default;
+
+  /*
+   * Do completion for the given AIO request.
+   */
+  void do_completion(int request_id);
+
+  /*
+   * Wait for AIO completions.
+   *
+   * valid_ret_code - valid AIO return code.
+   * num_completions - number of completions.
+   * ret_code - return code of failed AIO.
+   * completed_objs/retry_objs - maps of objects for which the AIO has finished.
+   *
+   * Return false if there is no pending AIO, true otherwise.
+   */
+  bool wait_for_completions(int valid_ret_code,
+                            int *num_completions = nullptr,
+                            int *ret_code = nullptr,
+                            std::map<int, std::string> *completed_objs = nullptr,
+                            std::map<int, std::string> *retry_objs = nullptr);
+
+  /**
+   * Do aio read operation.
+   */
+  bool aio_operate(librados::IoCtx& io_ctx, const int shard_id, const std::string& oid, librados::ObjectReadOperation *op) {
+    std::lock_guard l{lock};
+    const int request_id = get_next_request_id();
+    BucketIndexAioArg *arg = new BucketIndexAioArg(request_id, this);
+    librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, bucket_index_op_completion_cb);
+    int r = io_ctx.aio_operate(oid, c, (librados::ObjectReadOperation*)op, NULL);
+    if (r >= 0) {
+      add_pending(arg->id, c, shard_id, oid);
+    } else {
+      arg->put();
+      c->release();
+    }
+    return r;
+  }
+
+  /**
+   * Do aio write operation.
+   */
+  bool aio_operate(librados::IoCtx& io_ctx, const int shard_id, const std::string& oid, librados::ObjectWriteOperation *op) {
+    std::lock_guard l{lock};
+    const int request_id = get_next_request_id();
+    BucketIndexAioArg *arg = new BucketIndexAioArg(request_id, this);
+    librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, bucket_index_op_completion_cb);
+    int r = io_ctx.aio_operate(oid, c, (librados::ObjectWriteOperation*)op);
+    if (r >= 0) {
+      add_pending(arg->id, c, shard_id, oid);
+    } else {
+      arg->put();
+      c->release();
+    }
+    return r;
+  }
+};
+
+class RGWGetDirHeader_CB : public RefCountedObject {
+public:
+  ~RGWGetDirHeader_CB() override {}
+  virtual void handle_response(int r, rgw_bucket_dir_header& header) = 0;
+};
+
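+// BucketIndexShardsManager (below) keeps one string value per shard,
+// typically a listing or bilog marker, and (de)serializes the composed form
+// "<shard>#<value>,<shard>#<value>", e.g. "0#00002.12,1#00003.23.2".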
+class BucketIndexShardsManager {
+private:
+  // Per-shard settings (for example, markers), keyed by shard id.
+  std::map<int, std::string> value_by_shards;
+public:
+  const static std::string KEY_VALUE_SEPARATOR;
+  const static std::string SHARDS_SEPARATOR;
+
+  void add(int shard, const std::string& value) {
+    value_by_shards[shard] = value;
+  }
+
+  const std::string& get(int shard, const std::string& default_value) const {
+    auto iter = value_by_shards.find(shard);
+    return (iter == value_by_shards.end() ? default_value : iter->second);
+  }
+
+  const std::map<int, std::string>& get() const {
+    return value_by_shards;
+  }
+  std::map<int, std::string>& get() {
+    return value_by_shards;
+  }
+
+  bool empty() const {
+    return value_by_shards.empty();
+  }
+
+  void to_string(std::string *out) const {
+    if (!out) {
+      return;
+    }
+    out->clear();
+    for (auto iter = value_by_shards.begin();
+         iter != value_by_shards.end(); ++iter) {
+      if (out->length()) {
+        // Not the first item, append a separator first
+        out->append(SHARDS_SEPARATOR);
+      }
+      char buf[16];
+      snprintf(buf, sizeof(buf), "%d", iter->first);
+      out->append(buf);
+      out->append(KEY_VALUE_SEPARATOR);
+      out->append(iter->second);
+    }
+  }
+
+  static bool is_shards_marker(const std::string& marker) {
+    return marker.find(KEY_VALUE_SEPARATOR) != std::string::npos;
+  }
+
+  /*
+   * convert from std::string. The std::string can take two forms:
+   *
+   * 1. Single shard, no shard id specified, e.g. 000001.23.1
+   *
+   * for this case, if the passed shard_id >= 0, use this shard id, otherwise assume that it's a
+   * bucket with no shards.
+   *
+   * 2. One or more shards, shard id specified for each shard, e.g., 0#00002.12,1#00003.23.2
+   *
+   */
+  int from_string(std::string_view composed_marker, int shard_id) {
+    value_by_shards.clear();
+    std::vector<std::string> shards;
+    get_str_vec(composed_marker, SHARDS_SEPARATOR.c_str(), shards);
+    if (shards.size() > 1 && shard_id >= 0) {
+      return -EINVAL;
+    }
+    for (auto iter = shards.begin(); iter != shards.end(); ++iter) {
+      size_t pos = iter->find(KEY_VALUE_SEPARATOR);
+      if (pos == std::string::npos) {
+        if (!value_by_shards.empty()) {
+          return -EINVAL;
+        }
+        if (shard_id < 0) {
+          add(0, *iter);
+        } else {
+          add(shard_id, *iter);
+        }
+        return 0;
+      }
+      std::string shard_str = iter->substr(0, pos);
+      std::string err;
+      int shard = (int)strict_strtol(shard_str.c_str(), 10, &err);
+      if (!err.empty()) {
+        return -EINVAL;
+      }
+      add(shard, iter->substr(pos + 1));
+    }
+    return 0;
+  }
+
+  // trim the '<shard-id>#' prefix from a single shard marker if present
+  static std::string get_shard_marker(const std::string& marker) {
+    auto p = marker.find(KEY_VALUE_SEPARATOR);
+    if (p == marker.npos) {
+      return marker;
+    }
+    return marker.substr(p + 1);
+  }
+};
+
+/* bucket index */
+void cls_rgw_bucket_init_index(librados::ObjectWriteOperation& o);
+
+class CLSRGWConcurrentIO {
+protected:
+  librados::IoCtx& io_ctx;
+
+  // map of shard # to oid; the shards that are remaining to be processed
+  std::map<int, std::string>& objs_container;
+  // iterator to work through objs_container
+  std::map<int, std::string>::iterator iter;
+
+  uint32_t max_aio;
+  BucketIndexAioManager manager;
+
+  virtual int issue_op(int shard_id, const std::string& oid) = 0;
+
+  virtual void cleanup() {}
+  virtual int valid_ret_code() { return 0; }
+  // Return true if multiple rounds of OPs might be needed; this happens when
+  // an OP needs to be re-sent until a certain code is returned.
+  virtual bool need_multiple_rounds() { return false; }
+  // Add a new object to the end of the container.
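+  // (add_object() and reset_container() are overridden together with
+  // need_multiple_rounds() by multi-round operations such as bilog trim.)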
+  virtual void add_object(int shard, const std::string& oid) {}
+  virtual void reset_container(std::map<int, std::string>& objs) {}
+
+public:
+
+  CLSRGWConcurrentIO(librados::IoCtx& ioc,
+                     std::map<int, std::string>& _objs_container,
+                     uint32_t _max_aio) :
+    io_ctx(ioc), objs_container(_objs_container), max_aio(_max_aio)
+  {}
+
+  virtual ~CLSRGWConcurrentIO() {}
+
+  int operator()();
+}; // class CLSRGWConcurrentIO
+
+
+class CLSRGWIssueBucketIndexInit : public CLSRGWConcurrentIO {
+protected:
+  int issue_op(int shard_id, const std::string& oid) override;
+  int valid_ret_code() override { return -EEXIST; }
+  void cleanup() override;
+public:
+  CLSRGWIssueBucketIndexInit(librados::IoCtx& ioc,
+                             std::map<int, std::string>& _bucket_objs,
+                             uint32_t _max_aio) :
+    CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio) {}
+  virtual ~CLSRGWIssueBucketIndexInit() override {}
+};
+
+
+class CLSRGWIssueBucketIndexClean : public CLSRGWConcurrentIO {
+protected:
+  int issue_op(int shard_id, const std::string& oid) override;
+  int valid_ret_code() override {
+    return -ENOENT;
+  }
+
+public:
+  CLSRGWIssueBucketIndexClean(librados::IoCtx& ioc,
+                              std::map<int, std::string>& _bucket_objs,
+                              uint32_t _max_aio) :
+    CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio)
+  {}
+  virtual ~CLSRGWIssueBucketIndexClean() override {}
+};
+
+
+class CLSRGWIssueSetTagTimeout : public CLSRGWConcurrentIO {
+  uint64_t tag_timeout;
+protected:
+  int issue_op(int shard_id, const std::string& oid) override;
+public:
+  CLSRGWIssueSetTagTimeout(librados::IoCtx& ioc, std::map<int, std::string>& _bucket_objs,
+                           uint32_t _max_aio, uint64_t _tag_timeout) :
+    CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), tag_timeout(_tag_timeout) {}
+  virtual ~CLSRGWIssueSetTagTimeout() override {}
+};
+
+void cls_rgw_bucket_update_stats(librados::ObjectWriteOperation& o,
+                                 bool absolute,
+                                 const std::map<RGWObjCategory, rgw_bucket_category_stats>& stats);
+
+void cls_rgw_bucket_prepare_op(librados::ObjectWriteOperation& o, RGWModifyOp op, const std::string& tag,
+                               const cls_rgw_obj_key& key, const std::string& locator, bool log_op,
+                               uint16_t bilog_op, const rgw_zone_set& zones_trace);
+
+void cls_rgw_bucket_complete_op(librados::ObjectWriteOperation& o, RGWModifyOp op, const std::string& tag,
+                                const rgw_bucket_entry_ver& ver,
+                                const cls_rgw_obj_key& key,
+                                const rgw_bucket_dir_entry_meta& dir_meta,
+                                const std::list<cls_rgw_obj_key> *remove_objs, bool log_op,
+                                uint16_t bilog_op, const rgw_zone_set *zones_trace);
+
+void cls_rgw_remove_obj(librados::ObjectWriteOperation& o, std::list<std::string>& keep_attr_prefixes);
+void cls_rgw_obj_store_pg_ver(librados::ObjectWriteOperation& o, const std::string& attr);
+void cls_rgw_obj_check_attrs_prefix(librados::ObjectOperation& o, const std::string& prefix, bool fail_if_exist);
+void cls_rgw_obj_check_mtime(librados::ObjectOperation& o, const ceph::real_time& mtime, bool high_precision_time, RGWCheckMTimeType type);
+
+int cls_rgw_bi_get(librados::IoCtx& io_ctx, const std::string oid,
+                   BIIndexType index_type, const cls_rgw_obj_key& key,
+                   rgw_cls_bi_entry *entry);
+int cls_rgw_bi_put(librados::IoCtx& io_ctx, const std::string oid, const rgw_cls_bi_entry& entry);
+void cls_rgw_bi_put(librados::ObjectWriteOperation& op, const std::string oid, const rgw_cls_bi_entry& entry);
+int cls_rgw_bi_list(librados::IoCtx& io_ctx, const std::string& oid,
+                    const std::string& name, const std::string& marker, uint32_t max,
+                    std::list<rgw_cls_bi_entry> *entries, bool *is_truncated);
+
+
+void cls_rgw_bucket_link_olh(librados::ObjectWriteOperation& op,
+                             const cls_rgw_obj_key& key, const ceph::buffer::list& olh_tag,
+                             bool delete_marker, const std::string& op_tag, const rgw_bucket_dir_entry_meta *meta,
+                             uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, const rgw_zone_set& zones_trace);
+void cls_rgw_bucket_unlink_instance(librados::ObjectWriteOperation& op,
+                                    const cls_rgw_obj_key& key, const std::string& op_tag,
+                                    const std::string& olh_tag, uint64_t olh_epoch, bool log_op, const rgw_zone_set& zones_trace);
+void cls_rgw_get_olh_log(librados::ObjectReadOperation& op, const cls_rgw_obj_key& olh, uint64_t ver_marker, const std::string& olh_tag, rgw_cls_read_olh_log_ret& log_ret, int& op_ret);
+void cls_rgw_trim_olh_log(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, uint64_t ver, const std::string& olh_tag);
+void cls_rgw_clear_olh(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, const std::string& olh_tag);
+
+// these overloads which call io_ctx.operate() should not be called in the rgw.
+// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
+#ifndef CLS_CLIENT_HIDE_IOCTX
+int cls_rgw_bucket_link_olh(librados::IoCtx& io_ctx, const std::string& oid,
+                            const cls_rgw_obj_key& key, const ceph::buffer::list& olh_tag,
+                            bool delete_marker, const std::string& op_tag, const rgw_bucket_dir_entry_meta *meta,
+                            uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, const rgw_zone_set& zones_trace);
+int cls_rgw_bucket_unlink_instance(librados::IoCtx& io_ctx, const std::string& oid,
+                                   const cls_rgw_obj_key& key, const std::string& op_tag,
+                                   const std::string& olh_tag, uint64_t olh_epoch, bool log_op, const rgw_zone_set& zones_trace);
+int cls_rgw_get_olh_log(librados::IoCtx& io_ctx, std::string& oid, const cls_rgw_obj_key& olh, uint64_t ver_marker,
+                        const std::string& olh_tag, rgw_cls_read_olh_log_ret& log_ret);
+int cls_rgw_clear_olh(librados::IoCtx& io_ctx, std::string& oid, const cls_rgw_obj_key& olh, const std::string& olh_tag);
+int cls_rgw_usage_log_trim(librados::IoCtx& io_ctx, const std::string& oid, const std::string& user, const std::string& bucket,
+                           uint64_t start_epoch, uint64_t end_epoch);
+#endif
+
+
+/**
+ * List the bucket with the starting object and filter prefix.
+ * NOTE: this method does listing requests for each bucket index shard
+ * identified by the keys of the *list_results* std::map, which means the
+ * std::map should be populated by the caller with each bucket index object id.
+ *
+ * io_ctx - IO context for rados.
+ * start_obj - marker for the listing.
+ * filter_prefix - filter prefix.
+ * num_entries - number of entries to request for each object (note the total
+ *               amount of entries returned depends on the number of shards).
+ * list_results - the list results keyed by bucket index object id.
+ * max_aio - the maximum number of AIO (for throttling).
+ *
+ * Return 0 on success, a failure code otherwise.
+*/
+
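+// A minimal sketch of a sharded listing (names are illustrative; oids must
+// be pre-populated with shard_id -> shard oid by the caller):
+//
+//   std::map<int, std::string> oids = /* shard_id -> shard oid */;
+//   std::map<int, rgw_cls_list_ret> results;
+//   int r = CLSRGWIssueBucketList(io_ctx, start_key, "" /* prefix */,
+//                                 "" /* delimiter */, 100, false /* versions */,
+//                                 oids, results, 8 /* max_aio */)();
+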
+*/ + +class CLSRGWIssueBucketList : public CLSRGWConcurrentIO { + cls_rgw_obj_key start_obj; + std::string filter_prefix; + std::string delimiter; + uint32_t num_entries; + bool list_versions; + std::map<int, rgw_cls_list_ret>& result; // request_id -> return value + +protected: + int issue_op(int shard_id, const std::string& oid) override; + void reset_container(std::map<int, std::string>& objs) override; + +public: + CLSRGWIssueBucketList(librados::IoCtx& io_ctx, + const cls_rgw_obj_key& _start_obj, + const std::string& _filter_prefix, + const std::string& _delimiter, + uint32_t _num_entries, + bool _list_versions, + std::map<int, std::string>& oids, // shard_id -> shard_oid + // shard_id -> return value + std::map<int, rgw_cls_list_ret>& list_results, + uint32_t max_aio) : + CLSRGWConcurrentIO(io_ctx, oids, max_aio), + start_obj(_start_obj), filter_prefix(_filter_prefix), delimiter(_delimiter), + num_entries(_num_entries), list_versions(_list_versions), + result(list_results) + {} +}; + +void cls_rgw_bucket_list_op(librados::ObjectReadOperation& op, + const cls_rgw_obj_key& start_obj, + const std::string& filter_prefix, + const std::string& delimiter, + uint32_t num_entries, + bool list_versions, + rgw_cls_list_ret* result); + +void cls_rgw_bilog_list(librados::ObjectReadOperation& op, + const std::string& marker, uint32_t max, + cls_rgw_bi_log_list_ret *pdata, int *ret = nullptr); + +class CLSRGWIssueBILogList : public CLSRGWConcurrentIO { + std::map<int, cls_rgw_bi_log_list_ret>& result; + BucketIndexShardsManager& marker_mgr; + uint32_t max; +protected: + int issue_op(int shard_id, const std::string& oid) override; +public: + CLSRGWIssueBILogList(librados::IoCtx& io_ctx, BucketIndexShardsManager& _marker_mgr, uint32_t _max, + std::map<int, std::string>& oids, + std::map<int, cls_rgw_bi_log_list_ret>& bi_log_lists, uint32_t max_aio) : + CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(bi_log_lists), + marker_mgr(_marker_mgr), max(_max) {} + virtual ~CLSRGWIssueBILogList() override {} +}; + +void cls_rgw_bilog_trim(librados::ObjectWriteOperation& op, + const std::string& start_marker, + const std::string& end_marker); + +class CLSRGWIssueBILogTrim : public CLSRGWConcurrentIO { + BucketIndexShardsManager& start_marker_mgr; + BucketIndexShardsManager& end_marker_mgr; +protected: + int issue_op(int shard_id, const std::string& oid) override; + // Trim until -ENODATA is returned. + int valid_ret_code() override { return -ENODATA; } + bool need_multiple_rounds() override { return true; } + void add_object(int shard, const std::string& oid) override { objs_container[shard] = oid; } + void reset_container(std::map<int, std::string>& objs) override { + objs_container.swap(objs); + iter = objs_container.begin(); + objs.clear(); + } +public: + CLSRGWIssueBILogTrim(librados::IoCtx& io_ctx, BucketIndexShardsManager& _start_marker_mgr, + BucketIndexShardsManager& _end_marker_mgr, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) : + CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio), + start_marker_mgr(_start_marker_mgr), end_marker_mgr(_end_marker_mgr) {} + virtual ~CLSRGWIssueBILogTrim() override {} +}; + +/** + * Check the bucket index. + * + * io_ctx - IO context for rados. + * bucket_objs_ret - check result for all shards. + * max_aio - the maximum number of AIO (for throttling). + * + * Return 0 on success, a failure code otherwise. 
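+ *
+ * A minimal sketch of the intended use (illustrative only):
+ *
+ *   std::map<int, std::string> oids = ...;  // shard_id -> shard oid
+ *   std::map<int, rgw_cls_check_index_ret> check_results;
+ *   int r = CLSRGWIssueBucketCheck(io_ctx, oids, check_results, 8)();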
+ */
+class CLSRGWIssueBucketCheck : public CLSRGWConcurrentIO {
+  std::map<int, rgw_cls_check_index_ret>& result;
+protected:
+  int issue_op(int shard_id, const std::string& oid) override;
+public:
+  CLSRGWIssueBucketCheck(librados::IoCtx& ioc, std::map<int, std::string>& oids,
+                         std::map<int, rgw_cls_check_index_ret>& bucket_objs_ret,
+                         uint32_t _max_aio) :
+    CLSRGWConcurrentIO(ioc, oids, _max_aio), result(bucket_objs_ret) {}
+  virtual ~CLSRGWIssueBucketCheck() override {}
+};
+
+class CLSRGWIssueBucketRebuild : public CLSRGWConcurrentIO {
+protected:
+  int issue_op(int shard_id, const std::string& oid) override;
+public:
+  CLSRGWIssueBucketRebuild(librados::IoCtx& io_ctx, std::map<int, std::string>& bucket_objs,
+                           uint32_t max_aio) : CLSRGWConcurrentIO(io_ctx, bucket_objs, max_aio) {}
+  virtual ~CLSRGWIssueBucketRebuild() override {}
+};
+
+class CLSRGWIssueGetDirHeader : public CLSRGWConcurrentIO {
+  std::map<int, rgw_cls_list_ret>& result;
+protected:
+  int issue_op(int shard_id, const std::string& oid) override;
+public:
+  CLSRGWIssueGetDirHeader(librados::IoCtx& io_ctx, std::map<int, std::string>& oids, std::map<int, rgw_cls_list_ret>& dir_headers,
+                          uint32_t max_aio) :
+    CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(dir_headers) {}
+  virtual ~CLSRGWIssueGetDirHeader() override {}
+};
+
+class CLSRGWIssueSetBucketResharding : public CLSRGWConcurrentIO {
+  cls_rgw_bucket_instance_entry entry;
+protected:
+  int issue_op(int shard_id, const std::string& oid) override;
+public:
+  CLSRGWIssueSetBucketResharding(librados::IoCtx& ioc, std::map<int, std::string>& _bucket_objs,
+                                 const cls_rgw_bucket_instance_entry& _entry,
+                                 uint32_t _max_aio) : CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), entry(_entry) {}
+  virtual ~CLSRGWIssueSetBucketResharding() override {}
+};
+
+class CLSRGWIssueResyncBucketBILog : public CLSRGWConcurrentIO {
+protected:
+  int issue_op(int shard_id, const std::string& oid) override;
+public:
+  CLSRGWIssueResyncBucketBILog(librados::IoCtx& io_ctx, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
+    CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
+  virtual ~CLSRGWIssueResyncBucketBILog() override {}
+};
+
+class CLSRGWIssueBucketBILogStop : public CLSRGWConcurrentIO {
+protected:
+  int issue_op(int shard_id, const std::string& oid) override;
+public:
+  CLSRGWIssueBucketBILogStop(librados::IoCtx& io_ctx, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
+    CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
+  virtual ~CLSRGWIssueBucketBILogStop() override {}
+};
+
+int cls_rgw_get_dir_header_async(librados::IoCtx& io_ctx, std::string& oid, RGWGetDirHeader_CB *ctx);
+
+void cls_rgw_encode_suggestion(char op, rgw_bucket_dir_entry& dirent, ceph::buffer::list& updates);
+
+void cls_rgw_suggest_changes(librados::ObjectWriteOperation& o, ceph::buffer::list& updates);
+
+/* usage logging */
+// these overloads which call io_ctx.operate() should not be called in the rgw.
+// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate() +#ifndef CLS_CLIENT_HIDE_IOCTX +int cls_rgw_usage_log_read(librados::IoCtx& io_ctx, const std::string& oid, const std::string& user, const std::string& bucket, + uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, std::string& read_iter, + std::map<rgw_user_bucket, rgw_usage_log_entry>& usage, bool *is_truncated); +#endif + +void cls_rgw_usage_log_trim(librados::ObjectWriteOperation& op, const std::string& user, const std::string& bucket, uint64_t start_epoch, uint64_t end_epoch); + +void cls_rgw_usage_log_clear(librados::ObjectWriteOperation& op); +void cls_rgw_usage_log_add(librados::ObjectWriteOperation& op, rgw_usage_log_info& info); + +/* garbage collection */ +void cls_rgw_gc_set_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, cls_rgw_gc_obj_info& info); +void cls_rgw_gc_defer_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, const std::string& tag); +void cls_rgw_gc_remove(librados::ObjectWriteOperation& op, const std::vector<std::string>& tags); + +// these overloads which call io_ctx.operate() should not be called in the rgw. +// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate() +#ifndef CLS_CLIENT_HIDE_IOCTX +int cls_rgw_gc_list(librados::IoCtx& io_ctx, std::string& oid, std::string& marker, uint32_t max, bool expired_only, + std::list<cls_rgw_gc_obj_info>& entries, bool *truncated, std::string& next_marker); +#endif + +/* lifecycle */ +// these overloads which call io_ctx.operate() should not be called in the rgw. +// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate() +#ifndef CLS_CLIENT_HIDE_IOCTX +int cls_rgw_lc_get_head(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_lc_obj_head& head); +int cls_rgw_lc_put_head(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_lc_obj_head& head); +int cls_rgw_lc_get_next_entry(librados::IoCtx& io_ctx, const std::string& oid, const std::string& marker, cls_rgw_lc_entry& entry); +int cls_rgw_lc_rm_entry(librados::IoCtx& io_ctx, const std::string& oid, const cls_rgw_lc_entry& entry); +int cls_rgw_lc_set_entry(librados::IoCtx& io_ctx, const std::string& oid, const cls_rgw_lc_entry& entry); +int cls_rgw_lc_get_entry(librados::IoCtx& io_ctx, const std::string& oid, const std::string& marker, cls_rgw_lc_entry& entry); +int cls_rgw_lc_list(librados::IoCtx& io_ctx, const std::string& oid, + const std::string& marker, uint32_t max_entries, + std::vector<cls_rgw_lc_entry>& entries); +#endif + +/* multipart */ +void cls_rgw_mp_upload_part_info_update(librados::ObjectWriteOperation& op, const std::string& part_key, const RGWUploadPartInfo& info); + +/* resharding */ +void cls_rgw_reshard_add(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry); +void cls_rgw_reshard_remove(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry); +// these overloads which call io_ctx.operate() should not be called in the rgw. 
+// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
+#ifndef CLS_CLIENT_HIDE_IOCTX
+int cls_rgw_reshard_list(librados::IoCtx& io_ctx, const std::string& oid, std::string& marker, uint32_t max,
+                         std::list<cls_rgw_reshard_entry>& entries, bool* is_truncated);
+int cls_rgw_reshard_get(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_reshard_entry& entry);
+#endif
+
+/* resharding attribute on bucket index shard headers */
+void cls_rgw_guard_bucket_resharding(librados::ObjectOperation& op, int ret_err);
+// these overloads which call io_ctx.operate() should not be called in the rgw.
+// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
+#ifndef CLS_CLIENT_HIDE_IOCTX
+int cls_rgw_set_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid,
+                                  const cls_rgw_bucket_instance_entry& entry);
+int cls_rgw_clear_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid);
+int cls_rgw_get_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid,
+                                  cls_rgw_bucket_instance_entry *entry);
+#endif
diff --git a/src/cls/rgw/cls_rgw_const.h b/src/cls/rgw/cls_rgw_const.h
new file mode 100644
index 000000000..8595db3c9
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_const.h
@@ -0,0 +1,80 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#define RGW_CLASS "rgw"
+
+/* Special error code returned by cls bucket list operation if it was
+ * unable to skip past enough not-visible entries to return any
+ * entries in the call. */
+constexpr int RGWBIAdvanceAndRetryError = -EFBIG;
+
+/* bucket index */
+#define RGW_BUCKET_INIT_INDEX "bucket_init_index"
+
+
+#define RGW_BUCKET_SET_TAG_TIMEOUT "bucket_set_tag_timeout"
+#define RGW_BUCKET_LIST "bucket_list"
+#define RGW_BUCKET_CHECK_INDEX "bucket_check_index"
+#define RGW_BUCKET_REBUILD_INDEX "bucket_rebuild_index"
+#define RGW_BUCKET_UPDATE_STATS "bucket_update_stats"
+#define RGW_BUCKET_PREPARE_OP "bucket_prepare_op"
+#define RGW_BUCKET_COMPLETE_OP "bucket_complete_op"
+#define RGW_BUCKET_LINK_OLH "bucket_link_olh"
+#define RGW_BUCKET_UNLINK_INSTANCE "bucket_unlink_instance"
+#define RGW_BUCKET_READ_OLH_LOG "bucket_read_olh_log"
+#define RGW_BUCKET_TRIM_OLH_LOG "bucket_trim_olh_log"
+#define RGW_BUCKET_CLEAR_OLH "bucket_clear_olh"
+
+#define RGW_OBJ_REMOVE "obj_remove"
+#define RGW_OBJ_STORE_PG_VER "obj_store_pg_ver"
+#define RGW_OBJ_CHECK_ATTRS_PREFIX "obj_check_attrs_prefix"
+#define RGW_OBJ_CHECK_MTIME "obj_check_mtime"
+
+#define RGW_BI_GET "bi_get"
+#define RGW_BI_PUT "bi_put"
+#define RGW_BI_LIST "bi_list"
+
+#define RGW_BI_LOG_LIST "bi_log_list"
+#define RGW_BI_LOG_TRIM "bi_log_trim"
+#define RGW_DIR_SUGGEST_CHANGES "dir_suggest_changes"
+
+#define RGW_BI_LOG_RESYNC "bi_log_resync"
+#define RGW_BI_LOG_STOP "bi_log_stop"
+
+/* usage logging */
+#define RGW_USER_USAGE_LOG_ADD "user_usage_log_add"
+#define RGW_USER_USAGE_LOG_READ "user_usage_log_read"
+#define RGW_USER_USAGE_LOG_TRIM "user_usage_log_trim"
+#define RGW_USAGE_LOG_CLEAR "usage_log_clear"
+
+/* garbage collection */
+#define RGW_GC_SET_ENTRY "gc_set_entry"
+#define RGW_GC_DEFER_ENTRY "gc_defer_entry"
+#define RGW_GC_LIST "gc_list"
+#define RGW_GC_REMOVE "gc_remove"
+
+/* lifecycle bucket list */
+#define RGW_LC_GET_ENTRY "lc_get_entry"
+#define RGW_LC_SET_ENTRY "lc_set_entry"
+#define RGW_LC_RM_ENTRY "lc_rm_entry"
+#define RGW_LC_GET_NEXT_ENTRY "lc_get_next_entry"
+#define RGW_LC_PUT_HEAD "lc_put_head"
+#define RGW_LC_GET_HEAD
"lc_get_head" +#define RGW_LC_LIST_ENTRIES "lc_list_entries" + +/* multipart */ +#define RGW_MP_UPLOAD_PART_INFO_UPDATE "mp_upload_part_info_update" + +/* resharding */ +#define RGW_RESHARD_ADD "reshard_add" +#define RGW_RESHARD_LIST "reshard_list" +#define RGW_RESHARD_GET "reshard_get" +#define RGW_RESHARD_REMOVE "reshard_remove" + +/* resharding attribute */ +#define RGW_SET_BUCKET_RESHARDING "set_bucket_resharding" +#define RGW_CLEAR_BUCKET_RESHARDING "clear_bucket_resharding" +#define RGW_GUARD_BUCKET_RESHARDING "guard_bucket_resharding" +#define RGW_GET_BUCKET_RESHARDING "get_bucket_resharding" diff --git a/src/cls/rgw/cls_rgw_ops.cc b/src/cls/rgw/cls_rgw_ops.cc new file mode 100644 index 000000000..15bcba333 --- /dev/null +++ b/src/cls/rgw/cls_rgw_ops.cc @@ -0,0 +1,573 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "cls/rgw/cls_rgw_ops.h" + +#include "common/Formatter.h" +#include "common/ceph_json.h" +#include "include/utime.h" + +using std::list; +using std::map; + +using ceph::Formatter; + +void rgw_cls_tag_timeout_op::dump(Formatter *f) const +{ + f->dump_int("tag_timeout", tag_timeout); +} + +void rgw_cls_tag_timeout_op::generate_test_instances(list<rgw_cls_tag_timeout_op*>& ls) +{ + ls.push_back(new rgw_cls_tag_timeout_op); + ls.push_back(new rgw_cls_tag_timeout_op); + ls.back()->tag_timeout = 23323; +} + +void cls_rgw_gc_set_entry_op::dump(Formatter *f) const +{ + f->dump_unsigned("expiration_secs", expiration_secs); + f->open_object_section("obj_info"); + info.dump(f); + f->close_section(); +} + +void cls_rgw_gc_set_entry_op::generate_test_instances(list<cls_rgw_gc_set_entry_op*>& ls) +{ + ls.push_back(new cls_rgw_gc_set_entry_op); + ls.push_back(new cls_rgw_gc_set_entry_op); + ls.back()->expiration_secs = 123; +} + +void cls_rgw_gc_defer_entry_op::dump(Formatter *f) const +{ + f->dump_unsigned("expiration_secs", expiration_secs); + f->dump_string("tag", tag); +} + +void cls_rgw_gc_defer_entry_op::generate_test_instances(list<cls_rgw_gc_defer_entry_op*>& ls) +{ + ls.push_back(new cls_rgw_gc_defer_entry_op); + ls.push_back(new cls_rgw_gc_defer_entry_op); + ls.back()->expiration_secs = 123; + ls.back()->tag = "footag"; +} + +void cls_rgw_gc_list_op::dump(Formatter *f) const +{ + f->dump_string("marker", marker); + f->dump_unsigned("max", max); + f->dump_bool("expired_only", expired_only); +} + +void cls_rgw_gc_list_op::generate_test_instances(list<cls_rgw_gc_list_op*>& ls) +{ + ls.push_back(new cls_rgw_gc_list_op); + ls.push_back(new cls_rgw_gc_list_op); + ls.back()->marker = "mymarker"; + ls.back()->max = 2312; +} + +void cls_rgw_gc_list_ret::dump(Formatter *f) const +{ + encode_json("entries", entries, f); + f->dump_string("next_marker", next_marker); + f->dump_int("truncated", (int)truncated); +} + +void cls_rgw_gc_list_ret::generate_test_instances(list<cls_rgw_gc_list_ret*>& ls) +{ + ls.push_back(new cls_rgw_gc_list_ret); + ls.push_back(new cls_rgw_gc_list_ret); + ls.back()->entries.push_back(cls_rgw_gc_obj_info()); + ls.back()->truncated = true; +} + +void cls_rgw_gc_remove_op::dump(Formatter *f) const +{ + encode_json("tags", tags, f); +} + +void cls_rgw_gc_remove_op::generate_test_instances(list<cls_rgw_gc_remove_op*>& ls) +{ + ls.push_back(new cls_rgw_gc_remove_op); + ls.push_back(new cls_rgw_gc_remove_op); + ls.back()->tags.push_back("tag1"); + ls.back()->tags.push_back("tag2"); +} + +void cls_rgw_lc_get_entry_ret::dump(Formatter *f) const +{ + encode_json("entry", entry, f); +} + +void 
cls_rgw_lc_get_entry_ret::generate_test_instances(list<cls_rgw_lc_get_entry_ret*>& ls) +{ + cls_rgw_lc_entry entry("bucket1", 6000, 0); + ls.push_back(new cls_rgw_lc_get_entry_ret); + ls.back()->entry = entry; +} + +void rgw_cls_obj_prepare_op::generate_test_instances(list<rgw_cls_obj_prepare_op*>& o) +{ + rgw_cls_obj_prepare_op *op = new rgw_cls_obj_prepare_op; + op->op = CLS_RGW_OP_ADD; + op->key.name = "name"; + op->tag = "tag"; + op->locator = "locator"; + o.push_back(op); + o.push_back(new rgw_cls_obj_prepare_op); +} + +void rgw_cls_obj_prepare_op::dump(Formatter *f) const +{ + f->dump_int("op", op); + f->dump_string("name", key.name); + f->dump_string("tag", tag); + f->dump_string("locator", locator); + f->dump_bool("log_op", log_op); + f->dump_int("bilog_flags", bilog_flags); + encode_json("zones_trace", zones_trace, f); +} + +void rgw_cls_obj_complete_op::generate_test_instances(list<rgw_cls_obj_complete_op*>& o) +{ + rgw_cls_obj_complete_op *op = new rgw_cls_obj_complete_op; + op->op = CLS_RGW_OP_DEL; + op->key.name = "name"; + op->locator = "locator"; + op->ver.pool = 2; + op->ver.epoch = 100; + op->tag = "tag"; + + list<rgw_bucket_dir_entry_meta *> l; + rgw_bucket_dir_entry_meta::generate_test_instances(l); + auto iter = l.begin(); + op->meta = *(*iter); + + o.push_back(op); + + o.push_back(new rgw_cls_obj_complete_op); +} + +void rgw_cls_obj_complete_op::dump(Formatter *f) const +{ + f->dump_int("op", (int)op); + f->dump_string("name", key.name); + f->dump_string("instance", key.instance); + f->dump_string("locator", locator); + f->open_object_section("ver"); + ver.dump(f); + f->close_section(); + f->open_object_section("meta"); + meta.dump(f); + f->close_section(); + f->dump_string("tag", tag); + f->dump_bool("log_op", log_op); + f->dump_int("bilog_flags", bilog_flags); + encode_json("zones_trace", zones_trace, f); +} + +void rgw_cls_link_olh_op::generate_test_instances(list<rgw_cls_link_olh_op*>& o) +{ + rgw_cls_link_olh_op *op = new rgw_cls_link_olh_op; + op->key.name = "name"; + op->olh_tag = "olh_tag"; + op->delete_marker = true; + op->op_tag = "op_tag"; + op->olh_epoch = 123; + list<rgw_bucket_dir_entry_meta *> l; + rgw_bucket_dir_entry_meta::generate_test_instances(l); + auto iter = l.begin(); + op->meta = *(*iter); + op->log_op = true; + + o.push_back(op); + + o.push_back(new rgw_cls_link_olh_op); +} + +void rgw_cls_link_olh_op::dump(Formatter *f) const +{ + encode_json("key", key, f); + encode_json("olh_tag", olh_tag, f); + encode_json("delete_marker", delete_marker, f); + encode_json("op_tag", op_tag, f); + encode_json("meta", meta, f); + encode_json("olh_epoch", olh_epoch, f); + encode_json("log_op", log_op, f); + encode_json("bilog_flags", (uint32_t)bilog_flags, f); + utime_t ut(unmod_since); + encode_json("unmod_since", ut, f); + encode_json("high_precision_time", high_precision_time, f); + encode_json("zones_trace", zones_trace, f); +} + +void rgw_cls_unlink_instance_op::generate_test_instances(list<rgw_cls_unlink_instance_op*>& o) +{ + rgw_cls_unlink_instance_op *op = new rgw_cls_unlink_instance_op; + op->key.name = "name"; + op->op_tag = "op_tag"; + op->olh_epoch = 124; + op->log_op = true; + + o.push_back(op); + + o.push_back(new rgw_cls_unlink_instance_op); +} + +void rgw_cls_unlink_instance_op::dump(Formatter *f) const +{ + encode_json("key", key, f); + encode_json("op_tag", op_tag, f); + encode_json("olh_epoch", olh_epoch, f); + encode_json("log_op", log_op, f); + encode_json("bilog_flags", (uint32_t)bilog_flags, f); + encode_json("zones_trace", zones_trace, 
f); +} + +void rgw_cls_read_olh_log_op::generate_test_instances(list<rgw_cls_read_olh_log_op*>& o) +{ + rgw_cls_read_olh_log_op *op = new rgw_cls_read_olh_log_op; + op->olh.name = "name"; + op->ver_marker = 123; + op->olh_tag = "olh_tag"; + + o.push_back(op); + + o.push_back(new rgw_cls_read_olh_log_op); +} + +void rgw_cls_read_olh_log_op::dump(Formatter *f) const +{ + encode_json("olh", olh, f); + encode_json("ver_marker", ver_marker, f); + encode_json("olh_tag", olh_tag, f); +} + +void rgw_cls_read_olh_log_ret::generate_test_instances(list<rgw_cls_read_olh_log_ret*>& o) +{ + rgw_cls_read_olh_log_ret *r = new rgw_cls_read_olh_log_ret; + r->is_truncated = true; + list<rgw_bucket_olh_log_entry *> l; + rgw_bucket_olh_log_entry::generate_test_instances(l); + auto iter = l.begin(); + r->log[1].push_back(*(*iter)); + + o.push_back(r); + + o.push_back(new rgw_cls_read_olh_log_ret); +} + +void rgw_cls_read_olh_log_ret::dump(Formatter *f) const +{ + encode_json("log", log, f); + encode_json("is_truncated", is_truncated, f); +} + +void rgw_cls_trim_olh_log_op::generate_test_instances(list<rgw_cls_trim_olh_log_op*>& o) +{ + rgw_cls_trim_olh_log_op *op = new rgw_cls_trim_olh_log_op; + op->olh.name = "olh.name"; + op->ver = 100; + op->olh_tag = "olh_tag"; + + o.push_back(op); + + o.push_back(new rgw_cls_trim_olh_log_op); +} + +void rgw_cls_trim_olh_log_op::dump(Formatter *f) const +{ + encode_json("olh", olh, f); + encode_json("ver", ver, f); + encode_json("olh_tag", olh_tag, f); +} + +void rgw_cls_bucket_clear_olh_op::generate_test_instances(list<rgw_cls_bucket_clear_olh_op *>& o) +{ + + rgw_cls_bucket_clear_olh_op *op = new rgw_cls_bucket_clear_olh_op; + op->key.name = "key.name"; + op->olh_tag = "olh_tag"; + + o.push_back(op); + o.push_back(new rgw_cls_bucket_clear_olh_op); +} + +void rgw_cls_bucket_clear_olh_op::dump(Formatter *f) const +{ + encode_json("key", key, f); + encode_json("olh_tag", olh_tag, f); +} + +void rgw_cls_list_op::generate_test_instances(list<rgw_cls_list_op*>& o) +{ + rgw_cls_list_op *op = new rgw_cls_list_op; + op->start_obj.name = "start_obj"; + op->num_entries = 100; + op->filter_prefix = "filter_prefix"; + o.push_back(op); + o.push_back(new rgw_cls_list_op); +} + +void rgw_cls_list_op::dump(Formatter *f) const +{ + f->dump_string("start_obj", start_obj.name); + f->dump_unsigned("num_entries", num_entries); +} + +void rgw_cls_list_ret::generate_test_instances(list<rgw_cls_list_ret*>& o) +{ + list<rgw_bucket_dir *> l; + rgw_bucket_dir::generate_test_instances(l); + for (auto iter = l.begin(); iter != l.end(); ++iter) { + rgw_bucket_dir *d = *iter; + + rgw_cls_list_ret *ret = new rgw_cls_list_ret; + ret->dir = *d; + ret->is_truncated = true; + + o.push_back(ret); + + delete d; + } + + o.push_back(new rgw_cls_list_ret); +} + +void rgw_cls_list_ret::dump(Formatter *f) const +{ + f->open_object_section("dir"); + dir.dump(f); + f->close_section(); + f->dump_int("is_truncated", (int)is_truncated); +} + +void rgw_cls_check_index_ret::generate_test_instances(list<rgw_cls_check_index_ret*>& o) +{ + list<rgw_bucket_dir_header *> h; + rgw_bucket_dir_header::generate_test_instances(h); + rgw_cls_check_index_ret *r = new rgw_cls_check_index_ret; + r->existing_header = *(h.front()); + r->calculated_header = *(h.front()); + o.push_back(r); + + for (auto iter = h.begin(); iter != h.end(); ++iter) { + delete *iter; + } + o.push_back(new rgw_cls_check_index_ret); +} + +void rgw_cls_check_index_ret::dump(Formatter *f) const +{ + encode_json("existing_header", existing_header, f); + 
encode_json("calculated_header", calculated_header, f); +} + +void rgw_cls_bucket_update_stats_op::generate_test_instances(list<rgw_cls_bucket_update_stats_op*>& o) +{ + rgw_cls_bucket_update_stats_op *r = new rgw_cls_bucket_update_stats_op; + r->absolute = true; + rgw_bucket_category_stats& s = r->stats[RGWObjCategory::None]; + s.total_size = 1; + s.total_size_rounded = 4096; + s.num_entries = 1; + o.push_back(r); + + o.push_back(new rgw_cls_bucket_update_stats_op); +} + +void rgw_cls_bucket_update_stats_op::dump(Formatter *f) const +{ + encode_json("absolute", absolute, f); + map<int, rgw_bucket_category_stats> s; + for (auto& entry : stats) { + s[(int)entry.first] = entry.second; + } + encode_json("stats", s, f); +} + +void cls_rgw_bi_log_list_op::dump(Formatter *f) const +{ + f->dump_string("marker", marker); + f->dump_unsigned("max", max); +} + +void cls_rgw_bi_log_list_op::generate_test_instances(list<cls_rgw_bi_log_list_op*>& ls) +{ + ls.push_back(new cls_rgw_bi_log_list_op); + ls.push_back(new cls_rgw_bi_log_list_op); + ls.back()->marker = "mark"; + ls.back()->max = 123; +} + +void cls_rgw_bi_log_trim_op::dump(Formatter *f) const +{ + f->dump_string("start_marker", start_marker); + f->dump_string("end_marker", end_marker); +} + +void cls_rgw_bi_log_trim_op::generate_test_instances(list<cls_rgw_bi_log_trim_op*>& ls) +{ + ls.push_back(new cls_rgw_bi_log_trim_op); + ls.push_back(new cls_rgw_bi_log_trim_op); + ls.back()->start_marker = "foo"; + ls.back()->end_marker = "bar"; +} + +void cls_rgw_bi_log_list_ret::dump(Formatter *f) const +{ + encode_json("entries", entries, f); + f->dump_unsigned("truncated", (int)truncated); +} + +void cls_rgw_bi_log_list_ret::generate_test_instances(list<cls_rgw_bi_log_list_ret*>& ls) +{ + ls.push_back(new cls_rgw_bi_log_list_ret); + ls.push_back(new cls_rgw_bi_log_list_ret); + ls.back()->entries.push_back(rgw_bi_log_entry()); + ls.back()->truncated = true; +} + +void cls_rgw_mp_upload_part_info_update_op::generate_test_instances(std::list<cls_rgw_mp_upload_part_info_update_op*>& ls) +{ + ls.push_back(new cls_rgw_mp_upload_part_info_update_op); + ls.back()->part_key = "part1"; + ls.push_back(new cls_rgw_mp_upload_part_info_update_op); + ls.back()->part_key = "part2"; +} + +void cls_rgw_mp_upload_part_info_update_op::dump(Formatter* f) const +{ + encode_json("part_key", part_key, f); + encode_json("part_num", info.num, f); + encode_json("part_prefix", info.manifest.get_prefix(), f); +} + +void cls_rgw_reshard_add_op::generate_test_instances(list<cls_rgw_reshard_add_op*>& ls) +{ + ls.push_back(new cls_rgw_reshard_add_op); + ls.push_back(new cls_rgw_reshard_add_op); + list<cls_rgw_reshard_entry *> l; + cls_rgw_reshard_entry::generate_test_instances(l); + auto iter = l.begin(); + ls.back()->entry = *(*iter); +} + +void cls_rgw_reshard_add_op::dump(Formatter *f) const +{ + encode_json("entry", entry, f); +} + +void cls_rgw_reshard_list_op::generate_test_instances(list<cls_rgw_reshard_list_op*>& ls) +{ + ls.push_back(new cls_rgw_reshard_list_op); + ls.push_back(new cls_rgw_reshard_list_op); + ls.back()->max = 1000; + ls.back()->marker = "foo"; +} + +void cls_rgw_reshard_list_op::dump(Formatter *f) const +{ + encode_json("max", max, f); + encode_json("marker", marker, f); +} + +void cls_rgw_reshard_list_ret::generate_test_instances(list<cls_rgw_reshard_list_ret*>& ls) +{ + ls.push_back(new cls_rgw_reshard_list_ret); + ls.push_back(new cls_rgw_reshard_list_ret); + ls.back()->entries.push_back(cls_rgw_reshard_entry()); + ls.back()->is_truncated = true; +} + 
+void cls_rgw_reshard_list_ret::dump(Formatter *f) const
+{
+  encode_json("entries", entries, f);
+  encode_json("is_truncated", is_truncated, f);
+}
+
+void cls_rgw_reshard_get_op::generate_test_instances(list<cls_rgw_reshard_get_op*>& ls)
+{
+  ls.push_back(new cls_rgw_reshard_get_op);
+  ls.push_back(new cls_rgw_reshard_get_op);
+}
+
+void cls_rgw_reshard_get_op::dump(Formatter *f) const
+{
+  encode_json("entry", entry, f);
+}
+
+void cls_rgw_reshard_get_ret::generate_test_instances(list<cls_rgw_reshard_get_ret*>& ls)
+{
+  ls.push_back(new cls_rgw_reshard_get_ret);
+  ls.push_back(new cls_rgw_reshard_get_ret);
+}
+
+void cls_rgw_reshard_get_ret::dump(Formatter *f) const
+{
+  encode_json("entry", entry, f);
+}
+
+void cls_rgw_reshard_remove_op::generate_test_instances(list<cls_rgw_reshard_remove_op*>& ls)
+{
+  ls.push_back(new cls_rgw_reshard_remove_op);
+  ls.push_back(new cls_rgw_reshard_remove_op);
+  ls.back()->bucket_name = "foo";
+  ls.back()->bucket_id = "bucket_id";
+}
+
+void cls_rgw_reshard_remove_op::dump(Formatter *f) const
+{
+  encode_json("bucket_name", bucket_name, f);
+  encode_json("bucket_id", bucket_id, f);
+}
+
+
+void cls_rgw_set_bucket_resharding_op::generate_test_instances(
+  list<cls_rgw_set_bucket_resharding_op*>& ls)
+{
+  ls.push_back(new cls_rgw_set_bucket_resharding_op);
+  ls.push_back(new cls_rgw_set_bucket_resharding_op);
+}
+
+void cls_rgw_set_bucket_resharding_op::dump(Formatter *f) const
+{
+  encode_json("entry", entry, f);
+}
+
+void cls_rgw_clear_bucket_resharding_op::generate_test_instances(
+  list<cls_rgw_clear_bucket_resharding_op*>& ls)
+{
+  ls.push_back(new cls_rgw_clear_bucket_resharding_op);
+  ls.push_back(new cls_rgw_clear_bucket_resharding_op);
+}
+
+void cls_rgw_clear_bucket_resharding_op::dump(Formatter *f) const
+{
+}
+
+void cls_rgw_guard_bucket_resharding_op::generate_test_instances(
+  list<cls_rgw_guard_bucket_resharding_op*>& ls)
+{
+  ls.push_back(new cls_rgw_guard_bucket_resharding_op);
+  ls.push_back(new cls_rgw_guard_bucket_resharding_op);
+}
+
+void cls_rgw_guard_bucket_resharding_op::dump(Formatter *f) const
+{
+  encode_json("ret_err", ret_err, f);
+}
+
+
+void cls_rgw_get_bucket_resharding_op::generate_test_instances(
+  list<cls_rgw_get_bucket_resharding_op*>& ls)
+{
+  ls.push_back(new cls_rgw_get_bucket_resharding_op);
+  ls.push_back(new cls_rgw_get_bucket_resharding_op);
+}
+
+void cls_rgw_get_bucket_resharding_op::dump(Formatter *f) const
+{
+}
diff --git a/src/cls/rgw/cls_rgw_ops.h b/src/cls/rgw/cls_rgw_ops.h
new file mode 100644
index 000000000..2891a3b61
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_ops.h
@@ -0,0 +1,1543 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#include "cls/rgw/cls_rgw_types.h"
+
+struct rgw_cls_tag_timeout_op
+{
+  uint64_t tag_timeout;
+
+  rgw_cls_tag_timeout_op() : tag_timeout(0) {}
+
+  void encode(ceph::buffer::list &bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(tag_timeout, bl);
+    ENCODE_FINISH(bl);
+  }
+  void decode(ceph::buffer::list::const_iterator &bl) {
+    DECODE_START(1, bl);
+    decode(tag_timeout, bl);
+    DECODE_FINISH(bl);
+  }
+  void dump(ceph::Formatter *f) const;
+  static void generate_test_instances(std::list<rgw_cls_tag_timeout_op*>& ls);
+};
+WRITE_CLASS_ENCODER(rgw_cls_tag_timeout_op)
+
+struct rgw_cls_obj_prepare_op
+{
+  RGWModifyOp op;
+  cls_rgw_obj_key key;
+  std::string tag;
+  std::string locator;
+  bool log_op;
+  uint16_t bilog_flags;
+  rgw_zone_set zones_trace;
+
+  rgw_cls_obj_prepare_op() : op(CLS_RGW_OP_UNKNOWN),
log_op(false), bilog_flags(0) {}
+
+  void encode(ceph::buffer::list &bl) const {
+    ENCODE_START(7, 5, bl);
+    uint8_t c = (uint8_t)op;
+    encode(c, bl);
+    encode(tag, bl);
+    encode(locator, bl);
+    encode(log_op, bl);
+    encode(key, bl);
+    encode(bilog_flags, bl);
+    encode(zones_trace, bl);
+    ENCODE_FINISH(bl);
+  }
+  void decode(ceph::buffer::list::const_iterator &bl) {
+    DECODE_START_LEGACY_COMPAT_LEN(7, 3, 3, bl);
+    uint8_t c;
+    decode(c, bl);
+    op = (RGWModifyOp)c;
+    if (struct_v < 5) {
+      decode(key.name, bl);
+    }
+    decode(tag, bl);
+    if (struct_v >= 2) {
+      decode(locator, bl);
+    }
+    if (struct_v >= 4) {
+      decode(log_op, bl);
+    }
+    if (struct_v >= 5) {
+      decode(key, bl);
+    }
+    if (struct_v >= 6) {
+      decode(bilog_flags, bl);
+    }
+    if (struct_v >= 7) {
+      decode(zones_trace, bl);
+    }
+    DECODE_FINISH(bl);
+  }
+  void dump(ceph::Formatter *f) const;
+  static void generate_test_instances(std::list<rgw_cls_obj_prepare_op*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_prepare_op)
+
+struct rgw_cls_obj_complete_op
+{
+  RGWModifyOp op;
+  cls_rgw_obj_key key;
+  std::string locator;
+  rgw_bucket_entry_ver ver;
+  rgw_bucket_dir_entry_meta meta;
+  std::string tag;
+  bool log_op;
+  uint16_t bilog_flags;
+
+  std::list<cls_rgw_obj_key> remove_objs;
+  rgw_zone_set zones_trace;
+
+  rgw_cls_obj_complete_op() : op(CLS_RGW_OP_ADD), log_op(false), bilog_flags(0) {}
+
+  void encode(ceph::buffer::list &bl) const {
+    ENCODE_START(9, 7, bl);
+    uint8_t c = (uint8_t)op;
+    encode(c, bl);
+    encode(ver.epoch, bl);
+    encode(meta, bl);
+    encode(tag, bl);
+    encode(locator, bl);
+    encode(remove_objs, bl);
+    encode(ver, bl);
+    encode(log_op, bl);
+    encode(key, bl);
+    encode(bilog_flags, bl);
+    encode(zones_trace, bl);
+    ENCODE_FINISH(bl);
+  }
+  void decode(ceph::buffer::list::const_iterator &bl) {
+    DECODE_START_LEGACY_COMPAT_LEN(9, 3, 3, bl);
+    uint8_t c;
+    decode(c, bl);
+    op = (RGWModifyOp)c;
+    if (struct_v < 7) {
+      decode(key.name, bl);
+    }
+    decode(ver.epoch, bl);
+    decode(meta, bl);
+    decode(tag, bl);
+    if (struct_v >= 2) {
+      decode(locator, bl);
+    }
+    if (struct_v >= 4 && struct_v < 7) {
+      std::list<std::string> old_remove_objs;
+      decode(old_remove_objs, bl);
+
+      for (auto iter = old_remove_objs.begin();
+           iter != old_remove_objs.end(); ++iter) {
+        cls_rgw_obj_key k;
+        k.name = *iter;
+        remove_objs.push_back(k);
+      }
+    } else {
+      decode(remove_objs, bl);
+    }
+    if (struct_v >= 5) {
+      decode(ver, bl);
+    } else {
+      ver.pool = -1;
+    }
+    if (struct_v >= 6) {
+      decode(log_op, bl);
+    }
+    if (struct_v >= 7) {
+      decode(key, bl);
+    }
+    if (struct_v >= 8) {
+      decode(bilog_flags, bl);
+    }
+    if (struct_v >= 9) {
+      decode(zones_trace, bl);
+    }
+    DECODE_FINISH(bl);
+  }
+  void dump(ceph::Formatter *f) const;
+  static void generate_test_instances(std::list<rgw_cls_obj_complete_op*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_complete_op)
+
+struct rgw_cls_link_olh_op {
+  cls_rgw_obj_key key;
+  std::string olh_tag;
+  bool delete_marker;
+  std::string op_tag;
+  rgw_bucket_dir_entry_meta meta;
+  uint64_t olh_epoch;
+  bool log_op;
+  uint16_t bilog_flags;
+  ceph::real_time unmod_since; /* only create delete marker if newer than this */
+  bool high_precision_time;
+  rgw_zone_set zones_trace;
+
+  rgw_cls_link_olh_op() : delete_marker(false), olh_epoch(0), log_op(false), bilog_flags(0), high_precision_time(false) {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(5, 1, bl);
+    encode(key, bl);
+    encode(olh_tag, bl);
+    encode(delete_marker, bl);
+    encode(op_tag, bl);
+    encode(meta, bl);
+    encode(olh_epoch, bl);
+    encode(log_op, bl);
+
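// note: unmod_since is encoded twice below: first truncated to a time_t
+    // (all that struct_v == 2 decoders know how to read), and then as the
+    // full-precision real_time that v3+ decoders actually use
+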
encode(bilog_flags, bl); + uint64_t t = ceph::real_clock::to_time_t(unmod_since); + encode(t, bl); + encode(unmod_since, bl); + encode(high_precision_time, bl); + encode(zones_trace, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(5, bl); + decode(key, bl); + decode(olh_tag, bl); + decode(delete_marker, bl); + decode(op_tag, bl); + decode(meta, bl); + decode(olh_epoch, bl); + decode(log_op, bl); + decode(bilog_flags, bl); + if (struct_v == 2) { + uint64_t t; + decode(t, bl); + unmod_since = ceph::real_clock::from_time_t(static_cast<time_t>(t)); + } + if (struct_v >= 3) { + uint64_t t; + decode(t, bl); + decode(unmod_since, bl); + } + if (struct_v >= 4) { + decode(high_precision_time, bl); + } + if (struct_v >= 5) { + decode(zones_trace, bl); + } + DECODE_FINISH(bl); + } + + static void generate_test_instances(std::list<rgw_cls_link_olh_op *>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(rgw_cls_link_olh_op) + +struct rgw_cls_unlink_instance_op { + cls_rgw_obj_key key; + std::string op_tag; + uint64_t olh_epoch; + bool log_op; + uint16_t bilog_flags; + std::string olh_tag; + rgw_zone_set zones_trace; + + rgw_cls_unlink_instance_op() : olh_epoch(0), log_op(false), bilog_flags(0) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(3, 1, bl); + encode(key, bl); + encode(op_tag, bl); + encode(olh_epoch, bl); + encode(log_op, bl); + encode(bilog_flags, bl); + encode(olh_tag, bl); + encode(zones_trace, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(3, bl); + decode(key, bl); + decode(op_tag, bl); + decode(olh_epoch, bl); + decode(log_op, bl); + decode(bilog_flags, bl); + if (struct_v >= 2) { + decode(olh_tag, bl); + } + if (struct_v >= 3) { + decode(zones_trace, bl); + } + DECODE_FINISH(bl); + } + + static void generate_test_instances(std::list<rgw_cls_unlink_instance_op *>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(rgw_cls_unlink_instance_op) + +struct rgw_cls_read_olh_log_op +{ + cls_rgw_obj_key olh; + uint64_t ver_marker; + std::string olh_tag; + + rgw_cls_read_olh_log_op() : ver_marker(0) {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(olh, bl); + encode(ver_marker, bl); + encode(olh_tag, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(olh, bl); + decode(ver_marker, bl); + decode(olh_tag, bl); + DECODE_FINISH(bl); + } + static void generate_test_instances(std::list<rgw_cls_read_olh_log_op *>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(rgw_cls_read_olh_log_op) + + +struct rgw_cls_read_olh_log_ret +{ + std::map<uint64_t, std::vector<rgw_bucket_olh_log_entry> > log; + bool is_truncated; + + rgw_cls_read_olh_log_ret() : is_truncated(false) {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(log, bl); + encode(is_truncated, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(log, bl); + decode(is_truncated, bl); + DECODE_FINISH(bl); + } + static void generate_test_instances(std::list<rgw_cls_read_olh_log_ret *>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(rgw_cls_read_olh_log_ret) + +struct rgw_cls_trim_olh_log_op +{ + cls_rgw_obj_key olh; + uint64_t ver; + std::string olh_tag; + + rgw_cls_trim_olh_log_op() : ver(0) {} + + void encode(ceph::buffer::list &bl) const { + 
ENCODE_START(1, 1, bl); + encode(olh, bl); + encode(ver, bl); + encode(olh_tag, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(olh, bl); + decode(ver, bl); + decode(olh_tag, bl); + DECODE_FINISH(bl); + } + static void generate_test_instances(std::list<rgw_cls_trim_olh_log_op *>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(rgw_cls_trim_olh_log_op) + +struct rgw_cls_bucket_clear_olh_op { + cls_rgw_obj_key key; + std::string olh_tag; + + rgw_cls_bucket_clear_olh_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(key, bl); + encode(olh_tag, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(key, bl); + decode(olh_tag, bl); + DECODE_FINISH(bl); + } + + static void generate_test_instances(std::list<rgw_cls_bucket_clear_olh_op *>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(rgw_cls_bucket_clear_olh_op) + +struct rgw_cls_list_op +{ + cls_rgw_obj_key start_obj; + uint32_t num_entries; + std::string filter_prefix; + bool list_versions; + std::string delimiter; + + rgw_cls_list_op() : num_entries(0), list_versions(false) {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(6, 4, bl); + encode(num_entries, bl); + encode(filter_prefix, bl); + encode(start_obj, bl); + encode(list_versions, bl); + encode(delimiter, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(6, 2, 2, bl); + if (struct_v < 4) { + decode(start_obj.name, bl); + } + decode(num_entries, bl); + if (struct_v >= 3) { + decode(filter_prefix, bl); + } + if (struct_v >= 4) { + decode(start_obj, bl); + } + if (struct_v >= 5) { + decode(list_versions, bl); + } + if (struct_v >= 6) { + decode(delimiter, bl); + } + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<rgw_cls_list_op*>& o); +}; +WRITE_CLASS_ENCODER(rgw_cls_list_op) + +struct rgw_cls_list_ret { + rgw_bucket_dir dir; + bool is_truncated; + + // if is_truncated is true, starting marker for next iteration; this + // is necessary as it's possible after maximum number of tries we + // still might have zero entries to return, in which case we have to + // at least move the ball foward + cls_rgw_obj_key marker; + + // cls_filtered is not transmitted; it is assumed true for versions + // on/after 3 and false for prior versions; this allows the rgw + // layer to know when an older osd (cls) does not do the filtering + bool cls_filtered; + + rgw_cls_list_ret() : + is_truncated(false), + cls_filtered(true) + {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(4, 2, bl); + encode(dir, bl); + encode(is_truncated, bl); + encode(marker, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(4, 2, 2, bl); + decode(dir, bl); + decode(is_truncated, bl); + cls_filtered = struct_v >= 3; + if (struct_v >= 4) { + decode(marker, bl); + } + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<rgw_cls_list_ret*>& o); +}; +WRITE_CLASS_ENCODER(rgw_cls_list_ret) + +struct rgw_cls_check_index_ret +{ + rgw_bucket_dir_header existing_header; + rgw_bucket_dir_header calculated_header; + + rgw_cls_check_index_ret() {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(existing_header, bl); 
+ encode(calculated_header, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(existing_header, bl); + decode(calculated_header, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<rgw_cls_check_index_ret *>& o); +}; +WRITE_CLASS_ENCODER(rgw_cls_check_index_ret) + +struct rgw_cls_bucket_update_stats_op +{ + bool absolute{false}; + std::map<RGWObjCategory, rgw_bucket_category_stats> stats; + + rgw_cls_bucket_update_stats_op() {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(absolute, bl); + encode(stats, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(absolute, bl); + decode(stats, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<rgw_cls_bucket_update_stats_op *>& o); +}; +WRITE_CLASS_ENCODER(rgw_cls_bucket_update_stats_op) + +struct rgw_cls_obj_remove_op { + std::list<std::string> keep_attr_prefixes; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(keep_attr_prefixes, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(keep_attr_prefixes, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(rgw_cls_obj_remove_op) + +struct rgw_cls_obj_store_pg_ver_op { + std::string attr; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(attr, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(attr, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(rgw_cls_obj_store_pg_ver_op) + +struct rgw_cls_obj_check_attrs_prefix { + std::string check_prefix; + bool fail_if_exist; + + rgw_cls_obj_check_attrs_prefix() : fail_if_exist(false) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(check_prefix, bl); + encode(fail_if_exist, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(check_prefix, bl); + decode(fail_if_exist, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(rgw_cls_obj_check_attrs_prefix) + +struct rgw_cls_obj_check_mtime { + ceph::real_time mtime; + RGWCheckMTimeType type; + bool high_precision_time; + + rgw_cls_obj_check_mtime() : type(CLS_RGW_CHECK_TIME_MTIME_EQ), high_precision_time(false) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 1, bl); + encode(mtime, bl); + encode((uint8_t)type, bl); + encode(high_precision_time, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + decode(mtime, bl); + uint8_t c; + decode(c, bl); + type = (RGWCheckMTimeType)c; + if (struct_v >= 2) { + decode(high_precision_time, bl); + } + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(rgw_cls_obj_check_mtime) + +struct rgw_cls_usage_log_add_op { + rgw_usage_log_info info; + rgw_user user; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 1, bl); + encode(info, bl); + encode(user.to_str(), bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + decode(info, bl); + if (struct_v >= 2) { + std::string s; + decode(s, bl); + user.from_str(s); + } + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(rgw_cls_usage_log_add_op) + +struct rgw_cls_bi_get_op { + 
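// input for the "bi_get" cls op (RGW_BI_GET): look up a single bucket
+  // index entry of the given type for the given key
+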
cls_rgw_obj_key key;
+  BIIndexType type; /* namespace: plain, instance, olh */
+
+  rgw_cls_bi_get_op() : type(BIIndexType::Plain) {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(key, bl);
+    encode((uint8_t)type, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(key, bl);
+    uint8_t c;
+    decode(c, bl);
+    type = (BIIndexType)c;
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_get_op)
+
+struct rgw_cls_bi_get_ret {
+  rgw_cls_bi_entry entry;
+
+  rgw_cls_bi_get_ret() {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(entry, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(entry, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_get_ret)
+
+struct rgw_cls_bi_put_op {
+  rgw_cls_bi_entry entry;
+
+  rgw_cls_bi_put_op() {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(entry, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(entry, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_put_op)
+
+struct rgw_cls_bi_list_op {
+  uint32_t max;
+  std::string name_filter; // limit result to one object and its instances
+  std::string marker;
+
+  rgw_cls_bi_list_op() : max(0) {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(max, bl);
+    encode(name_filter, bl);
+    encode(marker, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(max, bl);
+    decode(name_filter, bl);
+    decode(marker, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_list_op)
+
+struct rgw_cls_bi_list_ret {
+  std::list<rgw_cls_bi_entry> entries;
+  bool is_truncated;
+
+  rgw_cls_bi_list_ret() : is_truncated(false) {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(entries, bl);
+    encode(is_truncated, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(entries, bl);
+    decode(is_truncated, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_list_ret)
+
+struct rgw_cls_usage_log_read_op {
+  uint64_t start_epoch;
+  uint64_t end_epoch;
+  std::string owner;
+  std::string bucket;
+
+  std::string iter; // should be empty for the first call, non-empty for subsequent calls
+  uint32_t max_entries;
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(2, 1, bl);
+    encode(start_epoch, bl);
+    encode(end_epoch, bl);
+    encode(owner, bl);
+    encode(iter, bl);
+    encode(max_entries, bl);
+    encode(bucket, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(2, bl);
+    decode(start_epoch, bl);
+    decode(end_epoch, bl);
+    decode(owner, bl);
+    decode(iter, bl);
+    decode(max_entries, bl);
+    if (struct_v >= 2) {
+      decode(bucket, bl);
+    }
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(rgw_cls_usage_log_read_op)
+
+struct rgw_cls_usage_log_read_ret {
+  std::map<rgw_user_bucket, rgw_usage_log_entry> usage;
+  bool truncated;
+  std::string next_iter;
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(usage, bl);
+    encode(truncated, bl);
+    encode(next_iter, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(usage, bl);
+
decode(truncated, bl); + decode(next_iter, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(rgw_cls_usage_log_read_ret) + +struct rgw_cls_usage_log_trim_op { + uint64_t start_epoch; + uint64_t end_epoch; + std::string user; + std::string bucket; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(3, 2, bl); + encode(start_epoch, bl); + encode(end_epoch, bl); + encode(user, bl); + encode(bucket, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(3, bl); + decode(start_epoch, bl); + decode(end_epoch, bl); + decode(user, bl); + if (struct_v >= 3) { + decode(bucket, bl); + } + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(rgw_cls_usage_log_trim_op) + +struct cls_rgw_gc_set_entry_op { + uint32_t expiration_secs; + cls_rgw_gc_obj_info info; + cls_rgw_gc_set_entry_op() : expiration_secs(0) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(expiration_secs, bl); + encode(info, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(expiration_secs, bl); + decode(info, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_rgw_gc_set_entry_op*>& ls); + + size_t estimate_encoded_size() const { + constexpr size_t start_overhead = sizeof(__u8) + sizeof(__u8) + sizeof(ceph_le32); // version and length prefix + constexpr size_t expr_secs_overhead = sizeof(__u32); // expiration_seconds_overhead + return start_overhead + expr_secs_overhead + info.estimate_encoded_size(); + } +}; +WRITE_CLASS_ENCODER(cls_rgw_gc_set_entry_op) + +struct cls_rgw_gc_defer_entry_op { + uint32_t expiration_secs; + std::string tag; + cls_rgw_gc_defer_entry_op() : expiration_secs(0) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(expiration_secs, bl); + encode(tag, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(expiration_secs, bl); + decode(tag, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_rgw_gc_defer_entry_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_rgw_gc_defer_entry_op) + +struct cls_rgw_gc_list_op { + std::string marker; + uint32_t max; + bool expired_only; + + cls_rgw_gc_list_op() : max(0), expired_only(true) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 1, bl); + encode(marker, bl); + encode(max, bl); + encode(expired_only, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + decode(marker, bl); + decode(max, bl); + if (struct_v >= 2) { + decode(expired_only, bl); + } + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_rgw_gc_list_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_rgw_gc_list_op) + +struct cls_rgw_gc_list_ret { + std::list<cls_rgw_gc_obj_info> entries; + std::string next_marker; + bool truncated; + + cls_rgw_gc_list_ret() : truncated(false) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 1, bl); + encode(entries, bl); + encode(next_marker, bl); + encode(truncated, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + decode(entries, bl); + if (struct_v >= 2) + decode(next_marker, bl); + decode(truncated, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + 
static void generate_test_instances(std::list<cls_rgw_gc_list_ret*>& ls); +}; +WRITE_CLASS_ENCODER(cls_rgw_gc_list_ret) + +struct cls_rgw_gc_remove_op { + std::vector<std::string> tags; + + cls_rgw_gc_remove_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(tags, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(tags, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_rgw_gc_remove_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_rgw_gc_remove_op) + +struct cls_rgw_bi_log_list_op { + std::string marker; + uint32_t max; + + cls_rgw_bi_log_list_op() : max(0) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(marker, bl); + encode(max, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(marker, bl); + decode(max, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_rgw_bi_log_list_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_rgw_bi_log_list_op) + +struct cls_rgw_bi_log_trim_op { + std::string start_marker; + std::string end_marker; + + cls_rgw_bi_log_trim_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(start_marker, bl); + encode(end_marker, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(start_marker, bl); + decode(end_marker, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_rgw_bi_log_trim_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_rgw_bi_log_trim_op) + +struct cls_rgw_bi_log_list_ret { + std::list<rgw_bi_log_entry> entries; + bool truncated; + + cls_rgw_bi_log_list_ret() : truncated(false) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(entries, bl); + encode(truncated, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(entries, bl); + decode(truncated, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_rgw_bi_log_list_ret*>& ls); +}; +WRITE_CLASS_ENCODER(cls_rgw_bi_log_list_ret) + +struct cls_rgw_lc_get_next_entry_op { + std::string marker; + cls_rgw_lc_get_next_entry_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(marker, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(marker, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_rgw_lc_get_next_entry_op) + +struct cls_rgw_lc_get_next_entry_ret { + cls_rgw_lc_entry entry; + + cls_rgw_lc_get_next_entry_ret() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 2, bl); + encode(entry, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + if (struct_v < 2) { + std::pair<std::string, int> oe; + decode(oe, bl); + entry = {oe.first, 0 /* start */, uint32_t(oe.second)}; + } else { + decode(entry, bl); + } + DECODE_FINISH(bl); + } + +}; +WRITE_CLASS_ENCODER(cls_rgw_lc_get_next_entry_ret) + +struct cls_rgw_lc_get_entry_op { + std::string marker; + cls_rgw_lc_get_entry_op() {} + cls_rgw_lc_get_entry_op(const std::string& _marker) : marker(_marker) {} + + void 
encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(marker, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(marker, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_rgw_lc_get_entry_op) + +struct cls_rgw_lc_get_entry_ret { + cls_rgw_lc_entry entry; + + cls_rgw_lc_get_entry_ret() {} + cls_rgw_lc_get_entry_ret(cls_rgw_lc_entry&& _entry) + : entry(std::move(_entry)) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 2, bl); + encode(entry, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + if (struct_v < 2) { + /* there was an unmarked change in the encoding during v1, so + * if the sender version is v1, try decoding both ways (sorry) */ + ceph::buffer::list::const_iterator save_bl = bl; + try { + decode(entry, bl); + } catch (ceph::buffer::error& e) { + std::pair<std::string, int> oe; + bl = save_bl; + decode(oe, bl); + entry.bucket = oe.first; + entry.start_time = 0; + entry.status = oe.second; + } + } else { + decode(entry, bl); + } + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_rgw_lc_get_entry_ret*>& ls); +}; +WRITE_CLASS_ENCODER(cls_rgw_lc_get_entry_ret) + +struct cls_rgw_lc_rm_entry_op { + cls_rgw_lc_entry entry; + cls_rgw_lc_rm_entry_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 2, bl); + encode(entry, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + if (struct_v < 2) { + std::pair<std::string, int> oe; + decode(oe, bl); + entry = {oe.first, 0 /* start */, uint32_t(oe.second)}; + } else { + decode(entry, bl); + } + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_rgw_lc_rm_entry_op) + +struct cls_rgw_lc_set_entry_op { + cls_rgw_lc_entry entry; + cls_rgw_lc_set_entry_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 2, bl); + encode(entry, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + if (struct_v < 2) { + std::pair<std::string, int> oe; + decode(oe, bl); + entry = {oe.first, 0 /* start */, uint32_t(oe.second)}; + } else { + decode(entry, bl); + } + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_rgw_lc_set_entry_op) + +struct cls_rgw_lc_put_head_op { + cls_rgw_lc_obj_head head; + + + cls_rgw_lc_put_head_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(head, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(head, bl); + DECODE_FINISH(bl); + } + +}; +WRITE_CLASS_ENCODER(cls_rgw_lc_put_head_op) + +struct cls_rgw_lc_get_head_ret { + cls_rgw_lc_obj_head head; + + cls_rgw_lc_get_head_ret() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(head, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(head, bl); + DECODE_FINISH(bl); + } + +}; +WRITE_CLASS_ENCODER(cls_rgw_lc_get_head_ret) + +struct cls_rgw_lc_list_entries_op { + std::string marker; + uint32_t max_entries = 0; + uint8_t compat_v{0}; + + cls_rgw_lc_list_entries_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(3, 1, bl); + encode(marker, bl); + encode(max_entries, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + 
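// capture the version the sender encoded with; the reply side uses it
+    // (via cls_rgw_lc_list_entries_ret's compat_v) to encode a form that
+    // pre-v3 callers can still decode
+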
DECODE_START(3, bl); + compat_v = struct_v; + decode(marker, bl); + decode(max_entries, bl); + DECODE_FINISH(bl); + } + +}; +WRITE_CLASS_ENCODER(cls_rgw_lc_list_entries_op) + +struct cls_rgw_lc_list_entries_ret { + std::vector<cls_rgw_lc_entry> entries; + bool is_truncated{false}; + uint8_t compat_v; + +cls_rgw_lc_list_entries_ret(uint8_t compat_v = 3) + : compat_v(compat_v) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(compat_v, 1, bl); + if (compat_v <= 2) { + std::map<std::string, int> oes; + std::for_each(entries.begin(), entries.end(), + [&oes](const cls_rgw_lc_entry& elt) + {oes.insert({elt.bucket, elt.status});}); + encode(oes, bl); + } else { + encode(entries, bl); + } + encode(is_truncated, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(3, bl); + compat_v = struct_v; + if (struct_v <= 2) { + std::map<std::string, int> oes; + decode(oes, bl); + std::for_each(oes.begin(), oes.end(), + [this](const std::pair<std::string, int>& oe) + {entries.push_back({oe.first, 0 /* start */, + uint32_t(oe.second)});}); + } else { + decode(entries, bl); + } + if (struct_v >= 2) { + decode(is_truncated, bl); + } + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_rgw_lc_list_entries_ret) + +struct cls_rgw_mp_upload_part_info_update_op { + std::string part_key; + RGWUploadPartInfo info; + + cls_rgw_mp_upload_part_info_update_op() {} + + void encode(buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(part_key, bl); + encode(info, bl); + ENCODE_FINISH(bl); + } + + void decode(buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(part_key, bl); + decode(info, bl); + DECODE_FINISH(bl); + } + + static void generate_test_instances(std::list<cls_rgw_mp_upload_part_info_update_op*>& ls); + void dump(Formatter* f) const; +}; +WRITE_CLASS_ENCODER(cls_rgw_mp_upload_part_info_update_op) + +struct cls_rgw_reshard_add_op { + cls_rgw_reshard_entry entry; + + cls_rgw_reshard_add_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(entry, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(entry, bl); + DECODE_FINISH(bl); + } + static void generate_test_instances(std::list<cls_rgw_reshard_add_op*>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(cls_rgw_reshard_add_op) + +struct cls_rgw_reshard_list_op { + uint32_t max{0}; + std::string marker; + + cls_rgw_reshard_list_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(max, bl); + encode(marker, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(max, bl); + decode(marker, bl); + DECODE_FINISH(bl); + } + static void generate_test_instances(std::list<cls_rgw_reshard_list_op*>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(cls_rgw_reshard_list_op) + + +struct cls_rgw_reshard_list_ret { + std::list<cls_rgw_reshard_entry> entries; + bool is_truncated{false}; + + cls_rgw_reshard_list_ret() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(entries, bl); + encode(is_truncated, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(entries, bl); + decode(is_truncated, bl); + DECODE_FINISH(bl); + } + static void generate_test_instances(std::list<cls_rgw_reshard_list_ret*>& o); + void dump(ceph::Formatter *f) const; +}; 
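+// Illustrative request/reply round-trip (a sketch only; it assumes the
+// request is executed against the matching cls_rgw method out of band):
+// the *_op structs are encoded into the request bufferlist and the *_ret
+// structs are decoded from the reply, e.g. for a reshard listing:
+//   cls_rgw_reshard_list_op op;
+//   op.max = 100;
+//   ceph::buffer::list in, out;
+//   encode(op, in);
+//   // ... execute the reshard-list method on the target object ...
+//   cls_rgw_reshard_list_ret ret;
+//   auto p = out.cbegin();
+//   decode(ret, p);
+//   // ret.entries / ret.is_truncated drive the next page via op.marker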
+WRITE_CLASS_ENCODER(cls_rgw_reshard_list_ret) + +struct cls_rgw_reshard_get_op { + cls_rgw_reshard_entry entry; + + cls_rgw_reshard_get_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(entry, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(entry, bl); + DECODE_FINISH(bl); + } + static void generate_test_instances(std::list<cls_rgw_reshard_get_op*>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(cls_rgw_reshard_get_op) + +struct cls_rgw_reshard_get_ret { + cls_rgw_reshard_entry entry; + + cls_rgw_reshard_get_ret() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(entry, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(entry, bl); + DECODE_FINISH(bl); + } + static void generate_test_instances(std::list<cls_rgw_reshard_get_ret*>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(cls_rgw_reshard_get_ret) + +struct cls_rgw_reshard_remove_op { + std::string tenant; + std::string bucket_name; + std::string bucket_id; + + cls_rgw_reshard_remove_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(tenant, bl); + encode(bucket_name, bl); + encode(bucket_id, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(tenant, bl); + decode(bucket_name, bl); + decode(bucket_id, bl); + DECODE_FINISH(bl); + } + static void generate_test_instances(std::list<cls_rgw_reshard_remove_op*>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(cls_rgw_reshard_remove_op) + +struct cls_rgw_set_bucket_resharding_op { + cls_rgw_bucket_instance_entry entry; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(entry, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(entry, bl); + DECODE_FINISH(bl); + } + static void generate_test_instances(std::list<cls_rgw_set_bucket_resharding_op*>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(cls_rgw_set_bucket_resharding_op) + +struct cls_rgw_clear_bucket_resharding_op { + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + DECODE_FINISH(bl); + } + static void generate_test_instances(std::list<cls_rgw_clear_bucket_resharding_op*>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(cls_rgw_clear_bucket_resharding_op) + +struct cls_rgw_guard_bucket_resharding_op { + int ret_err{0}; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(ret_err, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(ret_err, bl); + DECODE_FINISH(bl); + } + + static void generate_test_instances(std::list<cls_rgw_guard_bucket_resharding_op*>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(cls_rgw_guard_bucket_resharding_op) + +struct cls_rgw_get_bucket_resharding_op { + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + DECODE_FINISH(bl); + } + + static void generate_test_instances(std::list<cls_rgw_get_bucket_resharding_op*>& o); + 
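+  // Note: this op carries no fields, yet encode() still writes the
+  // ENCODE_START/ENCODE_FINISH version-and-length envelope, so members
+  // can be added later without breaking the wire format. Sketch of what
+  // a caller would send (illustrative only):
+  //   cls_rgw_get_bucket_resharding_op op;
+  //   ceph::buffer::list in;
+  //   encode(op, in);  // header only: version, compat, length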
void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(cls_rgw_get_bucket_resharding_op) + +struct cls_rgw_get_bucket_resharding_ret { + cls_rgw_bucket_instance_entry new_instance; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(new_instance, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(new_instance, bl); + DECODE_FINISH(bl); + } + + static void generate_test_instances(std::list<cls_rgw_get_bucket_resharding_ret*>& o); + void dump(ceph::Formatter *f) const; +}; +WRITE_CLASS_ENCODER(cls_rgw_get_bucket_resharding_ret) diff --git a/src/cls/rgw/cls_rgw_types.cc b/src/cls/rgw/cls_rgw_types.cc new file mode 100644 index 000000000..3a71860c3 --- /dev/null +++ b/src/cls/rgw/cls_rgw_types.cc @@ -0,0 +1,870 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "cls/rgw/cls_rgw_types.h" +#include "common/ceph_json.h" +#include "include/utime.h" + +using std::list; +using std::string; + +using ceph::bufferlist; +using ceph::Formatter; + +void rgw_zone_set_entry::from_str(const string& s) +{ + auto pos = s.find(':'); + if (pos == string::npos) { + zone = s; + location_key.reset(); + } else { + zone = s.substr(0, pos); + location_key = s.substr(pos + 1); + } +} + +string rgw_zone_set_entry::to_str() const +{ + string s = zone; + if (location_key) { + s = s + ":" + *location_key; + } + return s; +} + +void rgw_zone_set_entry::encode(bufferlist &bl) const +{ + /* no ENCODE_START, ENCODE_END for backward compatibility */ + ceph::encode(to_str(), bl); +} + +void rgw_zone_set_entry::decode(bufferlist::const_iterator &bl) +{ + /* no DECODE_START, DECODE_END for backward compatibility */ + string s; + ceph::decode(s, bl); + from_str(s); +} + +void rgw_zone_set_entry::dump(Formatter *f) const +{ + encode_json("entry", to_str(), f); +} + +void rgw_zone_set_entry::decode_json(JSONObj *obj) { + string s; + JSONDecoder::decode_json("entry", s, obj); + from_str(s); +} + +void rgw_zone_set::insert(const string& zone, std::optional<string> location_key) +{ + entries.insert(rgw_zone_set_entry(zone, location_key)); +} + +bool rgw_zone_set::exists(const string& zone, std::optional<string> location_key) const +{ + return entries.find(rgw_zone_set_entry(zone, location_key)) != entries.end(); +} + +void encode_json(const char *name, const rgw_zone_set& zs, ceph::Formatter *f) +{ + encode_json(name, zs.entries, f); +} + +void decode_json_obj(rgw_zone_set& zs, JSONObj *obj) +{ + decode_json_obj(zs.entries, obj); +} + +std::string_view to_string(RGWModifyOp op) +{ + switch (op) { + case CLS_RGW_OP_ADD: return "write"; + case CLS_RGW_OP_DEL: return "del"; + case CLS_RGW_OP_CANCEL: return "cancel"; + case CLS_RGW_OP_LINK_OLH: return "link_olh"; + case CLS_RGW_OP_LINK_OLH_DM: return "link_olh_del"; + case CLS_RGW_OP_UNLINK_INSTANCE: return "unlink_instance"; + case CLS_RGW_OP_SYNCSTOP: return "syncstop"; + case CLS_RGW_OP_RESYNC: return "resync"; + default: + case CLS_RGW_OP_UNKNOWN: return "unknown"; + } +} + +RGWModifyOp parse_modify_op(std::string_view name) +{ + if (name == "write") { + return CLS_RGW_OP_ADD; + } else if (name == "del") { + return CLS_RGW_OP_DEL; + } else if (name == "cancel") { + return CLS_RGW_OP_CANCEL; + } else if (name == "link_olh") { + return CLS_RGW_OP_LINK_OLH; + } else if (name == "link_olh_del") { + return CLS_RGW_OP_LINK_OLH_DM; + } else if (name == "unlink_instance") { + return CLS_RGW_OP_UNLINK_INSTANCE; + } else if 
(name == "syncstop") { + return CLS_RGW_OP_SYNCSTOP; + } else if (name == "resync") { + return CLS_RGW_OP_RESYNC; + } else { + return CLS_RGW_OP_UNKNOWN; + } +} + +std::string_view to_string(RGWObjCategory c) +{ + switch (c) { + case RGWObjCategory::None: return "rgw.none"; + case RGWObjCategory::Main: return "rgw.main"; + case RGWObjCategory::Shadow: return "rgw.shadow"; + case RGWObjCategory::MultiMeta: return "rgw.multimeta"; + case RGWObjCategory::CloudTiered: return "rgw.cloudtiered"; + default: return "unknown"; + } +} + +void rgw_bucket_pending_info::generate_test_instances(list<rgw_bucket_pending_info*>& o) +{ + rgw_bucket_pending_info *i = new rgw_bucket_pending_info; + i->state = CLS_RGW_STATE_COMPLETE; + i->op = CLS_RGW_OP_DEL; + o.push_back(i); + o.push_back(new rgw_bucket_pending_info); +} + +void rgw_bucket_pending_info::dump(Formatter *f) const +{ + encode_json("state", (int)state, f); + utime_t ut(timestamp); + encode_json("timestamp", ut, f); + encode_json("op", (int)op, f); +} + +void rgw_bucket_pending_info::decode_json(JSONObj *obj) { + int val; + JSONDecoder::decode_json("state", val, obj); + state = (RGWPendingState)val; + utime_t ut(timestamp); + JSONDecoder::decode_json("timestamp", ut, obj); + JSONDecoder::decode_json("op", val, obj); + op = (uint8_t)val; +} + +void cls_rgw_obj_key::decode_json(JSONObj *obj) { + JSONDecoder::decode_json("name", name, obj); + JSONDecoder::decode_json("instance", instance, obj); +} + +void rgw_bucket_dir_entry_meta::generate_test_instances(list<rgw_bucket_dir_entry_meta*>& o) +{ + rgw_bucket_dir_entry_meta *m = new rgw_bucket_dir_entry_meta; + m->category = RGWObjCategory::Main; + m->size = 100; + m->etag = "etag"; + m->owner = "owner"; + m->owner_display_name = "display name"; + m->content_type = "content/type"; + o.push_back(m); + o.push_back(new rgw_bucket_dir_entry_meta); +} + +void rgw_bucket_dir_entry_meta::dump(Formatter *f) const +{ + encode_json("category", (int)category, f); + encode_json("size", size, f); + utime_t ut(mtime); + encode_json("mtime", ut, f); + encode_json("etag", etag, f); + encode_json("storage_class", storage_class, f); + encode_json("owner", owner, f); + encode_json("owner_display_name", owner_display_name, f); + encode_json("content_type", content_type, f); + encode_json("accounted_size", accounted_size, f); + encode_json("user_data", user_data, f); + encode_json("appendable", appendable, f); +} + +void rgw_bucket_dir_entry_meta::decode_json(JSONObj *obj) { + int val; + JSONDecoder::decode_json("category", val, obj); + category = static_cast<RGWObjCategory>(val); + JSONDecoder::decode_json("size", size, obj); + utime_t ut; + JSONDecoder::decode_json("mtime", ut, obj); + mtime = ut.to_real_time(); + JSONDecoder::decode_json("etag", etag, obj); + JSONDecoder::decode_json("storage_class", storage_class, obj); + JSONDecoder::decode_json("owner", owner, obj); + JSONDecoder::decode_json("owner_display_name", owner_display_name, obj); + JSONDecoder::decode_json("content_type", content_type, obj); + JSONDecoder::decode_json("accounted_size", accounted_size, obj); + JSONDecoder::decode_json("user_data", user_data, obj); + JSONDecoder::decode_json("appendable", appendable, obj); +} + +void rgw_bucket_dir_entry::generate_test_instances(list<rgw_bucket_dir_entry*>& o) +{ + list<rgw_bucket_dir_entry_meta *> l; + rgw_bucket_dir_entry_meta::generate_test_instances(l); + + for (auto iter = l.begin(); iter != l.end(); ++iter) { + rgw_bucket_dir_entry_meta *m = *iter; + rgw_bucket_dir_entry *e = new rgw_bucket_dir_entry; 
+ e->key.name = "name"; + e->ver.pool = 1; + e->ver.epoch = 1234; + e->locator = "locator"; + e->exists = true; + e->meta = *m; + e->tag = "tag"; + + o.push_back(e); + + delete m; + } + o.push_back(new rgw_bucket_dir_entry); +} + +void rgw_bucket_entry_ver::dump(Formatter *f) const +{ + encode_json("pool", pool, f); + encode_json("epoch", epoch, f); +} + +void rgw_bucket_entry_ver::decode_json(JSONObj *obj) { + JSONDecoder::decode_json("pool", pool, obj); + JSONDecoder::decode_json("epoch", epoch, obj); +} + +void rgw_bucket_entry_ver::generate_test_instances(list<rgw_bucket_entry_ver*>& ls) +{ + ls.push_back(new rgw_bucket_entry_ver); + ls.push_back(new rgw_bucket_entry_ver); + ls.back()->pool = 123; + ls.back()->epoch = 12322; +} + + +void rgw_bucket_dir_entry::dump(Formatter *f) const +{ + encode_json("name", key.name, f); + encode_json("instance", key.instance , f); + encode_json("ver", ver , f); + encode_json("locator", locator , f); + encode_json("exists", exists , f); + encode_json("meta", meta , f); + encode_json("tag", tag , f); + encode_json("flags", (int)flags , f); + encode_json("pending_map", pending_map, f); + encode_json("versioned_epoch", versioned_epoch , f); +} + +void rgw_bucket_dir_entry::decode_json(JSONObj *obj) { + JSONDecoder::decode_json("name", key.name, obj); + JSONDecoder::decode_json("instance", key.instance , obj); + JSONDecoder::decode_json("ver", ver , obj); + JSONDecoder::decode_json("locator", locator , obj); + JSONDecoder::decode_json("exists", exists , obj); + JSONDecoder::decode_json("meta", meta , obj); + JSONDecoder::decode_json("tag", tag , obj); + int val; + JSONDecoder::decode_json("flags", val , obj); + flags = (uint16_t)val; + JSONDecoder::decode_json("pending_map", pending_map, obj); + JSONDecoder::decode_json("versioned_epoch", versioned_epoch, obj); +} + +static void dump_bi_entry(bufferlist bl, BIIndexType index_type, Formatter *formatter) +{ + auto iter = bl.cbegin(); + switch (index_type) { + case BIIndexType::Plain: + case BIIndexType::Instance: + { + rgw_bucket_dir_entry entry; + decode(entry, iter); + encode_json("entry", entry, formatter); + } + break; + case BIIndexType::OLH: + { + rgw_bucket_olh_entry entry; + decode(entry, iter); + encode_json("entry", entry, formatter); + } + break; + default: + break; + } +} + +void rgw_cls_bi_entry::decode_json(JSONObj *obj, cls_rgw_obj_key *effective_key) { + JSONDecoder::decode_json("idx", idx, obj); + string s; + JSONDecoder::decode_json("type", s, obj); + if (s == "plain") { + type = BIIndexType::Plain; + } else if (s == "instance") { + type = BIIndexType::Instance; + } else if (s == "olh") { + type = BIIndexType::OLH; + } else { + type = BIIndexType::Invalid; + } + using ceph::encode; + switch (type) { + case BIIndexType::Plain: + case BIIndexType::Instance: + { + rgw_bucket_dir_entry entry; + JSONDecoder::decode_json("entry", entry, obj); + encode(entry, data); + + if (effective_key) { + *effective_key = entry.key; + } + } + break; + case BIIndexType::OLH: + { + rgw_bucket_olh_entry entry; + JSONDecoder::decode_json("entry", entry, obj); + encode(entry, data); + + if (effective_key) { + *effective_key = entry.key; + } + } + break; + default: + break; + } +} + +void rgw_cls_bi_entry::dump(Formatter *f) const +{ + string type_str; + switch (type) { + case BIIndexType::Plain: + type_str = "plain"; + break; + case BIIndexType::Instance: + type_str = "instance"; + break; + case BIIndexType::OLH: + type_str = "olh"; + break; + default: + type_str = "invalid"; + } + encode_json("type", type_str, 
f); + encode_json("idx", idx, f); + dump_bi_entry(data, type, f); +} + +bool rgw_cls_bi_entry::get_info(cls_rgw_obj_key *key, + RGWObjCategory *category, + rgw_bucket_category_stats *accounted_stats) +{ + using ceph::decode; + auto iter = data.cbegin(); + if (type == BIIndexType::OLH) { + rgw_bucket_olh_entry entry; + decode(entry, iter); + *key = entry.key; + return false; + } + + rgw_bucket_dir_entry entry; + decode(entry, iter); + *key = entry.key; + *category = entry.meta.category; + accounted_stats->num_entries++; + accounted_stats->total_size += entry.meta.accounted_size; + accounted_stats->total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size); + accounted_stats->actual_size += entry.meta.size; + if (type == BIIndexType::Plain) { + return entry.exists && entry.flags == 0; + } else if (type == BIIndexType::Instance) { + return entry.exists; + } + return false; +} + +void rgw_cls_bi_entry::generate_test_instances(list<rgw_cls_bi_entry*>& o) +{ + using ceph::encode; + rgw_cls_bi_entry *m = new rgw_cls_bi_entry; + rgw_bucket_olh_entry entry; + entry.delete_marker = true; + entry.epoch = 1234; + entry.tag = "tag"; + entry.key.name = "key.name"; + entry.key.instance = "key.instance"; + entry.exists = true; + entry.pending_removal = true; + m->type = BIIndexType::OLH; + m->idx = "idx"; + encode(entry,m->data); + o.push_back(m); + o.push_back(new rgw_cls_bi_entry); +} + +void rgw_bucket_olh_entry::dump(Formatter *f) const +{ + encode_json("key", key, f); + encode_json("delete_marker", delete_marker, f); + encode_json("epoch", epoch, f); + encode_json("pending_log", pending_log, f); + encode_json("tag", tag, f); + encode_json("exists", exists, f); + encode_json("pending_removal", pending_removal, f); +} + +void rgw_bucket_olh_entry::decode_json(JSONObj *obj) +{ + JSONDecoder::decode_json("key", key, obj); + JSONDecoder::decode_json("delete_marker", delete_marker, obj); + JSONDecoder::decode_json("epoch", epoch, obj); + JSONDecoder::decode_json("pending_log", pending_log, obj); + JSONDecoder::decode_json("tag", tag, obj); + JSONDecoder::decode_json("exists", exists, obj); + JSONDecoder::decode_json("pending_removal", pending_removal, obj); +} + +void rgw_bucket_olh_entry::generate_test_instances(list<rgw_bucket_olh_entry*>& o) +{ + rgw_bucket_olh_entry *entry = new rgw_bucket_olh_entry; + entry->delete_marker = true; + entry->epoch = 1234; + entry->tag = "tag"; + entry->key.name = "key.name"; + entry->key.instance = "key.instance"; + entry->exists = true; + entry->pending_removal = true; + o.push_back(entry); + o.push_back(new rgw_bucket_olh_entry); +} + +void rgw_bucket_olh_log_entry::generate_test_instances(list<rgw_bucket_olh_log_entry*>& o) +{ + rgw_bucket_olh_log_entry *entry = new rgw_bucket_olh_log_entry; + entry->epoch = 1234; + entry->op = CLS_RGW_OLH_OP_LINK_OLH; + entry->op_tag = "op_tag"; + entry->key.name = "key.name"; + entry->key.instance = "key.instance"; + entry->delete_marker = true; + o.push_back(entry); + o.push_back(new rgw_bucket_olh_log_entry); +} + +void rgw_bucket_olh_log_entry::dump(Formatter *f) const +{ + encode_json("epoch", epoch, f); + const char *op_str; + switch (op) { + case CLS_RGW_OLH_OP_LINK_OLH: + op_str = "link_olh"; + break; + case CLS_RGW_OLH_OP_UNLINK_OLH: + op_str = "unlink_olh"; + break; + case CLS_RGW_OLH_OP_REMOVE_INSTANCE: + op_str = "remove_instance"; + break; + default: + op_str = "unknown"; + } + encode_json("op", op_str, f); + encode_json("op_tag", op_tag, f); + encode_json("key", key, f); + encode_json("delete_marker", 
delete_marker, f); +} + +void rgw_bucket_olh_log_entry::decode_json(JSONObj *obj) +{ + JSONDecoder::decode_json("epoch", epoch, obj); + string op_str; + JSONDecoder::decode_json("op", op_str, obj); + if (op_str == "link_olh") { + op = CLS_RGW_OLH_OP_LINK_OLH; + } else if (op_str == "unlink_olh") { + op = CLS_RGW_OLH_OP_UNLINK_OLH; + } else if (op_str == "remove_instance") { + op = CLS_RGW_OLH_OP_REMOVE_INSTANCE; + } else { + op = CLS_RGW_OLH_OP_UNKNOWN; + } + JSONDecoder::decode_json("op_tag", op_tag, obj); + JSONDecoder::decode_json("key", key, obj); + JSONDecoder::decode_json("delete_marker", delete_marker, obj); +} +void rgw_bi_log_entry::decode_json(JSONObj *obj) +{ + JSONDecoder::decode_json("op_id", id, obj); + JSONDecoder::decode_json("op_tag", tag, obj); + string op_str; + JSONDecoder::decode_json("op", op_str, obj); + op = parse_modify_op(op_str); + JSONDecoder::decode_json("object", object, obj); + JSONDecoder::decode_json("instance", instance, obj); + string state_str; + JSONDecoder::decode_json("state", state_str, obj); + if (state_str == "pending") { + state = CLS_RGW_STATE_PENDING_MODIFY; + } else if (state_str == "complete") { + state = CLS_RGW_STATE_COMPLETE; + } else { + state = CLS_RGW_STATE_UNKNOWN; + } + JSONDecoder::decode_json("index_ver", index_ver, obj); + utime_t ut; + JSONDecoder::decode_json("timestamp", ut, obj); + timestamp = ut.to_real_time(); + uint32_t f; + JSONDecoder::decode_json("bilog_flags", f, obj); + JSONDecoder::decode_json("ver", ver, obj); + bilog_flags = (uint16_t)f; + JSONDecoder::decode_json("owner", owner, obj); + JSONDecoder::decode_json("owner_display_name", owner_display_name, obj); + JSONDecoder::decode_json("zones_trace", zones_trace, obj); +} + +void rgw_bi_log_entry::dump(Formatter *f) const +{ + f->dump_string("op_id", id); + f->dump_string("op_tag", tag); + f->dump_string("op", to_string(op)); + f->dump_string("object", object); + f->dump_string("instance", instance); + + switch (state) { + case CLS_RGW_STATE_PENDING_MODIFY: + f->dump_string("state", "pending"); + break; + case CLS_RGW_STATE_COMPLETE: + f->dump_string("state", "complete"); + break; + default: + f->dump_string("state", "invalid"); + break; + } + + f->dump_int("index_ver", index_ver); + utime_t ut(timestamp); + ut.gmtime_nsec(f->dump_stream("timestamp")); + f->open_object_section("ver"); + ver.dump(f); + f->close_section(); + f->dump_int("bilog_flags", bilog_flags); + f->dump_bool("versioned", (bilog_flags & RGW_BILOG_FLAG_VERSIONED_OP) != 0); + f->dump_string("owner", owner); + f->dump_string("owner_display_name", owner_display_name); + encode_json("zones_trace", zones_trace, f); +} + +void rgw_bi_log_entry::generate_test_instances(list<rgw_bi_log_entry*>& ls) +{ + ls.push_back(new rgw_bi_log_entry); + ls.push_back(new rgw_bi_log_entry); + ls.back()->id = "midf"; + ls.back()->object = "obj"; + ls.back()->timestamp = ceph::real_clock::from_ceph_timespec({ceph_le32(2), ceph_le32(3)}); + ls.back()->index_ver = 4323; + ls.back()->tag = "tagasdfds"; + ls.back()->op = CLS_RGW_OP_DEL; + ls.back()->state = CLS_RGW_STATE_PENDING_MODIFY; +} + +void rgw_bucket_category_stats::generate_test_instances(list<rgw_bucket_category_stats*>& o) +{ + rgw_bucket_category_stats *s = new rgw_bucket_category_stats; + s->total_size = 1024; + s->total_size_rounded = 4096; + s->num_entries = 2; + s->actual_size = 1024; + o.push_back(s); + o.push_back(new rgw_bucket_category_stats); +} + +void rgw_bucket_category_stats::dump(Formatter *f) const +{ + f->dump_unsigned("total_size", total_size); + 
f->dump_unsigned("total_size_rounded", total_size_rounded); + f->dump_unsigned("num_entries", num_entries); + f->dump_unsigned("actual_size", actual_size); +} + +void rgw_bucket_dir_header::generate_test_instances(list<rgw_bucket_dir_header*>& o) +{ + list<rgw_bucket_category_stats *> l; + rgw_bucket_category_stats::generate_test_instances(l); + + uint8_t i = 0; + for (auto iter = l.begin(); iter != l.end(); ++iter, ++i) { + RGWObjCategory c = static_cast<RGWObjCategory>(i); + rgw_bucket_dir_header *h = new rgw_bucket_dir_header; + rgw_bucket_category_stats *s = *iter; + h->stats[c] = *s; + + o.push_back(h); + + delete s; + } + + o.push_back(new rgw_bucket_dir_header); +} + +void rgw_bucket_dir_header::dump(Formatter *f) const +{ + f->dump_int("ver", ver); + f->dump_int("master_ver", master_ver); + f->open_array_section("stats"); + for (auto iter = stats.begin(); iter != stats.end(); ++iter) { + f->dump_int("category", int(iter->first)); + f->open_object_section("category_stats"); + iter->second.dump(f); + f->close_section(); + } + f->close_section(); + ::encode_json("new_instance", new_instance, f); +} + +void rgw_bucket_dir::generate_test_instances(list<rgw_bucket_dir*>& o) +{ + list<rgw_bucket_dir_header *> l; + rgw_bucket_dir_header::generate_test_instances(l); + + uint8_t i = 0; + for (auto iter = l.begin(); iter != l.end(); ++iter, ++i) { + rgw_bucket_dir *d = new rgw_bucket_dir; + rgw_bucket_dir_header *h = *iter; + d->header = *h; + + list<rgw_bucket_dir_entry *> el; + for (auto eiter = el.begin(); eiter != el.end(); ++eiter) { + rgw_bucket_dir_entry *e = *eiter; + d->m[e->key.name] = *e; + + delete e; + } + + o.push_back(d); + + delete h; + } + + o.push_back(new rgw_bucket_dir); +} + +void rgw_bucket_dir::dump(Formatter *f) const +{ + f->open_object_section("header"); + header.dump(f); + f->close_section(); + auto iter = m.cbegin(); + f->open_array_section("map"); + for (; iter != m.cend(); ++iter) { + f->dump_string("key", iter->first); + f->open_object_section("dir_entry"); + iter->second.dump(f); + f->close_section(); + } + f->close_section(); +} + +void rgw_usage_data::generate_test_instances(list<rgw_usage_data*>& o) +{ + rgw_usage_data *s = new rgw_usage_data; + s->bytes_sent = 1024; + s->bytes_received = 1024; + s->ops = 2; + s->successful_ops = 1; + o.push_back(s); + o.push_back(new rgw_usage_data); +} + +void rgw_usage_data::dump(Formatter *f) const +{ + f->dump_int("bytes_sent", bytes_sent); + f->dump_int("bytes_received", bytes_received); + f->dump_int("ops", ops); + f->dump_int("successful_ops", successful_ops); +} + +void rgw_usage_log_info::generate_test_instances(list<rgw_usage_log_info*>& o) +{ + rgw_usage_log_info *s = new rgw_usage_log_info; + std::string owner = "owner"; + std::string payer = "payer"; + std::string bucket = "bucket"; + + rgw_usage_log_entry r(owner, payer, bucket); + s->entries.push_back(r); + o.push_back(s); + o.push_back(new rgw_usage_log_info); +} + +void rgw_usage_log_info::dump(Formatter *f) const +{ + encode_json("entries", entries, f); +} + +void rgw_user_bucket::generate_test_instances(list<rgw_user_bucket*>& o) +{ + rgw_user_bucket *s = new rgw_user_bucket; + s->user = "user"; + s->bucket = "bucket"; + o.push_back(s); + o.push_back(new rgw_user_bucket); +} + +void rgw_user_bucket::dump(Formatter *f) const +{ + f->dump_string("user", user); + f->dump_string("bucket", bucket); +} + +void rgw_usage_log_entry::dump(Formatter *f) const +{ + f->dump_string("owner", owner.to_str()); + f->dump_string("payer", payer.to_str()); + 
f->dump_string("bucket", bucket); + f->dump_unsigned("epoch", epoch); + + f->open_object_section("total_usage"); + f->dump_unsigned("bytes_sent", total_usage.bytes_sent); + f->dump_unsigned("bytes_received", total_usage.bytes_received); + f->dump_unsigned("ops", total_usage.ops); + f->dump_unsigned("successful_ops", total_usage.successful_ops); + f->close_section(); + + f->open_array_section("categories"); + if (usage_map.size() > 0) { + for (auto it = usage_map.begin(); it != usage_map.end(); it++) { + const rgw_usage_data& total_usage = it->second; + f->open_object_section("entry"); + f->dump_string("category", it->first.c_str()); + f->dump_unsigned("bytes_sent", total_usage.bytes_sent); + f->dump_unsigned("bytes_received", total_usage.bytes_received); + f->dump_unsigned("ops", total_usage.ops); + f->dump_unsigned("successful_ops", total_usage.successful_ops); + f->close_section(); + } + } + f->close_section(); +} + +void rgw_usage_log_entry::generate_test_instances(list<rgw_usage_log_entry *> &o) +{ + rgw_usage_log_entry *entry = new rgw_usage_log_entry; + rgw_usage_data usage_data{1024, 2048}; + entry->owner = rgw_user("owner"); + entry->payer = rgw_user("payer"); + entry->bucket = "bucket"; + entry->epoch = 1234; + entry->total_usage.bytes_sent = usage_data.bytes_sent; + entry->total_usage.bytes_received = usage_data.bytes_received; + entry->total_usage.ops = usage_data.ops; + entry->total_usage.successful_ops = usage_data.successful_ops; + entry->usage_map["get_obj"] = usage_data; + o.push_back(entry); + o.push_back(new rgw_usage_log_entry); +} + +void cls_rgw_reshard_entry::generate_key(const string& tenant, const string& bucket_name, string *key) +{ + *key = tenant + ":" + bucket_name; +} + +void cls_rgw_reshard_entry::get_key(string *key) const +{ + generate_key(tenant, bucket_name, key); +} + +void cls_rgw_reshard_entry::dump(Formatter *f) const +{ + utime_t ut(time); + encode_json("time",ut, f); + encode_json("tenant", tenant, f); + encode_json("bucket_name", bucket_name, f); + encode_json("bucket_id", bucket_id, f); + encode_json("old_num_shards", old_num_shards, f); + encode_json("tentative_new_num_shards", new_num_shards, f); +} + +void cls_rgw_reshard_entry::generate_test_instances(list<cls_rgw_reshard_entry*>& ls) +{ + ls.push_back(new cls_rgw_reshard_entry); + ls.push_back(new cls_rgw_reshard_entry); + ls.back()->time = ceph::real_clock::from_ceph_timespec({ceph_le32(2), ceph_le32(3)}); + ls.back()->tenant = "tenant"; + ls.back()->bucket_name = "bucket1"""; + ls.back()->bucket_id = "bucket_id"; + ls.back()->old_num_shards = 8; + ls.back()->new_num_shards = 64; +} + +void cls_rgw_bucket_instance_entry::dump(Formatter *f) const +{ + encode_json("reshard_status", to_string(reshard_status), f); +} + +void cls_rgw_bucket_instance_entry::generate_test_instances( +list<cls_rgw_bucket_instance_entry*>& ls) +{ + ls.push_back(new cls_rgw_bucket_instance_entry); + ls.push_back(new cls_rgw_bucket_instance_entry); + ls.back()->reshard_status = RESHARD_STATUS::IN_PROGRESS; +} + +void cls_rgw_lc_entry::dump(Formatter *f) const +{ + encode_json("bucket", bucket, f); + encode_json("start_time", start_time, f); + encode_json("status", status, f); +} + +void cls_rgw_lc_entry::generate_test_instances(list<cls_rgw_lc_entry*>& o) +{ + cls_rgw_lc_entry *s = new cls_rgw_lc_entry; + s->bucket = "bucket"; + s->start_time = 10; + s->status = 1; + o.push_back(s); + o.push_back(new cls_rgw_lc_entry); +} + +void cls_rgw_lc_obj_head::dump(Formatter *f) const +{ + encode_json("start_date", start_date, 
f); + encode_json("marker", marker, f); +} + +void cls_rgw_lc_obj_head::generate_test_instances(list<cls_rgw_lc_obj_head*>& ls) +{ +} + +std::ostream& operator<<(std::ostream& out, cls_rgw_reshard_status status) { + switch (status) { + case cls_rgw_reshard_status::NOT_RESHARDING: + out << "NOT_RESHARDING"; + break; + case cls_rgw_reshard_status::IN_PROGRESS: + out << "IN_PROGRESS"; + break; + case cls_rgw_reshard_status::DONE: + out << "DONE"; + break; + default: + out << "UNKNOWN_STATUS"; + } + + return out; +} diff --git a/src/cls/rgw/cls_rgw_types.h b/src/cls/rgw/cls_rgw_types.h new file mode 100644 index 000000000..f94bf114f --- /dev/null +++ b/src/cls/rgw/cls_rgw_types.h @@ -0,0 +1,1329 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#pragma once + +#include <string> +#include <list> +#include <boost/container/flat_map.hpp> +#include "common/ceph_time.h" +#include "common/Formatter.h" + +#include <fmt/format.h> + +#include "rgw/rgw_basic_types.h" + +#define CEPH_RGW_REMOVE 'r' // value 114 +#define CEPH_RGW_UPDATE 'u' // value 117 +#define CEPH_RGW_DIR_SUGGEST_LOG_OP 0x80 +#define CEPH_RGW_DIR_SUGGEST_OP_MASK 0x7f + +constexpr uint64_t CEPH_RGW_DEFAULT_TAG_TIMEOUT = 120; // in seconds + +class JSONObj; + +using ceph::operator <<; + +struct rgw_zone_set_entry { + std::string zone; + std::optional<std::string> location_key; + + bool operator<(const rgw_zone_set_entry& e) const { + if (zone < e.zone) { + return true; + } + if (zone > e.zone) { + return false; + } + return (location_key < e.location_key); + } + + bool operator==(const rgw_zone_set_entry& e) const { + return zone == e.zone && location_key == e.location_key; + } + + rgw_zone_set_entry() {} + rgw_zone_set_entry(const std::string& _zone, + std::optional<std::string> _location_key) : zone(_zone), + location_key(_location_key) {} + rgw_zone_set_entry(const std::string& s) { + from_str(s); + } + + void from_str(const std::string& s); + std::string to_str() const; + + void encode(ceph::buffer::list &bl) const; + void decode(ceph::buffer::list::const_iterator &bl); + + void dump(ceph::Formatter *f) const; + void decode_json(JSONObj *obj); +}; +WRITE_CLASS_ENCODER(rgw_zone_set_entry) + +struct rgw_zone_set { + std::set<rgw_zone_set_entry> entries; + + void encode(ceph::buffer::list &bl) const { + /* no ENCODE_START, ENCODE_END for backward compatibility */ + ceph::encode(entries, bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + /* no DECODE_START, DECODE_END for backward compatibility */ + ceph::decode(entries, bl); + } + + void insert(const std::string& zone, std::optional<std::string> location_key); + bool exists(const std::string& zone, std::optional<std::string> location_key) const; +}; +WRITE_CLASS_ENCODER(rgw_zone_set) + +/* backward compatibility, rgw_zone_set needs to encode/decode the same as std::set */ +void encode_json(const char *name, const rgw_zone_set& zs, ceph::Formatter *f); +void decode_json_obj(rgw_zone_set& zs, JSONObj *obj); + + +enum RGWPendingState { + CLS_RGW_STATE_PENDING_MODIFY = 0, + CLS_RGW_STATE_COMPLETE = 1, + CLS_RGW_STATE_UNKNOWN = 2, +}; + +enum RGWModifyOp { + CLS_RGW_OP_ADD = 0, + CLS_RGW_OP_DEL = 1, + CLS_RGW_OP_CANCEL = 2, + CLS_RGW_OP_UNKNOWN = 3, + CLS_RGW_OP_LINK_OLH = 4, + CLS_RGW_OP_LINK_OLH_DM = 5, /* creation of delete marker */ + CLS_RGW_OP_UNLINK_INSTANCE = 6, + CLS_RGW_OP_SYNCSTOP = 7, + CLS_RGW_OP_RESYNC = 8, +}; + +std::string_view to_string(RGWModifyOp op); +RGWModifyOp parse_modify_op(std::string_view 
name);
+
+inline std::ostream& operator<<(std::ostream& out, RGWModifyOp op) {
+  return out << to_string(op);
+}
+
+enum RGWBILogFlags {
+  RGW_BILOG_FLAG_VERSIONED_OP = 0x1,
+};
+
+enum RGWCheckMTimeType {
+  CLS_RGW_CHECK_TIME_MTIME_EQ = 0,
+  CLS_RGW_CHECK_TIME_MTIME_LT = 1,
+  CLS_RGW_CHECK_TIME_MTIME_LE = 2,
+  CLS_RGW_CHECK_TIME_MTIME_GT = 3,
+  CLS_RGW_CHECK_TIME_MTIME_GE = 4,
+};
+
+#define ROUND_BLOCK_SIZE 4096
+
+inline uint64_t cls_rgw_get_rounded_size(uint64_t size) {
+  return (size + ROUND_BLOCK_SIZE - 1) & ~(ROUND_BLOCK_SIZE - 1);
+}
+
+/*
+ * This takes a std::string that either wholly contains a delimiter or is a
+ * path that ends with a delimiter and appends a new character to the
+ * end such that when we request bucket-index entries *after* this,
+ * we'll get the next object after the "subdirectory". This works
+ * because we append a '\xFF' character, and no valid UTF-8 character
+ * can contain that byte, so no valid entries can be skipped.
+ */
+inline std::string cls_rgw_after_delim(const std::string& path) {
+  // assert: ! path.empty()
+  return path + '\xFF';
+}
+
+struct rgw_bucket_pending_info {
+  RGWPendingState state;
+  ceph::real_time timestamp;
+  uint8_t op;
+
+  rgw_bucket_pending_info() : state(CLS_RGW_STATE_PENDING_MODIFY), op(0) {}
+
+  void encode(ceph::buffer::list &bl) const {
+    ENCODE_START(2, 2, bl);
+    uint8_t s = (uint8_t)state;
+    encode(s, bl);
+    encode(timestamp, bl);
+    encode(op, bl);
+    ENCODE_FINISH(bl);
+  }
+  void decode(ceph::buffer::list::const_iterator &bl) {
+    DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
+    uint8_t s;
+    decode(s, bl);
+    state = (RGWPendingState)s;
+    decode(timestamp, bl);
+    decode(op, bl);
+    DECODE_FINISH(bl);
+  }
+  void dump(ceph::Formatter *f) const;
+  void decode_json(JSONObj *obj);
+  static void generate_test_instances(std::list<rgw_bucket_pending_info*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_pending_info)
+
+
+// categories of objects stored in a bucket index (b-i) and used to
+// differentiate their associated statistics (bucket stats, and in
+// some cases user stats)
+enum class RGWObjCategory : uint8_t {
+  None = 0,  // b-i entries for delete markers; also used in
+             // testing and for default values in default
+             // constructors
+
+  Main = 1,  // b-i entries for standard objs
+
+  Shadow = 2,  // presumably intended for multipart shadow
+               // uploads; not currently used in the codebase
+
+  MultiMeta = 3,  // b-i entries for multipart upload metadata objs
+
+  CloudTiered = 4,  // b-i entries which are tiered to external cloud
+};
+
+std::string_view to_string(RGWObjCategory c);
+
+inline std::ostream& operator<<(std::ostream& out, RGWObjCategory c) {
+  return out << to_string(c);
+}
+
+struct rgw_bucket_dir_entry_meta {
+  RGWObjCategory category;
+  uint64_t size;
+  ceph::real_time mtime;
+  std::string etag;
+  std::string owner;
+  std::string owner_display_name;
+  std::string content_type;
+  uint64_t accounted_size;
+  std::string user_data;
+  std::string storage_class;
+  bool appendable;
+
+  rgw_bucket_dir_entry_meta() :
+    category(RGWObjCategory::None), size(0), accounted_size(0), appendable(false) { }
+
+  void encode(ceph::buffer::list &bl) const {
+    ENCODE_START(7, 3, bl);
+    encode(category, bl);
+    encode(size, bl);
+    encode(mtime, bl);
+    encode(etag, bl);
+    encode(owner, bl);
+    encode(owner_display_name, bl);
+    encode(content_type, bl);
+    encode(accounted_size, bl);
+    encode(user_data, bl);
+    encode(storage_class, bl);
+    encode(appendable, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator &bl)
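+  // Decoding is tolerant of entries written by older code: each field is
+  // gated on the on-wire struct_v, and absent fields fall back to defaults
+  // (e.g. pre-v4 encodings carry no accounted_size, so it is set to size).
+  // Minimal sketch (old_bl stands in for a hypothetical pre-v4 encoding):
+  //   rgw_bucket_dir_entry_meta m;
+  //   auto p = old_bl.cbegin();
+  //   decode(m, p);            // afterwards m.accounted_size == m.size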
{ + DECODE_START_LEGACY_COMPAT_LEN(6, 3, 3, bl); + decode(category, bl); + decode(size, bl); + decode(mtime, bl); + decode(etag, bl); + decode(owner, bl); + decode(owner_display_name, bl); + if (struct_v >= 2) + decode(content_type, bl); + if (struct_v >= 4) + decode(accounted_size, bl); + else + accounted_size = size; + if (struct_v >= 5) + decode(user_data, bl); + if (struct_v >= 6) + decode(storage_class, bl); + if (struct_v >= 7) + decode(appendable, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + void decode_json(JSONObj *obj); + static void generate_test_instances(std::list<rgw_bucket_dir_entry_meta*>& o); +}; +WRITE_CLASS_ENCODER(rgw_bucket_dir_entry_meta) + +template<class T> +void encode_packed_val(T val, ceph::buffer::list& bl) +{ + using ceph::encode; + if ((uint64_t)val < 0x80) { + encode((uint8_t)val, bl); + } else { + unsigned char c = 0x80; + + if ((uint64_t)val < 0x100) { + c |= 1; + encode(c, bl); + encode((uint8_t)val, bl); + } else if ((uint64_t)val <= 0x10000) { + c |= 2; + encode(c, bl); + encode((uint16_t)val, bl); + } else if ((uint64_t)val <= 0x1000000) { + c |= 4; + encode(c, bl); + encode((uint32_t)val, bl); + } else { + c |= 8; + encode(c, bl); + encode((uint64_t)val, bl); + } + } +} + +template<class T> +void decode_packed_val(T& val, ceph::buffer::list::const_iterator& bl) +{ + using ceph::decode; + unsigned char c; + decode(c, bl); + if (c < 0x80) { + val = c; + return; + } + + c &= ~0x80; + + switch (c) { + case 1: + { + uint8_t v; + decode(v, bl); + val = v; + } + break; + case 2: + { + uint16_t v; + decode(v, bl); + val = v; + } + break; + case 4: + { + uint32_t v; + decode(v, bl); + val = v; + } + break; + case 8: + { + uint64_t v; + decode(v, bl); + val = v; + } + break; + default: + throw ceph::buffer::malformed_input(); + } +} + +struct rgw_bucket_entry_ver { + int64_t pool; + uint64_t epoch; + + rgw_bucket_entry_ver() : pool(-1), epoch(0) {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode_packed_val(pool, bl); + encode_packed_val(epoch, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode_packed_val(pool, bl); + decode_packed_val(epoch, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + void decode_json(JSONObj *obj); + static void generate_test_instances(std::list<rgw_bucket_entry_ver*>& o); +}; +WRITE_CLASS_ENCODER(rgw_bucket_entry_ver) + +typedef rgw_obj_index_key cls_rgw_obj_key; + +inline std::ostream& operator<<(std::ostream& out, const cls_rgw_obj_key& o) { + out << o.name; + if (!o.instance.empty()) { + out << '[' << o.instance << ']'; + } + return out; +} + +struct rgw_bucket_dir_entry { + /* a versioned object instance */ + static constexpr uint16_t FLAG_VER = 0x1; + /* the last object instance of a versioned object */ + static constexpr uint16_t FLAG_CURRENT = 0x2; + /* delete marker */ + static constexpr uint16_t FLAG_DELETE_MARKER = 0x4; + /* object is versioned, a placeholder for the plain entry */ + static constexpr uint16_t FLAG_VER_MARKER = 0x8; + /* object is a proxy; it is not listed in the bucket index but is a + * prefix ending with a delimiter, perhaps common to multiple + * entries; it is only useful when a delimiter is used and + * represents a "subdirectory" (again, ending in a delimiter) that + * may contain one or more actual entries/objects */ + static constexpr uint16_t FLAG_COMMON_PREFIX = 0x8000; + + cls_rgw_obj_key key; + rgw_bucket_entry_ver ver; + std::string locator; + bool 
exists; + rgw_bucket_dir_entry_meta meta; + std::multimap<std::string, rgw_bucket_pending_info> pending_map; + uint64_t index_ver; + std::string tag; + uint16_t flags; + uint64_t versioned_epoch; + + rgw_bucket_dir_entry() : + exists(false), index_ver(0), flags(0), versioned_epoch(0) {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(8, 3, bl); + encode(key.name, bl); + encode(ver.epoch, bl); + encode(exists, bl); + encode(meta, bl); + encode(pending_map, bl); + encode(locator, bl); + encode(ver, bl); + encode_packed_val(index_ver, bl); + encode(tag, bl); + encode(key.instance, bl); + encode(flags, bl); + encode(versioned_epoch, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(8, 3, 3, bl); + decode(key.name, bl); + decode(ver.epoch, bl); + decode(exists, bl); + decode(meta, bl); + decode(pending_map, bl); + if (struct_v >= 2) { + decode(locator, bl); + } + if (struct_v >= 4) { + decode(ver, bl); + } else { + ver.pool = -1; + } + if (struct_v >= 5) { + decode_packed_val(index_ver, bl); + decode(tag, bl); + } + if (struct_v >= 6) { + decode(key.instance, bl); + } + if (struct_v >= 7) { + decode(flags, bl); + } + if (struct_v >= 8) { + decode(versioned_epoch, bl); + } + DECODE_FINISH(bl); + } + + bool is_current() const { + int test_flags = + rgw_bucket_dir_entry::FLAG_VER | rgw_bucket_dir_entry::FLAG_CURRENT; + return (flags & rgw_bucket_dir_entry::FLAG_VER) == 0 || + (flags & test_flags) == test_flags; + } + bool is_delete_marker() const { + return (flags & rgw_bucket_dir_entry::FLAG_DELETE_MARKER) != 0; + } + bool is_visible() const { + return is_current() && !is_delete_marker(); + } + bool is_valid() const { + return (flags & rgw_bucket_dir_entry::FLAG_VER_MARKER) == 0; + } + bool is_common_prefix() const { + return flags & rgw_bucket_dir_entry::FLAG_COMMON_PREFIX; + } + + void dump(ceph::Formatter *f) const; + void decode_json(JSONObj *obj); + static void generate_test_instances(std::list<rgw_bucket_dir_entry*>& o); +}; +WRITE_CLASS_ENCODER(rgw_bucket_dir_entry) + +enum class BIIndexType : uint8_t { + Invalid = 0, + Plain = 1, + Instance = 2, + OLH = 3, +}; + +struct rgw_bucket_category_stats; + +struct rgw_cls_bi_entry { + BIIndexType type; + std::string idx; + ceph::buffer::list data; + + rgw_cls_bi_entry() : type(BIIndexType::Invalid) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(type, bl); + encode(idx, bl); + encode(data, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + uint8_t c; + decode(c, bl); + type = (BIIndexType)c; + decode(idx, bl); + decode(data, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + void decode_json(JSONObj *obj, cls_rgw_obj_key *effective_key = NULL); + static void generate_test_instances(std::list<rgw_cls_bi_entry*>& o); + bool get_info(cls_rgw_obj_key *key, RGWObjCategory *category, + rgw_bucket_category_stats *accounted_stats); +}; +WRITE_CLASS_ENCODER(rgw_cls_bi_entry) + +enum OLHLogOp { + CLS_RGW_OLH_OP_UNKNOWN = 0, + CLS_RGW_OLH_OP_LINK_OLH = 1, + CLS_RGW_OLH_OP_UNLINK_OLH = 2, /* object does not exist */ + CLS_RGW_OLH_OP_REMOVE_INSTANCE = 3, +}; + +struct rgw_bucket_olh_log_entry { + uint64_t epoch; + OLHLogOp op; + std::string op_tag; + cls_rgw_obj_key key; + bool delete_marker; + + rgw_bucket_olh_log_entry() : epoch(0), op(CLS_RGW_OLH_OP_UNKNOWN), delete_marker(false) {} + + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, 
bl); + encode(epoch, bl); + encode((__u8)op, bl); + encode(op_tag, bl); + encode(key, bl); + encode(delete_marker, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(epoch, bl); + uint8_t c; + decode(c, bl); + op = (OLHLogOp)c; + decode(op_tag, bl); + decode(key, bl); + decode(delete_marker, bl); + DECODE_FINISH(bl); + } + static void generate_test_instances(std::list<rgw_bucket_olh_log_entry*>& o); + void dump(ceph::Formatter *f) const; + void decode_json(JSONObj *obj); +}; +WRITE_CLASS_ENCODER(rgw_bucket_olh_log_entry) + +struct rgw_bucket_olh_entry { + cls_rgw_obj_key key; + bool delete_marker; + uint64_t epoch; + std::map<uint64_t, std::vector<struct rgw_bucket_olh_log_entry> > pending_log; + std::string tag; + bool exists; + bool pending_removal; + + rgw_bucket_olh_entry() : delete_marker(false), epoch(0), exists(false), pending_removal(false) {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(1, 1, bl); + encode(key, bl); + encode(delete_marker, bl); + encode(epoch, bl); + encode(pending_log, bl); + encode(tag, bl); + encode(exists, bl); + encode(pending_removal, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(1, bl); + decode(key, bl); + decode(delete_marker, bl); + decode(epoch, bl); + decode(pending_log, bl); + decode(tag, bl); + decode(exists, bl); + decode(pending_removal, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + void decode_json(JSONObj *obj); + static void generate_test_instances(std::list<rgw_bucket_olh_entry*>& o); +}; +WRITE_CLASS_ENCODER(rgw_bucket_olh_entry) + +struct rgw_bi_log_entry { + std::string id; + std::string object; + std::string instance; + ceph::real_time timestamp; + rgw_bucket_entry_ver ver; + RGWModifyOp op; + RGWPendingState state; + uint64_t index_ver; + std::string tag; + uint16_t bilog_flags; + std::string owner; /* only being set if it's a delete marker */ + std::string owner_display_name; /* only being set if it's a delete marker */ + rgw_zone_set zones_trace; + + rgw_bi_log_entry() : op(CLS_RGW_OP_UNKNOWN), state(CLS_RGW_STATE_PENDING_MODIFY), index_ver(0), bilog_flags(0) {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(4, 1, bl); + encode(id, bl); + encode(object, bl); + encode(timestamp, bl); + encode(ver, bl); + encode(tag, bl); + uint8_t c = (uint8_t)op; + encode(c, bl); + c = (uint8_t)state; + encode(c, bl); + encode_packed_val(index_ver, bl); + encode(instance, bl); + encode(bilog_flags, bl); + encode(owner, bl); + encode(owner_display_name, bl); + encode(zones_trace, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START(4, bl); + decode(id, bl); + decode(object, bl); + decode(timestamp, bl); + decode(ver, bl); + decode(tag, bl); + uint8_t c; + decode(c, bl); + op = (RGWModifyOp)c; + decode(c, bl); + state = (RGWPendingState)c; + decode_packed_val(index_ver, bl); + if (struct_v >= 2) { + decode(instance, bl); + decode(bilog_flags, bl); + } + if (struct_v >= 3) { + decode(owner, bl); + decode(owner_display_name, bl); + } + if (struct_v >= 4) { + decode(zones_trace, bl); + } + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + void decode_json(JSONObj *obj); + static void generate_test_instances(std::list<rgw_bi_log_entry*>& o); + + bool is_versioned() { + return ((bilog_flags & RGW_BILOG_FLAG_VERSIONED_OP) != 0); + } +}; +WRITE_CLASS_ENCODER(rgw_bi_log_entry) + +struct rgw_bucket_category_stats { + uint64_t 
total_size; + uint64_t total_size_rounded; + uint64_t num_entries; + uint64_t actual_size{0}; //< account for compression, encryption + + rgw_bucket_category_stats() : total_size(0), total_size_rounded(0), num_entries(0) {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(3, 2, bl); + encode(total_size, bl); + encode(total_size_rounded, bl); + encode(num_entries, bl); + encode(actual_size, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(3, 2, 2, bl); + decode(total_size, bl); + decode(total_size_rounded, bl); + decode(num_entries, bl); + if (struct_v >= 3) { + decode(actual_size, bl); + } else { + actual_size = total_size; + } + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<rgw_bucket_category_stats*>& o); +}; +WRITE_CLASS_ENCODER(rgw_bucket_category_stats) + +inline bool operator==(const rgw_bucket_category_stats& lhs, + const rgw_bucket_category_stats& rhs) { + return lhs.total_size == rhs.total_size + && lhs.total_size_rounded == rhs.total_size_rounded + && lhs.num_entries == rhs.num_entries + && lhs.actual_size == rhs.actual_size; +} +inline bool operator!=(const rgw_bucket_category_stats& lhs, + const rgw_bucket_category_stats& rhs) { + return !(lhs == rhs); +} + +enum class cls_rgw_reshard_status : uint8_t { + NOT_RESHARDING = 0, + IN_PROGRESS = 1, + DONE = 2 +}; +std::ostream& operator<<(std::ostream&, cls_rgw_reshard_status); + +inline std::string to_string(const cls_rgw_reshard_status status) +{ + switch (status) { + case cls_rgw_reshard_status::NOT_RESHARDING: + return "not-resharding"; + case cls_rgw_reshard_status::IN_PROGRESS: + return "in-progress"; + case cls_rgw_reshard_status::DONE: + return "done"; + }; + return "Unknown reshard status"; +} + +struct cls_rgw_bucket_instance_entry { + using RESHARD_STATUS = cls_rgw_reshard_status; + + cls_rgw_reshard_status reshard_status{RESHARD_STATUS::NOT_RESHARDING}; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(3, 1, bl); + encode((uint8_t)reshard_status, bl); + { // fields removed in v2 but added back as empty in v3 + std::string bucket_instance_id; + encode(bucket_instance_id, bl); + int32_t num_shards{-1}; + encode(num_shards, bl); + } + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(3, bl); + uint8_t s; + decode(s, bl); + reshard_status = (cls_rgw_reshard_status)s; + if (struct_v != 2) { // fields removed from v2, added back in v3 + std::string bucket_instance_id; + decode(bucket_instance_id, bl); + int32_t num_shards{-1}; + decode(num_shards, bl); + } + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_rgw_bucket_instance_entry*>& o); + + void clear() { + reshard_status = RESHARD_STATUS::NOT_RESHARDING; + } + + void set_status(cls_rgw_reshard_status s) { + reshard_status = s; + } + + bool resharding() const { + return reshard_status != RESHARD_STATUS::NOT_RESHARDING; + } + + bool resharding_in_progress() const { + return reshard_status == RESHARD_STATUS::IN_PROGRESS; + } + + friend std::ostream& operator<<(std::ostream& out, const cls_rgw_bucket_instance_entry& v) { + out << "instance entry reshard status: " << v.reshard_status; + return out; + } +}; +WRITE_CLASS_ENCODER(cls_rgw_bucket_instance_entry) + +using rgw_bucket_dir_stats = std::map<RGWObjCategory, rgw_bucket_category_stats>; + +struct rgw_bucket_dir_header { + rgw_bucket_dir_stats stats; + 
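+  // stats holds one rgw_bucket_category_stats per RGWObjCategory;
+  // whole-bucket totals are the sum across categories, e.g.
+  // (sketch only, hdr is a hypothetical decoded header):
+  //   uint64_t total = 0;
+  //   for (const auto& [cat, st] : hdr.stats)
+  //     total += st.total_size;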
uint64_t tag_timeout; + uint64_t ver; + uint64_t master_ver; + std::string max_marker; + cls_rgw_bucket_instance_entry new_instance; + bool syncstopped; + + rgw_bucket_dir_header() : tag_timeout(0), ver(0), master_ver(0), syncstopped(false) {} + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(7, 2, bl); + encode(stats, bl); + encode(tag_timeout, bl); + encode(ver, bl); + encode(master_ver, bl); + encode(max_marker, bl); + encode(new_instance, bl); + encode(syncstopped,bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(6, 2, 2, bl); + decode(stats, bl); + if (struct_v > 2) { + decode(tag_timeout, bl); + } else { + tag_timeout = 0; + } + if (struct_v >= 4) { + decode(ver, bl); + decode(master_ver, bl); + } else { + ver = 0; + } + if (struct_v >= 5) { + decode(max_marker, bl); + } + if (struct_v >= 6) { + decode(new_instance, bl); + } else { + new_instance = cls_rgw_bucket_instance_entry(); + } + if (struct_v >= 7) { + decode(syncstopped,bl); + } + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<rgw_bucket_dir_header*>& o); + + bool resharding() const { + return new_instance.resharding(); + } + bool resharding_in_progress() const { + return new_instance.resharding_in_progress(); + } +}; +WRITE_CLASS_ENCODER(rgw_bucket_dir_header) + +struct rgw_bucket_dir { + rgw_bucket_dir_header header; + boost::container::flat_map<std::string, rgw_bucket_dir_entry> m; + + void encode(ceph::buffer::list &bl) const { + ENCODE_START(2, 2, bl); + encode(header, bl); + encode(m, bl); + ENCODE_FINISH(bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl); + decode(header, bl); + decode(m, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<rgw_bucket_dir*>& o); +}; +WRITE_CLASS_ENCODER(rgw_bucket_dir) + +struct rgw_usage_data { + uint64_t bytes_sent; + uint64_t bytes_received; + uint64_t ops; + uint64_t successful_ops; + + rgw_usage_data() : bytes_sent(0), bytes_received(0), ops(0), successful_ops(0) {} + rgw_usage_data(uint64_t sent, uint64_t received) : bytes_sent(sent), bytes_received(received), ops(0), successful_ops(0) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(bytes_sent, bl); + encode(bytes_received, bl); + encode(ops, bl); + encode(successful_ops, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(bytes_sent, bl); + decode(bytes_received, bl); + decode(ops, bl); + decode(successful_ops, bl); + DECODE_FINISH(bl); + } + + void aggregate(const rgw_usage_data& usage) { + bytes_sent += usage.bytes_sent; + bytes_received += usage.bytes_received; + ops += usage.ops; + successful_ops += usage.successful_ops; + } + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<rgw_usage_data*>& o); +}; +WRITE_CLASS_ENCODER(rgw_usage_data) + + +struct rgw_usage_log_entry { + rgw_user owner; + rgw_user payer; /* if empty, same as owner */ + std::string bucket; + uint64_t epoch; + rgw_usage_data total_usage; /* this one is kept for backwards compatibility */ + std::map<std::string, rgw_usage_data> usage_map; + + rgw_usage_log_entry() : epoch(0) {} + rgw_usage_log_entry(std::string& o, std::string& b) : owner(o), bucket(b), epoch(0) {} + rgw_usage_log_entry(std::string& o, std::string& p, std::string& b) : owner(o), payer(p), 
bucket(b), epoch(0) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(3, 1, bl); + encode(owner.to_str(), bl); + encode(bucket, bl); + encode(epoch, bl); + encode(total_usage.bytes_sent, bl); + encode(total_usage.bytes_received, bl); + encode(total_usage.ops, bl); + encode(total_usage.successful_ops, bl); + encode(usage_map, bl); + encode(payer.to_str(), bl); + ENCODE_FINISH(bl); + } + + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(3, bl); + std::string s; + decode(s, bl); + owner.from_str(s); + decode(bucket, bl); + decode(epoch, bl); + decode(total_usage.bytes_sent, bl); + decode(total_usage.bytes_received, bl); + decode(total_usage.ops, bl); + decode(total_usage.successful_ops, bl); + if (struct_v < 2) { + usage_map[""] = total_usage; + } else { + decode(usage_map, bl); + } + if (struct_v >= 3) { + std::string p; + decode(p, bl); + payer.from_str(p); + } + DECODE_FINISH(bl); + } + + void aggregate(const rgw_usage_log_entry& e, + std::map<std::string, bool> *categories = NULL) { + if (owner.empty()) { + owner = e.owner; + bucket = e.bucket; + epoch = e.epoch; + payer = e.payer; + } + + for (auto iter = e.usage_map.begin(); iter != e.usage_map.end(); ++iter) { + if (!categories || !categories->size() || categories->count(iter->first)) { + add(iter->first, iter->second); + } + } + } + + void sum(rgw_usage_data& usage, + std::map<std::string, bool>& categories) const { + usage = rgw_usage_data(); + for (auto iter = usage_map.begin(); iter != usage_map.end(); ++iter) { + if (!categories.size() || categories.count(iter->first)) { + usage.aggregate(iter->second); + } + } + } + + void add(const std::string& category, const rgw_usage_data& data) { + usage_map[category].aggregate(data); + total_usage.aggregate(data); + } + + void dump(ceph::Formatter* f) const; + static void generate_test_instances(std::list<rgw_usage_log_entry*>& o); + +}; +WRITE_CLASS_ENCODER(rgw_usage_log_entry) + +struct rgw_usage_log_info { + std::vector<rgw_usage_log_entry> entries; + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(entries, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(entries, bl); + DECODE_FINISH(bl); + } + void dump(ceph::Formatter* f) const; + static void generate_test_instances(std::list<rgw_usage_log_info*>& o); + + rgw_usage_log_info() {} +}; +WRITE_CLASS_ENCODER(rgw_usage_log_info) + +struct rgw_user_bucket { + std::string user; + std::string bucket; + + rgw_user_bucket() {} + rgw_user_bucket(const std::string& u, const std::string& b) : user(u), bucket(b) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(user, bl); + encode(bucket, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(user, bl); + decode(bucket, bl); + DECODE_FINISH(bl); + } + + bool operator<(const rgw_user_bucket& ub2) const { + int comp = user.compare(ub2.user); + if (comp < 0) + return true; + else if (!comp) + return bucket.compare(ub2.bucket) < 0; + + return false; + } + void dump(ceph::Formatter* f) const; + static void generate_test_instances(std::list<rgw_user_bucket*>& o); +}; +WRITE_CLASS_ENCODER(rgw_user_bucket) + +enum cls_rgw_gc_op { + CLS_RGW_GC_DEL_OBJ, + CLS_RGW_GC_DEL_BUCKET, +}; + +struct cls_rgw_obj { + std::string pool; + cls_rgw_obj_key key; + std::string loc; + + cls_rgw_obj() {} + cls_rgw_obj(std::string& _p, cls_rgw_obj_key& _k) : pool(_p), key(_k) {} + + void 
encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 1, bl); + encode(pool, bl); + encode(key.name, bl); + encode(loc, bl); + encode(key, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + decode(pool, bl); + decode(key.name, bl); + decode(loc, bl); + if (struct_v >= 2) { + decode(key, bl); + } + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const { + f->dump_string("pool", pool); + f->dump_string("oid", key.name); + f->dump_string("key", loc); + f->dump_string("instance", key.instance); + } + static void generate_test_instances(std::list<cls_rgw_obj*>& ls) { + ls.push_back(new cls_rgw_obj); + ls.push_back(new cls_rgw_obj); + ls.back()->pool = "mypool"; + ls.back()->key.name = "myoid"; + ls.back()->loc = "mykey"; + } + + size_t estimate_encoded_size() const { + constexpr size_t start_overhead = sizeof(__u8) + sizeof(__u8) + sizeof(ceph_le32); // version and length prefix + constexpr size_t string_overhead = sizeof(__u32); // strings are encoded with 32-bit length prefix + return start_overhead + + string_overhead + pool.size() + + string_overhead + key.name.size() + + string_overhead + loc.size() + + key.estimate_encoded_size(); + } +}; +WRITE_CLASS_ENCODER(cls_rgw_obj) + +struct cls_rgw_obj_chain { + std::list<cls_rgw_obj> objs; + + cls_rgw_obj_chain() {} + + void push_obj(const std::string& pool, const cls_rgw_obj_key& key, const std::string& loc) { + cls_rgw_obj obj; + obj.pool = pool; + obj.key = key; + obj.loc = loc; + objs.push_back(obj); + } + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(objs, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(objs, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const { + f->open_array_section("objs"); + for (std::list<cls_rgw_obj>::const_iterator p = objs.begin(); p != objs.end(); ++p) { + f->open_object_section("obj"); + p->dump(f); + f->close_section(); + } + f->close_section(); + } + static void generate_test_instances(std::list<cls_rgw_obj_chain*>& ls) { + ls.push_back(new cls_rgw_obj_chain); + } + + bool empty() { + return objs.empty(); + } + + size_t estimate_encoded_size() const { + constexpr size_t start_overhead = sizeof(__u8) + sizeof(__u8) + sizeof(ceph_le32); + constexpr size_t size_overhead = sizeof(__u32); // size of the chain + size_t chain_overhead = 0; + for (auto& it : objs) { + chain_overhead += it.estimate_encoded_size(); + } + return (start_overhead + size_overhead + chain_overhead); + } +}; +WRITE_CLASS_ENCODER(cls_rgw_obj_chain) + +struct cls_rgw_gc_obj_info +{ + std::string tag; + cls_rgw_obj_chain chain; + ceph::real_time time; + + cls_rgw_gc_obj_info() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(tag, bl); + encode(chain, bl); + encode(time, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(tag, bl); + decode(chain, bl); + decode(time, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const { + f->dump_string("tag", tag); + f->open_object_section("chain"); + chain.dump(f); + f->close_section(); + f->dump_stream("time") << time; + } + static void generate_test_instances(std::list<cls_rgw_gc_obj_info*>& ls) { + ls.push_back(new cls_rgw_gc_obj_info); + ls.push_back(new cls_rgw_gc_obj_info); + ls.back()->tag = "footag"; + ceph_timespec ts{ceph_le32(21), ceph_le32(32)}; + ls.back()->time = 
ceph::real_clock::from_ceph_timespec(ts); + } + + size_t estimate_encoded_size() const { + constexpr size_t start_overhead = sizeof(__u8) + sizeof(__u8) + sizeof(ceph_le32); // version and length prefix + constexpr size_t string_overhead = sizeof(__u32); // strings are encoded with 32-bit length prefix + constexpr size_t time_overhead = 2 * sizeof(ceph_le32); // time is stored as tv_sec and tv_nsec + return start_overhead + string_overhead + tag.size() + + time_overhead + chain.estimate_encoded_size(); + } +}; +WRITE_CLASS_ENCODER(cls_rgw_gc_obj_info) + +struct cls_rgw_lc_obj_head +{ + time_t start_date = 0; + std::string marker; + time_t shard_rollover_date = 0; + + cls_rgw_lc_obj_head() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 2, bl); + uint64_t t = start_date; + encode(t, bl); + encode(marker, bl); + encode(shard_rollover_date, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + uint64_t t; + decode(t, bl); + start_date = static_cast<time_t>(t); + decode(marker, bl); + if (struct_v < 2) { + shard_rollover_date = 0; + } else { + decode(t, bl); + shard_rollover_date = static_cast<time_t>(t); + } + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_rgw_lc_obj_head*>& ls); +}; +WRITE_CLASS_ENCODER(cls_rgw_lc_obj_head) + +struct cls_rgw_lc_entry { + std::string bucket; + uint64_t start_time; // if in_progress + uint32_t status; + + cls_rgw_lc_entry() + : start_time(0), status(0) {} + + cls_rgw_lc_entry(const cls_rgw_lc_entry& rhs) = default; + + cls_rgw_lc_entry(const std::string& b, uint64_t t, uint32_t s) + : bucket(b), start_time(t), status(s) {}; + + void encode(bufferlist& bl) const { + ENCODE_START(1, 1, bl); + encode(bucket, bl); + encode(start_time, bl); + encode(status, bl); + ENCODE_FINISH(bl); + } + + void decode(bufferlist::const_iterator& bl) { + DECODE_START(1, bl); + decode(bucket, bl); + decode(start_time, bl); + decode(status, bl); + DECODE_FINISH(bl); + } + void dump(Formatter *f) const; + static void generate_test_instances(std::list<cls_rgw_lc_entry*>& ls); +}; +WRITE_CLASS_ENCODER(cls_rgw_lc_entry); + +struct cls_rgw_reshard_entry +{ + ceph::real_time time; + std::string tenant; + std::string bucket_name; + std::string bucket_id; + uint32_t old_num_shards{0}; + uint32_t new_num_shards{0}; + + cls_rgw_reshard_entry() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 1, bl); + encode(time, bl); + encode(tenant, bl); + encode(bucket_name, bl); + encode(bucket_id, bl); + encode(old_num_shards, bl); + encode(new_num_shards, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + decode(time, bl); + decode(tenant, bl); + decode(bucket_name, bl); + decode(bucket_id, bl); + if (struct_v < 2) { + std::string new_instance_id; // removed in v2 + decode(new_instance_id, bl); + } + decode(old_num_shards, bl); + decode(new_num_shards, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_rgw_reshard_entry*>& o); + + static void generate_key(const std::string& tenant, const std::string& bucket_name, std::string *key); + void get_key(std::string *key) const; +}; +WRITE_CLASS_ENCODER(cls_rgw_reshard_entry) diff --git a/src/cls/rgw_gc/cls_rgw_gc.cc b/src/cls/rgw_gc/cls_rgw_gc.cc new file mode 100644 index 000000000..44a5d7b33 --- /dev/null +++ b/src/cls/rgw_gc/cls_rgw_gc.cc @@ -0,0 +1,559 @@ +// 
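// The types above all follow Ceph's versioned-encoding discipline: ENCODE_START(v, compat, bl)
// stamps a version header, and decoders branch on struct_v so newer code can still read what
// older writers produced (cls_rgw_obj gained its full key in v2; cls_rgw_reshard_entry dropped
// new_instance_id in v2). A minimal sketch of the same pattern, with hypothetical fields
// invented purely for illustration:
struct example_versioned_t {
  uint32_t a{0};
  uint32_t b{0};  // field added in version 2
  void encode(ceph::buffer::list& bl) const {
    ENCODE_START(2, 1, bl);  // encode as v2, compatible back to v1 readers
    encode(a, bl);
    encode(b, bl);
    ENCODE_FINISH(bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    DECODE_START(2, bl);
    decode(a, bl);
    if (struct_v >= 2) {  // a v1 writer never encoded 'b'; keep its default
      decode(b, bl);
    }
    DECODE_FINISH(bl);
  }
};
WRITE_CLASS_ENCODER(example_versioned_t)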
-*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "include/types.h" + +#include <errno.h> + +#include "objclass/objclass.h" +#include "cls/rgw/cls_rgw_ops.h" +#include "cls/rgw/cls_rgw_types.h" +#include "cls/rgw_gc/cls_rgw_gc_types.h" +#include "cls/rgw_gc/cls_rgw_gc_ops.h" +#include "cls/queue/cls_queue_ops.h" +#include "cls/rgw_gc/cls_rgw_gc_const.h" +#include "cls/queue/cls_queue_src.h" + +#include "common/ceph_context.h" +#include "global/global_context.h" + + +#define GC_LIST_DEFAULT_MAX 128 + +using std::string; + +using ceph::bufferlist; +using ceph::decode; +using ceph::encode; +using ceph::make_timespan; +using ceph::real_time; + +CLS_VER(1,0) +CLS_NAME(rgw_gc) + +static int cls_rgw_gc_queue_init(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + + cls_rgw_gc_queue_init_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(5, "ERROR: cls_rgw_gc_queue_init: failed to decode entry\n"); + return -EINVAL; + } + + cls_rgw_gc_urgent_data urgent_data; + urgent_data.num_urgent_data_entries = op.num_deferred_entries; + + cls_queue_init_op init_op; + + CLS_LOG(10, "INFO: cls_rgw_gc_queue_init: queue size is %lu\n", op.size); + + init_op.queue_size = op.size; + init_op.max_urgent_data_size = g_ceph_context->_conf->rgw_gc_max_deferred_entries_size; + encode(urgent_data, init_op.bl_urgent_data); + + return queue_init(hctx, init_op); +} + +static int cls_rgw_gc_queue_enqueue(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + cls_rgw_gc_set_entry_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_rgw_gc_queue_enqueue: failed to decode entry\n"); + return -EINVAL; + } + + op.info.time = ceph::real_clock::now(); + op.info.time += make_timespan(op.expiration_secs); + + //get head + cls_queue_head head; + int ret = queue_read_head(hctx, head); + if (ret < 0) { + return ret; + } + + cls_queue_enqueue_op enqueue_op; + bufferlist bl_data; + encode(op.info, bl_data); + enqueue_op.bl_data_vec.emplace_back(bl_data); + + CLS_LOG(20, "INFO: cls_rgw_gc_queue_enqueue: Data size is: %u \n", bl_data.length()); + + ret = queue_enqueue(hctx, enqueue_op, head); + if (ret < 0) { + return ret; + } + + //Write back head + return queue_write_head(hctx, head); +} + +static int cls_rgw_gc_queue_list_entries(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + cls_rgw_gc_list_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(5, "ERROR: cls_rgw_gc_queue_list_entries(): failed to decode input\n"); + return -EINVAL; + } + + cls_queue_head head; + auto ret = queue_read_head(hctx, head); + if (ret < 0) { + return ret; + } + + cls_rgw_gc_urgent_data urgent_data; + if (head.bl_urgent_data.length() > 0) { + auto iter_urgent_data = head.bl_urgent_data.cbegin(); + try { + decode(urgent_data, iter_urgent_data); + } catch (ceph::buffer::error& err) { + CLS_LOG(5, "ERROR: cls_rgw_gc_queue_list_entries(): failed to decode urgent data\n"); + return -EINVAL; + } + } + + cls_queue_list_op list_op; + if (! 
op.max) {
+ op.max = GC_LIST_DEFAULT_MAX;
+ }
+
+ list_op.max = op.max;
+ list_op.start_marker = op.marker;
+
+ cls_rgw_gc_list_ret list_ret;
+ uint32_t num_entries = 0; //Entries excluding the deferred ones
+ bool is_truncated = true;
+ string next_marker;
+ do {
+ cls_queue_list_ret op_ret;
+ int ret = queue_list_entries(hctx, list_op, op_ret, head);
+ if (ret < 0) {
+ CLS_LOG(5, "ERROR: queue_list_entries(): returned error %d\n", ret);
+ return ret;
+ }
+ is_truncated = op_ret.is_truncated;
+ next_marker = op_ret.next_marker;
+
+ if (op_ret.entries.size()) {
+ for (auto it : op_ret.entries) {
+ cls_rgw_gc_obj_info info;
+ try {
+ decode(info, it.data);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(5, "ERROR: cls_rgw_gc_queue_list_entries(): failed to decode gc info\n");
+ return -EINVAL;
+ }
+ bool found = false;
+ //Check for info tag in urgent data map
+ auto iter = urgent_data.urgent_data_map.find(info.tag);
+ if (iter != urgent_data.urgent_data_map.end()) {
+ found = true;
+ if (iter->second > info.time) {
+ CLS_LOG(10, "INFO: cls_rgw_gc_queue_list_entries(): tag found in urgent data: %s\n", info.tag.c_str());
+ continue;
+ }
+ }
+ //Search in xattrs
+ if (! found && urgent_data.num_xattr_urgent_entries > 0) {
+ bufferlist bl_xattrs;
+ int ret = cls_cxx_getxattr(hctx, "cls_queue_urgent_data", &bl_xattrs);
+ if (ret < 0 && (ret != -ENOENT && ret != -ENODATA)) {
+ CLS_LOG(0, "ERROR: %s(): cls_cxx_getxattrs() returned %d", __func__, ret);
+ return ret;
+ }
+ if (ret != -ENOENT && ret != -ENODATA) {
+ std::unordered_map<string,ceph::real_time> xattr_urgent_data_map;
+ auto iter = bl_xattrs.cbegin();
+ try {
+ decode(xattr_urgent_data_map, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rgw_gc_queue_list_entries(): failed to decode xattrs urgent data map\n");
+ return -EINVAL;
+ } //end - catch
+ auto xattr_iter = xattr_urgent_data_map.find(info.tag);
+ if (xattr_iter != xattr_urgent_data_map.end()) {
+ if (xattr_iter->second > info.time) {
+ CLS_LOG(1, "INFO: cls_rgw_gc_queue_list_entries(): tag found in xattrs urgent data map: %s\n", info.tag.c_str());
+ continue;
+ }
+ }
+ } // end - ret != ENOENT && ENODATA
+ } // end - if not found
+ if (op.expired_only) {
+ real_time now = ceph::real_clock::now();
+ if (info.time <= now) {
+ list_ret.entries.emplace_back(info);
+ }
+ //Can break out here if info.time > now, since all subsequent entries won't have expired
+ } else {
+ list_ret.entries.emplace_back(info);
+ }
+ num_entries++;
+ }
+ CLS_LOG(10, "INFO: cls_rgw_gc_queue_list_entries(): num_entries: %u and op.max: %u\n", num_entries, op.max);
+ if (num_entries < op.max) {
+ list_op.max = (op.max - num_entries);
+ list_op.start_marker = op_ret.next_marker;
+ out->clear();
+ } else {
+ //We've reached the max number of entries needed
+ break;
+ }
+ } else {
+ //We don't have data to process
+ break;
+ }
+ } while(is_truncated);
+
+ list_ret.truncated = is_truncated;
+ if (list_ret.truncated) {
+ list_ret.next_marker = next_marker;
+ }
+ out->clear();
+ encode(list_ret, *out);
+ return 0;
+}
+
+static int cls_rgw_gc_queue_remove_entries(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ auto in_iter = in->cbegin();
+
+ cls_rgw_gc_queue_remove_entries_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(5, "ERROR: cls_rgw_gc_queue_remove_entries(): failed to decode input\n");
+ return -EINVAL;
+ }
+
+ cls_queue_head head;
+ auto ret = queue_read_head(hctx, head);
+ if (ret < 0) {
+ return ret;
+ }
+
+ 
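// The listing loop above skips a queue entry whenever its tag appears in the urgent-data map
// with a newer timestamp: a newer time means the entry was deferred after it was enqueued, so
// the stale copy still sitting in the queue must not be surfaced. A minimal restatement of
// that predicate for the head-map tier (the xattr spill-over tier is checked the same way):
static bool is_superseded_by_deferral(const cls_rgw_gc_urgent_data& urgent_data,
                                      const cls_rgw_gc_obj_info& info)
{
  auto iter = urgent_data.urgent_data_map.find(info.tag);
  return iter != urgent_data.urgent_data_map.end() && iter->second > info.time;
}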
cls_rgw_gc_urgent_data urgent_data; + if (head.bl_urgent_data.length() > 0) { + auto iter_urgent_data = head.bl_urgent_data.cbegin(); + try { + decode(urgent_data, iter_urgent_data); + } catch (ceph::buffer::error& err) { + CLS_LOG(5, "ERROR: cls_rgw_gc_queue_remove_entries(): failed to decode urgent data\n"); + return -EINVAL; + } + } + + // List entries and calculate total number of entries (including invalid entries) + if (! op.num_entries) { + op.num_entries = GC_LIST_DEFAULT_MAX; + } + cls_queue_list_op list_op; + list_op.max = op.num_entries + 1; // +1 to get the offset of last + 1 entry + bool is_truncated = true; + uint32_t total_num_entries = 0, num_entries = 0; + string end_marker; + do { + cls_queue_list_ret op_ret; + int ret = queue_list_entries(hctx, list_op, op_ret, head); + if (ret < 0) { + CLS_LOG(5, "ERROR: queue_list_entries(): returned error %d\n", ret); + return ret; + } + + is_truncated = op_ret.is_truncated; + unsigned int index = 0; + // If data is not empty + if (op_ret.entries.size()) { + for (auto it : op_ret.entries) { + cls_rgw_gc_obj_info info; + try { + decode(info, it.data); + } catch (ceph::buffer::error& err) { + CLS_LOG(5, "ERROR: cls_rgw_gc_queue_remove_entries(): failed to decode gc info\n"); + return -EINVAL; + } + CLS_LOG(20, "INFO: cls_rgw_gc_queue_remove_entries(): entry: %s\n", info.tag.c_str()); + total_num_entries++; + index++; + bool found = false; + //Search for tag in urgent data map + auto iter = urgent_data.urgent_data_map.find(info.tag); + if (iter != urgent_data.urgent_data_map.end()) { + found = true; + if (iter->second > info.time) { + CLS_LOG(10, "INFO: cls_rgw_gc_queue_remove_entries(): tag found in urgent data: %s\n", info.tag.c_str()); + continue; + } else if (iter->second == info.time) { + CLS_LOG(10, "INFO: cls_rgw_gc_queue_remove_entries(): erasing tag from urgent data: %s\n", info.tag.c_str()); + urgent_data.urgent_data_map.erase(info.tag); //erase entry from map, as it will be removed later from queue + urgent_data.num_head_urgent_entries -= 1; + } + }//end-if map end + if (! found && urgent_data.num_xattr_urgent_entries > 0) { + //Search in xattrs + bufferlist bl_xattrs; + int ret = cls_cxx_getxattr(hctx, "cls_queue_urgent_data", &bl_xattrs); + if (ret < 0 && (ret != -ENOENT && ret != -ENODATA)) { + CLS_LOG(0, "ERROR: %s(): cls_cxx_getxattrs() returned %d", __func__, ret); + return ret; + } + if (ret != -ENOENT && ret != -ENODATA) { + std::unordered_map<string,ceph::real_time> xattr_urgent_data_map; + auto iter = bl_xattrs.cbegin(); + try { + decode(xattr_urgent_data_map, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(5, "ERROR: cls_rgw_gc_queue_remove_entries(): failed to decode xattrs urgent data map\n"); + return -EINVAL; + } //end - catch + auto xattr_iter = xattr_urgent_data_map.find(info.tag); + if (xattr_iter != xattr_urgent_data_map.end()) { + if (xattr_iter->second > info.time) { + CLS_LOG(10, "INFO: cls_rgw_gc_queue_remove_entries(): tag found in xattrs urgent data map: %s\n", info.tag.c_str()); + continue; + } else if (xattr_iter->second == info.time) { + CLS_LOG(10, "INFO: cls_rgw_gc_queue_remove_entries(): erasing tag from xattrs urgent data: %s\n", info.tag.c_str()); + xattr_urgent_data_map.erase(info.tag); //erase entry from map, as it will be removed later + urgent_data.num_xattr_urgent_entries -= 1; + } + } + } // end - ret != ENOENT && ENODATA + }// search in xattrs + num_entries++; + }//end-for + + if (num_entries < (op.num_entries + 1)) { + if (! 
op_ret.is_truncated) { + end_marker = op_ret.next_marker; + CLS_LOG(10, "INFO: cls_rgw_gc_queue_remove_entries(): not truncated and end offset is %s\n", end_marker.c_str()); + break; + } else { + list_op.max = ((op.num_entries + 1) - num_entries); + list_op.start_marker = op_ret.next_marker; + out->clear(); + } + } else { + end_marker = op_ret.entries[index - 1].marker; + CLS_LOG(1, "INFO: cls_rgw_gc_queue_remove_entries(): index is %u and end_offset is: %s\n", index, end_marker.c_str()); + break; + } + } //end-if + else { + break; + } + } while(is_truncated); + + CLS_LOG(10, "INFO: cls_rgw_gc_queue_remove_entries(): Total number of entries to remove: %d\n", total_num_entries); + CLS_LOG(10, "INFO: cls_rgw_gc_queue_remove_entries(): End offset is %s\n", end_marker.c_str()); + + if (! end_marker.empty()) { + cls_queue_remove_op rem_op; + rem_op.end_marker = end_marker; + int ret = queue_remove_entries(hctx, rem_op, head); + if (ret < 0) { + CLS_LOG(5, "ERROR: queue_remove_entries(): returned error %d\n", ret); + return ret; + } + } + + //Update urgent data map + head.bl_urgent_data.clear(); + encode(urgent_data, head.bl_urgent_data); + CLS_LOG(5, "INFO: cls_rgw_gc_queue_remove_entries(): Urgent data size is %u\n", head.bl_urgent_data.length()); + + return queue_write_head(hctx, head); +} + +static int cls_rgw_gc_queue_update_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + int ret = 0; + auto in_iter = in->cbegin(); + + cls_rgw_gc_queue_defer_entry_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(5, "ERROR: cls_rgw_gc_queue_update_entry(): failed to decode input\n"); + return -EINVAL; + } + + op.info.time = ceph::real_clock::now(); + op.info.time += make_timespan(op.expiration_secs); + + // Read head + cls_queue_head head; + ret = queue_read_head(hctx, head); + if (ret < 0) { + return ret; + } + + auto bl_iter = head.bl_urgent_data.cbegin(); + cls_rgw_gc_urgent_data urgent_data; + try { + decode(urgent_data, bl_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(5, "ERROR: cls_rgw_gc_queue_update_entry(): failed to decode urgent data\n"); + return -EINVAL; + } + + //has_urgent_data signifies whether urgent data in queue has changed + bool has_urgent_data = false, tag_found = false; + //search in unordered map in head + auto it = urgent_data.urgent_data_map.find(op.info.tag); + if (it != urgent_data.urgent_data_map.end()) { + it->second = op.info.time; + tag_found = true; + has_urgent_data = true; + } else { //search in xattrs + bufferlist bl_xattrs; + int ret = cls_cxx_getxattr(hctx, "cls_queue_urgent_data", &bl_xattrs); + if (ret < 0 && (ret != -ENOENT && ret != -ENODATA)) { + CLS_LOG(0, "ERROR: %s(): cls_cxx_getxattrs() returned %d", __func__, ret); + return ret; + } + if (ret != -ENOENT && ret != -ENODATA) { + std::unordered_map<string,ceph::real_time> xattr_urgent_data_map; + auto iter = bl_xattrs.cbegin(); + try { + decode(xattr_urgent_data_map, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_rgw_gc_queue_update_entry(): failed to decode xattrs urgent data map\n"); + return -EINVAL; + } //end - catch + auto xattr_iter = xattr_urgent_data_map.find(op.info.tag); + if (xattr_iter != xattr_urgent_data_map.end()) { + xattr_iter->second = op.info.time; + tag_found = true; + //write the updated map back + bufferlist bl_map; + encode(xattr_urgent_data_map, bl_map); + ret = cls_cxx_setxattr(hctx, "cls_queue_urgent_data", &bl_map); + CLS_LOG(20, "%s(): setting attr: %s", __func__, "cls_queue_urgent_data"); + 
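// Deferral timestamps live in two tiers: a small map embedded in the queue head, and, once
// encoding that map would exceed head.max_urgent_data_size, a spill-over map stored in the
// "cls_queue_urgent_data" xattr. A condensed sketch of the lookup order used throughout this
// file (illustrative helper; decode() may throw ceph::buffer::error, which the real methods
// catch and map to -EINVAL):
static int find_deferred_time(cls_method_context_t hctx,
                              const cls_rgw_gc_urgent_data& urgent_data,
                              const std::string& tag,
                              ceph::real_time* when)
{
  auto iter = urgent_data.urgent_data_map.find(tag);
  if (iter != urgent_data.urgent_data_map.end()) {  // tier 1: queue head
    *when = iter->second;
    return 0;
  }
  if (urgent_data.num_xattr_urgent_entries == 0) {
    return -ENOENT;  // nothing has spilled over yet
  }
  bufferlist bl_xattrs;  // tier 2: xattr spill-over
  int ret = cls_cxx_getxattr(hctx, "cls_queue_urgent_data", &bl_xattrs);
  if (ret < 0) {
    return (ret == -ENOENT || ret == -ENODATA) ? -ENOENT : ret;
  }
  std::unordered_map<std::string, ceph::real_time> xattr_urgent_data_map;
  auto p = bl_xattrs.cbegin();
  decode(xattr_urgent_data_map, p);
  auto xattr_iter = xattr_urgent_data_map.find(tag);
  if (xattr_iter == xattr_urgent_data_map.end()) {
    return -ENOENT;
  }
  *when = xattr_iter->second;
  return 0;
}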
if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s(): cls_cxx_setxattr (attr=%s) returned %d", __func__, "cls_queue_urgent_data", ret);
+ return ret;
+ }
+ }
+ }// end ret != ENOENT ...
+ }
+
+ if (! tag_found) {
+ //try inserting in queue head
+ urgent_data.urgent_data_map.insert({op.info.tag, op.info.time});
+ urgent_data.num_head_urgent_entries += 1;
+ has_urgent_data = true;
+
+ bufferlist bl_urgent_data;
+ encode(urgent_data, bl_urgent_data);
+ //insert as xattrs
+ if (bl_urgent_data.length() > head.max_urgent_data_size) {
+ //remove inserted entry from urgent data
+ urgent_data.urgent_data_map.erase(op.info.tag);
+ urgent_data.num_head_urgent_entries -= 1;
+ has_urgent_data = false;
+
+ bufferlist bl_xattrs;
+ int ret = cls_cxx_getxattr(hctx, "cls_queue_urgent_data", &bl_xattrs);
+ if (ret < 0 && (ret != -ENOENT && ret != -ENODATA)) {
+ CLS_LOG(0, "ERROR: %s(): cls_cxx_getxattrs() returned %d", __func__, ret);
+ return ret;
+ }
+ std::unordered_map<string,ceph::real_time> xattr_urgent_data_map;
+ if (ret != -ENOENT && ret != -ENODATA) {
+ auto iter = bl_xattrs.cbegin();
+ try {
+ decode(xattr_urgent_data_map, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rgw_gc_queue_update_entry(): failed to decode xattrs urgent data map\n");
+ return -EINVAL;
+ } //end - catch
+ }
+ xattr_urgent_data_map.insert({op.info.tag, op.info.time});
+ urgent_data.num_xattr_urgent_entries += 1;
+ has_urgent_data = true;
+ bufferlist bl_map;
+ encode(xattr_urgent_data_map, bl_map);
+ ret = cls_cxx_setxattr(hctx, "cls_queue_urgent_data", &bl_map);
+ CLS_LOG(20, "%s(): setting attr: %s", __func__, "cls_queue_urgent_data");
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s(): cls_cxx_setxattr (attr=%s) returned %d", __func__, "cls_queue_urgent_data", ret);
+ return ret;
+ }
+ }
+ }
+
+ if ((urgent_data.num_head_urgent_entries + urgent_data.num_xattr_urgent_entries) > urgent_data.num_urgent_data_entries) {
+ CLS_LOG(20, "Total num entries %u", urgent_data.num_urgent_data_entries);
+ CLS_LOG(20, "Num xattr entries %u", urgent_data.num_xattr_urgent_entries);
+ CLS_LOG(20, "Num head entries %u", urgent_data.num_head_urgent_entries);
+ CLS_LOG(0, "ERROR: Number of urgent data entries exceeded that requested by user, returning no space!");
+ return -ENOSPC;
+ }
+
+ // Due to Tracker 47866 we are no longer executing this code, as it
+ // appears that it can create a GC entry for an object that has not
+ // been deleted. Instead we log at level 0 to help confirm whether,
+ // and how often, this bug would otherwise be hit.
+#if 0
+ cls_queue_enqueue_op enqueue_op;
+ bufferlist bl_data;
+ encode(op.info, bl_data);
+ enqueue_op.bl_data_vec.emplace_back(bl_data);
+ CLS_LOG(10, "INFO: cls_gc_update_entry: Data size is: %u \n", bl_data.length());
+
+ ret = queue_enqueue(hctx, enqueue_op, head);
+ if (ret < 0) {
+ return ret;
+ }
+#else
+ std::string first_chain = "<empty-chain>";
+ if (! 
op.info.chain.objs.empty()) { + first_chain = op.info.chain.objs.cbegin()->key.name; + } + CLS_LOG(0, + "INFO: refrained from enqueueing GC entry during GC defer" + " tag=%s, first_chain=%s\n", + op.info.tag.c_str(), first_chain.c_str()); +#endif + + if (has_urgent_data) { + head.bl_urgent_data.clear(); + encode(urgent_data, head.bl_urgent_data); + } + + return queue_write_head(hctx, head); +} + +CLS_INIT(rgw_gc) +{ + CLS_LOG(1, "Loaded rgw gc class!"); + + cls_handle_t h_class; + cls_method_handle_t h_rgw_gc_queue_init; + cls_method_handle_t h_rgw_gc_queue_enqueue; + cls_method_handle_t h_rgw_gc_queue_list_entries; + cls_method_handle_t h_rgw_gc_queue_remove_entries; + cls_method_handle_t h_rgw_gc_queue_update_entry; + + cls_register(RGW_GC_CLASS, &h_class); + + /* gc */ + cls_register_cxx_method(h_class, RGW_GC_QUEUE_INIT, CLS_METHOD_RD | CLS_METHOD_WR, cls_rgw_gc_queue_init, &h_rgw_gc_queue_init); + cls_register_cxx_method(h_class, RGW_GC_QUEUE_ENQUEUE, CLS_METHOD_RD | CLS_METHOD_WR, cls_rgw_gc_queue_enqueue, &h_rgw_gc_queue_enqueue); + cls_register_cxx_method(h_class, RGW_GC_QUEUE_LIST_ENTRIES, CLS_METHOD_RD, cls_rgw_gc_queue_list_entries, &h_rgw_gc_queue_list_entries); + cls_register_cxx_method(h_class, RGW_GC_QUEUE_REMOVE_ENTRIES, CLS_METHOD_RD | CLS_METHOD_WR, cls_rgw_gc_queue_remove_entries, &h_rgw_gc_queue_remove_entries); + cls_register_cxx_method(h_class, RGW_GC_QUEUE_UPDATE_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, cls_rgw_gc_queue_update_entry, &h_rgw_gc_queue_update_entry); + + return; +} + diff --git a/src/cls/rgw_gc/cls_rgw_gc_client.cc b/src/cls/rgw_gc/cls_rgw_gc_client.cc new file mode 100644 index 000000000..415ce8b75 --- /dev/null +++ b/src/cls/rgw_gc/cls_rgw_gc_client.cc @@ -0,0 +1,108 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +#include <errno.h> + +#include "cls/rgw/cls_rgw_ops.h" +#include "cls/rgw_gc/cls_rgw_gc_ops.h" +#include "cls/queue/cls_queue_ops.h" +#include "cls/rgw_gc/cls_rgw_gc_const.h" +#include "cls/queue/cls_queue_const.h" +#include "cls/rgw_gc/cls_rgw_gc_client.h" + +using std::list; +using std::string; + +using ceph::decode; +using ceph::encode; + +using namespace librados; + +void cls_rgw_gc_queue_init(ObjectWriteOperation& op, uint64_t size, uint64_t num_deferred_entries) +{ + bufferlist in; + cls_rgw_gc_queue_init_op call; + call.size = size; + call.num_deferred_entries = num_deferred_entries; + encode(call, in); + op.exec(RGW_GC_CLASS, RGW_GC_QUEUE_INIT, in); +} + +int cls_rgw_gc_queue_get_capacity(IoCtx& io_ctx, const string& oid, uint64_t& size) +{ + bufferlist in, out; + int r = io_ctx.exec(oid, QUEUE_CLASS, QUEUE_GET_CAPACITY, in, out); + if (r < 0) + return r; + + cls_queue_get_capacity_ret op_ret; + auto iter = out.cbegin(); + try { + decode(op_ret, iter); + } catch (ceph::buffer::error& err) { + return -EIO; + } + + size = op_ret.queue_capacity; + + return 0; +} + +void cls_rgw_gc_queue_enqueue(ObjectWriteOperation& op, uint32_t expiration_secs, const cls_rgw_gc_obj_info& info) +{ + bufferlist in; + cls_rgw_gc_set_entry_op call; + call.expiration_secs = expiration_secs; + call.info = info; + encode(call, in); + op.exec(RGW_GC_CLASS, RGW_GC_QUEUE_ENQUEUE, in); +} + +int cls_rgw_gc_queue_list_entries(IoCtx& io_ctx, const string& oid, const string& marker, uint32_t max, bool expired_only, + list<cls_rgw_gc_obj_info>& entries, bool *truncated, string& next_marker) +{ + bufferlist in, out; + cls_rgw_gc_list_op op; + op.marker = marker; + op.max = max; + op.expired_only = expired_only; + 
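// Callers typically drive the wrapper defined here in a marker loop until the listing is no
// longer truncated. A hedged usage sketch ('ioctx' and 'oid' are assumed to be a connected
// librados::IoCtx and the name of the queue object; both names are illustrative):
int drain_expired_gc_entries(librados::IoCtx& ioctx, const std::string& oid)
{
  std::string marker, next_marker;
  bool truncated = true;
  while (truncated) {
    std::list<cls_rgw_gc_obj_info> entries;
    int r = cls_rgw_gc_queue_list_entries(ioctx, oid, marker, 128, true /* expired_only */,
                                          entries, &truncated, next_marker);
    if (r < 0) {
      return r;
    }
    for (const auto& info : entries) {
      (void)info;  // process info.tag / info.chain here
    }
    marker = next_marker;  // resume after the last entry returned
  }
  return 0;
}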
encode(op, in); + + int r = io_ctx.exec(oid, RGW_GC_CLASS, RGW_GC_QUEUE_LIST_ENTRIES, in, out); + if (r < 0) + return r; + + cls_rgw_gc_list_ret ret; + auto iter = out.cbegin(); + try { + decode(ret, iter); + } catch (ceph::buffer::error& err) { + return -EIO; + } + + entries.swap(ret.entries); + + *truncated = ret.truncated; + + next_marker = std::move(ret.next_marker); + + return 0; +} + +void cls_rgw_gc_queue_remove_entries(ObjectWriteOperation& op, uint32_t num_entries) +{ + bufferlist in, out; + cls_rgw_gc_queue_remove_entries_op rem_op; + rem_op.num_entries = num_entries; + encode(rem_op, in); + op.exec(RGW_GC_CLASS, RGW_GC_QUEUE_REMOVE_ENTRIES, in); +} + +void cls_rgw_gc_queue_defer_entry(ObjectWriteOperation& op, uint32_t expiration_secs, const cls_rgw_gc_obj_info& info) +{ + bufferlist in; + cls_rgw_gc_queue_defer_entry_op defer_op; + defer_op.expiration_secs = expiration_secs; + defer_op.info = info; + encode(defer_op, in); + op.exec(RGW_GC_CLASS, RGW_GC_QUEUE_UPDATE_ENTRY, in); +} diff --git a/src/cls/rgw_gc/cls_rgw_gc_client.h b/src/cls/rgw_gc/cls_rgw_gc_client.h new file mode 100644 index 000000000..bce510b9c --- /dev/null +++ b/src/cls/rgw_gc/cls_rgw_gc_client.h @@ -0,0 +1,20 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#pragma once + +#include "include/rados/librados.hpp" + +#include "common/ceph_time.h" + +#include "cls/queue/cls_queue_ops.h" +#include "cls/rgw/cls_rgw_types.h" +#include "cls/rgw_gc/cls_rgw_gc_types.h" + +void cls_rgw_gc_queue_init(librados::ObjectWriteOperation& op, uint64_t size, uint64_t num_deferred_entries); +int cls_rgw_gc_queue_get_capacity(librados::IoCtx& io_ctx, const std::string& oid, uint64_t& size); +void cls_rgw_gc_queue_enqueue(librados::ObjectWriteOperation& op, uint32_t expiration_secs, const cls_rgw_gc_obj_info& info); +int cls_rgw_gc_queue_list_entries(librados::IoCtx& io_ctx, const std::string& oid, const std::string& marker, uint32_t max, bool expired_only, + std::list<cls_rgw_gc_obj_info>& entries, bool *truncated, std::string& next_marker); +void cls_rgw_gc_queue_remove_entries(librados::ObjectWriteOperation& op, uint32_t num_entries); +void cls_rgw_gc_queue_defer_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, const cls_rgw_gc_obj_info& info); diff --git a/src/cls/rgw_gc/cls_rgw_gc_const.h b/src/cls/rgw_gc/cls_rgw_gc_const.h new file mode 100644 index 000000000..ae33e3ff0 --- /dev/null +++ b/src/cls/rgw_gc/cls_rgw_gc_const.h @@ -0,0 +1,12 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#pragma once + +#define RGW_GC_CLASS "rgw_gc" + +#define RGW_GC_QUEUE_INIT "rgw_gc_queue_init" +#define RGW_GC_QUEUE_ENQUEUE "rgw_gc_queue_enqueue" +#define RGW_GC_QUEUE_LIST_ENTRIES "rgw_gc_queue_list_entries" +#define RGW_GC_QUEUE_REMOVE_ENTRIES "rgw_gc_queue_remove_entries" +#define RGW_GC_QUEUE_UPDATE_ENTRY "rgw_gc_queue_update_entry" diff --git a/src/cls/rgw_gc/cls_rgw_gc_ops.h b/src/cls/rgw_gc/cls_rgw_gc_ops.h new file mode 100644 index 000000000..22ddbad06 --- /dev/null +++ b/src/cls/rgw_gc/cls_rgw_gc_ops.h @@ -0,0 +1,69 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#pragma once + +#include "cls/rgw/cls_rgw_types.h" + +struct cls_rgw_gc_queue_init_op { + uint64_t size; + uint64_t num_deferred_entries{0}; + + cls_rgw_gc_queue_init_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(size, bl); + 
encode(num_deferred_entries, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(size, bl); + decode(num_deferred_entries, bl); + DECODE_FINISH(bl); + } + +}; +WRITE_CLASS_ENCODER(cls_rgw_gc_queue_init_op) + +struct cls_rgw_gc_queue_remove_entries_op { + uint64_t num_entries; + + cls_rgw_gc_queue_remove_entries_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(num_entries, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(num_entries, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_rgw_gc_queue_remove_entries_op) + +struct cls_rgw_gc_queue_defer_entry_op { + uint32_t expiration_secs; + cls_rgw_gc_obj_info info; + cls_rgw_gc_queue_defer_entry_op() : expiration_secs(0) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(expiration_secs, bl); + encode(info, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(expiration_secs, bl); + decode(info, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_rgw_gc_queue_defer_entry_op) diff --git a/src/cls/rgw_gc/cls_rgw_gc_types.h b/src/cls/rgw_gc/cls_rgw_gc_types.h new file mode 100644 index 000000000..885bf14b9 --- /dev/null +++ b/src/cls/rgw_gc/cls_rgw_gc_types.h @@ -0,0 +1,34 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#pragma once + +#include "include/types.h" +#include <unordered_map> + +struct cls_rgw_gc_urgent_data +{ + std::unordered_map<std::string, ceph::real_time> urgent_data_map; + uint32_t num_urgent_data_entries{0}; // requested by user + uint32_t num_head_urgent_entries{0}; // actual number of entries in queue head + uint32_t num_xattr_urgent_entries{0}; // actual number of entries in xattr in case of spill over + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(urgent_data_map, bl); + encode(num_urgent_data_entries, bl); + encode(num_head_urgent_entries, bl); + encode(num_xattr_urgent_entries, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(urgent_data_map, bl); + decode(num_urgent_data_entries, bl); + decode(num_head_urgent_entries, bl); + decode(num_xattr_urgent_entries, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(cls_rgw_gc_urgent_data) diff --git a/src/cls/sdk/cls_sdk.cc b/src/cls/sdk/cls_sdk.cc new file mode 100644 index 000000000..843369f1c --- /dev/null +++ b/src/cls/sdk/cls_sdk.cc @@ -0,0 +1,131 @@ +/* + * This is an example RADOS object class built using only the Ceph SDK interface. + */ +#include "include/rados/objclass.h" + +CLS_VER(1,0) +CLS_NAME(sdk) + +cls_handle_t h_class; +cls_method_handle_t h_test_coverage_write; +cls_method_handle_t h_test_coverage_replay; + +/** + * test_coverage_write - a "write" method that creates an object + * + * This method modifies the object by making multiple write calls (write, + * setxattr and set_val). 
+ */ +static int test_coverage_write(cls_method_context_t hctx, ceph::buffer::list *in, ceph::buffer::list *out) +{ + // create the object + int ret = cls_cxx_create(hctx, false); + if (ret < 0) { + CLS_LOG(0, "ERROR: %s(): cls_cxx_create returned %d", __func__, ret); + return ret; + } + + uint64_t size; + // get the size of the object + ret = cls_cxx_stat(hctx, &size, NULL); + if (ret < 0) + return ret; + + std::string c = "test"; + ceph::buffer::list bl; + bl.append(c); + + // write to the object + ret = cls_cxx_write(hctx, 0, bl.length(), &bl); + if (ret < 0) + return ret; + + uint64_t new_size; + // get the new size of the object + ret = cls_cxx_stat(hctx, &new_size, NULL); + if (ret < 0) + return ret; + + // make some change to the xattr + ret = cls_cxx_setxattr(hctx, "foo", &bl); + if (ret < 0) + return ret; + + // make some change to the omap + ret = cls_cxx_map_set_val(hctx, "foo", &bl); + if (ret < 0) + return ret; + + return 0; +} + +/** + * test_coverage_replay - a "read" method to retrieve previously written data + * + * This method reads the object by making multiple read calls (read, getxattr + * and get_val). It also removes the object after reading. + */ + +static int test_coverage_replay(cls_method_context_t hctx, ceph::buffer::list *in, ceph::buffer::list *out) +{ + CLS_LOG(0, "reading already written object"); + uint64_t size; + // get the size of the object + int ret = cls_cxx_stat(hctx, &size, NULL); + if (ret < 0) + return ret; + + ceph::buffer::list bl; + // read the object entry + ret = cls_cxx_read(hctx, 0, size, &bl); + if (ret < 0) + return ret; + + // if the size is incorrect + if (bl.length() != size) + return -EIO; + + bl.clear(); + + // read xattr entry + ret = cls_cxx_getxattr(hctx, "foo", &bl); + if (ret < 0) + return ret; + + // if the size is incorrect + if (bl.length() != size) + return -EIO; + + bl.clear(); + + // read omap entry + ret = cls_cxx_map_get_val(hctx, "foo", &bl); + if (ret < 0) + return ret; + + // if the size is incorrect + if (bl.length() != size) + return -EIO; + + // remove the object + ret = cls_cxx_remove(hctx); + if (ret < 0) + return ret; + + return 0; +} + +CLS_INIT(sdk) +{ + CLS_LOG(0, "loading cls_sdk"); + + cls_register("sdk", &h_class); + + cls_register_cxx_method(h_class, "test_coverage_write", + CLS_METHOD_RD|CLS_METHOD_WR, + test_coverage_write, &h_test_coverage_write); + + cls_register_cxx_method(h_class, "test_coverage_replay", + CLS_METHOD_RD|CLS_METHOD_WR, + test_coverage_replay, &h_test_coverage_replay); +} diff --git a/src/cls/test_remote_reads/cls_test_remote_reads.cc b/src/cls/test_remote_reads/cls_test_remote_reads.cc new file mode 100644 index 000000000..33b0e9dc1 --- /dev/null +++ b/src/cls/test_remote_reads/cls_test_remote_reads.cc @@ -0,0 +1,87 @@ +/* + * This is an example RADOS object class that shows how to use remote reads. 
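 *
 * The gather method below expects its input bufferlist to carry a JSON object naming the
 * class/method to invoke on each source object, the pool those objects live in, and the
 * object names to read. A hedged example of that input (pool and object names are
 * illustrative only):
 *
 *   {
 *     "cls": "test_remote_reads",
 *     "method": "test_read",
 *     "pool": "mypool",
 *     "src_objects": ["src1", "src2"]
 *   }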
+ */ + +#include "common/ceph_json.h" +#include "objclass/objclass.h" + +CLS_VER(1,0) +CLS_NAME(test_remote_reads) + +cls_handle_t h_class; +cls_method_handle_t h_test_read; +cls_method_handle_t h_test_gather; + +/** + * read data + */ +static int test_read(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + int r = cls_cxx_read(hctx, 0, 0, out); + if (r < 0) { + CLS_ERR("%s: error reading data", __PRETTY_FUNCTION__); + return r; + } + return 0; +} + +/** + * gather data from other objects using remote reads + */ +static int test_gather(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { + std::map<std::string, bufferlist> src_obj_buffs; + int r = cls_cxx_get_gathered_data(hctx, &src_obj_buffs); + if (src_obj_buffs.empty()) { + // start remote reads + JSONParser parser; + bool b = parser.parse(in->c_str(), in->length()); + if (!b) { + CLS_ERR("%s: failed to parse json", __PRETTY_FUNCTION__); + return -EBADMSG; + } + auto *o_cls = parser.find_obj("cls"); + ceph_assert(o_cls); + std::string cls = o_cls->get_data_val().str; + + auto *o_method = parser.find_obj("method"); + ceph_assert(o_method); + std::string method = o_method->get_data_val().str; + + auto *o_pool = parser.find_obj("pool"); + ceph_assert(o_pool); + std::string pool = o_pool->get_data_val().str; + + auto *o_src_objects = parser.find_obj("src_objects"); + ceph_assert(o_src_objects); + auto src_objects_v = o_src_objects->get_array_elements(); + std::set<std::string> src_objects; + for (auto it = src_objects_v.begin(); it != src_objects_v.end(); it++) { + std::string oid_without_double_quotes = it->substr(1, it->size()-2); + src_objects.insert(oid_without_double_quotes); + } + r = cls_cxx_gather(hctx, src_objects, pool, cls.c_str(), method.c_str(), *in); + } else { + // write data gathered using remote reads + int offset = 0; + for (std::map<std::string, bufferlist>::iterator it = src_obj_buffs.begin(); it != src_obj_buffs.end(); it++) { + bufferlist bl= it->second; + r = cls_cxx_write(hctx, offset, bl.length(), &bl); + offset += bl.length(); + } + } + return r; +} + +CLS_INIT(test_remote_reads) +{ + CLS_LOG(0, "loading cls_test_remote_reads"); + + cls_register("test_remote_reads", &h_class); + + cls_register_cxx_method(h_class, "test_read", + CLS_METHOD_RD, + test_read, &h_test_read); + + cls_register_cxx_method(h_class, "test_gather", + CLS_METHOD_RD | CLS_METHOD_WR, + test_gather, &h_test_gather); +} diff --git a/src/cls/timeindex/cls_timeindex.cc b/src/cls/timeindex/cls_timeindex.cc new file mode 100644 index 000000000..5ad8883d8 --- /dev/null +++ b/src/cls/timeindex/cls_timeindex.cc @@ -0,0 +1,266 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include <errno.h> + +#include "objclass/objclass.h" + +#include "cls_timeindex_ops.h" + +#include "include/compat.h" + +using std::map; +using std::string; + +using ceph::bufferlist; + +CLS_VER(1,0) +CLS_NAME(timeindex) + +static const size_t MAX_LIST_ENTRIES = 1000; +static const size_t MAX_TRIM_ENTRIES = 1000; + +static const string TIMEINDEX_PREFIX = "1_"; + +static void get_index_time_prefix(const utime_t& ts, + string& index) +{ + char buf[32]; + + snprintf(buf, sizeof(buf), "%s%010ld.%06ld_", TIMEINDEX_PREFIX.c_str(), + (long)ts.sec(), (long)ts.usec()); + buf[sizeof(buf) - 1] = '\0'; + + index = buf; +} + +static void get_index(cls_method_context_t hctx, + const utime_t& key_ts, + const string& key_ext, + string& index) +{ + get_index_time_prefix(key_ts, index); + index.append(key_ext); +} + +static 
int parse_index(const string& index, + utime_t& key_ts, + string& key_ext) +{ + int sec, usec; + char keyext[256]; + + int ret = sscanf(index.c_str(), "1_%d.%d_%255s", &sec, &usec, keyext); + + key_ts = utime_t(sec, usec); + key_ext = string(keyext); + return ret; +} + +static int cls_timeindex_add(cls_method_context_t hctx, + bufferlist * const in, + bufferlist * const out) +{ + auto in_iter = in->cbegin(); + + cls_timeindex_add_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_timeindex_add_op(): failed to decode op"); + return -EINVAL; + } + + for (auto iter = op.entries.begin(); + iter != op.entries.end(); + ++iter) { + cls_timeindex_entry& entry = *iter; + + string index; + get_index(hctx, entry.key_ts, entry.key_ext, index); + + CLS_LOG(20, "storing entry at %s", index.c_str()); + + int ret = cls_cxx_map_set_val(hctx, index, &entry.value); + if (ret < 0) { + return ret; + } + } + + return 0; +} + +static int cls_timeindex_list(cls_method_context_t hctx, + bufferlist * const in, + bufferlist * const out) +{ + auto in_iter = in->cbegin(); + + cls_timeindex_list_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_timeindex_list_op(): failed to decode op"); + return -EINVAL; + } + + map<string, bufferlist> keys; + + string from_index; + string to_index; + + if (op.marker.empty()) { + get_index_time_prefix(op.from_time, from_index); + } else { + from_index = op.marker; + } + const bool use_time_boundary = (op.to_time >= op.from_time); + + if (use_time_boundary) { + get_index_time_prefix(op.to_time, to_index); + } + + size_t max_entries = op.max_entries; + if (max_entries > MAX_LIST_ENTRIES) { + max_entries = MAX_LIST_ENTRIES; + } + + cls_timeindex_list_ret ret; + + int rc = cls_cxx_map_get_vals(hctx, from_index, TIMEINDEX_PREFIX, + max_entries, &keys, &ret.truncated); + if (rc < 0) { + return rc; + } + + auto& entries = ret.entries; + auto iter = keys.begin(); + + string marker; + + for (; iter != keys.end(); ++iter) { + const string& index = iter->first; + bufferlist& bl = iter->second; + + if (use_time_boundary && index.compare(0, to_index.size(), to_index) >= 0) { + CLS_LOG(20, "DEBUG: cls_timeindex_list: finishing on to_index=%s", + to_index.c_str()); + ret.truncated = false; + break; + } + + cls_timeindex_entry e; + + if (parse_index(index, e.key_ts, e.key_ext) < 0) { + CLS_LOG(0, "ERROR: cls_timeindex_list: could not parse index=%s", + index.c_str()); + } else { + CLS_LOG(20, "DEBUG: cls_timeindex_list: index=%s, key_ext=%s, bl.len = %d", + index.c_str(), e.key_ext.c_str(), bl.length()); + e.value = bl; + entries.push_back(e); + } + marker = index; + } + + ret.marker = marker; + + encode(ret, *out); + + return 0; +} + + +static int cls_timeindex_trim(cls_method_context_t hctx, + bufferlist * const in, + bufferlist * const out) +{ + auto in_iter = in->cbegin(); + + cls_timeindex_trim_op op; + try { + decode(op, in_iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR: cls_timeindex_trim: failed to decode entry"); + return -EINVAL; + } + + map<string, bufferlist> keys; + + string from_index; + string to_index; + + if (op.from_marker.empty()) { + get_index_time_prefix(op.from_time, from_index); + } else { + from_index = op.from_marker; + } + + if (op.to_marker.empty()) { + get_index_time_prefix(op.to_time, to_index); + } else { + to_index = op.to_marker; + } + + bool more; + + int rc = cls_cxx_map_get_vals(hctx, from_index, TIMEINDEX_PREFIX, + MAX_TRIM_ENTRIES, &keys, 
&more); + if (rc < 0) { + return rc; + } + + auto iter = keys.begin(); + + bool removed = false; + for (; iter != keys.end(); ++iter) { + const string& index = iter->first; + + CLS_LOG(20, "index=%s to_index=%s", index.c_str(), to_index.c_str()); + + if (index.compare(0, to_index.size(), to_index) > 0) { + CLS_LOG(20, "DEBUG: cls_timeindex_trim: finishing on to_index=%s", + to_index.c_str()); + break; + } + + CLS_LOG(20, "removing key: index=%s", index.c_str()); + + int rc = cls_cxx_map_remove_key(hctx, index); + if (rc < 0) { + CLS_LOG(1, "ERROR: cls_cxx_map_remove_key failed rc=%d", rc); + return rc; + } + + removed = true; + } + + if (!removed) { + return -ENODATA; + } + + return 0; +} + +CLS_INIT(timeindex) +{ + CLS_LOG(1, "Loaded timeindex class!"); + + cls_handle_t h_class; + cls_method_handle_t h_timeindex_add; + cls_method_handle_t h_timeindex_list; + cls_method_handle_t h_timeindex_trim; + + cls_register("timeindex", &h_class); + + /* timeindex */ + cls_register_cxx_method(h_class, "add", CLS_METHOD_RD | CLS_METHOD_WR, + cls_timeindex_add, &h_timeindex_add); + cls_register_cxx_method(h_class, "list", CLS_METHOD_RD, + cls_timeindex_list, &h_timeindex_list); + cls_register_cxx_method(h_class, "trim", CLS_METHOD_RD | CLS_METHOD_WR, + cls_timeindex_trim, &h_timeindex_trim); + + return; +} + diff --git a/src/cls/timeindex/cls_timeindex_client.cc b/src/cls/timeindex/cls_timeindex_client.cc new file mode 100644 index 000000000..7a38ff5fa --- /dev/null +++ b/src/cls/timeindex/cls_timeindex_client.cc @@ -0,0 +1,120 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include <errno.h> + +#include "cls/timeindex/cls_timeindex_ops.h" +#include "cls/timeindex/cls_timeindex_client.h" +#include "include/compat.h" + +void cls_timeindex_add( + librados::ObjectWriteOperation& op, + std::list<cls_timeindex_entry>& entries) +{ + librados::bufferlist in; + cls_timeindex_add_op call; + call.entries = entries; + + encode(call, in); + op.exec("timeindex", "add", in); +} + +void cls_timeindex_add( + librados::ObjectWriteOperation& op, + cls_timeindex_entry& entry) +{ + librados::bufferlist in; + cls_timeindex_add_op call; + call.entries.push_back(entry); + + encode(call, in); + op.exec("timeindex", "add", in); +} + +void cls_timeindex_add_prepare_entry( + cls_timeindex_entry& entry, + const utime_t& key_timestamp, + const std::string& key_ext, + const librados::bufferlist& bl) +{ + entry.key_ts = key_timestamp; + entry.key_ext = key_ext; + entry.value = bl; +} + +void cls_timeindex_add( + librados::ObjectWriteOperation& op, + const utime_t& key_timestamp, + const std::string& key_ext, + const librados::bufferlist& bl) +{ + cls_timeindex_entry entry; + cls_timeindex_add_prepare_entry(entry, key_timestamp, key_ext, bl); + cls_timeindex_add(op, entry); +} + +void cls_timeindex_trim( + librados::ObjectWriteOperation& op, + const utime_t& from_time, + const utime_t& to_time, + const std::string& from_marker, + const std::string& to_marker) +{ + librados::bufferlist in; + cls_timeindex_trim_op call; + call.from_time = from_time; + call.to_time = to_time; + call.from_marker = from_marker; + call.to_marker = to_marker; + + encode(call, in); + + op.exec("timeindex", "trim", in); +} + +int cls_timeindex_trim( + librados::IoCtx& io_ctx, + const std::string& oid, + const utime_t& from_time, + const utime_t& to_time, + const std::string& from_marker, + const std::string& to_marker) +{ + bool done = false; + + do { + librados::ObjectWriteOperation op; + 
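// A hedged end-to-end sketch using the wrappers in this file: append one entry keyed by the
// current time, then list everything up to 'now'. 'ioctx' and 'oid' are assumed to be a
// connected librados::IoCtx and the index object name; ceph_clock_now() is assumed to be the
// usual utime_t clock helper from common/Clock.h:
int timeindex_roundtrip(librados::IoCtx& ioctx, const std::string& oid)
{
  librados::ObjectWriteOperation wop;
  ceph::buffer::list val;
  val.append("payload");
  cls_timeindex_add(wop, ceph_clock_now(), "ext", val);  // key = time prefix + "ext"
  int r = ioctx.operate(oid, &wop);
  if (r < 0) {
    return r;
  }

  std::list<cls_timeindex_entry> entries;
  std::string out_marker;
  bool truncated = false;
  librados::ObjectReadOperation rop;
  cls_timeindex_list(rop, utime_t(), ceph_clock_now(), "", 100,
                     entries, &out_marker, &truncated);
  ceph::buffer::list unused;
  return ioctx.operate(oid, &rop, &unused);  // entries filled by the list completion
}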
cls_timeindex_trim(op, from_time, to_time, from_marker, to_marker); + int r = io_ctx.operate(oid, &op); + + if (r == -ENODATA) + done = true; + else if (r < 0) + return r; + } while (!done); + + return 0; +} + +void cls_timeindex_list( + librados::ObjectReadOperation& op, + const utime_t& from, + const utime_t& to, + const std::string& in_marker, + const int max_entries, + std::list<cls_timeindex_entry>& entries, + std::string *out_marker, + bool *truncated) +{ + librados::bufferlist in; + cls_timeindex_list_op call; + call.from_time = from; + call.to_time = to; + call.marker = in_marker; + call.max_entries = max_entries; + + encode(call, in); + + op.exec("timeindex", "list", in, + new TimeindexListCtx(&entries, out_marker, truncated)); +} diff --git a/src/cls/timeindex/cls_timeindex_client.h b/src/cls/timeindex/cls_timeindex_client.h new file mode 100644 index 000000000..818d4b0c4 --- /dev/null +++ b/src/cls/timeindex/cls_timeindex_client.h @@ -0,0 +1,98 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_TIMEINDEX_CLIENT_H +#define CEPH_CLS_TIMEINDEX_CLIENT_H + +#include "include/rados/librados.hpp" + +#include "cls_timeindex_ops.h" + +/** + * timeindex objclass + */ +class TimeindexListCtx : public librados::ObjectOperationCompletion { + std::list<cls_timeindex_entry> *entries; + std::string *marker; + bool *truncated; + +public: + ///* ctor + TimeindexListCtx( + std::list<cls_timeindex_entry> *_entries, + std::string *_marker, + bool *_truncated) + : entries(_entries), marker(_marker), truncated(_truncated) {} + + ///* dtor + ~TimeindexListCtx() {} + + void handle_completion(int r, ceph::buffer::list& bl) override { + if (r >= 0) { + cls_timeindex_list_ret ret; + try { + auto iter = bl.cbegin(); + decode(ret, iter); + if (entries) + *entries = ret.entries; + if (truncated) + *truncated = ret.truncated; + if (marker) + *marker = ret.marker; + } catch (ceph::buffer::error& err) { + // nothing we can do about it atm + } + } + } +}; + +void cls_timeindex_add_prepare_entry( + cls_timeindex_entry& entry, + const utime_t& key_timestamp, + const std::string& key_ext, + ceph::buffer::list& bl); + +void cls_timeindex_add( + librados::ObjectWriteOperation& op, + const std::list<cls_timeindex_entry>& entry); + +void cls_timeindex_add( + librados::ObjectWriteOperation& op, + const cls_timeindex_entry& entry); + +void cls_timeindex_add( + librados::ObjectWriteOperation& op, + const utime_t& timestamp, + const std::string& name, + const ceph::buffer::list& bl); + +void cls_timeindex_list( + librados::ObjectReadOperation& op, + const utime_t& from, + const utime_t& to, + const std::string& in_marker, + const int max_entries, + std::list<cls_timeindex_entry>& entries, + std::string *out_marker, + bool *truncated); + +void cls_timeindex_trim( + librados::ObjectWriteOperation& op, + const utime_t& from_time, + const utime_t& to_time, + const std::string& from_marker = std::string(), + const std::string& to_marker = std::string()); + +// these overloads which call io_ctx.operate() should not be called in the rgw. 
+// rgw_rados_operate() should be used instead, together with the overloads above that do not call io_ctx.operate().
+#ifndef CLS_CLIENT_HIDE_IOCTX
+int cls_timeindex_trim(
+ librados::IoCtx& io_ctx,
+ const std::string& oid,
+ const utime_t& from_time,
+ const utime_t& to_time,
+ const std::string& from_marker = std::string(),
+ const std::string& to_marker = std::string());
+#endif
+
+#endif
diff --git a/src/cls/timeindex/cls_timeindex_ops.h b/src/cls/timeindex/cls_timeindex_ops.h
new file mode 100644
index 000000000..f40058954
--- /dev/null
+++ b/src/cls/timeindex/cls_timeindex_ops.h
@@ -0,0 +1,115 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_TIMEINDEX_OPS_H
+#define CEPH_CLS_TIMEINDEX_OPS_H
+
+#include "cls_timeindex_types.h"
+
+struct cls_timeindex_add_op {
+ std::list<cls_timeindex_entry> entries;
+
+ cls_timeindex_add_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_timeindex_add_op)
+
+struct cls_timeindex_list_op {
+ utime_t from_time;
+ std::string marker; /* if not empty, overrides from_time */
+ utime_t to_time; /* not inclusive */
+ int max_entries; /* upper bound on the number of returned entries;
+ fewer may be returned and the result may still be truncated */
+
+ cls_timeindex_list_op() : max_entries(0) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(from_time, bl);
+ encode(marker, bl);
+ encode(to_time, bl);
+ encode(max_entries, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(from_time, bl);
+ decode(marker, bl);
+ decode(to_time, bl);
+ decode(max_entries, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_timeindex_list_op)
+
+struct cls_timeindex_list_ret {
+ std::list<cls_timeindex_entry> entries;
+ std::string marker;
+ bool truncated;
+
+ cls_timeindex_list_ret() : truncated(false) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ encode(marker, bl);
+ encode(truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ decode(marker, bl);
+ decode(truncated, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_timeindex_list_ret)
+
+
+/*
+ * The trim operation returns 0 when entries were removed but trimming is not
+ * yet complete; it returns -ENODATA once done, so the caller needs to repeat
+ * the request until -ENODATA is returned. 
+ */
+struct cls_timeindex_trim_op {
+ utime_t from_time;
+ utime_t to_time; /* inclusive */
+ std::string from_marker;
+ std::string to_marker;
+
+ cls_timeindex_trim_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(from_time, bl);
+ encode(to_time, bl);
+ encode(from_marker, bl);
+ encode(to_marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(from_time, bl);
+ decode(to_time, bl);
+ decode(from_marker, bl);
+ decode(to_marker, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_timeindex_trim_op)
+
+#endif /* CEPH_CLS_TIMEINDEX_OPS_H */
diff --git a/src/cls/timeindex/cls_timeindex_types.cc b/src/cls/timeindex/cls_timeindex_types.cc
new file mode 100644
index 000000000..1a748967b
--- /dev/null
+++ b/src/cls/timeindex/cls_timeindex_types.cc
@@ -0,0 +1,21 @@
+#include "cls_timeindex_types.h"
+#include "common/Formatter.h"
+
+void cls_timeindex_entry::dump(Formatter *f) const
+{
+ f->dump_stream("key_ts") << key_ts;
+ f->dump_string("key_ext", key_ext);
+ f->dump_string("value", value.to_str());
+}
+
+void cls_timeindex_entry::generate_test_instances(std::list<cls_timeindex_entry*>& o)
+{
+ cls_timeindex_entry *i = new cls_timeindex_entry;
+ i->key_ts = utime_t(0,0);
+ i->key_ext = "foo";
+ bufferlist bl;
+ bl.append("bar");
+ i->value = bl;
+ o.push_back(i);
+ o.push_back(new cls_timeindex_entry);
+}
diff --git a/src/cls/timeindex/cls_timeindex_types.h b/src/cls/timeindex/cls_timeindex_types.h
new file mode 100644
index 000000000..d33886881
--- /dev/null
+++ b/src/cls/timeindex/cls_timeindex_types.h
@@ -0,0 +1,46 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_TIMEINDEX_TYPES_H
+#define CEPH_CLS_TIMEINDEX_TYPES_H
+
+#include "include/encoding.h"
+#include "include/types.h"
+
+#include "include/utime.h"
+
+class JSONObj;
+
+struct cls_timeindex_entry {
+ /* Mandatory timestamp. Will be part of the key. */
+ utime_t key_ts;
+ /* Not mandatory. The key_ext field, if not empty, will form the second
+ * part of the key. */
+ std::string key_ext;
+ /* Becomes the value of the OMAP-based mapping. 
*/ + ceph::buffer::list value; + + cls_timeindex_entry() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(key_ts, bl); + encode(key_ext, bl); + encode(value, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(key_ts, bl); + decode(key_ext, bl); + decode(value, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_timeindex_entry*>& o); +}; +WRITE_CLASS_ENCODER(cls_timeindex_entry) + +#endif /* CEPH_CLS_TIMEINDEX_TYPES_H */ diff --git a/src/cls/user/cls_user.cc b/src/cls/user/cls_user.cc new file mode 100644 index 000000000..e278ad7fc --- /dev/null +++ b/src/cls/user/cls_user.cc @@ -0,0 +1,531 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include <errno.h> + +#include "include/utime.h" +#include "objclass/objclass.h" + +#include "cls_user_ops.h" + +using std::map; +using std::string; + +using ceph::bufferlist; +using ceph::decode; +using ceph::encode; + +CLS_VER(1,0) +CLS_NAME(user) + +static int write_entry(cls_method_context_t hctx, const string& key, const cls_user_bucket_entry& entry) +{ + bufferlist bl; + encode(entry, bl); + + int ret = cls_cxx_map_set_val(hctx, key, &bl); + if (ret < 0) + return ret; + + return 0; +} + +static int remove_entry(cls_method_context_t hctx, const string& key) +{ + int ret = cls_cxx_map_remove_key(hctx, key); + if (ret < 0) + return ret; + + return 0; +} + +static void get_key_by_bucket_name(const string& bucket_name, string *key) +{ + *key = bucket_name; +} + +static int get_existing_bucket_entry(cls_method_context_t hctx, const string& bucket_name, + cls_user_bucket_entry& entry) +{ + if (bucket_name.empty()) { + return -EINVAL; + } + + string key; + get_key_by_bucket_name(bucket_name, &key); + + bufferlist bl; + int rc = cls_cxx_map_get_val(hctx, key, &bl); + if (rc < 0) { + CLS_LOG(10, "could not read entry %s", key.c_str()); + return rc; + } + try { + auto iter = bl.cbegin(); + decode(entry, iter); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: failed to decode entry %s", key.c_str()); + return -EIO; + } + + return 0; +} + +static int read_header(cls_method_context_t hctx, cls_user_header *header) +{ + bufferlist bl; + + int ret = cls_cxx_map_read_header(hctx, &bl); + if (ret < 0) + return ret; + + if (bl.length() == 0) { + *header = cls_user_header(); + return 0; + } + + try { + decode(*header, bl); + } catch (ceph::buffer::error& err) { + CLS_LOG(0, "ERROR: failed to decode user header"); + return -EIO; + } + + return 0; +} + +static void add_header_stats(cls_user_stats *stats, cls_user_bucket_entry& entry) +{ + stats->total_entries += entry.count; + stats->total_bytes += entry.size; + stats->total_bytes_rounded += entry.size_rounded; +} + +static void dec_header_stats(cls_user_stats *stats, cls_user_bucket_entry& entry) +{ + stats->total_bytes -= entry.size; + stats->total_bytes_rounded -= entry.size_rounded; + stats->total_entries -= entry.count; +} + +static void apply_entry_stats(const cls_user_bucket_entry& src_entry, cls_user_bucket_entry *target_entry) +{ + target_entry->size = src_entry.size; + target_entry->size_rounded = src_entry.size_rounded; + target_entry->count = src_entry.count; +} + +static int cls_user_set_buckets_info(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + auto in_iter = in->cbegin(); + + cls_user_set_buckets_op op; + try { + decode(op, in_iter); + } catch 
(ceph::buffer::error& err) {
+    CLS_LOG(1, "ERROR: cls_user_set_buckets_info(): failed to decode op");
+    return -EINVAL;
+  }
+
+  cls_user_header header;
+  int ret = read_header(hctx, &header);
+  if (ret < 0) {
+    CLS_LOG(0, "ERROR: failed to read user info header ret=%d", ret);
+    return ret;
+  }
+
+  for (auto iter = op.entries.begin(); iter != op.entries.end(); ++iter) {
+    cls_user_bucket_entry& update_entry = *iter;
+
+    string key;
+
+    get_key_by_bucket_name(update_entry.bucket.name, &key);
+
+    cls_user_bucket_entry entry;
+    ret = get_existing_bucket_entry(hctx, key, entry);
+
+    if (ret == -ENOENT) {
+      if (!op.add)
+        continue; /* racing bucket removal */
+
+      entry = update_entry;
+
+      ret = 0;
+    } else if (op.add) {
+      // bucket id may have changed (ie reshard)
+      entry.bucket.bucket_id = update_entry.bucket.bucket_id;
+      // creation date may have changed (ie delete/recreate bucket)
+      entry.creation_time = update_entry.creation_time;
+    }
+
+    if (ret < 0) {
+      CLS_LOG(0, "ERROR: get_existing_bucket_entry() key=%s returned %d", key.c_str(), ret);
+      return ret;
+    } else if (ret >= 0 && entry.user_stats_sync) {
+      dec_header_stats(&header.stats, entry);
+    }
+
+    CLS_LOG(20, "storing entry for key=%s size=%lld count=%lld",
+            key.c_str(), (long long)update_entry.size, (long long)update_entry.count);
+
+    // sync the entry stats only when this is not an add: on add, a new
+    // entry has already been copied from update_entry above, and for an
+    // existing entry we would otherwise clobber the bucket's current stats
+    if (!op.add) {
+      apply_entry_stats(update_entry, &entry);
+    }
+    entry.user_stats_sync = true;
+
+    ret = write_entry(hctx, key, entry);
+    if (ret < 0)
+      return ret;
+
+    add_header_stats(&header.stats, entry);
+  }
+
+  bufferlist bl;
+
+  CLS_LOG(20, "header: total bytes=%lld entries=%lld", (long long)header.stats.total_bytes, (long long)header.stats.total_entries);
+
+  if (header.last_stats_update < op.time)
+    header.last_stats_update = op.time;
+
+  encode(header, bl);
+
+  ret = cls_cxx_map_write_header(hctx, &bl);
+  if (ret < 0)
+    return ret;
+
+  return 0;
+}
+
+static int cls_user_complete_stats_sync(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  auto in_iter = in->cbegin();
+
+  cls_user_complete_stats_sync_op op;
+  try {
+    decode(op, in_iter);
+  } catch (ceph::buffer::error& err) {
+    CLS_LOG(1, "ERROR: cls_user_complete_stats_sync(): failed to decode op");
+    return -EINVAL;
+  }
+
+  cls_user_header header;
+  int ret = read_header(hctx, &header);
+  if (ret < 0) {
+    CLS_LOG(0, "ERROR: failed to read user info header ret=%d", ret);
+    return ret;
+  }
+
+  if (header.last_stats_sync < op.time)
+    header.last_stats_sync = op.time;
+
+  bufferlist bl;
+
+  encode(header, bl);
+
+  ret = cls_cxx_map_write_header(hctx, &bl);
+  if (ret < 0)
+    return ret;
+
+  return 0;
+}
+
+static int cls_user_remove_bucket(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  auto in_iter = in->cbegin();
+
+  cls_user_remove_bucket_op op;
+  try {
+    decode(op, in_iter);
+  } catch (ceph::buffer::error& err) {
+    CLS_LOG(1, "ERROR: cls_user_remove_bucket(): failed to decode op");
+    return -EINVAL;
+  }
+
+  cls_user_header header;
+  int ret = read_header(hctx, &header);
+  if (ret < 0) {
+    CLS_LOG(0, "ERROR: failed to read user info header ret=%d", ret);
+    return ret;
+  }
+
+  string key;
+
+  get_key_by_bucket_name(op.bucket.name, &key);
+
+  cls_user_bucket_entry entry;
+  ret = get_existing_bucket_entry(hctx, key, entry);
+  if (ret == -ENOENT) {
+    return 0; /* idempotent removal */
+  }
+  if (ret < 0) {
+    CLS_LOG(0, "ERROR: failed to get existing bucket entry, key=%s ret=%d", key.c_str(), ret);
+    return ret;
+  }
+
+  CLS_LOG(20, "removing entry at %s", key.c_str());
+
+  ret = remove_entry(hctx, key);
+  if (ret < 0)
+    return ret;
+
+  if (!entry.user_stats_sync) {
+    return 0;
+  }
+
+  dec_header_stats(&header.stats, entry);
+
+  CLS_LOG(20, "header: total bytes=%lld entries=%lld", (long long)header.stats.total_bytes, (long long)header.stats.total_entries);
+
+  bufferlist bl;
+  encode(header, bl);
+  return cls_cxx_map_write_header(hctx, &bl);
+}
+
+static int cls_user_list_buckets(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  auto in_iter = in->cbegin();
+
+  cls_user_list_buckets_op op;
+  try {
+    decode(op, in_iter);
+  } catch (ceph::buffer::error& err) {
+    CLS_LOG(1, "ERROR: cls_user_list_buckets(): failed to decode op");
+    return -EINVAL;
+  }
+
+  map<string, bufferlist> keys;
+
+  const string& from_index = op.marker;
+  const string& to_index = op.end_marker;
+  const bool to_index_valid = !to_index.empty();
+
+#define MAX_ENTRIES 1000
+  size_t max_entries = op.max_entries;
+  if (max_entries > MAX_ENTRIES)
+    max_entries = MAX_ENTRIES;
+
+  string match_prefix;
+  cls_user_list_buckets_ret ret;
+
+  int rc = cls_cxx_map_get_vals(hctx, from_index, match_prefix, max_entries, &keys, &ret.truncated);
+  if (rc < 0)
+    return rc;
+
+  CLS_LOG(20, "from_index=%s to_index=%s match_prefix=%s",
+          from_index.c_str(),
+          to_index.c_str(),
+          match_prefix.c_str());
+
+  auto& entries = ret.entries;
+  auto iter = keys.begin();
+
+  string marker;
+
+  for (; iter != keys.end(); ++iter) {
+    const string& index = iter->first;
+    marker = index;
+
+    if (to_index_valid && to_index.compare(index) <= 0) {
+      ret.truncated = false;
+      break;
+    }
+
+    bufferlist& bl = iter->second;
+    auto biter = bl.cbegin();
+    try {
+      cls_user_bucket_entry e;
+      decode(e, biter);
+      entries.push_back(e);
+    } catch (ceph::buffer::error& err) {
+      CLS_LOG(0, "ERROR: cls_user_list_buckets: could not decode entry, index=%s", index.c_str());
+    }
+  }
+
+  if (ret.truncated) {
+    ret.marker = marker;
+  }
+
+  encode(ret, *out);
+
+  return 0;
+}
+
+static int cls_user_get_header(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  auto in_iter = in->cbegin();
+
+  cls_user_get_header_op op;
+  try {
+    decode(op, in_iter);
+  } catch (ceph::buffer::error& err) {
+    CLS_LOG(1, "ERROR: cls_user_get_header_op(): failed to decode op");
+    return -EINVAL;
+  }
+
+  cls_user_get_header_ret op_ret;
+
+  int ret = read_header(hctx, &op_ret.header);
+  if (ret < 0)
+    return ret;
+
+  encode(op_ret, *out);
+
+  return 0;
+}
+
+/// A method to reset the user.buckets header stats in accordance with
+/// the values seen in the user.buckets omap keys. This is not
+/// equivalent to --sync-stats, which also re-calculates the stats for
+/// each bucket.
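+/// Note: this legacy variant walks the entire omap within a single
+/// objclass call, while reset_user_stats2 below processes one batch of
+/// keys per call and hands the marker and accumulated stats back to the
+/// caller to resume with.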
+static int cls_user_reset_stats(cls_method_context_t hctx,
+                                bufferlist *in, bufferlist *out /*ignore*/)
+{
+  cls_user_reset_stats_op op;
+
+  try {
+    auto bliter = in->cbegin();
+    decode(op, bliter);
+  } catch (ceph::buffer::error& err) {
+    CLS_LOG(0, "ERROR: %s failed to decode op", __func__);
+    return -EINVAL;
+  }
+
+  cls_user_header header;
+  bool truncated = false;
+  string from_index, prefix;
+  do {
+    map<string, bufferlist> keys;
+    int rc = cls_cxx_map_get_vals(hctx, from_index, prefix, MAX_ENTRIES,
+                                  &keys, &truncated);
+    if (rc < 0) {
+      CLS_LOG(0, "ERROR: %s failed to retrieve omap key-values", __func__);
+      return rc;
+    }
+    CLS_LOG(20, "%s: read %lu key-values, truncated=%d",
+            __func__, keys.size(), truncated);
+
+    for (const auto& kv : keys) {
+      cls_user_bucket_entry e;
+      try {
+        auto bl = kv.second;
+        auto bliter = bl.cbegin();
+        decode(e, bliter);
+      } catch (ceph::buffer::error& err) {
+        CLS_LOG(0, "ERROR: %s failed to decode bucket entry for %s",
+                __func__, kv.first.c_str());
+        return -EIO;
+      }
+      add_header_stats(&header.stats, e);
+    }
+    if (!keys.empty()) {
+      from_index = keys.rbegin()->first;
+    }
+  } while (truncated);
+
+  bufferlist bl;
+  header.last_stats_update = op.time;
+  encode(header, bl);
+
+  CLS_LOG(20, "%s: updating header", __func__);
+  return cls_cxx_map_write_header(hctx, &bl);
+} /* legacy cls_user_reset_stats */
+
+/// A method to reset the user.buckets header stats in accordance with
+/// the values seen in the user.buckets omap keys. This is not
+/// equivalent to --sync-stats, which also re-calculates the stats for
+/// each bucket.
+static int cls_user_reset_stats2(cls_method_context_t hctx,
+                                 buffer::list *in, buffer::list *out)
+{
+  cls_user_reset_stats2_op op;
+
+  try {
+    auto bliter = in->cbegin();
+    decode(op, bliter);
+  } catch (ceph::buffer::error& err) {
+    CLS_LOG(0, "ERROR: %s failed to decode op", __func__);
+    return -EINVAL;
+  }
+
+  cls_user_header header;
+  string from_index{op.marker}, prefix;
+  cls_user_reset_stats2_ret ret;
+
+  map<string, buffer::list> keys;
+  int rc = cls_cxx_map_get_vals(hctx, from_index, prefix, MAX_ENTRIES,
+                                &keys, &ret.truncated);
+  if (rc < 0) {
+    CLS_LOG(0, "ERROR: %s failed to retrieve omap key-values", __func__);
+    return rc;
+  }
+  CLS_LOG(20, "%s: read %lu key-values, truncated=%d",
+          __func__, keys.size(), ret.truncated);
+
+  for (const auto& kv : keys) {
+    cls_user_bucket_entry e;
+    try {
+      auto& bl = kv.second;
+      auto bliter = bl.cbegin();
+      decode(e, bliter);
+    } catch (ceph::buffer::error& err) {
+      CLS_LOG(0, "ERROR: %s failed to decode bucket entry for %s",
+              __func__, kv.first.c_str());
+      return -EIO;
+    }
+    add_header_stats(&ret.acc_stats, e);
+  }
+
+  /* update the marker if we read any keys */
+  if (!keys.empty())
+    ret.marker = keys.crbegin()->first;
+
+  if (!ret.truncated) {
+    buffer::list bl;
+    header.last_stats_update = op.time;
+    header.stats = ret.acc_stats;
+    encode(header, bl);
+
+    CLS_LOG(20, "%s: updating header", __func__);
+    rc = cls_cxx_map_write_header(hctx, &bl);
+
+    /* return final result */
+    encode(ret, *out);
+    return rc;
+  }
+
+  /* return partial result */
+  encode(ret, *out);
+  return 0;
+} /* cls_user_reset_stats2 */
+
+CLS_INIT(user)
+{
+  CLS_LOG(1, "Loaded user class!");
+
+  cls_handle_t h_class;
+  cls_method_handle_t h_user_set_buckets_info;
+  cls_method_handle_t h_user_complete_stats_sync;
+  cls_method_handle_t h_user_remove_bucket;
+  cls_method_handle_t h_user_list_buckets;
+  cls_method_handle_t h_user_get_header;
+  cls_method_handle_t h_user_reset_stats;
+  cls_method_handle_t h_user_reset_stats2;
+
+  cls_register("user", &h_class);
+
+  /* user */
+  cls_register_cxx_method(h_class, "set_buckets_info", CLS_METHOD_RD | CLS_METHOD_WR,
+                          cls_user_set_buckets_info, &h_user_set_buckets_info);
+  cls_register_cxx_method(h_class, "complete_stats_sync", CLS_METHOD_RD | CLS_METHOD_WR,
+                          cls_user_complete_stats_sync, &h_user_complete_stats_sync);
+  cls_register_cxx_method(h_class, "remove_bucket", CLS_METHOD_RD | CLS_METHOD_WR, cls_user_remove_bucket, &h_user_remove_bucket);
+  cls_register_cxx_method(h_class, "list_buckets", CLS_METHOD_RD, cls_user_list_buckets, &h_user_list_buckets);
+  cls_register_cxx_method(h_class, "get_header", CLS_METHOD_RD, cls_user_get_header, &h_user_get_header);
+  cls_register_cxx_method(h_class, "reset_user_stats", CLS_METHOD_RD | CLS_METHOD_WR, cls_user_reset_stats, &h_user_reset_stats);
+  cls_register_cxx_method(h_class, "reset_user_stats2", CLS_METHOD_RD | CLS_METHOD_WR, cls_user_reset_stats2, &h_user_reset_stats2);
+
+  return;
+}
diff --git a/src/cls/user/cls_user_client.cc b/src/cls/user/cls_user_client.cc
new file mode 100644
index 000000000..b74f55b48
--- /dev/null
+++ b/src/cls/user/cls_user_client.cc
@@ -0,0 +1,164 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <errno.h>
+
+#include "cls/user/cls_user_client.h"
+#include "include/rados/librados.hpp"
+
+using std::list;
+using std::string;
+
+using ceph::bufferlist;
+using ceph::real_clock;
+
+using librados::IoCtx;
+using librados::ObjectOperationCompletion;
+using librados::ObjectReadOperation;
+
+void cls_user_set_buckets(librados::ObjectWriteOperation& op, list<cls_user_bucket_entry>& entries, bool add)
+{
+  bufferlist in;
+  cls_user_set_buckets_op call;
+  call.entries = entries;
+  call.add = add;
+  call.time = real_clock::now();
+  encode(call, in);
+  op.exec("user", "set_buckets_info", in);
+}
+
+void cls_user_complete_stats_sync(librados::ObjectWriteOperation& op)
+{
+  bufferlist in;
+  cls_user_complete_stats_sync_op call;
+  call.time = real_clock::now();
+  encode(call, in);
+  op.exec("user", "complete_stats_sync", in);
+}
+
+void cls_user_remove_bucket(librados::ObjectWriteOperation& op, const cls_user_bucket& bucket)
+{
+  bufferlist in;
+  cls_user_remove_bucket_op call;
+  call.bucket = bucket;
+  encode(call, in);
+  op.exec("user", "remove_bucket", in);
+}
+
+class ClsUserListCtx : public ObjectOperationCompletion {
+  list<cls_user_bucket_entry> *entries;
+  string *marker;
+  bool *truncated;
+  int *pret;
+public:
+  ClsUserListCtx(list<cls_user_bucket_entry> *_entries, string *_marker, bool *_truncated, int *_pret) :
+                 entries(_entries), marker(_marker), truncated(_truncated), pret(_pret) {}
+  void handle_completion(int r, bufferlist& outbl) override {
+    if (r >= 0) {
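+      // decode the listing only on success; the final status (including a
+      // decode failure, mapped to -EIO below) reaches the caller via *pret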
cls_user_list_buckets_ret ret; + try { + auto iter = outbl.cbegin(); + decode(ret, iter); + if (entries) + *entries = ret.entries; + if (truncated) + *truncated = ret.truncated; + if (marker) + *marker = ret.marker; + } catch (ceph::buffer::error& err) { + r = -EIO; + } + } + if (pret) { + *pret = r; + } + } +}; + +void cls_user_bucket_list(librados::ObjectReadOperation& op, + const string& in_marker, + const string& end_marker, + int max_entries, + list<cls_user_bucket_entry>& entries, + string *out_marker, + bool *truncated, + int *pret) +{ + bufferlist inbl; + cls_user_list_buckets_op call; + call.marker = in_marker; + call.end_marker = end_marker; + call.max_entries = max_entries; + + encode(call, inbl); + + op.exec("user", "list_buckets", inbl, new ClsUserListCtx(&entries, out_marker, truncated, pret)); +} + +class ClsUserGetHeaderCtx : public ObjectOperationCompletion { + cls_user_header *header; + RGWGetUserHeader_CB *ret_ctx; + int *pret; +public: + ClsUserGetHeaderCtx(cls_user_header *_h, RGWGetUserHeader_CB *_ctx, int *_pret) : header(_h), ret_ctx(_ctx), pret(_pret) {} + ~ClsUserGetHeaderCtx() override { + if (ret_ctx) { + ret_ctx->put(); + } + } + void handle_completion(int r, bufferlist& outbl) override { + if (r >= 0) { + cls_user_get_header_ret ret; + try { + auto iter = outbl.cbegin(); + decode(ret, iter); + if (header) + *header = ret.header; + } catch (ceph::buffer::error& err) { + r = -EIO; + } + if (ret_ctx) { + ret_ctx->handle_response(r, ret.header); + } + } + if (pret) { + *pret = r; + } + } +}; + +void cls_user_get_header(librados::ObjectReadOperation& op, + cls_user_header *header, int *pret) +{ + bufferlist inbl; + cls_user_get_header_op call; + + encode(call, inbl); + + op.exec("user", "get_header", inbl, new ClsUserGetHeaderCtx(header, NULL, pret)); +} + +void cls_user_reset_stats(librados::ObjectWriteOperation &op) +{ + bufferlist inbl; + cls_user_reset_stats_op call; + call.time = real_clock::now(); + encode(call, inbl); + op.exec("user", "reset_user_stats", inbl); +} + +int cls_user_get_header_async(IoCtx& io_ctx, string& oid, RGWGetUserHeader_CB *ctx) +{ + bufferlist in, out; + cls_user_get_header_op call; + encode(call, in); + ObjectReadOperation op; + op.exec("user", "get_header", in, new ClsUserGetHeaderCtx(NULL, ctx, NULL)); /* no need to pass pret, as we'll call ctx->handle_response() with correct error */ + auto c = librados::Rados::aio_create_completion(nullptr, nullptr); + int r = io_ctx.aio_operate(oid, c, &op, NULL); + c->release(); + if (r < 0) + return r; + + return 0; +} diff --git a/src/cls/user/cls_user_client.h b/src/cls/user/cls_user_client.h new file mode 100644 index 000000000..03d975c59 --- /dev/null +++ b/src/cls/user/cls_user_client.h @@ -0,0 +1,36 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_USER_CLIENT_H +#define CEPH_CLS_USER_CLIENT_H + +#include "include/rados/librados_fwd.hpp" +#include "cls_user_ops.h" +#include "common/RefCountedObj.h" + +class RGWGetUserHeader_CB : public RefCountedObject { +public: + ~RGWGetUserHeader_CB() override {} + virtual void handle_response(int r, cls_user_header& header) = 0; +}; + +/* + * user objclass + */ + +void cls_user_set_buckets(librados::ObjectWriteOperation& op, std::list<cls_user_bucket_entry>& entries, bool add); +void cls_user_complete_stats_sync(librados::ObjectWriteOperation& op); +void cls_user_remove_bucket(librados::ObjectWriteOperation& op, const cls_user_bucket& bucket); +void 
cls_user_bucket_list(librados::ObjectReadOperation& op, + const std::string& in_marker, + const std::string& end_marker, + int max_entries, + std::list<cls_user_bucket_entry>& entries, + std::string *out_marker, + bool *truncated, + int *pret); +void cls_user_get_header(librados::ObjectReadOperation& op, cls_user_header *header, int *pret); +int cls_user_get_header_async(librados::IoCtx& io_ctx, std::string& oid, RGWGetUserHeader_CB *ctx); +void cls_user_reset_stats(librados::ObjectWriteOperation& op); + +#endif diff --git a/src/cls/user/cls_user_ops.cc b/src/cls/user/cls_user_ops.cc new file mode 100644 index 000000000..5ae9d2c93 --- /dev/null +++ b/src/cls/user/cls_user_ops.cc @@ -0,0 +1,118 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "cls/user/cls_user_ops.h" +#include "common/Formatter.h" +#include "common/ceph_json.h" + +using std::list; + +using ceph::Formatter; + +void cls_user_set_buckets_op::dump(Formatter *f) const +{ + encode_json("entries", entries, f); + encode_json("add", add, f); + encode_json("time", utime_t(time), f); +} + +void cls_user_set_buckets_op::generate_test_instances(list<cls_user_set_buckets_op*>& ls) +{ + ls.push_back(new cls_user_set_buckets_op); + cls_user_set_buckets_op *op = new cls_user_set_buckets_op; + for (int i = 0; i < 3; i++) { + cls_user_bucket_entry e; + cls_user_gen_test_bucket_entry(&e, i); + op->entries.push_back(e); + } + op->add = true; + op->time = utime_t(1, 0).to_real_time(); + ls.push_back(op); +} + +void cls_user_remove_bucket_op::dump(Formatter *f) const +{ + encode_json("bucket", bucket, f); +} + +void cls_user_remove_bucket_op::generate_test_instances(list<cls_user_remove_bucket_op*>& ls) +{ + ls.push_back(new cls_user_remove_bucket_op); + cls_user_remove_bucket_op *op = new cls_user_remove_bucket_op; + cls_user_gen_test_bucket(&op->bucket, 0); + ls.push_back(op); +} + +void cls_user_list_buckets_op::dump(Formatter *f) const +{ + encode_json("marker", marker, f); + encode_json("max_entries", max_entries, f); +} + +void cls_user_list_buckets_op::generate_test_instances(list<cls_user_list_buckets_op*>& ls) +{ + ls.push_back(new cls_user_list_buckets_op); + cls_user_list_buckets_op *op = new cls_user_list_buckets_op; + op->marker = "marker"; + op->max_entries = 1000; + ls.push_back(op); +} + +void cls_user_list_buckets_ret::dump(Formatter *f) const +{ + encode_json("entries", entries, f); + encode_json("marker", marker, f); + encode_json("truncated", truncated, f); +} + +void cls_user_list_buckets_ret::generate_test_instances(list<cls_user_list_buckets_ret*>& ls) +{ + ls.push_back(new cls_user_list_buckets_ret); + cls_user_list_buckets_ret *ret = new cls_user_list_buckets_ret; + for (int i = 0; i < 3; i++) { + cls_user_bucket_entry e; + cls_user_gen_test_bucket_entry(&e, i); + ret->entries.push_back(e); + } + ret->marker = "123"; + ret->truncated = true; + ls.push_back(ret); +} + +void cls_user_get_header_op::dump(Formatter *f) const +{ + // empty! 
+} + +void cls_user_get_header_op::generate_test_instances(list<cls_user_get_header_op*>& ls) +{ + ls.push_back(new cls_user_get_header_op); +} + +void cls_user_get_header_ret::dump(Formatter *f) const +{ + encode_json("header", header, f); +} + +void cls_user_get_header_ret::generate_test_instances(list<cls_user_get_header_ret*>& ls) +{ + ls.push_back(new cls_user_get_header_ret); + cls_user_get_header_ret *ret = new cls_user_get_header_ret; + cls_user_gen_test_header(&ret->header); + ls.push_back(ret); +} + +void cls_user_complete_stats_sync_op::dump(Formatter *f) const +{ + encode_json("time", utime_t(time), f); +} + +void cls_user_complete_stats_sync_op::generate_test_instances(list<cls_user_complete_stats_sync_op*>& ls) +{ + ls.push_back(new cls_user_complete_stats_sync_op); + cls_user_complete_stats_sync_op *op = new cls_user_complete_stats_sync_op; + op->time = utime_t(12345, 0).to_real_time(); + ls.push_back(op); +} + + diff --git a/src/cls/user/cls_user_ops.h b/src/cls/user/cls_user_ops.h new file mode 100644 index 000000000..7edd1bc15 --- /dev/null +++ b/src/cls/user/cls_user_ops.h @@ -0,0 +1,267 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_CLS_USER_OPS_H +#define CEPH_CLS_USER_OPS_H + +#include "cls_user_types.h" + +struct cls_user_set_buckets_op { + std::list<cls_user_bucket_entry> entries; + bool add; + ceph::real_time time; /* op time */ + + cls_user_set_buckets_op() : add(false) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(entries, bl); + encode(add, bl); + encode(time, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(entries, bl); + decode(add, bl); + decode(time, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_user_set_buckets_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_user_set_buckets_op) + +struct cls_user_remove_bucket_op { + cls_user_bucket bucket; + + cls_user_remove_bucket_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(bucket, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(bucket, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_user_remove_bucket_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_user_remove_bucket_op) + +struct cls_user_list_buckets_op { + std::string marker; + std::string end_marker; + int max_entries; /* upperbound to returned num of entries + might return less than that and still be truncated */ + + cls_user_list_buckets_op() + : max_entries(0) {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(2, 1, bl); + encode(marker, bl); + encode(max_entries, bl); + encode(end_marker, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(2, bl); + decode(marker, bl); + decode(max_entries, bl); + if (struct_v >= 2) { + decode(end_marker, bl); + } + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_user_list_buckets_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_user_list_buckets_op) + +struct cls_user_list_buckets_ret { + std::list<cls_user_bucket_entry> entries; + std::string marker; + bool truncated; + + cls_user_list_buckets_ret() : truncated(false) {} + + void encode(ceph::buffer::list& bl) const { + 
ENCODE_START(1, 1, bl); + encode(entries, bl); + encode(marker, bl); + encode(truncated, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(entries, bl); + decode(marker, bl); + decode(truncated, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_user_list_buckets_ret*>& ls); +}; +WRITE_CLASS_ENCODER(cls_user_list_buckets_ret) + + +struct cls_user_get_header_op { + cls_user_get_header_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_user_get_header_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_user_get_header_op) + +struct cls_user_reset_stats_op { + ceph::real_time time; + cls_user_reset_stats_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(time, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(time, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_user_reset_stats_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_user_reset_stats_op); + +struct cls_user_reset_stats2_op { + ceph::real_time time; + std::string marker; + cls_user_stats acc_stats; + + cls_user_reset_stats2_op() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(time, bl); + encode(marker, bl); + encode(acc_stats, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(time, bl); + decode(marker, bl); + decode(acc_stats, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_user_reset_stats2_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_user_reset_stats2_op); + +struct cls_user_reset_stats2_ret { + std::string marker; + cls_user_stats acc_stats; /* 0-initialized */ + bool truncated; + + cls_user_reset_stats2_ret() + : truncated(false) {} + + void update_call(cls_user_reset_stats2_op& call) { + call.marker = marker; + call.acc_stats = acc_stats; + } + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(marker, bl); + encode(acc_stats, bl); + encode(truncated, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(marker, bl); + decode(acc_stats, bl); + decode(truncated, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances( + std::list<cls_user_reset_stats2_ret*>& ls); +}; +WRITE_CLASS_ENCODER(cls_user_reset_stats2_ret); + +struct cls_user_get_header_ret { + cls_user_header header; + + cls_user_get_header_ret() {} + + void encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(header, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(header, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_user_get_header_ret*>& ls); +}; +WRITE_CLASS_ENCODER(cls_user_get_header_ret) + +struct cls_user_complete_stats_sync_op { + ceph::real_time time; + + cls_user_complete_stats_sync_op() {} + + void 
encode(ceph::buffer::list& bl) const { + ENCODE_START(1, 1, bl); + encode(time, bl); + ENCODE_FINISH(bl); + } + + void decode(ceph::buffer::list::const_iterator& bl) { + DECODE_START(1, bl); + decode(time, bl); + DECODE_FINISH(bl); + } + + void dump(ceph::Formatter *f) const; + static void generate_test_instances(std::list<cls_user_complete_stats_sync_op*>& ls); +}; +WRITE_CLASS_ENCODER(cls_user_complete_stats_sync_op) + + +#endif diff --git a/src/cls/user/cls_user_types.cc b/src/cls/user/cls_user_types.cc new file mode 100644 index 000000000..0d823f0be --- /dev/null +++ b/src/cls/user/cls_user_types.cc @@ -0,0 +1,111 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "cls/user/cls_user_types.h" +#include "common/Formatter.h" +#include "common/ceph_json.h" +#include "include/utime.h" + +using std::list; +using std::string; + +using ceph::Formatter; +using ceph::bufferlist; +using ceph::real_clock; + +void cls_user_gen_test_bucket(cls_user_bucket *bucket, int i) +{ + char buf[16]; + snprintf(buf, sizeof(buf), ".%d", i); + + bucket->name = string("buck") + buf; + bucket->marker = string("mark") + buf; + bucket->bucket_id = string("bucket.id") + buf; +} + +void cls_user_bucket::dump(Formatter *f) const +{ + encode_json("name", name, f); + encode_json("marker", marker,f); + encode_json("bucket_id", bucket_id,f); +} + +void cls_user_bucket::generate_test_instances(list<cls_user_bucket*>& ls) +{ + ls.push_back(new cls_user_bucket); + cls_user_bucket *b = new cls_user_bucket; + cls_user_gen_test_bucket(b, 0); + ls.push_back(b); +} + +void cls_user_bucket_entry::dump(Formatter *f) const +{ + encode_json("bucket", bucket, f); + encode_json("size", size, f); + encode_json("size_rounded", size_rounded, f); + encode_json("creation_time", utime_t(creation_time), f); + encode_json("count", count, f); + encode_json("user_stats_sync", user_stats_sync, f); +} + +void cls_user_gen_test_bucket_entry(cls_user_bucket_entry *entry, int i) +{ + cls_user_gen_test_bucket(&entry->bucket, i); + entry->size = i + 1; + entry->size_rounded = i + 2; + entry->creation_time = real_clock::from_time_t(i + 3); + entry->count = i + 4; + entry->user_stats_sync = true; +} + +void cls_user_bucket_entry::generate_test_instances(list<cls_user_bucket_entry*>& ls) +{ + ls.push_back(new cls_user_bucket_entry); + cls_user_bucket_entry *entry = new cls_user_bucket_entry; + cls_user_gen_test_bucket_entry(entry, 0); + ls.push_back(entry); +} + +void cls_user_gen_test_stats(cls_user_stats *s) +{ + s->total_entries = 1; + s->total_bytes = 2; + s->total_bytes_rounded = 3; +} + +void cls_user_stats::dump(Formatter *f) const +{ + f->dump_int("total_entries", total_entries); + f->dump_int("total_bytes", total_bytes); + f->dump_int("total_bytes_rounded", total_bytes_rounded); +} + +void cls_user_stats::generate_test_instances(list<cls_user_stats*>& ls) +{ + ls.push_back(new cls_user_stats); + cls_user_stats *s = new cls_user_stats; + cls_user_gen_test_stats(s); + ls.push_back(s); +} + +void cls_user_gen_test_header(cls_user_header *h) +{ + cls_user_gen_test_stats(&h->stats); + h->last_stats_sync = utime_t(1, 0).to_real_time(); + h->last_stats_update = utime_t(2, 0).to_real_time(); +} + +void cls_user_header::dump(Formatter *f) const +{ + encode_json("stats", stats, f); + encode_json("last_stats_sync", utime_t(last_stats_sync), f); + encode_json("last_stats_update", utime_t(last_stats_update), f); +} + +void cls_user_header::generate_test_instances(list<cls_user_header*>& ls) +{ + 
ls.push_back(new cls_user_header);
+  cls_user_header *h = new cls_user_header;
+  cls_user_gen_test_header(h);
+  ls.push_back(h);
+}
diff --git a/src/cls/user/cls_user_types.h b/src/cls/user/cls_user_types.h
new file mode 100644
index 000000000..a139449d3
--- /dev/null
+++ b/src/cls/user/cls_user_types.h
@@ -0,0 +1,224 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_USER_TYPES_H
+#define CEPH_CLS_USER_TYPES_H
+
+#include "include/encoding.h"
+#include "include/types.h"
+#include "include/utime.h"
+#include "common/ceph_time.h"
+
+/*
+ * this needs to be compatible with rgw_bucket, as it replaces it
+ */
+struct cls_user_bucket {
+  std::string name;
+  std::string marker;
+  std::string bucket_id;
+  std::string placement_id;
+  struct {
+    std::string data_pool;
+    std::string index_pool;
+    std::string data_extra_pool;
+  } explicit_placement;
+
+  void encode(ceph::buffer::list& bl) const {
+    /* The new version of this structure is not backward compatible, and
+     * an older rgw may be running against a newer osd. Only encode the
+     * newer version if placement_id is not empty; otherwise keep the
+     * old encoding.
+     */
+    if (!placement_id.empty()) {
+      ENCODE_START(9, 8, bl);
+      encode(name, bl);
+      encode(marker, bl);
+      encode(bucket_id, bl);
+      encode(placement_id, bl);
+      ENCODE_FINISH(bl);
+    } else {
+      ENCODE_START(7, 3, bl);
+      encode(name, bl);
+      encode(explicit_placement.data_pool, bl);
+      encode(marker, bl);
+      encode(bucket_id, bl);
+      encode(explicit_placement.index_pool, bl);
+      encode(explicit_placement.data_extra_pool, bl);
+      ENCODE_FINISH(bl);
+    }
+  }
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START_LEGACY_COMPAT_LEN(8, 3, 3, bl);
+    decode(name, bl);
+    if (struct_v < 8) {
+      decode(explicit_placement.data_pool, bl);
+    }
+    if (struct_v >= 2) {
+      decode(marker, bl);
+      if (struct_v <= 3) {
+        uint64_t id;
+        decode(id, bl);
+        char buf[16];
+        snprintf(buf, sizeof(buf), "%llu", (long long)id);
+        bucket_id = buf;
+      } else {
+        decode(bucket_id, bl);
+      }
+    }
+    if (struct_v < 8) {
+      if (struct_v >= 5) {
+        decode(explicit_placement.index_pool, bl);
+      } else {
+        explicit_placement.index_pool = explicit_placement.data_pool;
+      }
+      if (struct_v >= 7) {
+        decode(explicit_placement.data_extra_pool, bl);
+      }
+    } else {
+      decode(placement_id, bl);
+      if (struct_v == 8 && placement_id.empty()) {
+        decode(explicit_placement.data_pool, bl);
+        decode(explicit_placement.index_pool, bl);
+        decode(explicit_placement.data_extra_pool, bl);
+      }
+    }
+    DECODE_FINISH(bl);
+  }
+
+  bool operator<(const cls_user_bucket& b) const {
+    return name.compare(b.name) < 0;
+  }
+
+  void dump(ceph::Formatter *f) const;
+  static void generate_test_instances(std::list<cls_user_bucket*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_bucket)
+
+/*
+ * this structure overrides RGWBucketEnt
+ */
+struct cls_user_bucket_entry {
+  cls_user_bucket bucket;
+  size_t size;
+  size_t size_rounded;
+  ceph::real_time creation_time;
+  uint64_t count;
+  bool user_stats_sync;
+
+  cls_user_bucket_entry() : size(0), size_rounded(0), count(0), user_stats_sync(false) {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(9, 5, bl);
+    uint64_t s = size;
+    __u32 mt = ceph::real_clock::to_time_t(creation_time);
+    std::string empty_str; // originally had the bucket name here, but we encode bucket later
+    encode(empty_str, bl);
+    encode(s, bl);
+    encode(mt, bl);
+    encode(count, bl);
+    encode(bucket, bl);
+    s = size_rounded;
+    encode(s, bl);
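+    // v6 added the user_stats_sync flag and v7 the full-resolution
+    // creation_time; the decoder below gates on struct_v accordingly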
encode(user_stats_sync, bl);
+    encode(creation_time, bl);
+    //::encode(placement_rule, bl); removed in v9
+    ENCODE_FINISH(bl);
+  }
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START_LEGACY_COMPAT_LEN(9, 5, 5, bl);
+    __u32 mt;
+    uint64_t s;
+    std::string empty_str; // backward compatibility
+    decode(empty_str, bl);
+    decode(s, bl);
+    decode(mt, bl);
+    size = s;
+    if (struct_v < 7) {
+      creation_time = ceph::real_clock::from_time_t(mt);
+    }
+    if (struct_v >= 2)
+      decode(count, bl);
+    if (struct_v >= 3)
+      decode(bucket, bl);
+    if (struct_v >= 4)
+      decode(s, bl);
+    size_rounded = s;
+    if (struct_v >= 6)
+      decode(user_stats_sync, bl);
+    if (struct_v >= 7)
+      decode(creation_time, bl);
+    if (struct_v == 8) { // added in v8, removed in v9
+      std::string placement_rule;
+      decode(placement_rule, bl);
+    }
+    DECODE_FINISH(bl);
+  }
+  void dump(ceph::Formatter *f) const;
+  static void generate_test_instances(std::list<cls_user_bucket_entry*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_bucket_entry)
+
+struct cls_user_stats {
+  uint64_t total_entries;
+  uint64_t total_bytes;
+  uint64_t total_bytes_rounded;
+
+  cls_user_stats()
+    : total_entries(0),
+      total_bytes(0),
+      total_bytes_rounded(0) {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(total_entries, bl);
+    encode(total_bytes, bl);
+    encode(total_bytes_rounded, bl);
+    ENCODE_FINISH(bl);
+  }
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(total_entries, bl);
+    decode(total_bytes, bl);
+    decode(total_bytes_rounded, bl);
+    DECODE_FINISH(bl);
+  }
+
+  void dump(ceph::Formatter *f) const;
+  static void generate_test_instances(std::list<cls_user_stats*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_stats)
+
+/*
+ * header of the user's bucket-index object; holds the aggregated
+ * stats along with the last sync/update times
+ */
+struct cls_user_header {
+  cls_user_stats stats;
+  ceph::real_time last_stats_sync; /* last time a full stats sync completed */
+  ceph::real_time last_stats_update; /* last time a stats update was done */
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(stats, bl);
+    encode(last_stats_sync, bl);
+    encode(last_stats_update, bl);
+    ENCODE_FINISH(bl);
+  }
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(stats, bl);
+    decode(last_stats_sync, bl);
+    decode(last_stats_update, bl);
+    DECODE_FINISH(bl);
+  }
+
+  void dump(ceph::Formatter *f) const;
+  static void generate_test_instances(std::list<cls_user_header*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_header)
+
+void cls_user_gen_test_bucket(cls_user_bucket *bucket, int i);
+void cls_user_gen_test_bucket_entry(cls_user_bucket_entry *entry, int i);
+void cls_user_gen_test_stats(cls_user_stats *stats);
+void cls_user_gen_test_header(cls_user_header *h);
+
+#endif
diff --git a/src/cls/version/cls_version.cc b/src/cls/version/cls_version.cc
new file mode 100644
index 000000000..2e8ec91ed
--- /dev/null
+++ b/src/cls/version/cls_version.cc
@@ -0,0 +1,238 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <errno.h>
+
+#include "objclass/objclass.h"
+
+#include "cls/version/cls_version_ops.h"
+
+#include "include/compat.h"
+
+using std::list;
+
+using ceph::bufferlist;
+
+CLS_VER(1,0)
+CLS_NAME(version)
+
+
+#define VERSION_ATTR "ceph.objclass.version"
+
+static int set_version(cls_method_context_t hctx, struct obj_version *objv)
+{
+  bufferlist bl;
+
+  encode(*objv, bl);
+
+  CLS_LOG(20, "cls_version: set_version %s:%d",
objv->tag.c_str(), (int)objv->ver);
+
+  int ret = cls_cxx_setxattr(hctx, VERSION_ATTR, &bl);
+  if (ret < 0)
+    return ret;
+
+  return 0;
+}
+
+static int init_version(cls_method_context_t hctx, struct obj_version *objv)
+{
+#define TAG_LEN 24
+  char buf[TAG_LEN + 1];
+
+  int ret = cls_gen_rand_base64(buf, sizeof(buf));
+  if (ret < 0)
+    return ret;
+
+  objv->ver = 1;
+  objv->tag = buf;
+
+  CLS_LOG(20, "cls_version: init_version %s:%d", objv->tag.c_str(), (int)objv->ver);
+
+  return set_version(hctx, objv);
+}
+
+/* implicit create should be true only if called from a write operation (set, inc), never from a read operation (read, check) */
+static int read_version(cls_method_context_t hctx, obj_version *objv, bool implicit_create)
+{
+  bufferlist bl;
+  int ret = cls_cxx_getxattr(hctx, VERSION_ATTR, &bl);
+  if (ret == -ENOENT || ret == -ENODATA) {
+    objv->ver = 0;
+
+    if (implicit_create) {
+      return init_version(hctx, objv);
+    }
+    return 0;
+  }
+  if (ret < 0)
+    return ret;
+
+  try {
+    auto iter = bl.cbegin();
+    decode(*objv, iter);
+  } catch (ceph::buffer::error& err) {
+    CLS_LOG(0, "ERROR: read_version(): failed to decode version entry\n");
+    return -EIO;
+  }
+  CLS_LOG(20, "cls_version: read_version %s:%d", objv->tag.c_str(), (int)objv->ver);
+
+  return 0;
+}
+
+static int cls_version_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  auto in_iter = in->cbegin();
+
+  cls_version_set_op op;
+  try {
+    decode(op, in_iter);
+  } catch (ceph::buffer::error& err) {
+    CLS_LOG(1, "ERROR: cls_version_set(): failed to decode entry\n");
+    return -EINVAL;
+  }
+
+  int ret = set_version(hctx, &op.objv);
+  if (ret < 0)
+    return ret;
+
+  return 0;
+}
+
+static bool check_conds(list<obj_version_cond>& conds, obj_version& objv)
+{
+  if (conds.empty())
+    return true;
+
+  for (list<obj_version_cond>::iterator iter = conds.begin(); iter != conds.end(); ++iter) {
+    obj_version_cond& cond = *iter;
+    obj_version& v = cond.ver;
+    CLS_LOG(20, "cls_version: check_version %s:%d (cond=%d)", v.tag.c_str(), (int)v.ver, (int)cond.cond);
+
+    switch (cond.cond) {
+    case VER_COND_NONE:
+      break;
+    case VER_COND_EQ:
+      if (!objv.compare(&v))
+        return false;
+      break;
+    case VER_COND_GT:
+      if (!(objv.ver > v.ver))
+        return false;
+      break;
+    case VER_COND_GE:
+      if (!(objv.ver >= v.ver))
+        return false;
+      break;
+    case VER_COND_LT:
+      if (!(objv.ver < v.ver))
+        return false;
+      break;
+    case VER_COND_LE:
+      if (!(objv.ver <= v.ver))
+        return false;
+      break;
+    case VER_COND_TAG_EQ:
+      if (objv.tag.compare(v.tag) != 0)
+        return false;
+      break;
+    case VER_COND_TAG_NE:
+      if (objv.tag.compare(v.tag) == 0)
+        return false;
+      break;
+    }
+  }
+
+  return true;
+}
+
+static int cls_version_inc(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  auto in_iter = in->cbegin();
+
+  cls_version_inc_op op;
+  try {
+    decode(op, in_iter);
+  } catch (ceph::buffer::error& err) {
+    CLS_LOG(1, "ERROR: cls_version_inc(): failed to decode entry\n");
+    return -EINVAL;
+  }
+
+  obj_version objv;
+  int ret = read_version(hctx, &objv, true);
+  if (ret < 0)
+    return ret;
+
+  if (!check_conds(op.conds, objv)) {
+    return -ECANCELED;
+  }
+  objv.inc();
+
+  ret = set_version(hctx, &objv);
+  if (ret < 0)
+    return ret;
+
+  return 0;
+}
+
+static int cls_version_check(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+  auto in_iter = in->cbegin();
+
+  cls_version_check_op op;
+  try {
+    decode(op, in_iter);
+  } catch (ceph::buffer::error& err) {
+    CLS_LOG(1, "ERROR: cls_version_check(): failed to decode entry\n");
+    return -EINVAL;
+  }
+
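+  // check is a read-type op, so read the stored version without
+  // implicitly creating it (implicit_create == false)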
+ obj_version objv; + int ret = read_version(hctx, &objv, false); + if (ret < 0) + return ret; + + if (!check_conds(op.conds, objv)) { + CLS_LOG(20, "cls_version: failed condition check"); + return -ECANCELED; + } + + return 0; +} + +static int cls_version_read(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + obj_version objv; + + cls_version_read_ret read_ret; + int ret = read_version(hctx, &read_ret.objv, false); + if (ret < 0) + return ret; + + encode(read_ret, *out); + + return 0; +} + +CLS_INIT(version) +{ + CLS_LOG(1, "Loaded version class!"); + + cls_handle_t h_class; + cls_method_handle_t h_version_set; + cls_method_handle_t h_version_inc; + cls_method_handle_t h_version_inc_conds; + cls_method_handle_t h_version_read; + cls_method_handle_t h_version_check_conds; + + cls_register("version", &h_class); + + /* version */ + cls_register_cxx_method(h_class, "set", CLS_METHOD_RD | CLS_METHOD_WR, cls_version_set, &h_version_set); + cls_register_cxx_method(h_class, "inc", CLS_METHOD_RD | CLS_METHOD_WR, cls_version_inc, &h_version_inc); + cls_register_cxx_method(h_class, "inc_conds", CLS_METHOD_RD | CLS_METHOD_WR, cls_version_inc, &h_version_inc_conds); + cls_register_cxx_method(h_class, "read", CLS_METHOD_RD, cls_version_read, &h_version_read); + cls_register_cxx_method(h_class, "check_conds", CLS_METHOD_RD, cls_version_check, &h_version_check_conds); + + return; +} + diff --git a/src/cls/version/cls_version_client.cc b/src/cls/version/cls_version_client.cc new file mode 100644 index 000000000..769a7b77b --- /dev/null +++ b/src/cls/version/cls_version_client.cc @@ -0,0 +1,104 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include <errno.h> + +#include "cls/version/cls_version_client.h" +#include "include/rados/librados.hpp" + + +using namespace librados; + + +void cls_version_set(librados::ObjectWriteOperation& op, obj_version& objv) +{ + bufferlist in; + cls_version_set_op call; + call.objv = objv; + encode(call, in); + op.exec("version", "set", in); +} + +void cls_version_inc(librados::ObjectWriteOperation& op) +{ + bufferlist in; + cls_version_inc_op call; + encode(call, in); + op.exec("version", "inc", in); +} + +void cls_version_inc(librados::ObjectWriteOperation& op, obj_version& objv, VersionCond cond) +{ + bufferlist in; + cls_version_inc_op call; + call.objv = objv; + + obj_version_cond c; + c.cond = cond; + c.ver = objv; + + call.conds.push_back(c); + + encode(call, in); + op.exec("version", "inc_conds", in); +} + +void cls_version_check(librados::ObjectOperation& op, obj_version& objv, VersionCond cond) +{ + bufferlist in; + cls_version_check_op call; + call.objv = objv; + + obj_version_cond c; + c.cond = cond; + c.ver = objv; + + call.conds.push_back(c); + + encode(call, in); + op.exec("version", "check_conds", in); +} + +class VersionReadCtx : public ObjectOperationCompletion { + obj_version *objv; +public: + explicit VersionReadCtx(obj_version *_objv) : objv(_objv) {} + void handle_completion(int r, bufferlist& outbl) override { + if (r >= 0) { + cls_version_read_ret ret; + try { + auto iter = outbl.cbegin(); + decode(ret, iter); + *objv = ret.objv; + } catch (ceph::buffer::error& err) { + // nothing we can do about it atm + } + } + } +}; + +void cls_version_read(librados::ObjectReadOperation& op, obj_version *objv) +{ + bufferlist inbl; + op.exec("version", "read", inbl, new VersionReadCtx(objv)); +} + +int cls_version_read(librados::IoCtx& io_ctx, std::string& oid, obj_version *ver) +{ + bufferlist 
in, out;
+  int r = io_ctx.exec(oid, "version", "read", in, out);
+  if (r < 0)
+    return r;
+
+  cls_version_read_ret ret;
+  try {
+    auto iter = out.cbegin();
+    decode(ret, iter);
+  } catch (ceph::buffer::error& err) {
+    return -EIO;
+  }
+
+  *ver = ret.objv;
+
+  return r;
+}
diff --git a/src/cls/version/cls_version_client.h b/src/cls/version/cls_version_client.h
new file mode 100644
index 000000000..19457855a
--- /dev/null
+++ b/src/cls/version/cls_version_client.h
@@ -0,0 +1,32 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_VERSION_CLIENT_H
+#define CEPH_CLS_VERSION_CLIENT_H
+
+#include "include/rados/librados_fwd.hpp"
+#include "cls_version_ops.h"
+
+/*
+ * version objclass
+ */
+
+void cls_version_set(librados::ObjectWriteOperation& op, obj_version& ver);
+
+/* increase unconditionally */
+void cls_version_inc(librados::ObjectWriteOperation& op);
+
+/* conditional increase; the osd-side method returns -ECANCELED if the condition fails */
+void cls_version_inc(librados::ObjectWriteOperation& op, obj_version& ver, VersionCond cond);
+
+void cls_version_read(librados::ObjectReadOperation& op, obj_version *objv);
+
+// These overloads, which call io_ctx.operate()/exec() directly, should not be
+// used in rgw; there, call rgw_rados_operate() with an operation built by the
+// overloads that do not touch the IoCtx.
+#ifndef CLS_CLIENT_HIDE_IOCTX
+int cls_version_read(librados::IoCtx& io_ctx, std::string& oid, obj_version *ver);
+#endif
+
+void cls_version_check(librados::ObjectOperation& op, obj_version& ver, VersionCond cond);
+
+#endif
diff --git a/src/cls/version/cls_version_ops.h b/src/cls/version/cls_version_ops.h
new file mode 100644
index 000000000..62cd11729
--- /dev/null
+++ b/src/cls/version/cls_version_ops.h
@@ -0,0 +1,92 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_VERSION_OPS_H
+#define CEPH_CLS_VERSION_OPS_H
+
+#include "cls_version_types.h"
+
+struct cls_version_set_op {
+  obj_version objv;
+
+  cls_version_set_op() {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(objv, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(objv, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(cls_version_set_op)
+
+struct cls_version_inc_op {
+  obj_version objv;
+  std::list<obj_version_cond> conds;
+
+  cls_version_inc_op() {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(objv, bl);
+    encode(conds, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(objv, bl);
+    decode(conds, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(cls_version_inc_op)
+
+struct cls_version_check_op {
+  obj_version objv;
+  std::list<obj_version_cond> conds;
+
+  cls_version_check_op() {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(objv, bl);
+    encode(conds, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(objv, bl);
+    decode(conds, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(cls_version_check_op)
+
+struct cls_version_read_ret {
+  obj_version objv;
+
+  cls_version_read_ret() {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(objv, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
decode(objv, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(cls_version_read_ret)
+
+
+#endif
diff --git a/src/cls/version/cls_version_types.cc b/src/cls/version/cls_version_types.cc
new file mode 100644
index 000000000..b82f6aa8a
--- /dev/null
+++ b/src/cls/version/cls_version_types.cc
@@ -0,0 +1,19 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "cls/version/cls_version_types.h"
+#include "common/Formatter.h"
+#include "common/ceph_json.h"
+
+
+void obj_version::dump(ceph::Formatter *f) const
+{
+  f->dump_int("ver", ver);
+  f->dump_string("tag", tag);
+}
+
+void obj_version::decode_json(JSONObj *obj)
+{
+  JSONDecoder::decode_json("ver", ver, obj);
+  JSONDecoder::decode_json("tag", tag, obj);
+}
diff --git a/src/cls/version/cls_version_types.h b/src/cls/version/cls_version_types.h
new file mode 100644
index 000000000..62cc16e33
--- /dev/null
+++ b/src/cls/version/cls_version_types.h
@@ -0,0 +1,98 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_VERSION_TYPES_H
+#define CEPH_CLS_VERSION_TYPES_H
+
+#include "include/encoding.h"
+#include "include/types.h"
+
+class JSONObj;
+
+
+struct obj_version {
+  uint64_t ver;
+  std::string tag;
+
+  obj_version() : ver(0) {}
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(ver, bl);
+    encode(tag, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(ver, bl);
+    decode(tag, bl);
+    DECODE_FINISH(bl);
+  }
+
+  void inc() {
+    ver++;
+  }
+
+  void clear() {
+    ver = 0;
+    tag.clear();
+  }
+
+  bool empty() const {
+    return tag.empty();
+  }
+
+  bool compare(struct obj_version *v) const {
+    return (ver == v->ver &&
+            tag.compare(v->tag) == 0);
+  }
+
+  bool operator==(const struct obj_version& v) const {
+    return (ver == v.ver &&
+            tag.compare(v.tag) == 0);
+  }
+
+  void dump(ceph::Formatter *f) const;
+  void decode_json(JSONObj *obj);
+  static void generate_test_instances(std::list<obj_version*>& o);
+};
+WRITE_CLASS_ENCODER(obj_version)
+
+enum VersionCond {
+  VER_COND_NONE = 0,
+  VER_COND_EQ, /* equal */
+  VER_COND_GT, /* greater than */
+  VER_COND_GE, /* greater or equal */
+  VER_COND_LT, /* less than */
+  VER_COND_LE, /* less or equal */
+  VER_COND_TAG_EQ,
+  VER_COND_TAG_NE,
+};
+
+struct obj_version_cond {
+  struct obj_version ver;
+  VersionCond cond;
+
+  void encode(ceph::buffer::list& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(ver, bl);
+    uint32_t c = (uint32_t)cond;
+    encode(c, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(ceph::buffer::list::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(ver, bl);
+    uint32_t c;
+    decode(c, bl);
+    cond = (VersionCond)c;
+    DECODE_FINISH(bl);
+  }
+
+};
+WRITE_CLASS_ENCODER(obj_version_cond)
+
+
+#endif
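Usage sketch (illustrative only, not part of this change): a hypothetical compare-and-bump helper built from the client calls declared in cls_version_client.h above. It assumes a connected librados::IoCtx and an existing object; when another writer raced ahead, the conditional increment surfaces -ECANCELED through operate().

#include <string>
#include "cls/version/cls_version_client.h"
#include "include/rados/librados.hpp"

// Read the object's current {ver, tag}, then increment the version only if
// it is still the one we read.
int bump_if_unchanged(librados::IoCtx& ioctx, std::string oid)
{
  obj_version cur;
  int r = cls_version_read(ioctx, oid, &cur);  // synchronous "read" call
  if (r < 0)
    return r;

  librados::ObjectWriteOperation op;
  // queues the "inc_conds" method; the objclass re-reads the stored version
  // and fails the op with -ECANCELED when VER_COND_EQ no longer holds
  cls_version_inc(op, cur, VER_COND_EQ);
  return ioctx.operate(oid, &op);
}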