author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
commit    19fcec84d8d7d21e796c7624e521b60d28ee21ed (patch)
tree      42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /src/cls/rgw
parent    Initial commit. (diff)
Adding upstream version 16.2.11+ds.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/cls/rgw')
-rw-r--r--  src/cls/rgw/cls_rgw.cc         4478
-rw-r--r--  src/cls/rgw/cls_rgw_client.cc  1207
-rw-r--r--  src/cls/rgw/cls_rgw_client.h    635
-rw-r--r--  src/cls/rgw/cls_rgw_const.h      80
-rw-r--r--  src/cls/rgw/cls_rgw_ops.cc      547
-rw-r--r--  src/cls/rgw/cls_rgw_ops.h      1504
-rw-r--r--  src/cls/rgw/cls_rgw_types.cc    784
-rw-r--r--  src/cls/rgw/cls_rgw_types.h    1347
8 files changed, 10582 insertions, 0 deletions
diff --git a/src/cls/rgw/cls_rgw.cc b/src/cls/rgw/cls_rgw.cc
new file mode 100644
index 000000000..61585ce64
--- /dev/null
+++ b/src/cls/rgw/cls_rgw.cc
@@ -0,0 +1,4478 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "include/types.h"
+
+#include <errno.h>
+
+#include <boost/algorithm/string.hpp>
+
+#include "objclass/objclass.h"
+#include "cls/rgw/cls_rgw_ops.h"
+#include "cls/rgw/cls_rgw_const.h"
+#include "common/Clock.h"
+#include "common/strtol.h"
+#include "common/escape.h"
+
+#include "include/compat.h"
+#include <boost/lexical_cast.hpp>
+
+using std::pair;
+using std::list;
+using std::map;
+using std::string;
+using std::vector;
+
+using ceph::bufferlist;
+using ceph::decode;
+using ceph::encode;
+using ceph::make_timespan;
+using ceph::real_clock;
+using ceph::real_time;
+using ceph::timespan;
+
+CLS_VER(1,0)
+CLS_NAME(rgw)
+
+
+// No UTF-8 character can begin with 0x80, so this is a safe indicator
+// of a special bucket-index entry for the first byte. Note: although
+// it has no impact, the 2nd, 3rd, or 4th byte of a UTF-8 character
+// may be 0x80.
+#define BI_PREFIX_CHAR 0x80
+
+#define BI_BUCKET_OBJS_INDEX 0
+#define BI_BUCKET_LOG_INDEX 1
+#define BI_BUCKET_OBJ_INSTANCE_INDEX 2
+#define BI_BUCKET_OLH_DATA_INDEX 3
+
+#define BI_BUCKET_LAST_INDEX 4
+
+static std::string bucket_index_prefixes[] = { "", /* special handling for the objs list index */
+ "0_", /* bucket log index */
+ "1000_", /* obj instance index */
+ "1001_", /* olh data index */
+
+ /* this must be the last index */
+ "9999_",};
+
+// this string is greater than all ascii plain entries and less than
+// all special entries
+static const std::string BI_PREFIX_BEGIN = string(1, BI_PREFIX_CHAR);
+
+// this string is greater than all special entries and less than all
+// non-ascii plain entries
+static const std::string BI_PREFIX_END = string(1, BI_PREFIX_CHAR) +
+ bucket_index_prefixes[BI_BUCKET_LAST_INDEX];
+
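+// The bucket-index key space therefore sorts in three bands: plain
+// ASCII keys (e.g. "foo") come before BI_PREFIX_BEGIN, special
+// entries (e.g. "\x80" "1001_" "foo") fall between BI_PREFIX_BEGIN
+// and BI_PREFIX_END, and plain keys beginning with a non-ASCII UTF-8
+// character come after BI_PREFIX_END.
+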
+/* Returns whether the parameter is not a key for a special entry. Empty
+ * strings are considered plain also, so, for example, an empty marker
+ * is also considered plain. TODO: check to make sure all callers are
+ * using this appropriately.
+ */
+static bool bi_is_plain_entry(const std::string& s) {
+ return (s.empty() || (unsigned char)s[0] != BI_PREFIX_CHAR);
+}
+
+int bi_entry_type(const string& s)
+{
+ if (bi_is_plain_entry(s)) {
+ return BI_BUCKET_OBJS_INDEX;
+ }
+
+ for (size_t i = 1;
+ i < sizeof(bucket_index_prefixes) / sizeof(bucket_index_prefixes[0]);
+ ++i) {
+ const string& t = bucket_index_prefixes[i];
+
+ if (s.compare(1, t.size(), t) == 0) {
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static bool bi_entry_gt(const string& first, const string& second)
+{
+ int fi = bi_entry_type(first);
+ int si = bi_entry_type(second);
+
+ if (fi > si) {
+ return true;
+ } else if (fi < si) {
+ return false;
+ }
+
+ return first > second;
+}
+
+static void get_time_key(real_time& ut, string *key)
+{
+ char buf[32];
+ ceph_timespec ts = ceph::real_clock::to_ceph_timespec(ut);
+ snprintf(buf, 32, "%011llu.%09u", (unsigned long long)ts.tv_sec, (unsigned int)ts.tv_nsec);
+ *key = buf;
+}
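+// e.g. a timestamp of 1700000000s and 123456789ns is encoded as
+// "01700000000.123456789"; the zero padding keeps the keys sorting in
+// chronological order.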
+
+static void get_index_ver_key(cls_method_context_t hctx, uint64_t index_ver, string *key)
+{
+ char buf[48];
+ snprintf(buf, sizeof(buf), "%011llu.%llu.%d", (unsigned long long)index_ver,
+ (unsigned long long)cls_current_version(hctx),
+ cls_current_subop_num(hctx));
+ *key = buf;
+}
+
+static void bi_log_prefix(string& key)
+{
+ key = BI_PREFIX_CHAR;
+ key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
+}
+
+static void bi_log_index_key(cls_method_context_t hctx, string& key, string& id, uint64_t index_ver)
+{
+ bi_log_prefix(key);
+ get_index_ver_key(hctx, index_ver, &id);
+ key.append(id);
+}
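+// e.g. with index_ver 42 the resulting bi-log key is
+// "\x80" "0_" "00000000042.<version>.<subop>", where <version> and
+// <subop> come from cls_current_version() and cls_current_subop_num(),
+// so log entries sort by index version.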
+
+static int log_index_operation(cls_method_context_t hctx, cls_rgw_obj_key& obj_key, RGWModifyOp op,
+ string& tag, real_time& timestamp,
+ rgw_bucket_entry_ver& ver, RGWPendingState state, uint64_t index_ver,
+ string& max_marker, uint16_t bilog_flags, string *owner, string *owner_display_name, rgw_zone_set *zones_trace)
+{
+ bufferlist bl;
+
+ rgw_bi_log_entry entry;
+
+ entry.object = obj_key.name;
+ entry.instance = obj_key.instance;
+ entry.timestamp = timestamp;
+ entry.op = op;
+ entry.ver = ver;
+ entry.state = state;
+ entry.index_ver = index_ver;
+ entry.tag = tag;
+ entry.bilog_flags = bilog_flags;
+ if (owner) {
+ entry.owner = *owner;
+ }
+ if (owner_display_name) {
+ entry.owner_display_name = *owner_display_name;
+ }
+ if (zones_trace) {
+ entry.zones_trace = std::move(*zones_trace);
+ }
+
+ string key;
+ bi_log_index_key(hctx, key, entry.id, index_ver);
+
+ encode(entry, bl);
+
+ if (entry.id > max_marker)
+ max_marker = entry.id;
+
+ return cls_cxx_map_set_val(hctx, key, &bl);
+}
+
+/*
+ * Read list of objects, skipping objects in the "ugly namespace". The
+ * "ugly namespace" entries begin with BI_PREFIX_CHAR (0x80). Valid
+ * UTF-8 object names can *both* precede and follow the "ugly
+ * namespace".
+ */
+static int get_obj_vals(cls_method_context_t hctx,
+ const std::string& start,
+ const std::string& filter_prefix,
+ int num_entries,
+ std::map<std::string, bufferlist> *pkeys,
+ bool *pmore)
+{
+ int ret = cls_cxx_map_get_vals(hctx, start, filter_prefix,
+ num_entries, pkeys, pmore);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (pkeys->empty()) {
+ return 0;
+ }
+
+ auto last_element = pkeys->crbegin();
+ if ((unsigned char)last_element->first[0] < BI_PREFIX_CHAR) {
+    /* if the first character of the last entry is less than the
+     * prefix then all entries must precede the "ugly namespace" and
+     * we're done
+     */
+ return 0;
+ }
+
+ auto first_element = pkeys->cbegin();
+ if ((unsigned char)first_element->first[0] > BI_PREFIX_CHAR) {
+    /* if the first character of the first entry is after the "ugly
+     * namespace" then all entries must follow the "ugly namespace"
+     * and we're done
+     */
+ return 0;
+ }
+
+ /* at this point we know we have entries that could precede the
+ * "ugly namespace", be in the "ugly namespace", and follow the
+ * "ugly namespace", so let's rebuild the list, only keeping entries
+ * outside the "ugly namespace"
+ */
+
+ auto comp = [](const pair<std::string, bufferlist>& l, const std::string &r) {
+ return l.first < r;
+ };
+ std::string new_start = {static_cast<char>(BI_PREFIX_CHAR + 1)};
+
+ auto lower = pkeys->lower_bound(string{static_cast<char>(BI_PREFIX_CHAR)});
+ auto upper = std::lower_bound(lower, pkeys->end(), new_start, comp);
+ pkeys->erase(lower, upper);
+
+ if (num_entries == (int)pkeys->size() || !(*pmore)) {
+ return 0;
+ }
+
+ if (pkeys->size() && new_start < pkeys->crbegin()->first) {
+ new_start = pkeys->rbegin()->first;
+ }
+
+ std::map<std::string, bufferlist> new_keys;
+
+ /* now get some more keys */
+ ret = cls_cxx_map_get_vals(hctx, new_start, filter_prefix,
+ num_entries - pkeys->size(), &new_keys, pmore);
+ if (ret < 0) {
+ return ret;
+ }
+
+ pkeys->insert(std::make_move_iterator(new_keys.begin()),
+ std::make_move_iterator(new_keys.end()));
+
+ return 0;
+}
+
+/*
+ * get a monotonically decreasing string representation.
+ * For num = x and num = y, where x > y, str(x) < str(y).
+ * Another property is that the string size starts short and grows as num increases.
+ */
+static void decreasing_str(uint64_t num, string *str)
+{
+ char buf[32];
+ if (num < 0x10) { /* 16 */
+ snprintf(buf, sizeof(buf), "9%02lld", 15 - (long long)num);
+ } else if (num < 0x100) { /* 256 */
+ snprintf(buf, sizeof(buf), "8%03lld", 255 - (long long)num);
+ } else if (num < 0x1000) /* 4096 */ {
+ snprintf(buf, sizeof(buf), "7%04lld", 4095 - (long long)num);
+ } else if (num < 0x10000) /* 65536 */ {
+ snprintf(buf, sizeof(buf), "6%05lld", 65535 - (long long)num);
+ } else if (num < 0x100000000) /* 4G */ {
+ snprintf(buf, sizeof(buf), "5%010lld", 0xFFFFFFFF - (long long)num);
+ } else {
+ snprintf(buf, sizeof(buf), "4%020lld", (long long)-num);
+ }
+
+ *str = buf;
+}
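+// For example, decreasing_str(0) yields "915", decreasing_str(1)
+// yields "914", and decreasing_str(16) yields "8239"; larger inputs
+// map to lexicographically smaller strings.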
+
+/*
+ * We hold two different indexes for objects. The first one holds the
+ * list of objects in the order that we want them to be listed. The
+ * second one only holds the object instances (for versioned
+ * objects), and they're not arranged in any particular order. When
+ * listing objects we'll use the first index, when doing operations on
+ * the objects themselves we'll use the second index. Note that
+ * regular objects only map to the first index anyway
+ */
+
+static void get_list_index_key(rgw_bucket_dir_entry& entry, string *index_key)
+{
+ *index_key = entry.key.name;
+
+ string ver_str;
+ decreasing_str(entry.versioned_epoch, &ver_str);
+ string instance_delim("\0i", 2);
+ string ver_delim("\0v", 2);
+
+ index_key->append(ver_delim);
+ index_key->append(ver_str);
+ index_key->append(instance_delim);
+ index_key->append(entry.key.instance);
+}
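+// e.g. an entry for object "foo" with versioned_epoch 5 and instance
+// "v1" gets the list index key "foo" "\0v" "910" "\0i" "v1"
+// (decreasing_str(5) == "910").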
+
+static void encode_obj_versioned_data_key(const cls_rgw_obj_key& key, string *index_key, bool append_delete_marker_suffix = false)
+{
+ *index_key = BI_PREFIX_CHAR;
+ index_key->append(bucket_index_prefixes[BI_BUCKET_OBJ_INSTANCE_INDEX]);
+ index_key->append(key.name);
+ string delim("\0i", 2);
+ index_key->append(delim);
+ index_key->append(key.instance);
+ if (append_delete_marker_suffix) {
+ string dm("\0d", 2);
+ index_key->append(dm);
+ }
+}
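+// e.g. the instance index key for name "foo", instance "v1" is
+// "\x80" "1000_" "foo" "\0i" "v1"; a delete marker stored under the
+// null instance additionally gets the "\0d" suffix.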
+
+static void encode_obj_index_key(const cls_rgw_obj_key& key, string *index_key)
+{
+ if (key.instance.empty()) {
+ *index_key = key.name;
+ } else {
+ encode_obj_versioned_data_key(key, index_key);
+ }
+}
+
+static void encode_olh_data_key(const cls_rgw_obj_key& key, string *index_key)
+{
+ *index_key = BI_PREFIX_CHAR;
+ index_key->append(bucket_index_prefixes[BI_BUCKET_OLH_DATA_INDEX]);
+ index_key->append(key.name);
+}
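+// e.g. the OLH data entry for object "foo" is keyed "\x80" "1001_" "foo".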
+
+template <class T>
+static int read_index_entry(cls_method_context_t hctx, string& name, T *entry);
+
+static int encode_list_index_key(cls_method_context_t hctx, const cls_rgw_obj_key& key, string *index_key)
+{
+ if (key.instance.empty()) {
+ *index_key = key.name;
+ return 0;
+ }
+
+ string obj_index_key;
+ cls_rgw_obj_key tmp_key(key);
+ if (tmp_key.instance == "null") {
+ tmp_key.instance.clear();
+ }
+ encode_obj_versioned_data_key(tmp_key, &obj_index_key);
+
+ rgw_bucket_dir_entry entry;
+
+ int ret = read_index_entry(hctx, obj_index_key, &entry);
+ if (ret == -ENOENT) {
+ /* couldn't find the entry, set key value after the current object */
+ char buf[2] = { 0x1, 0 };
+ string s(buf);
+ *index_key = key.name + s;
+ return 0;
+ }
+ if (ret < 0) {
+ CLS_LOG(1, "ERROR: encode_list_index_key(): cls_cxx_map_get_val returned %d", ret);
+ return ret;
+ }
+
+ get_list_index_key(entry, index_key);
+
+ return 0;
+}
+
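+// Split a key whose components are separated by embedded NUL bytes,
+// e.g. "foo\0v910\0iv1" splits into ["foo", "v910", "iv1"].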
+static void split_key(const string& key, list<string>& vals)
+{
+ size_t pos = 0;
+ const char *p = key.c_str();
+ while (pos < key.size()) {
+ size_t len = strlen(p);
+ vals.push_back(p);
+ pos += len + 1;
+ p += len + 1;
+ }
+}
+
+static std::string escape_str(const std::string& s)
+{
+ int len = escape_json_attr_len(s.c_str(), s.size());
+ std::string escaped(len, 0);
+ escape_json_attr(s.c_str(), s.size(), escaped.data());
+ return escaped;
+}
+
+/*
+ * list index key structure:
+ *
+ * <obj name>\0[v<ver>\0i<instance id>]
+ */
+static int decode_list_index_key(const string& index_key, cls_rgw_obj_key *key, uint64_t *ver)
+{
+ size_t len = strlen(index_key.c_str());
+
+ key->instance.clear();
+ *ver = 0;
+
+ if (len == index_key.size()) {
+ key->name = index_key;
+ return 0;
+ }
+
+ list<string> vals;
+ split_key(index_key, vals);
+
+ if (vals.empty()) {
+ CLS_LOG(0, "ERROR: %s: bad index_key (%s): split_key() returned empty vals", __func__, escape_str(index_key).c_str());
+ return -EIO;
+ }
+
+ auto iter = vals.begin();
+ key->name = *iter;
+ ++iter;
+
+ if (iter == vals.end()) {
+ CLS_LOG(0, "ERROR: %s: bad index_key (%s): no vals", __func__, escape_str(index_key).c_str());
+ return -EIO;
+ }
+
+ for (; iter != vals.end(); ++iter) {
+ string& val = *iter;
+ if (val[0] == 'i') {
+ key->instance = val.substr(1);
+ } else if (val[0] == 'v') {
+ string err;
+ const char *s = val.c_str() + 1;
+ *ver = strict_strtoll(s, 10, &err);
+ if (!err.empty()) {
+ CLS_LOG(0, "ERROR: %s: bad index_key (%s): could not parse val (v=%s)", __func__, escape_str(index_key).c_str(), s);
+ return -EIO;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int read_bucket_header(cls_method_context_t hctx,
+ rgw_bucket_dir_header *header)
+{
+ bufferlist bl;
+ int rc = cls_cxx_map_read_header(hctx, &bl);
+ if (rc < 0)
+ return rc;
+
+ if (bl.length() == 0) {
+ *header = rgw_bucket_dir_header();
+ return 0;
+ }
+ auto iter = bl.cbegin();
+ try {
+ decode(*header, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: read_bucket_header(): failed to decode header\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int rgw_bucket_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+
+ // maximum number of calls to get_obj_vals we'll try; compromise
+ // between wanting to return the requested # of entries, but not
+ // wanting to slow down this op with too many omap reads
+ constexpr int max_attempts = 8;
+
+ auto iter = in->cbegin();
+
+ rgw_cls_list_op op;
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: %s: failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ rgw_cls_list_ret ret;
+ rgw_bucket_dir& new_dir = ret.dir;
+ auto& name_entry_map = new_dir.m; // map of keys to entries
+
+ int rc = read_bucket_header(hctx, &new_dir.header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
+ return rc;
+ }
+
+ // some calls just want the header and request 0 entries
+ if (op.num_entries <= 0) {
+ ret.is_truncated = false;
+ encode(ret, *out);
+ return 0;
+ }
+
+ // key that we can start listing at, one of a) sent in by caller, b)
+ // last item visited, or c) when delimiter present, a key that will
+ // move past the subdirectory
+ std::string start_after_omap_key;
+ encode_list_index_key(hctx, op.start_obj, &start_after_omap_key);
+
+  // this is set whenever start_after_omap_key is set to keep them in
+ // sync since this will be the returned marker when a marker is
+ // returned
+ cls_rgw_obj_key start_after_entry_key;
+
+ // last key stored in result, so if we have to call get_obj_vals
+ // multiple times, we do not add the overlap to result
+ std::string prev_omap_key;
+
+ // last prefix_key stored in result, so we can skip over entries
+ // with the same prefix_key
+ std::string prev_prefix_omap_key;
+
+ bool done = false; // whether we need to keep calling get_obj_vals
+ bool more = true; // output parameter of get_obj_vals
+ bool has_delimiter = !op.delimiter.empty();
+
+ if (has_delimiter &&
+ start_after_omap_key > op.filter_prefix &&
+ boost::algorithm::ends_with(start_after_omap_key, op.delimiter)) {
+ // advance past all subdirectory entries if we start after a
+ // subdirectory
+ start_after_omap_key = cls_rgw_after_delim(start_after_omap_key);
+ }
+
+ for (int attempt = 0;
+ attempt < max_attempts &&
+ more &&
+ !done &&
+ name_entry_map.size() < op.num_entries;
+ ++attempt) {
+ std::map<std::string, bufferlist> keys;
+
+ // note: get_obj_vals skips past the "ugly namespace" (i.e.,
+ // entries that start with the BI_PREFIX_CHAR), so no need to
+ // check for such entries
+ rc = get_obj_vals(hctx, start_after_omap_key, op.filter_prefix,
+ op.num_entries - name_entry_map.size(),
+ &keys, &more);
+ if (rc < 0) {
+ return rc;
+ }
+ CLS_LOG(20, "%s: on attempt %d get_obj_vls returned %ld entries, more=%d",
+ __func__, attempt, keys.size(), more);
+
+ done = keys.empty();
+
+ for (auto kiter = keys.cbegin(); kiter != keys.cend(); ++kiter) {
+ rgw_bucket_dir_entry entry;
+ try {
+ const bufferlist& entrybl = kiter->second;
+ auto eiter = entrybl.cbegin();
+ decode(entry, eiter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: %s: failed to decode entry, key=%s",
+ __func__, kiter->first.c_str());
+ return -EINVAL;
+ }
+
+ start_after_omap_key = kiter->first;
+ start_after_entry_key = entry.key;
+ CLS_LOG(20, "%s: working on key=%s len=%zu",
+ __func__, kiter->first.c_str(), kiter->first.size());
+
+ cls_rgw_obj_key key;
+ uint64_t ver;
+ int ret = decode_list_index_key(kiter->first, &key, &ver);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: failed to decode list index key (%s)",
+ __func__, escape_str(kiter->first).c_str());
+ continue;
+ }
+
+ if (!entry.is_valid()) {
+ CLS_LOG(20, "%s: entry %s[%s] is not valid",
+ __func__, key.name.c_str(), key.instance.c_str());
+ continue;
+ }
+
+ // filter out noncurrent versions, delete markers, and initial marker
+ if (!op.list_versions &&
+ (!entry.is_visible() || op.start_obj.name == key.name)) {
+ CLS_LOG(20, "%s: entry %s[%s] is not visible",
+ __func__, key.name.c_str(), key.instance.c_str());
+ continue;
+ }
+
+ if (has_delimiter) {
+ int delim_pos = key.name.find(op.delimiter, op.filter_prefix.size());
+
+ if (delim_pos >= 0) {
+ /* extract key with trailing delimiter */
+ string prefix_key =
+ key.name.substr(0, delim_pos + op.delimiter.length());
+
+ if (prefix_key == prev_prefix_omap_key) {
+ continue; // we've already added this;
+ } else {
+ prev_prefix_omap_key = prefix_key;
+ }
+
+ if (name_entry_map.size() < op.num_entries) {
+ rgw_bucket_dir_entry proxy_entry;
+ cls_rgw_obj_key proxy_key(prefix_key);
+ proxy_entry.key = cls_rgw_obj_key(proxy_key);
+ proxy_entry.flags = rgw_bucket_dir_entry::FLAG_COMMON_PREFIX;
+ name_entry_map[prefix_key] = proxy_entry;
+
+ CLS_LOG(20, "%s: got common prefix entry %s[%s] num entries=%lu",
+ __func__, proxy_key.name.c_str(), proxy_key.instance.c_str(),
+ name_entry_map.size());
+ }
+
+ // make sure that if this is the last item added to the
+ // result from this call to get_obj_vals, the next call will
+ // skip past rest of "subdirectory"
+ start_after_omap_key = cls_rgw_after_delim(prefix_key);
+ start_after_entry_key.set(start_after_omap_key);
+
+ // advance past this subdirectory, but then back up one,
+ // so the loop increment will put us in the right place
+ kiter = keys.lower_bound(start_after_omap_key);
+ --kiter;
+
+ continue;
+ }
+
+ // no delimiter after prefix found, so this is a "top-level"
+ // item and we can just fall through
+ }
+
+ if (name_entry_map.size() < op.num_entries &&
+ kiter->first != prev_omap_key) {
+ name_entry_map[kiter->first] = entry;
+ prev_omap_key = kiter->first;
+ CLS_LOG(20, "%s: got object entry %s[%s] num entries=%d",
+ __func__, key.name.c_str(), key.instance.c_str(),
+ int(name_entry_map.size()));
+ }
+ } // for (auto kiter...
+ } // for (int attempt...
+
+ ret.is_truncated = more && !done;
+ if (ret.is_truncated) {
+ ret.marker = start_after_entry_key;
+ }
+ CLS_LOG(20, "%s: normal exit returning %ld entries, is_truncated=%d",
+ __func__, ret.dir.m.size(), ret.is_truncated);
+ encode(ret, *out);
+
+ if (ret.is_truncated && name_entry_map.size() == 0) {
+ CLS_LOG(5, "%s: returning value RGWBIAdvanceAndRetryError", __func__);
+ return RGWBIAdvanceAndRetryError;
+ } else {
+ return 0;
+ }
+} // rgw_bucket_list
+
+
+static int check_index(cls_method_context_t hctx,
+ rgw_bucket_dir_header *existing_header,
+ rgw_bucket_dir_header *calc_header)
+{
+ int rc = read_bucket_header(hctx, existing_header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: check_index(): failed to read header\n");
+ return rc;
+ }
+
+ calc_header->tag_timeout = existing_header->tag_timeout;
+ calc_header->ver = existing_header->ver;
+ calc_header->syncstopped = existing_header->syncstopped;
+
+ map<string, bufferlist> keys;
+ string start_obj;
+ string filter_prefix;
+
+#define CHECK_CHUNK_SIZE 1000
+ bool done = false;
+ bool more;
+
+ do {
+ rc = get_obj_vals(hctx, start_obj, filter_prefix, CHECK_CHUNK_SIZE, &keys, &more);
+ if (rc < 0)
+ return rc;
+
+ for (auto kiter = keys.begin(); kiter != keys.end(); ++kiter) {
+ if (!bi_is_plain_entry(kiter->first)) {
+ done = true;
+ break;
+ }
+
+ rgw_bucket_dir_entry entry;
+ auto eiter = kiter->second.cbegin();
+ try {
+ decode(entry, eiter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bucket_list(): failed to decode entry, key=%s", kiter->first.c_str());
+ return -EIO;
+ }
+ rgw_bucket_category_stats& stats = calc_header->stats[entry.meta.category];
+ stats.num_entries++;
+ stats.total_size += entry.meta.accounted_size;
+ stats.total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size);
+ stats.actual_size += entry.meta.size;
+
+ start_obj = kiter->first;
+ }
+ } while (keys.size() == CHECK_CHUNK_SIZE && !done);
+
+ return 0;
+}
+
+int rgw_bucket_check_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+ rgw_cls_check_index_ret ret;
+
+ int rc = check_index(hctx, &ret.existing_header, &ret.calculated_header);
+ if (rc < 0)
+ return rc;
+
+ encode(ret, *out);
+
+ return 0;
+}
+
+static int write_bucket_header(cls_method_context_t hctx, rgw_bucket_dir_header *header)
+{
+ header->ver++;
+
+ bufferlist header_bl;
+ encode(*header, header_bl);
+ return cls_cxx_map_write_header(hctx, &header_bl);
+}
+
+
+int rgw_bucket_rebuild_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ rgw_bucket_dir_header existing_header;
+ rgw_bucket_dir_header calc_header;
+ int rc = check_index(hctx, &existing_header, &calc_header);
+ if (rc < 0)
+ return rc;
+
+ return write_bucket_header(hctx, &calc_header);
+}
+
+int rgw_bucket_update_stats(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ // decode request
+ rgw_cls_bucket_update_stats_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: %s: failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
+ return rc;
+ }
+
+ for (auto& s : op.stats) {
+ auto& dest = header.stats[s.first];
+ if (op.absolute) {
+ dest = s.second;
+ } else {
+ dest.total_size += s.second.total_size;
+ dest.total_size_rounded += s.second.total_size_rounded;
+ dest.num_entries += s.second.num_entries;
+ dest.actual_size += s.second.actual_size;
+ }
+ }
+
+ return write_bucket_header(hctx, &header);
+}
+
+int rgw_bucket_init_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ bufferlist header_bl;
+ int rc = cls_cxx_map_read_header(hctx, &header_bl);
+ if (rc < 0) {
+ switch (rc) {
+ case -ENODATA:
+ case -ENOENT:
+ break;
+ default:
+ return rc;
+ }
+ }
+
+ if (header_bl.length() != 0) {
+ CLS_LOG(1, "ERROR: index already initialized\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir dir;
+
+ return write_bucket_header(hctx, &dir.header);
+}
+
+int rgw_bucket_set_tag_timeout(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ // decode request
+ rgw_cls_tag_timeout_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bucket_set_tag_timeout(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_set_tag_timeout(): failed to read header\n");
+ return rc;
+ }
+
+ header.tag_timeout = op.tag_timeout;
+
+ return write_bucket_header(hctx, &header);
+}
+
+static int read_key_entry(cls_method_context_t hctx, cls_rgw_obj_key& key,
+ string *idx, rgw_bucket_dir_entry *entry,
+ bool special_delete_marker_name = false);
+
+int rgw_bucket_prepare_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ // decode request
+ rgw_cls_obj_prepare_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bucket_prepare_op(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ if (op.tag.empty()) {
+ CLS_LOG(1, "ERROR: tag is empty\n");
+ return -EINVAL;
+ }
+
+ CLS_LOG(1, "rgw_bucket_prepare_op(): request: op=%d name=%s instance=%s tag=%s",
+ op.op, op.key.name.c_str(), op.key.instance.c_str(), op.tag.c_str());
+
+ // get on-disk state
+ string idx;
+
+ rgw_bucket_dir_entry entry;
+ int rc = read_key_entry(hctx, op.key, &idx, &entry);
+ if (rc < 0 && rc != -ENOENT)
+ return rc;
+
+ bool noent = (rc == -ENOENT);
+
+ rc = 0;
+
+ if (noent) { // no entry, initialize fields
+ entry.key = op.key;
+ entry.ver = rgw_bucket_entry_ver();
+ entry.exists = false;
+ entry.locator = op.locator;
+ }
+
+ // fill in proper state
+ rgw_bucket_pending_info info;
+ info.timestamp = real_clock::now();
+ info.state = CLS_RGW_STATE_PENDING_MODIFY;
+ info.op = op.op;
+ entry.pending_map.insert(pair<string, rgw_bucket_pending_info>(op.tag, info));
+
+ // write out new key to disk
+ bufferlist info_bl;
+ encode(entry, info_bl);
+ return cls_cxx_map_set_val(hctx, idx, &info_bl);
+}
+
+static void unaccount_entry(rgw_bucket_dir_header& header,
+ rgw_bucket_dir_entry& entry)
+{
+ if (entry.exists) {
+ rgw_bucket_category_stats& stats = header.stats[entry.meta.category];
+ stats.num_entries--;
+ stats.total_size -= entry.meta.accounted_size;
+ stats.total_size_rounded -=
+ cls_rgw_get_rounded_size(entry.meta.accounted_size);
+ stats.actual_size -= entry.meta.size;
+ }
+}
+
+static void log_entry(const char *func, const char *str, rgw_bucket_dir_entry *entry)
+{
+ CLS_LOG(1, "%s: %s: ver=%ld:%llu name=%s instance=%s locator=%s", func, str,
+ (long)entry->ver.pool, (unsigned long long)entry->ver.epoch,
+ entry->key.name.c_str(), entry->key.instance.c_str(), entry->locator.c_str());
+}
+
+static void log_entry(const char *func, const char *str, rgw_bucket_olh_entry *entry)
+{
+ CLS_LOG(1, "%s: %s: epoch=%llu name=%s instance=%s tag=%s", func, str,
+ (unsigned long long)entry->epoch, entry->key.name.c_str(), entry->key.instance.c_str(),
+ entry->tag.c_str());
+}
+
+template <class T>
+static int read_omap_entry(cls_method_context_t hctx, const std::string& name,
+ T* entry)
+{
+ bufferlist current_entry;
+ int rc = cls_cxx_map_get_val(hctx, name, &current_entry);
+ if (rc < 0) {
+ return rc;
+ }
+
+ auto cur_iter = current_entry.cbegin();
+ try {
+ decode(*entry, cur_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: %s: failed to decode entry", __func__);
+ return -EIO;
+ }
+ return 0;
+}
+
+template <class T>
+static int read_index_entry(cls_method_context_t hctx, string& name, T* entry)
+{
+ int ret = read_omap_entry(hctx, name, entry);
+ if (ret < 0) {
+ return ret;
+ }
+
+ log_entry(__func__, "existing entry", entry);
+ return 0;
+}
+
+static int read_key_entry(cls_method_context_t hctx, cls_rgw_obj_key& key,
+ string *idx, rgw_bucket_dir_entry *entry,
+ bool special_delete_marker_name)
+{
+ encode_obj_index_key(key, idx);
+ int rc = read_index_entry(hctx, *idx, entry);
+ if (rc < 0) {
+ return rc;
+ }
+
+ if (key.instance.empty() &&
+ entry->flags & rgw_bucket_dir_entry::FLAG_VER_MARKER) {
+ /* we only do it where key.instance is empty. In this case the
+ * delete marker will have a separate entry in the index to avoid
+ * collisions with the actual object, as it's mutable
+ */
+ if (special_delete_marker_name) {
+ encode_obj_versioned_data_key(key, idx, true);
+ rc = read_index_entry(hctx, *idx, entry);
+ if (rc == 0) {
+ return 0;
+ }
+ }
+ encode_obj_versioned_data_key(key, idx);
+ rc = read_index_entry(hctx, *idx, entry);
+ if (rc < 0) {
+ *entry = rgw_bucket_dir_entry(); /* need to reset entry because we initialized it earlier */
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int rgw_bucket_complete_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s", __func__);
+
+ // decode request
+ rgw_cls_obj_complete_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ CLS_LOG(1, "rgw_bucket_complete_op(): request: op=%d name=%s instance=%s ver=%lu:%llu tag=%s",
+ op.op, op.key.name.c_str(), op.key.instance.c_str(),
+ (unsigned long)op.ver.pool, (unsigned long long)op.ver.epoch,
+ op.tag.c_str());
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_entry entry;
+ bool ondisk = true;
+
+ std::string idx;
+ rc = read_key_entry(hctx, op.key, &idx, &entry);
+ if (rc == -ENOENT) {
+ entry.key = op.key;
+ entry.ver = op.ver;
+ entry.meta = op.meta;
+ entry.locator = op.locator;
+ ondisk = false;
+ } else if (rc < 0) {
+ return rc;
+ }
+
+ entry.index_ver = header.ver;
+ /* resetting entry flags, entry might have been previously a delete
+ * marker */
+ entry.flags &= rgw_bucket_dir_entry::FLAG_VER;
+
+ if (op.tag.size()) {
+ auto pinter = entry.pending_map.find(op.tag);
+ if (pinter == entry.pending_map.end()) {
+ CLS_LOG(1, "ERROR: couldn't find tag for pending operation\n");
+ return -EINVAL;
+ }
+ entry.pending_map.erase(pinter);
+ }
+
+ bool cancel = false;
+ bufferlist update_bl;
+
+ if (op.tag.size() && op.op == CLS_RGW_OP_CANCEL) {
+ CLS_LOG(1, "rgw_bucket_complete_op(): cancel requested\n");
+ cancel = true;
+ } else if (op.ver.pool == entry.ver.pool &&
+ op.ver.epoch && op.ver.epoch <= entry.ver.epoch) {
+ CLS_LOG(1, "rgw_bucket_complete_op(): skipping request, old epoch\n");
+ cancel = true;
+ }
+
+ bufferlist op_bl;
+ if (cancel) {
+ if (op.tag.size()) {
+ bufferlist new_key_bl;
+ encode(entry, new_key_bl);
+ return cls_cxx_map_set_val(hctx, idx, &new_key_bl);
+ }
+ return 0;
+ }
+
+ unaccount_entry(header, entry);
+
+ entry.ver = op.ver;
+ switch ((int)op.op) {
+ case CLS_RGW_OP_DEL:
+ entry.meta = op.meta;
+ if (ondisk) {
+ if (!entry.pending_map.size()) {
+ int ret = cls_cxx_map_remove_key(hctx, idx);
+ if (ret < 0)
+ return ret;
+ } else {
+ entry.exists = false;
+ bufferlist new_key_bl;
+ encode(entry, new_key_bl);
+ int ret = cls_cxx_map_set_val(hctx, idx, &new_key_bl);
+ if (ret < 0)
+ return ret;
+ }
+ } else {
+ return -ENOENT;
+ }
+ break;
+ case CLS_RGW_OP_ADD:
+ {
+ rgw_bucket_dir_entry_meta& meta = op.meta;
+ rgw_bucket_category_stats& stats = header.stats[meta.category];
+ entry.meta = meta;
+ entry.key = op.key;
+ entry.exists = true;
+ entry.tag = op.tag;
+ stats.num_entries++;
+ stats.total_size += meta.accounted_size;
+ stats.total_size_rounded += cls_rgw_get_rounded_size(meta.accounted_size);
+ stats.actual_size += meta.size;
+ bufferlist new_key_bl;
+ encode(entry, new_key_bl);
+ int ret = cls_cxx_map_set_val(hctx, idx, &new_key_bl);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ }
+
+ if (op.log_op && !header.syncstopped) {
+ rc = log_index_operation(hctx, op.key, op.op, op.tag, entry.meta.mtime,
+ entry.ver, CLS_RGW_STATE_COMPLETE, header.ver,
+ header.max_marker, op.bilog_flags, NULL, NULL,
+ &op.zones_trace);
+ if (rc < 0) {
+ return rc;
+ }
+ }
+
+ CLS_LOG(20, "rgw_bucket_complete_op(): remove_objs.size()=%d",
+ int(op.remove_objs.size()));
+
+ for (auto remove_iter = op.remove_objs.begin();
+ remove_iter != op.remove_objs.end();
+ ++remove_iter) {
+ cls_rgw_obj_key& remove_key = *remove_iter;
+ CLS_LOG(1, "rgw_bucket_complete_op(): removing entries, read_index_entry name=%s instance=%s",
+ remove_key.name.c_str(), remove_key.instance.c_str());
+ rgw_bucket_dir_entry remove_entry;
+ std::string k;
+ int ret = read_key_entry(hctx, remove_key, &k, &remove_entry);
+ if (ret < 0) {
+ CLS_LOG(1, "rgw_bucket_complete_op(): removing entries, read_index_entry name=%s instance=%s ret=%d",
+ remove_key.name.c_str(), remove_key.instance.c_str(), ret);
+ continue;
+ }
+ CLS_LOG(0,
+ "rgw_bucket_complete_op(): entry.name=%s entry.instance=%s entry.meta.category=%d",
+ remove_entry.key.name.c_str(),
+ remove_entry.key.instance.c_str(),
+ int(remove_entry.meta.category));
+
+ unaccount_entry(header, remove_entry);
+
+ if (op.log_op && !header.syncstopped) {
+ ++header.ver; // increment index version, or we'll overwrite keys previously written
+ rc = log_index_operation(hctx, remove_key, CLS_RGW_OP_DEL, op.tag,
+ remove_entry.meta.mtime, remove_entry.ver,
+ CLS_RGW_STATE_COMPLETE, header.ver,
+ header.max_marker, op.bilog_flags, NULL,
+ NULL, &op.zones_trace);
+ if (rc < 0) {
+ continue;
+ }
+ }
+
+ ret = cls_cxx_map_remove_key(hctx, k);
+ if (ret < 0) {
+ CLS_LOG(1, "rgw_bucket_complete_op(): cls_cxx_map_remove_key, failed to remove entry, name=%s instance=%s read_index_entry ret=%d",
+ remove_key.name.c_str(), remove_key.instance.c_str(), rc);
+ continue;
+ }
+ }
+
+ return write_bucket_header(hctx, &header);
+} // rgw_bucket_complete_op
+
+template <class T>
+static int write_entry(cls_method_context_t hctx, T& entry, const string& key)
+{
+ bufferlist bl;
+ encode(entry, bl);
+ return cls_cxx_map_set_val(hctx, key, &bl);
+}
+
+static int read_olh(cls_method_context_t hctx, cls_rgw_obj_key& obj_key, rgw_bucket_olh_entry *olh_data_entry, string *index_key, bool *found)
+{
+ cls_rgw_obj_key olh_key;
+ olh_key.name = obj_key.name;
+
+ encode_olh_data_key(olh_key, index_key);
+ int ret = read_index_entry(hctx, *index_key, olh_data_entry);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_key.name.c_str(), ret);
+ return ret;
+ }
+ if (found) {
+ *found = (ret != -ENOENT);
+ }
+ return 0;
+}
+
+static void update_olh_log(rgw_bucket_olh_entry& olh_data_entry, OLHLogOp op, const string& op_tag,
+ cls_rgw_obj_key& key, bool delete_marker, uint64_t epoch)
+{
+ vector<rgw_bucket_olh_log_entry>& log = olh_data_entry.pending_log[olh_data_entry.epoch];
+ rgw_bucket_olh_log_entry log_entry;
+ log_entry.epoch = epoch;
+ log_entry.op = op;
+ log_entry.op_tag = op_tag;
+ log_entry.key = key;
+ log_entry.delete_marker = delete_marker;
+ log.push_back(log_entry);
+}
+
+static int write_obj_instance_entry(cls_method_context_t hctx, rgw_bucket_dir_entry& instance_entry, const string& instance_idx)
+{
+ CLS_LOG(20, "write_entry() instance=%s idx=%s flags=%d", escape_str(instance_entry.key.instance).c_str(), instance_idx.c_str(), instance_entry.flags);
+ /* write the instance entry */
+ int ret = write_entry(hctx, instance_entry, instance_idx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_entry() instance_key=%s ret=%d", escape_str(instance_idx).c_str(), ret);
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * write object instance entry, and if needed also the list entry
+ */
+static int write_obj_entries(cls_method_context_t hctx, rgw_bucket_dir_entry& instance_entry, const string& instance_idx)
+{
+ int ret = write_obj_instance_entry(hctx, instance_entry, instance_idx);
+ if (ret < 0) {
+ return ret;
+ }
+ string instance_list_idx;
+ get_list_index_key(instance_entry, &instance_list_idx);
+
+ if (instance_idx != instance_list_idx) {
+ CLS_LOG(20, "write_entry() idx=%s flags=%d", escape_str(instance_list_idx).c_str(), instance_entry.flags);
+ /* write a new list entry for the object instance */
+ ret = write_entry(hctx, instance_entry, instance_list_idx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_entry() instance=%s instance_list_idx=%s ret=%d", instance_entry.key.instance.c_str(), instance_list_idx.c_str(), ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+
+class BIVerObjEntry {
+ cls_method_context_t hctx;
+ cls_rgw_obj_key key;
+ string instance_idx;
+
+ rgw_bucket_dir_entry instance_entry;
+
+ bool initialized;
+
+public:
+ BIVerObjEntry(cls_method_context_t& _hctx, const cls_rgw_obj_key& _key) : hctx(_hctx), key(_key), initialized(false) {
+ // empty
+ }
+
+ int init(bool check_delete_marker = true) {
+ int ret = read_key_entry(hctx, key, &instance_idx, &instance_entry,
+ check_delete_marker && key.instance.empty()); /* this is potentially a delete marker, for null objects we
+ keep separate instance entry for the delete markers */
+
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: read_key_entry() idx=%s ret=%d", instance_idx.c_str(), ret);
+ return ret;
+ }
+ initialized = true;
+ CLS_LOG(20, "read instance_entry key.name=%s key.instance=%s flags=%d", instance_entry.key.name.c_str(), instance_entry.key.instance.c_str(), instance_entry.flags);
+ return 0;
+ }
+
+ rgw_bucket_dir_entry& get_dir_entry() {
+ return instance_entry;
+ }
+
+ void init_as_delete_marker(rgw_bucket_dir_entry_meta& meta) {
+ /* a deletion marker, need to initialize it, there's no instance entry for it yet */
+ instance_entry.key = key;
+ instance_entry.flags = rgw_bucket_dir_entry::FLAG_DELETE_MARKER;
+ instance_entry.meta = meta;
+ instance_entry.tag = "delete-marker";
+
+ initialized = true;
+ }
+
+ void set_epoch(uint64_t epoch) {
+ instance_entry.versioned_epoch = epoch;
+ }
+
+ int unlink_list_entry() {
+ string list_idx;
+ /* this instance has a previous list entry, remove that entry */
+ get_list_index_key(instance_entry, &list_idx);
+ CLS_LOG(20, "unlink_list_entry() list_idx=%s", escape_str(list_idx).c_str());
+ int ret = cls_cxx_map_remove_key(hctx, list_idx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: cls_cxx_map_remove_key() list_idx=%s ret=%d", list_idx.c_str(), ret);
+ return ret;
+ }
+ return 0;
+ }
+
+ int unlink() {
+ /* remove the instance entry */
+ CLS_LOG(20, "unlink() idx=%s", escape_str(instance_idx).c_str());
+ int ret = cls_cxx_map_remove_key(hctx, instance_idx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: cls_cxx_map_remove_key() instance_idx=%s ret=%d", instance_idx.c_str(), ret);
+ return ret;
+ }
+ return 0;
+ }
+
+ int write_entries(uint64_t flags_set, uint64_t flags_reset) {
+ if (!initialized) {
+ int ret = init();
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ instance_entry.flags &= ~flags_reset;
+ instance_entry.flags |= flags_set;
+
+ /* write the instance and list entries */
+ bool special_delete_marker_key = (instance_entry.is_delete_marker() && instance_entry.key.instance.empty());
+ encode_obj_versioned_data_key(key, &instance_idx, special_delete_marker_key);
+ int ret = write_obj_entries(hctx, instance_entry, instance_idx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_obj_entries() instance_idx=%s ret=%d", instance_idx.c_str(), ret);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ int write(uint64_t epoch, bool current) {
+ if (instance_entry.versioned_epoch > 0) {
+ CLS_LOG(20, "%s: instance_entry.versioned_epoch=%d epoch=%d", __func__, (int)instance_entry.versioned_epoch, (int)epoch);
+ /* this instance has a previous list entry, remove that entry */
+ int ret = unlink_list_entry();
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ uint64_t flags = rgw_bucket_dir_entry::FLAG_VER;
+ if (current) {
+ flags |= rgw_bucket_dir_entry::FLAG_CURRENT;
+ }
+
+ instance_entry.versioned_epoch = epoch;
+ return write_entries(flags, 0);
+ }
+
+ int demote_current() {
+ return write_entries(0, rgw_bucket_dir_entry::FLAG_CURRENT);
+ }
+
+ bool is_delete_marker() {
+ return instance_entry.is_delete_marker();
+ }
+
+ int find_next_key(cls_rgw_obj_key *next_key, bool *found) {
+ string list_idx;
+    /* compute the list index key for this instance */
+ get_list_index_key(instance_entry, &list_idx);
+ /* this is the current head, need to update! */
+ map<string, bufferlist> keys;
+ bool more;
+ string filter = key.name; /* list key starts with key name, filter it to avoid a case where we cross to
+ different namespace */
+ int ret = cls_cxx_map_get_vals(hctx, list_idx, filter, 1, &keys, &more);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (keys.size() < 1) {
+ *found = false;
+ return 0;
+ }
+
+ rgw_bucket_dir_entry next_entry;
+
+ auto last = keys.rbegin();
+ try {
+ auto iter = last->second.cbegin();
+ decode(next_entry, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR; failed to decode entry: %s", last->first.c_str());
+ return -EIO;
+ }
+
+ *found = (key.name == next_entry.key.name);
+ if (*found) {
+ *next_key = next_entry.key;
+ }
+
+ return 0;
+ }
+
+ real_time mtime() {
+ return instance_entry.meta.mtime;
+ }
+}; // class BIVerObjEntry
+
+
+class BIOLHEntry {
+ cls_method_context_t hctx;
+ cls_rgw_obj_key key;
+
+ string olh_data_idx;
+ rgw_bucket_olh_entry olh_data_entry;
+
+ bool initialized;
+public:
+ BIOLHEntry(cls_method_context_t& _hctx, const cls_rgw_obj_key& _key) : hctx(_hctx), key(_key), initialized(false) { }
+
+ int init(bool *exists) {
+ /* read olh */
+ int ret = read_olh(hctx, key, &olh_data_entry, &olh_data_idx, exists);
+ if (ret < 0) {
+ return ret;
+ }
+
+ initialized = true;
+ return 0;
+ }
+
+ bool start_modify(uint64_t candidate_epoch) {
+ if (candidate_epoch) {
+ if (candidate_epoch < olh_data_entry.epoch) {
+ return false; /* olh cannot be modified, old epoch */
+ }
+ olh_data_entry.epoch = candidate_epoch;
+ } else {
+ if (olh_data_entry.epoch == 0) {
+        olh_data_entry.epoch = 2; /* versioned epoch should start with 2, 1 is reserved for converted plain entries */
+ } else {
+ olh_data_entry.epoch++;
+ }
+ }
+ return true;
+ }
+
+ uint64_t get_epoch() {
+ return olh_data_entry.epoch;
+ }
+
+ rgw_bucket_olh_entry& get_entry() {
+ return olh_data_entry;
+ }
+
+ void update(cls_rgw_obj_key& key, bool delete_marker) {
+ olh_data_entry.delete_marker = delete_marker;
+ olh_data_entry.key = key;
+ }
+
+ int write() {
+ /* write the olh data entry */
+ int ret = write_entry(hctx, olh_data_entry, olh_data_idx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_entry() olh_key=%s ret=%d", olh_data_idx.c_str(), ret);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ void update_log(OLHLogOp op, const string& op_tag, cls_rgw_obj_key& key, bool delete_marker, uint64_t epoch = 0) {
+ if (epoch == 0) {
+ epoch = olh_data_entry.epoch;
+ }
+ update_olh_log(olh_data_entry, op, op_tag, key, delete_marker, epoch);
+ }
+
+ bool exists() { return olh_data_entry.exists; }
+
+ void set_exists(bool exists) {
+ olh_data_entry.exists = exists;
+ }
+
+ bool pending_removal() { return olh_data_entry.pending_removal; }
+
+ void set_pending_removal(bool pending_removal) {
+ olh_data_entry.pending_removal = pending_removal;
+ }
+
+ const string& get_tag() { return olh_data_entry.tag; }
+ void set_tag(const string& tag) {
+ olh_data_entry.tag = tag;
+ }
+};
+
+static int write_version_marker(cls_method_context_t hctx, cls_rgw_obj_key& key)
+{
+ rgw_bucket_dir_entry entry;
+ entry.key = key;
+ entry.flags = rgw_bucket_dir_entry::FLAG_VER_MARKER;
+ int ret = write_entry(hctx, entry, key.name);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_entry returned ret=%d", ret);
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * Plain entries are the ones that were created when the bucket was
+ * not versioned. If we overwrite these objects, we need to convert
+ * them to versioned entries -- ones that have both a data entry and a
+ * listing key. Their version is going to be empty, though.
+ */
+static int convert_plain_entry_to_versioned(cls_method_context_t hctx,
+ cls_rgw_obj_key& key,
+ bool demote_current,
+ bool instance_only)
+{
+ if (!key.instance.empty()) {
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_entry entry;
+
+ string orig_idx;
+ int ret = read_key_entry(hctx, key, &orig_idx, &entry);
+ if (ret != -ENOENT) {
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: read_key_entry() returned ret=%d", ret);
+ return ret;
+ }
+
+ entry.versioned_epoch = 1; /* converted entries are always 1 */
+ entry.flags |= rgw_bucket_dir_entry::FLAG_VER;
+
+ if (demote_current) {
+ entry.flags &= ~rgw_bucket_dir_entry::FLAG_CURRENT;
+ }
+
+ string new_idx;
+ encode_obj_versioned_data_key(key, &new_idx);
+
+ if (instance_only) {
+ ret = write_obj_instance_entry(hctx, entry, new_idx);
+ } else {
+ ret = write_obj_entries(hctx, entry, new_idx);
+ }
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_obj_entries new_idx=%s returned %d",
+ new_idx.c_str(), ret);
+ return ret;
+ }
+ }
+
+ ret = write_version_marker(hctx, key);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Link an object version to an olh, update the relevant index
+ * entries. It will also handle the deletion marker case. We have a
+ * few entries that we need to take care of. For object 'foo',
+ * instance BAR, we'd update the following (not actual encoding):
+ *
+ * - olh data: [BI_BUCKET_OLH_DATA_INDEX]foo
+ * - object instance data: [BI_BUCKET_OBJ_INSTANCE_INDEX]foo,BAR
+ * - object instance list entry: foo,123,BAR
+ *
+ * The instance list entry needs to be ordered by newer to older, so
+ * we generate an appropriate number string that follows the name.
+ * The top instance for each object is marked appropriately. We
+ * generate the instance entry for delete markers here, as it is not
+ * created beforehand.
+ */
+static int rgw_bucket_link_olh(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ string olh_data_idx;
+ string instance_idx;
+
+ // decode request
+ rgw_cls_link_olh_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: rgw_bucket_link_olh_op(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ /* read instance entry */
+ BIVerObjEntry obj(hctx, op.key);
+ int ret = obj.init(op.delete_marker);
+
+ /* NOTE: When a delete is issued, a key instance is always provided,
+ * either the one for which the delete is requested or a new random
+ * one when no instance is specified. So we need to see which of
+ * these two cases we're dealing with. The variable `existed` will
+ * be true if the instance was specified and false if it was
+ * randomly generated. It might have been cleaner if the instance
+ * were empty and randomly generated here and returned in the reply,
+ * as that would better guard against a typo in the instance id. This code
+ * should be audited and possibly cleaned up. */
+
+ bool existed = (ret == 0);
+ if (ret == -ENOENT && op.delete_marker) {
+ ret = 0;
+ }
+ if (ret < 0) {
+ return ret;
+ }
+
+ BIOLHEntry olh(hctx, op.key);
+ bool olh_read_attempt = false;
+ bool olh_found = false;
+ if (!existed && op.delete_marker) {
+ /* read olh */
+ ret = olh.init(&olh_found);
+ if (ret < 0) {
+ return ret;
+ }
+ olh_read_attempt = true;
+
+    // if we're deleting (i.e., adding a delete marker) and the OLH
+    // indicates it already refers to a delete marker, error out
+ if (olh_found && olh.get_entry().delete_marker) {
+ CLS_LOG(10,
+ "%s: delete marker received for \"%s\" although OLH"
+ " already refers to a delete marker",
+ __func__, escape_str(op.key.to_string()).c_str());
+ return -ENOENT;
+ }
+ }
+
+ if (existed && !real_clock::is_zero(op.unmod_since)) {
+ timespec mtime = ceph::real_clock::to_timespec(obj.mtime());
+ timespec unmod = ceph::real_clock::to_timespec(op.unmod_since);
+ if (!op.high_precision_time) {
+ mtime.tv_nsec = 0;
+ unmod.tv_nsec = 0;
+ }
+ if (mtime >= unmod) {
+      return 0; /* no need to set error, we just return 0 and avoid
+                 * writing to the bi log */
+ }
+ }
+
+ bool removing;
+
+ /*
+ * Special handling for null instance object / delete-marker. For
+ * these objects we're going to have separate instances for a data
+ * object vs. delete-marker to avoid collisions. We now check if we
+ * got to overwrite a previous entry, and in that case we'll remove
+ * its list entry.
+ */
+ if (op.key.instance.empty()) {
+ BIVerObjEntry other_obj(hctx, op.key);
+ ret = other_obj.init(!op.delete_marker); /* try reading the other
+ * null versioned
+ * entry */
+ existed = (ret >= 0 && !other_obj.is_delete_marker());
+ if (ret >= 0 && other_obj.is_delete_marker() != op.delete_marker) {
+ ret = other_obj.unlink_list_entry();
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ removing = existed && op.delete_marker;
+ if (!removing) {
+ ret = other_obj.unlink();
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ } else {
+ removing = (existed && !obj.is_delete_marker() && op.delete_marker);
+ }
+
+ if (op.delete_marker) {
+ /* a deletion marker, need to initialize entry as such */
+ obj.init_as_delete_marker(op.meta);
+ }
+
+ /* read olh */
+ if (!olh_read_attempt) { // only read if we didn't attempt earlier
+ ret = olh.init(&olh_found);
+ if (ret < 0) {
+ return ret;
+ }
+ olh_read_attempt = true;
+ }
+
+ const uint64_t prev_epoch = olh.get_epoch();
+
+ if (!olh.start_modify(op.olh_epoch)) {
+ ret = obj.write(op.olh_epoch, false);
+ if (ret < 0) {
+ return ret;
+ }
+ if (removing) {
+ olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false, op.olh_epoch);
+ }
+ return 0;
+ }
+
+ // promote this version to current if it's a newer epoch, or if it matches the
+ // current epoch and sorts after the current instance
+ const bool promote = (olh.get_epoch() > prev_epoch) ||
+ (olh.get_epoch() == prev_epoch &&
+ olh.get_entry().key.instance >= op.key.instance);
+
+ if (olh_found) {
+ const string& olh_tag = olh.get_tag();
+ if (op.olh_tag != olh_tag) {
+ if (!olh.pending_removal()) {
+ CLS_LOG(5, "NOTICE: op.olh_tag (%s) != olh.tag (%s)", op.olh_tag.c_str(), olh_tag.c_str());
+ return -ECANCELED;
+ }
+ /* if pending removal, this is a new olh instance */
+ olh.set_tag(op.olh_tag);
+ }
+ if (promote && olh.exists()) {
+ rgw_bucket_olh_entry& olh_entry = olh.get_entry();
+ /* found olh, previous instance is no longer the latest, need to update */
+ if (!(olh_entry.key == op.key)) {
+ BIVerObjEntry old_obj(hctx, olh_entry.key);
+
+ ret = old_obj.demote_current();
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: could not demote current on previous key ret=%d", ret);
+ return ret;
+ }
+ }
+ }
+ olh.set_pending_removal(false);
+ } else {
+ bool instance_only = (op.key.instance.empty() && op.delete_marker);
+ cls_rgw_obj_key key(op.key.name);
+ ret = convert_plain_entry_to_versioned(hctx, key, promote, instance_only);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: convert_plain_entry_to_versioned ret=%d", ret);
+ return ret;
+ }
+ olh.set_tag(op.olh_tag);
+ }
+
+ /* update the olh log */
+ olh.update_log(CLS_RGW_OLH_OP_LINK_OLH, op.op_tag, op.key, op.delete_marker);
+ if (removing) {
+ olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false);
+ }
+
+ if (promote) {
+ olh.update(op.key, op.delete_marker);
+ }
+ olh.set_exists(true);
+
+ ret = olh.write();
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: failed to update olh ret=%d", ret);
+ return ret;
+ }
+
+ /* write the instance and list entries */
+ ret = obj.write(olh.get_epoch(), promote);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (!op.log_op) {
+ return 0;
+ }
+
+ rgw_bucket_dir_header header;
+ ret = read_bucket_header(hctx, &header);
+ if (ret < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_link_olh(): failed to read header\n");
+ return ret;
+ }
+ if (header.syncstopped) {
+ return 0;
+ }
+
+ rgw_bucket_dir_entry& entry = obj.get_dir_entry();
+
+ rgw_bucket_entry_ver ver;
+ ver.epoch = (op.olh_epoch ? op.olh_epoch : olh.get_epoch());
+
+ string *powner = NULL;
+ string *powner_display_name = NULL;
+
+ if (op.delete_marker) {
+ powner = &entry.meta.owner;
+ powner_display_name = &entry.meta.owner_display_name;
+ }
+
+ RGWModifyOp operation = (op.delete_marker ? CLS_RGW_OP_LINK_OLH_DM : CLS_RGW_OP_LINK_OLH);
+ ret = log_index_operation(hctx, op.key, operation, op.op_tag,
+ entry.meta.mtime, ver,
+ CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, op.bilog_flags | RGW_BILOG_FLAG_VERSIONED_OP,
+ powner, powner_display_name, &op.zones_trace);
+ if (ret < 0)
+ return ret;
+
+ return write_bucket_header(hctx, &header); /* updates header version */
+}
+
+static int rgw_bucket_unlink_instance(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ string olh_data_idx;
+ string instance_idx;
+
+ // decode request
+ rgw_cls_unlink_instance_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: rgw_bucket_rm_obj_instance_op(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ cls_rgw_obj_key dest_key = op.key;
+ if (dest_key.instance == "null") {
+ dest_key.instance.clear();
+ }
+
+ BIVerObjEntry obj(hctx, dest_key);
+ BIOLHEntry olh(hctx, dest_key);
+
+ int ret = obj.init();
+ if (ret == -ENOENT) {
+ return 0; /* already removed */
+ }
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: obj.init() returned ret=%d", ret);
+ return ret;
+ }
+
+ bool olh_found;
+ ret = olh.init(&olh_found);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: olh.init() returned ret=%d", ret);
+ return ret;
+ }
+
+ if (!olh_found) {
+ bool instance_only = false;
+ cls_rgw_obj_key key(dest_key.name);
+ ret = convert_plain_entry_to_versioned(hctx, key, true, instance_only);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: convert_plain_entry_to_versioned ret=%d", ret);
+ return ret;
+ }
+ olh.update(dest_key, false);
+ olh.set_tag(op.olh_tag);
+
+ obj.set_epoch(1);
+ }
+
+ if (!olh.start_modify(op.olh_epoch)) {
+ ret = obj.unlink_list_entry();
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (obj.is_delete_marker()) {
+ return 0;
+ }
+
+ olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false, op.olh_epoch);
+ return olh.write();
+ }
+
+ rgw_bucket_olh_entry& olh_entry = olh.get_entry();
+ cls_rgw_obj_key& olh_key = olh_entry.key;
+ CLS_LOG(20, "%s: updating olh log: existing olh entry: %s[%s] (delete_marker=%d)", __func__,
+ olh_key.name.c_str(), olh_key.instance.c_str(), olh_entry.delete_marker);
+
+ if (olh_key == dest_key) {
+ /* this is the current head, need to update! */
+ cls_rgw_obj_key next_key;
+ bool found = false;
+ ret = obj.find_next_key(&next_key, &found);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: obj.find_next_key() returned ret=%d", ret);
+ return ret;
+ }
+
+ if (found) {
+ BIVerObjEntry next(hctx, next_key);
+ ret = next.write(olh.get_epoch(), true);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: next.write() returned ret=%d", ret);
+ return ret;
+ }
+
+ CLS_LOG(20, "%s: updating olh log: link olh -> %s[%s] (is_delete=%d)", __func__,
+ next_key.name.c_str(), next_key.instance.c_str(), (int)next.is_delete_marker());
+
+ olh.update(next_key, next.is_delete_marker());
+ olh.update_log(CLS_RGW_OLH_OP_LINK_OLH, op.op_tag, next_key, next.is_delete_marker());
+ } else {
+ // next_key is empty, but we need to preserve its name in case this entry
+ // gets resharded, because this key is used for hash placement
+ next_key.name = dest_key.name;
+ olh.update(next_key, false);
+ olh.update_log(CLS_RGW_OLH_OP_UNLINK_OLH, op.op_tag, next_key, false);
+ olh.set_exists(false);
+ olh.set_pending_removal(true);
+ }
+ }
+
+ if (!obj.is_delete_marker()) {
+ olh.update_log(CLS_RGW_OLH_OP_REMOVE_INSTANCE, op.op_tag, op.key, false);
+ } else {
+ /* this is a delete marker, it's our responsibility to remove its
+ * instance entry */
+ ret = obj.unlink();
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ ret = obj.unlink_list_entry();
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = olh.write();
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (!op.log_op) {
+ return 0;
+ }
+
+ rgw_bucket_dir_header header;
+ ret = read_bucket_header(hctx, &header);
+ if (ret < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_unlink_instance(): failed to read header\n");
+ return ret;
+ }
+ if (header.syncstopped) {
+ return 0;
+ }
+
+ rgw_bucket_entry_ver ver;
+ ver.epoch = (op.olh_epoch ? op.olh_epoch : olh.get_epoch());
+
+ real_time mtime = obj.mtime(); /* mtime has no real meaning in
+ * instance removal context */
+ ret = log_index_operation(hctx, op.key, CLS_RGW_OP_UNLINK_INSTANCE, op.op_tag,
+ mtime, ver,
+ CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker,
+ op.bilog_flags | RGW_BILOG_FLAG_VERSIONED_OP, NULL, NULL, &op.zones_trace);
+ if (ret < 0)
+ return ret;
+
+ return write_bucket_header(hctx, &header); /* updates header version */
+}
+
+static int rgw_bucket_read_olh_log(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ // decode request
+ rgw_cls_read_olh_log_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: rgw_bucket_read_olh_log(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ if (!op.olh.instance.empty()) {
+ CLS_LOG(1, "bad key passed in (non empty instance)");
+ return -EINVAL;
+ }
+
+ rgw_bucket_olh_entry olh_data_entry;
+ string olh_data_key;
+ encode_olh_data_key(op.olh, &olh_data_key);
+ int ret = read_index_entry(hctx, olh_data_key, &olh_data_entry);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret);
+ return ret;
+ }
+
+ if (olh_data_entry.tag != op.olh_tag) {
+ CLS_LOG(1, "NOTICE: %s: olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__, olh_data_entry.tag.c_str(), op.olh_tag.c_str());
+ return -ECANCELED;
+ }
+
+ rgw_cls_read_olh_log_ret op_ret;
+
+#define MAX_OLH_LOG_ENTRIES 1000
+ map<uint64_t, vector<rgw_bucket_olh_log_entry> >& log = olh_data_entry.pending_log;
+
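+ // Fast path: if the log's first entry is already newer than the caller's
+ // marker and the whole log fits within MAX_OLH_LOG_ENTRIES, return it
+ // wholesale; otherwise copy entries after the marker and flag truncation
+ // if more remain. An empty log falls through to the second branch and
+ // yields an empty, non-truncated result.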
+ if (!log.empty() && log.begin()->first > op.ver_marker && log.size() <= MAX_OLH_LOG_ENTRIES) {
+ op_ret.log = log;
+ op_ret.is_truncated = false;
+ } else {
+ auto iter = log.upper_bound(op.ver_marker);
+
+ for (int i = 0; i < MAX_OLH_LOG_ENTRIES && iter != log.end(); ++i, ++iter) {
+ op_ret.log[iter->first] = iter->second;
+ }
+ op_ret.is_truncated = (iter != log.end());
+ }
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+static int rgw_bucket_trim_olh_log(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ // decode request
+ rgw_cls_trim_olh_log_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: rgw_bucket_trim_olh_log(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ if (!op.olh.instance.empty()) {
+ CLS_LOG(1, "bad key passed in (non empty instance)");
+ return -EINVAL;
+ }
+
+ /* read olh entry */
+ rgw_bucket_olh_entry olh_data_entry;
+ string olh_data_key;
+ encode_olh_data_key(op.olh, &olh_data_key);
+ int ret = read_index_entry(hctx, olh_data_key, &olh_data_entry);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret);
+ return ret;
+ }
+
+ if (olh_data_entry.tag != op.olh_tag) {
+ CLS_LOG(1, "NOTICE: %s: olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__, olh_data_entry.tag.c_str(), op.olh_tag.c_str());
+ return -ECANCELED;
+ }
+
+ /* remove all versions up to and including ver from the pending map */
+ auto& log = olh_data_entry.pending_log;
+ auto liter = log.begin();
+ while (liter != log.end() && liter->first <= op.ver) {
+ auto rm_iter = liter;
+ ++liter;
+ log.erase(rm_iter);
+ }
+
+ /* write the olh data entry */
+ ret = write_entry(hctx, olh_data_entry, olh_data_key);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: write_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rgw_bucket_clear_olh(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ // decode request
+ rgw_cls_bucket_clear_olh_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: rgw_bucket_clear_olh(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ if (!op.key.instance.empty()) {
+ CLS_LOG(1, "bad key passed in (non empty instance)");
+ return -EINVAL;
+ }
+
+ /* read olh entry */
+ rgw_bucket_olh_entry olh_data_entry;
+ string olh_data_key;
+ encode_olh_data_key(op.key, &olh_data_key);
+ int ret = read_index_entry(hctx, olh_data_key, &olh_data_entry);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: read_index_entry() olh_key=%s ret=%d", olh_data_key.c_str(), ret);
+ return ret;
+ }
+
+ if (olh_data_entry.tag != op.olh_tag) {
+ CLS_LOG(1, "NOTICE: %s: olh_tag_mismatch olh_data_entry.tag=%s op.olh_tag=%s", __func__, olh_data_entry.tag.c_str(), op.olh_tag.c_str());
+ return -ECANCELED;
+ }
+
+ ret = cls_cxx_map_remove_key(hctx, olh_data_key);
+ if (ret < 0) {
+ CLS_LOG(1, "NOTICE: %s: can't remove key %s ret=%d", __func__, olh_data_key.c_str(), ret);
+ return ret;
+ }
+
+ rgw_bucket_dir_entry plain_entry;
+
+ /* read plain entry, make sure it's a versioned place holder */
+ ret = read_index_entry(hctx, op.key.name, &plain_entry);
+ if (ret == -ENOENT) {
+ /* we're done, no entry existing */
+ return 0;
+ }
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: read_index_entry key=%s ret=%d", op.key.name.c_str(), ret);
+ return ret;
+ }
+
+ if ((plain_entry.flags & rgw_bucket_dir_entry::FLAG_VER_MARKER) == 0) {
+ /* it's not a version marker, don't remove it */
+ return 0;
+ }
+
+ ret = cls_cxx_map_remove_key(hctx, op.key.name);
+ if (ret < 0) {
+ CLS_LOG(1, "NOTICE: %s: can't remove key %s ret=%d", __func__, op.key.name.c_str(), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
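+// rgw_dir_suggest_changes(): apply suggested index corrections (remove or
+// update entries) reported by callers, e.g. while listing a bucket. A
+// suggestion is only applied when the on-disk entry has no unexpired pending
+// operations and is not newer than the version the suggestion was based on;
+// the bucket header stats are adjusted to match any change.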
+int rgw_dir_suggest_changes(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(1, "entered %s()\n", __func__);
+
+ bufferlist header_bl;
+ rgw_bucket_dir_header header;
+ bool header_changed = false;
+
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to read header\n");
+ return rc;
+ }
+
+ timespan tag_timeout(
+ std::chrono::seconds(
+ header.tag_timeout ? header.tag_timeout : CEPH_RGW_TAG_TIMEOUT));
+
+ auto in_iter = in->cbegin();
+
+ while (!in_iter.end()) {
+ __u8 op;
+ rgw_bucket_dir_entry cur_change;
+ rgw_bucket_dir_entry cur_disk;
+ try {
+ decode(op, in_iter);
+ decode(cur_change, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ bufferlist cur_disk_bl;
+ string cur_change_key;
+ encode_obj_index_key(cur_change.key, &cur_change_key);
+ int ret = cls_cxx_map_get_val(hctx, cur_change_key, &cur_disk_bl);
+ if (ret < 0 && ret != -ENOENT)
+ return -EINVAL;
+
+ if (ret == -ENOENT) {
+ continue;
+ }
+
+ if (cur_disk_bl.length()) {
+ auto cur_disk_iter = cur_disk_bl.cbegin();
+ try {
+ decode(cur_disk, cur_disk_iter);
+ } catch (ceph::buffer::error& error) {
+ CLS_LOG(1, "ERROR: rgw_dir_suggest_changes(): failed to decode cur_disk\n");
+ return -EINVAL;
+ }
+
+ // remove any pending entries whose tag timeout has expired. until expiry,
+ // these pending entries will prevent us from applying suggested changes
+ real_time cur_time = real_clock::now();
+ auto iter = cur_disk.pending_map.begin();
+ while(iter != cur_disk.pending_map.end()) {
+ auto cur_iter = iter++;
+ if (cur_time > (cur_iter->second.timestamp + timespan(tag_timeout))) {
+ cur_disk.pending_map.erase(cur_iter);
+ }
+ }
+ }
+
+ CLS_LOG(20, "cur_disk.pending_map.empty()=%d op=%d cur_disk.exists=%d "
+ "cur_disk.index_ver=%d cur_change.exists=%d cur_change.index_ver=%d",
+ cur_disk.pending_map.empty(), (int)op, cur_disk.exists,
+ (int)cur_disk.index_ver, cur_change.exists,
+ (int)cur_change.index_ver);
+
+ if (cur_change.index_ver < cur_disk.index_ver) {
+ // a pending on-disk entry was completed since this suggestion was made,
+ // don't apply it yet. if the index really is inconsistent, the next
+ // listing will get the latest version and resend the suggestion
+ continue;
+ }
+
+ if (cur_disk.pending_map.empty()) {
+ if (cur_disk.exists) {
+ rgw_bucket_category_stats& old_stats = header.stats[cur_disk.meta.category];
+ CLS_LOG(10, "total_entries: %" PRId64 " -> %" PRId64 "", old_stats.num_entries, old_stats.num_entries - 1);
+ old_stats.num_entries--;
+ old_stats.total_size -= cur_disk.meta.accounted_size;
+ old_stats.total_size_rounded -= cls_rgw_get_rounded_size(cur_disk.meta.accounted_size);
+ old_stats.actual_size -= cur_disk.meta.size;
+ header_changed = true;
+ }
+ rgw_bucket_category_stats& stats = header.stats[cur_change.meta.category];
+ bool log_op = (op & CEPH_RGW_DIR_SUGGEST_LOG_OP) != 0;
+ op &= CEPH_RGW_DIR_SUGGEST_OP_MASK;
+ switch(op) {
+ case CEPH_RGW_REMOVE:
+ CLS_LOG(10, "CEPH_RGW_REMOVE name=%s instance=%s", cur_change.key.name.c_str(), cur_change.key.instance.c_str());
+ ret = cls_cxx_map_remove_key(hctx, cur_change_key);
+ if (ret < 0)
+ return ret;
+ if (log_op && cur_disk.exists && !header.syncstopped) {
+ ret = log_index_operation(hctx, cur_disk.key, CLS_RGW_OP_DEL, cur_disk.tag, cur_disk.meta.mtime,
+ cur_disk.ver, CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, 0, NULL, NULL, NULL);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: failed to log operation ret=%d", __func__, ret);
+ return ret;
+ }
+ }
+ break;
+ case CEPH_RGW_UPDATE:
+ CLS_LOG(10, "CEPH_RGW_UPDATE name=%s instance=%s total_entries: %" PRId64 " -> %" PRId64 "",
+ cur_change.key.name.c_str(), cur_change.key.instance.c_str(), stats.num_entries, stats.num_entries + 1);
+
+ stats.num_entries++;
+ stats.total_size += cur_change.meta.accounted_size;
+ stats.total_size_rounded += cls_rgw_get_rounded_size(cur_change.meta.accounted_size);
+ stats.actual_size += cur_change.meta.size;
+ header_changed = true;
+ cur_change.index_ver = header.ver;
+ bufferlist cur_state_bl;
+ encode(cur_change, cur_state_bl);
+ ret = cls_cxx_map_set_val(hctx, cur_change_key, &cur_state_bl);
+ if (ret < 0)
+ return ret;
+ if (log_op && !header.syncstopped) {
+ ret = log_index_operation(hctx, cur_change.key, CLS_RGW_OP_ADD, cur_change.tag, cur_change.meta.mtime,
+ cur_change.ver, CLS_RGW_STATE_COMPLETE, header.ver, header.max_marker, 0, NULL, NULL, NULL);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: failed to log operation ret=%d", __func__, ret);
+ return ret;
+ }
+ }
+ break;
+ } // switch(op)
+ } // if (cur_disk.pending_map.empty())
+ } // while (!in_iter.end())
+
+ if (header_changed) {
+ return write_bucket_header(hctx, &header);
+ }
+ return 0;
+}
+
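+// rgw_obj_remove(): remove the object. If keep_attr_prefixes is non-empty,
+// an empty object is re-created afterwards carrying only the xattrs whose
+// names match one of the requested prefixes.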
+static int rgw_obj_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ // decode request
+ rgw_cls_obj_remove_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ if (op.keep_attr_prefixes.empty()) {
+ return cls_cxx_remove(hctx);
+ }
+
+ map<string, bufferlist> attrset;
+ int ret = cls_cxx_getxattrs(hctx, &attrset);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: %s: cls_cxx_getxattrs() returned %d", __func__, ret);
+ return ret;
+ }
+
+ map<string, bufferlist> new_attrs;
+ for (auto iter = op.keep_attr_prefixes.begin();
+ iter != op.keep_attr_prefixes.end(); ++iter) {
+ auto& check_prefix = *iter;
+
+ for (auto aiter = attrset.lower_bound(check_prefix);
+ aiter != attrset.end(); ++aiter) {
+ const string& attr = aiter->first;
+
+ if (attr.substr(0, check_prefix.size()) > check_prefix) {
+ break;
+ }
+
+ new_attrs[attr] = aiter->second;
+ }
+ }
+
+ CLS_LOG(20, "%s: removing object", __func__);
+ ret = cls_cxx_remove(hctx);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: cls_cxx_remove returned %d", __func__, ret);
+ return ret;
+ }
+
+ if (new_attrs.empty()) {
+ /* no data to keep */
+ return 0;
+ }
+
+ ret = cls_cxx_create(hctx, false);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: cls_cxx_create returned %d", __func__, ret);
+ return ret;
+ }
+
+ for (auto aiter = new_attrs.begin();
+ aiter != new_attrs.end(); ++aiter) {
+ const auto& attr = aiter->first;
+
+ ret = cls_cxx_setxattr(hctx, attr.c_str(), &aiter->second);
+ CLS_LOG(20, "%s: setting attr: %s", __func__, attr.c_str());
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: cls_cxx_setxattr (attr=%s) returned %d", __func__, attr.c_str(), ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rgw_obj_store_pg_ver(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ // decode request
+ rgw_cls_obj_store_pg_ver_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ bufferlist bl;
+ uint64_t ver = cls_current_version(hctx);
+ encode(ver, bl);
+ int ret = cls_cxx_setxattr(hctx, op.attr.c_str(), &bl);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: cls_cxx_setxattr (attr=%s) returned %d", __func__, op.attr.c_str(), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rgw_obj_check_attrs_prefix(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ // decode request
+ rgw_cls_obj_check_attrs_prefix op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ if (op.check_prefix.empty()) {
+ return -EINVAL;
+ }
+
+ map<string, bufferlist> attrset;
+ int ret = cls_cxx_getxattrs(hctx, &attrset);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: %s: cls_cxx_getxattrs() returned %d", __func__, ret);
+ return ret;
+ }
+
+ bool exist = false;
+
+ for (auto aiter = attrset.lower_bound(op.check_prefix);
+ aiter != attrset.end(); ++aiter) {
+ const auto& attr = aiter->first;
+
+ if (attr.substr(0, op.check_prefix.size()) > op.check_prefix) {
+ break;
+ }
+
+ exist = true;
+ }
+
+ if (exist == op.fail_if_exist) {
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static int rgw_obj_check_mtime(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ // decode request
+ rgw_cls_obj_check_mtime op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ real_time obj_ut;
+ int ret = cls_cxx_stat2(hctx, NULL, &obj_ut);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: %s: cls_cxx_stat() returned %d", __func__, ret);
+ return ret;
+ }
+ if (ret == -ENOENT) {
+ CLS_LOG(10, "object does not exist, skipping check");
+ }
+
+ ceph_timespec obj_ts = ceph::real_clock::to_ceph_timespec(obj_ut);
+ ceph_timespec op_ts = ceph::real_clock::to_ceph_timespec(op.mtime);
+
+ if (!op.high_precision_time) {
+ obj_ts.tv_nsec = 0;
+ op_ts.tv_nsec = 0;
+ }
+
+ CLS_LOG(10, "%s: obj_ut=%lld.%06lld op.mtime=%lld.%06lld", __func__,
+ (long long)obj_ts.tv_sec, (long long)obj_ts.tv_nsec,
+ (long long)op_ts.tv_sec, (long long)op_ts.tv_nsec);
+
+ bool check;
+
+ switch (op.type) {
+ case CLS_RGW_CHECK_TIME_MTIME_EQ:
+ check = (obj_ts == op_ts);
+ break;
+ case CLS_RGW_CHECK_TIME_MTIME_LT:
+ check = (obj_ts < op_ts);
+ break;
+ case CLS_RGW_CHECK_TIME_MTIME_LE:
+ check = (obj_ts <= op_ts);
+ break;
+ case CLS_RGW_CHECK_TIME_MTIME_GT:
+ check = (obj_ts > op_ts);
+ break;
+ case CLS_RGW_CHECK_TIME_MTIME_GE:
+ check = (obj_ts >= op_ts);
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (!check) {
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static int rgw_bi_get_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ // decode request
+ rgw_cls_bi_get_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ string idx;
+
+ switch (op.type) {
+ case BIIndexType::Plain:
+ idx = op.key.name;
+ break;
+ case BIIndexType::Instance:
+ encode_obj_index_key(op.key, &idx);
+ break;
+ case BIIndexType::OLH:
+ encode_olh_data_key(op.key, &idx);
+ break;
+ default:
+ CLS_LOG(10, "%s: invalid key type encoding: %d",
+ __func__, int(op.type));
+ return -EINVAL;
+ }
+
+ rgw_cls_bi_get_ret op_ret;
+
+ rgw_cls_bi_entry& entry = op_ret.entry;
+
+ entry.type = op.type;
+ entry.idx = idx;
+
+ int r = cls_cxx_map_get_val(hctx, idx, &entry.data);
+ if (r < 0) {
+ CLS_LOG(10, "%s: cls_cxx_map_get_val() returned %d", __func__, r);
+ return r;
+ }
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+static int rgw_bi_put_op(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ // decode request
+ rgw_cls_bi_put_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ rgw_cls_bi_entry& entry = op.entry;
+
+ int r = cls_cxx_map_set_val(hctx, entry.idx, &entry.data);
+ if (r < 0) {
+ CLS_LOG(0, "ERROR: %s: cls_cxx_map_set_val() returned r=%d", __func__, r);
+ }
+
+ return 0;
+}
+
+
+/* The plain entries in the bucket index are split into two regions
+ * separated by the special entries that begin with 0x80. Those below
+ * ("Low") are ASCII entries. Those above ("High") are entries whose
+ * names begin with non-ASCII (e.g., UTF-8) characters. This enum
+ * allows either or both regions to be listed in list_plain_entries().
+ * It's convenient that "Both" sits between the others so we can use
+ * "<= Both" or ">= Both" logic.
+ */
+enum class PlainEntriesRegion {
+ Low, Both, High
+};
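+// For example, a plain key such as "photo.jpg" sorts below BI_PREFIX_BEGIN
+// and is covered by the Low pass, while a key beginning with a multi-byte
+// UTF-8 character sorts above BI_PREFIX_END and is only reached by the High
+// pass.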
+
+
+/* Queries the omap for plain entries in the range of start_after_key
+ * to end_key, non-inclusive. Both of those values must either be
+ * before the "ugly namespace" or after it.
+ *
+ * Negative return values indicate errors. Non-negative return values
+ * indicate number of entries retrieved. */
+static int list_plain_entries_help(cls_method_context_t hctx,
+ const std::string& name_filter,
+ const std::string& start_after_key, // exclusive
+ const std::string& end_key, // exclusive
+ uint32_t max,
+ std::list<rgw_cls_bi_entry>* entries,
+ bool& end_key_reached,
+ bool& more)
+{
+ CLS_LOG(10, "Entered %s: name_filter=\"%s\", start_after_key=\"%s\", end_key=\"%s\", max=%d",
+ __func__, escape_str(name_filter).c_str(), escape_str(start_after_key).c_str(),
+ escape_str(end_key).c_str(), max);
+ int count = 0;
+ std::map<std::string, bufferlist> raw_entries;
+ int ret = cls_cxx_map_get_vals(hctx, start_after_key, name_filter, max,
+ &raw_entries, &more);
+ CLS_LOG(20, "%s: cls_cxx_map_get_vals ret=%d, raw_entries.size()=%lu, more=%d",
+ __func__, ret, raw_entries.size(), more);
+ if (ret < 0) {
+ return ret;
+ }
+
+ end_key_reached = false;
+ for (auto iter : raw_entries) {
+ if (!end_key.empty() && iter.first >= end_key) {
+ CLS_LOG(20, "%s: end key reached at \"%s\"",
+ __func__, escape_str(iter.first).c_str());
+ end_key_reached = true;
+ more = false;
+ return count;
+ }
+
+ rgw_bucket_dir_entry e;
+ auto biter = iter.second.cbegin();
+ try {
+ decode(e, biter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode buffer for plain bucket index entry \"%s\"",
+ __func__, escape_str(iter.first).c_str());
+ return -EIO;
+ }
+
+ if (!name_filter.empty() && e.key.name > name_filter) {
+ CLS_LOG(20, "%s: due to filter \"%s\", skipping entry.idx=\"%s\" e.key.name=\"%s\"",
+ __func__,
+ escape_str(name_filter).c_str(),
+ escape_str(iter.first).c_str(),
+ escape_str(e.key.name).c_str());
+ // skip the rest of the entries
+ more = false;
+ end_key_reached = true;
+ return count;
+ }
+
+ rgw_cls_bi_entry entry;
+ entry.type = BIIndexType::Plain;
+ entry.idx = iter.first;
+ entry.data = iter.second;
+
+ entries->push_back(entry);
+ count++;
+
+ CLS_LOG(20, "%s: adding entry %d entry.idx=\"%s\" e.key.name=\"%s\"",
+ __func__,
+ count,
+ escape_str(entry.idx).c_str(),
+ escape_str(e.key.name).c_str());
+
+ if (count >= int(max)) {
+ // NB: this looks redundant, but leave it in for the time being
+ return count;
+ }
+ } // iter for loop
+
+ return count;
+} // list_plain_entries_help
+
+/*
+ * Lists plain entries in either or both regions, the region of those
+ * beginning with an ASCII character or a non-ASCII character, which
+ * surround the "ugly" namespace used by special entries for versioned
+ * buckets.
+ *
+ * The entries parameter is not cleared and additional entries are
+ * appended to it.
+ */
+static int list_plain_entries(cls_method_context_t hctx,
+ const std::string& name_filter,
+ const std::string& marker,
+ uint32_t max,
+ std::list<rgw_cls_bi_entry>* entries,
+ bool* pmore,
+ const PlainEntriesRegion region = PlainEntriesRegion::Both)
+{
+ CLS_LOG(10, "entered %s: name_filter=\"%s\", marker=\"%s\", max=%d, region=%d",
+ __func__, escape_str(name_filter).c_str(), escape_str(marker).c_str(), max, static_cast<int>(region));
+ int r = 0;
+ bool end_key_reached = false;
+ bool more = false;
+ const size_t start_size = entries->size();
+
+ if (region <= PlainEntriesRegion::Both && marker < BI_PREFIX_BEGIN) {
+ // listing ascii plain namespace
+ int r = list_plain_entries_help(hctx, name_filter, marker, BI_PREFIX_BEGIN, max,
+ entries, end_key_reached, more);
+ CLS_LOG(20, "%s: first list_plain_entries_help r=%d, end_key_reached=%d, more=%d",
+ __func__, r, end_key_reached, more);
+ if (r < 0) {
+ return r;
+ }
+
+ // see if we're done for this call (there may be more for a later call)
+ if (r >= int(max) || !end_key_reached || (!more && region == PlainEntriesRegion::Low)) {
+ if (pmore) {
+ *pmore = more;
+ }
+
+ return int(entries->size() - start_size);
+ }
+
+ max = max - r;
+ }
+
+ if (region >= PlainEntriesRegion::Both) {
+ const std::string start_after_key = std::max(marker, BI_PREFIX_END);
+
+ // listing non-ascii plain namespace
+ r = list_plain_entries_help(hctx, name_filter, start_after_key, {}, max,
+ entries, end_key_reached, more);
+ CLS_LOG(20, "%s: second list_plain_entries_help r=%d, end_key_reached=%d, more=%d",
+ __func__, r, end_key_reached, more);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ if (pmore) {
+ *pmore = more;
+ }
+
+ return int(entries->size() - start_size);
+}
+
+static int list_instance_entries(cls_method_context_t hctx,
+ const string& name,
+ const string& marker,
+ uint32_t max,
+ list<rgw_cls_bi_entry> *entries,
+ bool *pmore)
+{
+ cls_rgw_obj_key key(name);
+ string first_instance_idx;
+ encode_obj_versioned_data_key(key, &first_instance_idx);
+ string start_after_key;
+
+ if (!name.empty()) {
+ start_after_key = first_instance_idx;
+ } else {
+ start_after_key = BI_PREFIX_CHAR;
+ start_after_key.append(bucket_index_prefixes[BI_BUCKET_OBJ_INSTANCE_INDEX]);
+ }
+ string filter = start_after_key;
+ if (bi_entry_gt(marker, start_after_key)) {
+ start_after_key = marker;
+ }
+ int count = 0;
+ map<string, bufferlist> keys;
+ bufferlist k;
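+ // cls_cxx_map_get_vals() returns keys strictly after its start key, so
+ // fetch the start key itself here and, if it exists, splice it back into
+ // the results below (adjusting max so the total stays within the limit).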
+ int ret = cls_cxx_map_get_val(hctx, start_after_key, &k);
+ if (ret < 0 && ret != -ENOENT) {
+ return ret;
+ }
+ bool found_first = (ret == 0);
+ if (found_first) {
+ --max;
+ }
+ if (max > 0) {
+ ret = cls_cxx_map_get_vals(hctx, start_after_key, string(), max,
+ &keys, pmore);
+ CLS_LOG(20, "%s: start_after_key=\"%s\" first_instance_idx=\"%s\" keys.size()=%d",
+ __func__, escape_str(start_after_key).c_str(),
+ escape_str(first_instance_idx).c_str(), (int)keys.size());
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ if (found_first) {
+ keys[start_after_key] = std::move(k);
+ }
+
+ for (auto iter = keys.begin(); iter != keys.end(); ++iter) {
+ rgw_cls_bi_entry entry;
+ entry.type = BIIndexType::Instance;
+ entry.idx = iter->first;
+ entry.data = iter->second;
+
+ if (!filter.empty() && entry.idx.compare(0, filter.size(), filter) != 0) {
+ /* we are skipping the rest of the entries */
+ if (pmore) {
+ *pmore = false;
+ }
+ return count;
+ }
+
+ CLS_LOG(20, "%s: entry.idx=\"%s\"", __func__, escape_str(entry.idx).c_str());
+
+ auto biter = entry.data.cbegin();
+
+ rgw_bucket_dir_entry e;
+ try {
+ decode(e, biter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode buffer (size=%d)", __func__, entry.data.length());
+ return -EIO;
+ }
+
+ if (!name.empty() && e.key.name != name) {
+ /* we are skipping the rest of the entries */
+ if (pmore) {
+ *pmore = false;
+ }
+ return count;
+ }
+
+ entries->push_back(entry);
+ count++;
+ start_after_key = entry.idx;
+ }
+
+ return count;
+}
+
+static int list_olh_entries(cls_method_context_t hctx,
+ const string& name,
+ const string& marker,
+ uint32_t max,
+ list<rgw_cls_bi_entry> *entries,
+ bool *pmore)
+{
+ cls_rgw_obj_key key(name);
+ string first_instance_idx;
+ encode_olh_data_key(key, &first_instance_idx);
+ string start_after_key;
+
+ if (!name.empty()) {
+ start_after_key = first_instance_idx;
+ } else {
+ start_after_key = BI_PREFIX_CHAR;
+ start_after_key.append(bucket_index_prefixes[BI_BUCKET_OLH_DATA_INDEX]);
+ }
+ string filter = start_after_key;
+ if (bi_entry_gt(marker, start_after_key)) {
+ start_after_key = marker;
+ }
+ int count = 0;
+ map<string, bufferlist> keys;
+ int ret;
+ bufferlist k;
+ ret = cls_cxx_map_get_val(hctx, start_after_key, &k);
+ if (ret < 0 && ret != -ENOENT) {
+ return ret;
+ }
+ bool found_first = (ret == 0);
+ if (found_first) {
+ --max;
+ }
+ if (max > 0) {
+ ret = cls_cxx_map_get_vals(hctx, start_after_key, string(), max,
+ &keys, pmore);
+ CLS_LOG(20, "%s: start_after_key=\"%s\", first_instance_idx=\"%s\", keys.size()=%d",
+ __func__, escape_str(start_after_key).c_str(),
+ escape_str(first_instance_idx).c_str(), (int)keys.size());
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (found_first) {
+ keys[start_after_key] = std::move(k);
+ }
+
+ for (auto iter = keys.begin(); iter != keys.end(); ++iter) {
+ rgw_cls_bi_entry entry;
+ entry.type = BIIndexType::OLH;
+ entry.idx = iter->first;
+ entry.data = iter->second;
+
+ if (!filter.empty() && entry.idx.compare(0, filter.size(), filter) != 0) {
+ /* we are skipping the rest of the entries */
+ if (pmore) {
+ *pmore = false;
+ }
+ return count;
+ }
+
+ CLS_LOG(20, "%s: entry.idx=\"%s\"", __func__, escape_str(entry.idx).c_str());
+
+ auto biter = entry.data.cbegin();
+
+ rgw_bucket_olh_entry e;
+ try {
+ decode(e, biter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode buffer (size=%d)", __func__, entry.data.length());
+ return -EIO;
+ }
+
+ if (!name.empty() && e.key.name != name) {
+ /* we are skipping the rest of the entries */
+ if (pmore) {
+ *pmore = false;
+ }
+ return count;
+ }
+
+ entries->push_back(entry);
+ count++;
+ start_after_key = entry.idx;
+ }
+
+ return count;
+}
+
+/* Lists all the entries that appear in a bucket index listing.
+ *
+ * It may not be obvious why this function calls three other "segment"
+ * functions (list_plain_entries (twice), list_instance_entries,
+ * list_olh_entries) that each list segments of the index space rather
+ * than just move a marker through the space from start to end. The
+ * reason is that a name filter may be provided in the op, and in that
+ * case most entries will be skipped over, and small segments within
+ * each larger segment will be listed.
+ *
+ * Ideally, each of the three segment functions should be able to
+ * handle a marker and filter, if either/both is provided,
+ * efficiently. So, for example, if the marker is after the segment,
+ * ideally return quickly rather than iterating through entries in the
+ * segment.
+ *
+ * Additionally, each of the three segment functions, if successful,
+ * is expected to return the number of entries added to the output
+ * list as a non-negative value. As per usual, negative return values
+ * indicate error conditions.
+ */
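+// The overall traversal order is: plain (ASCII) entries, instance entries,
+// OLH entries, then plain (non-ASCII) entries, matching how these keys sort
+// in the omap.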
+static int rgw_bi_list_op(cls_method_context_t hctx,
+ bufferlist *in,
+ bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ // decode request
+ rgw_cls_bi_list_op op;
+ auto iter = in->cbegin();
+ try {
+ decode(op, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s: failed to decode request", __func__);
+ return -EINVAL;
+ }
+
+ constexpr uint32_t MAX_BI_LIST_ENTRIES = 1000;
+ const uint32_t max = std::min(op.max, MAX_BI_LIST_ENTRIES);
+
+ CLS_LOG(20, "%s: op.marker=\"%s\", op.name_filter=\"%s\", op.max=%u max=%u",
+ __func__, escape_str(op.marker).c_str(), escape_str(op.name_filter).c_str(),
+ op.max, max);
+
+ int ret;
+ uint32_t count = 0;
+ bool more = false;
+ rgw_cls_bi_list_ret op_ret;
+
+ ret = list_plain_entries(hctx, op.name_filter, op.marker, max,
+ &op_ret.entries, &more, PlainEntriesRegion::Low);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: list_plain_entries (low) returned ret=%d, marker=\"%s\", filter=\"%s\", max=%d",
+ __func__, ret, escape_str(op.marker).c_str(), escape_str(op.name_filter).c_str(), max);
+ return ret;
+ }
+
+ count = ret;
+ CLS_LOG(20, "%s: found %d plain ascii (low) entries, count=%u", __func__, ret, count);
+
+ if (!more) {
+ ret = list_instance_entries(hctx, op.name_filter, op.marker, max - count, &op_ret.entries, &more);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: list_instance_entries returned ret=%d", __func__, ret);
+ return ret;
+ }
+
+ count += ret;
+ CLS_LOG(20, "%s: found %d instance entries, count=%u", __func__, ret, count);
+ }
+
+ if (!more) {
+ ret = list_olh_entries(hctx, op.name_filter, op.marker, max - count, &op_ret.entries, &more);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: list_olh_entries returned ret=%d", __func__, ret);
+ return ret;
+ }
+
+ count += ret;
+ CLS_LOG(20, "%s: found %d olh entries, count=%u", __func__, ret, count);
+ }
+
+ if (!more) {
+ ret = list_plain_entries(hctx, op.name_filter, op.marker, max - count,
+ &op_ret.entries, &more, PlainEntriesRegion::High);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: %s: list_plain_entries (high) returned ret=%d, marker=\"%s\", filter=\"%s\", max=%d",
+ __func__, ret, escape_str(op.marker).c_str(), escape_str(op.name_filter).c_str(), max);
+ return ret;
+ }
+
+ count += ret;
+ CLS_LOG(20, "%s: found %d non-ascii (high) plain entries, count=%u", __func__, ret, count);
+ }
+
+ op_ret.is_truncated = (count > max) || more;
+ while (count > max) {
+ op_ret.entries.pop_back();
+ count--;
+ }
+
+ CLS_LOG(20, "%s: returning %lu entries, is_truncated=%d", __func__, op_ret.entries.size(), op_ret.is_truncated);
+ encode(op_ret, *out);
+
+ return 0;
+} // rgw_bi_list_op
+
+
+int bi_log_record_decode(bufferlist& bl, rgw_bi_log_entry& e)
+{
+ auto iter = bl.cbegin();
+ try {
+ decode(e, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: failed to decode rgw_bi_log_entry");
+ return -EIO;
+ }
+ return 0;
+}
+
+
+static int bi_log_iterate_entries(cls_method_context_t hctx,
+ const string& marker,
+ const string& end_marker,
+ string& key_iter,
+ uint32_t max_entries,
+ bool *truncated,
+ int (*cb)(cls_method_context_t, const string&, rgw_bi_log_entry&, void *),
+ void *param)
+{
+ CLS_LOG(10, "bi_log_iterate_range");
+
+ map<string, bufferlist> keys;
+ string filter_prefix, end_key;
+ uint32_t i = 0;
+ string key;
+
+ if (truncated)
+ *truncated = false;
+
+ string start_after_key;
+ if (key_iter.empty()) {
+ key = BI_PREFIX_CHAR;
+ key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
+ key.append(marker);
+
+ start_after_key = key;
+ } else {
+ start_after_key = key_iter;
+ }
+
+ if (end_marker.empty()) {
+ end_key = BI_PREFIX_CHAR;
+ end_key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX + 1]);
+ } else {
+ end_key = BI_PREFIX_CHAR;
+ end_key.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
+ end_key.append(end_marker);
+ }
+
+ CLS_LOG(10, "bi_log_iterate_entries start_after_key=%s end_key=%s",
+ start_after_key.c_str(), end_key.c_str());
+
+ string filter;
+
+ int ret = cls_cxx_map_get_vals(hctx, start_after_key, filter, max_entries,
+ &keys, truncated);
+ if (ret < 0)
+ return ret;
+
+ auto iter = keys.begin();
+ if (iter == keys.end())
+ return 0;
+
+ uint32_t num_keys = keys.size();
+
+ for (; iter != keys.end(); ++iter,++i) {
+ const string& key = iter->first;
+ rgw_bi_log_entry e;
+
+ CLS_LOG(10, "bi_log_iterate_entries key=%s bl.length=%d", key.c_str(), (int)iter->second.length());
+
+ if (key.compare(end_key) > 0) {
+ key_iter = key;
+ if (truncated) {
+ *truncated = false;
+ }
+ return 0;
+ }
+
+ ret = bi_log_record_decode(iter->second, e);
+ if (ret < 0)
+ return ret;
+
+ ret = cb(hctx, key, e, param);
+ if (ret < 0)
+ return ret;
+
+ if (i == num_keys - 1) {
+ key_iter = key;
+ }
+ }
+
+ return 0;
+}
+
+static int bi_log_list_cb(cls_method_context_t hctx, const string& key, rgw_bi_log_entry& info, void *param)
+{
+ list<rgw_bi_log_entry> *l = (list<rgw_bi_log_entry> *)param;
+ l->push_back(info);
+ return 0;
+}
+
+static int bi_log_list_entries(cls_method_context_t hctx, const string& marker,
+ uint32_t max, list<rgw_bi_log_entry>& entries, bool *truncated)
+{
+ string key_iter;
+ string end_marker;
+ int ret = bi_log_iterate_entries(hctx, marker, end_marker,
+ key_iter, max, truncated,
+ bi_log_list_cb, &entries);
+ return ret;
+}
+
+static int rgw_bi_log_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_bi_log_list_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bi_log_list(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ cls_rgw_bi_log_list_ret op_ret;
+ int ret = bi_log_list_entries(hctx, op.marker, op.max, op_ret.entries, &op_ret.truncated);
+ if (ret < 0)
+ return ret;
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+static int rgw_bi_log_trim(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_bi_log_trim_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_bi_log_list(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ string key_begin(1, BI_PREFIX_CHAR);
+ key_begin.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
+ key_begin.append(op.start_marker);
+
+ string key_end;
+ if (op.end_marker.empty()) {
+ key_end = BI_PREFIX_CHAR;
+ key_end.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX + 1]);
+ } else {
+ key_end = BI_PREFIX_CHAR;
+ key_end.append(bucket_index_prefixes[BI_BUCKET_LOG_INDEX]);
+ key_end.append(op.end_marker);
+ // cls_cxx_map_remove_range() expects one-past-end
+ key_end.append(1, '\0');
+ }
+
+ // list a single key to detect whether the range is empty
+ const size_t max_entries = 1;
+ std::set<std::string> keys;
+ bool more = false;
+
+ int rc = cls_cxx_map_get_keys(hctx, key_begin, max_entries, &keys, &more);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: cls_cxx_map_get_keys failed rc=%d", rc);
+ return rc;
+ }
+
+ if (keys.empty()) {
+ CLS_LOG(20, "range is empty key_begin=%s", key_begin.c_str());
+ return -ENODATA;
+ }
+
+ const std::string& first_key = *keys.begin();
+ if (key_end < first_key) {
+ CLS_LOG(20, "listed key %s past key_end=%s", first_key.c_str(), key_end.c_str());
+ return -ENODATA;
+ }
+
+ CLS_LOG(20, "listed key %s, removing through %s",
+ first_key.c_str(), key_end.c_str());
+
+ rc = cls_cxx_map_remove_range(hctx, first_key, key_end);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: cls_cxx_map_remove_range failed rc=%d", rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int rgw_bi_log_resync(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
+ return rc;
+ }
+
+ bufferlist bl;
+
+ rgw_bi_log_entry entry;
+
+ entry.timestamp = real_clock::now();
+ entry.op = RGWModifyOp::CLS_RGW_OP_RESYNC;
+ entry.state = RGWPendingState::CLS_RGW_STATE_COMPLETE;
+
+ string key;
+ bi_log_index_key(hctx, key, entry.id, header.ver);
+
+ encode(entry, bl);
+
+ if (entry.id > header.max_marker)
+ header.max_marker = entry.id;
+
+ header.syncstopped = false;
+
+ rc = cls_cxx_map_set_val(hctx, key, &bl);
+ if (rc < 0)
+ return rc;
+
+ return write_bucket_header(hctx, &header);
+}
+
+static int rgw_bi_log_stop(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: rgw_bucket_complete_op(): failed to read header\n");
+ return rc;
+ }
+
+ bufferlist bl;
+
+ rgw_bi_log_entry entry;
+
+ entry.timestamp = real_clock::now();
+ entry.op = RGWModifyOp::CLS_RGW_OP_SYNCSTOP;
+ entry.state = RGWPendingState::CLS_RGW_STATE_COMPLETE;
+
+ string key;
+ bi_log_index_key(hctx, key, entry.id, header.ver);
+
+ encode(entry, bl);
+
+ if (entry.id > header.max_marker)
+ header.max_marker = entry.id;
+ header.syncstopped = true;
+
+ rc = cls_cxx_map_set_val(hctx, key, &bl);
+ if (rc < 0)
+ return rc;
+
+ return write_bucket_header(hctx, &header);
+}
+
+
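+/* Usage log records are stored under two omap key layouts (see the snprintf
+ * formats below): by time, "<epoch:011>_<user>_<bucket>", and by user,
+ * "<user>_<epoch:011>_<bucket>". The zero-padded 11-digit epoch keeps
+ * lexical order consistent with chronological order.
+ */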
+static void usage_record_prefix_by_time(uint64_t epoch, string& key)
+{
+ char buf[32];
+ snprintf(buf, sizeof(buf), "%011llu", (long long unsigned)epoch);
+ key = buf;
+}
+
+static void usage_record_prefix_by_user(const string& user, uint64_t epoch, string& key)
+{
+ char buf[user.size() + 32];
+ snprintf(buf, sizeof(buf), "%s_%011llu_", user.c_str(), (long long unsigned)epoch);
+ key = buf;
+}
+
+static void usage_record_name_by_time(uint64_t epoch, const string& user, const string& bucket, string& key)
+{
+ char buf[32 + user.size() + bucket.size()];
+ snprintf(buf, sizeof(buf), "%011llu_%s_%s", (long long unsigned)epoch, user.c_str(), bucket.c_str());
+ key = buf;
+}
+
+static void usage_record_name_by_user(const string& user, uint64_t epoch, const string& bucket, string& key)
+{
+ char buf[32 + user.size() + bucket.size()];
+ snprintf(buf, sizeof(buf), "%s_%011llu_%s", user.c_str(), (long long unsigned)epoch, bucket.c_str());
+ key = buf;
+}
+
+static int usage_record_decode(bufferlist& record_bl, rgw_usage_log_entry& e)
+{
+ auto kiter = record_bl.cbegin();
+ try {
+ decode(e, kiter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: usage_record_decode(): failed to decode record_bl\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int rgw_user_usage_log_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+
+ auto in_iter = in->cbegin();
+ rgw_cls_usage_log_add_op op;
+
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_user_usage_log_add(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ rgw_usage_log_info& info = op.info;
+
+ for (auto iter = info.entries.begin(); iter != info.entries.end(); ++iter) {
+ rgw_usage_log_entry& entry = *iter;
+ string key_by_time;
+
+ rgw_user *puser = (entry.payer.empty() ? &entry.owner : &entry.payer);
+
+ usage_record_name_by_time(entry.epoch, puser->to_str(), entry.bucket, key_by_time);
+
+ CLS_LOG(10, "rgw_user_usage_log_add user=%s bucket=%s", puser->to_str().c_str(), entry.bucket.c_str());
+
+ bufferlist record_bl;
+ int ret = cls_cxx_map_get_val(hctx, key_by_time, &record_bl);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(1, "ERROR: rgw_user_usage_log_add(): cls_cxx_map_read_key returned %d", ret);
+ return -EINVAL;
+ }
+ if (ret >= 0) {
+ rgw_usage_log_entry e;
+ ret = usage_record_decode(record_bl, e);
+ if (ret < 0)
+ return ret;
+ CLS_LOG(10, "rgw_user_usage_log_add aggregating existing bucket\n");
+ entry.aggregate(e);
+ }
+
+ bufferlist new_record_bl;
+ encode(entry, new_record_bl);
+ ret = cls_cxx_map_set_val(hctx, key_by_time, &new_record_bl);
+ if (ret < 0)
+ return ret;
+
+ string key_by_user;
+ usage_record_name_by_user(puser->to_str(), entry.epoch, entry.bucket, key_by_user);
+ ret = cls_cxx_map_set_val(hctx, key_by_user, &new_record_bl);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int usage_iterate_range(cls_method_context_t hctx, uint64_t start, uint64_t end, const string& user,
+ const string& bucket, string& key_iter, uint32_t max_entries, bool *truncated,
+ int (*cb)(cls_method_context_t, const string&, rgw_usage_log_entry&, void *),
+ void *param)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+
+ map<string, bufferlist> keys;
+ string filter_prefix;
+ string start_key, end_key;
+ bool by_user = !user.empty();
+ string user_key;
+ bool truncated_status = false;
+
+ ceph_assert(truncated != nullptr);
+
+ if (!by_user) {
+ usage_record_prefix_by_time(end, end_key);
+ } else {
+ user_key = user;
+ user_key.append("_");
+ }
+
+ if (key_iter.empty()) {
+ if (by_user) {
+ usage_record_prefix_by_user(user, start, start_key);
+ } else {
+ usage_record_prefix_by_time(start, start_key);
+ }
+ } else {
+ start_key = key_iter;
+ }
+
+ CLS_LOG(20, "usage_iterate_range start_key=%s", start_key.c_str());
+ int ret = cls_cxx_map_get_vals(hctx, start_key, filter_prefix, max_entries, &keys, &truncated_status);
+ if (ret < 0)
+ return ret;
+
+ *truncated = truncated_status;
+
+ auto iter = keys.begin();
+ if (iter == keys.end())
+ return 0;
+
+ for (; iter != keys.end(); ++iter) {
+ const string& key = iter->first;
+ rgw_usage_log_entry e;
+
+ key_iter = key;
+ if (!by_user && key.compare(end_key) >= 0) {
+ CLS_LOG(20, "usage_iterate_range reached key=%s, done", key.c_str());
+ *truncated = false;
+ key_iter = key;
+ return 0;
+ }
+
+ if (by_user && key.compare(0, user_key.size(), user_key) != 0) {
+ CLS_LOG(20, "usage_iterate_range reached key=%s, done", key.c_str());
+ *truncated = false;
+ key_iter = key;
+ return 0;
+ }
+
+ ret = usage_record_decode(iter->second, e);
+ if (ret < 0)
+ return ret;
+
+ if (!bucket.empty() && bucket.compare(e.bucket))
+ continue;
+
+ if (e.epoch < start)
+ continue;
+
+ /* keys are sorted by epoch, so once we're past end we're done */
+ if (e.epoch >= end) {
+ *truncated = false;
+ return 0;
+ }
+
+ ret = cb(hctx, key, e, param);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int usage_log_read_cb(cls_method_context_t hctx, const string& key, rgw_usage_log_entry& entry, void *param)
+{
+ map<rgw_user_bucket, rgw_usage_log_entry> *usage = (map<rgw_user_bucket, rgw_usage_log_entry> *)param;
+ rgw_user *puser;
+ if (!entry.payer.empty()) {
+ puser = &entry.payer;
+ } else {
+ puser = &entry.owner;
+ }
+ rgw_user_bucket ub(puser->to_str(), entry.bucket);
+ rgw_usage_log_entry& le = (*usage)[ub];
+ le.aggregate(entry);
+
+ return 0;
+}
+
+int rgw_user_usage_log_read(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+
+ auto in_iter = in->cbegin();
+ rgw_cls_usage_log_read_op op;
+
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_user_usage_log_read(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ rgw_cls_usage_log_read_ret ret_info;
+ map<rgw_user_bucket, rgw_usage_log_entry> *usage = &ret_info.usage;
+ string iter = op.iter;
+#define MAX_ENTRIES 1000
+ uint32_t max_entries = (op.max_entries ? op.max_entries : MAX_ENTRIES);
+ int ret = usage_iterate_range(hctx, op.start_epoch, op.end_epoch, op.owner, op.bucket, iter, max_entries, &ret_info.truncated, usage_log_read_cb, (void *)usage);
+ if (ret < 0)
+ return ret;
+
+ if (ret_info.truncated)
+ ret_info.next_iter = iter;
+
+ encode(ret_info, *out);
+ return 0;
+}
+
+static int usage_log_trim_cb(cls_method_context_t hctx, const string& key, rgw_usage_log_entry& entry, void *param)
+{
+ bool *found = (bool *)param;
+ if (found) {
+ *found = true;
+ }
+ string key_by_time;
+ string key_by_user;
+
+ string o = entry.owner.to_str();
+ usage_record_name_by_time(entry.epoch, o, entry.bucket, key_by_time);
+ usage_record_name_by_user(o, entry.epoch, entry.bucket, key_by_user);
+
+ int ret = cls_cxx_map_remove_key(hctx, key_by_time);
+ if (ret < 0)
+ return ret;
+
+ return cls_cxx_map_remove_key(hctx, key_by_user);
+}
+
+int rgw_user_usage_log_trim(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+
+ /* only continue if object exists! */
+ int ret = cls_cxx_stat(hctx, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ auto in_iter = in->cbegin();
+ rgw_cls_usage_log_trim_op op;
+
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_user_log_usage_log_trim(): failed to decode request\n");
+ return -EINVAL;
+ }
+
+ string iter;
+ bool more;
+ bool found = false;
+#define MAX_USAGE_TRIM_ENTRIES 1000
+ ret = usage_iterate_range(hctx, op.start_epoch, op.end_epoch, op.user, op.bucket, iter, MAX_USAGE_TRIM_ENTRIES, &more, usage_log_trim_cb, (void *)&found);
+ if (ret < 0)
+ return ret;
+
+ if (!more && !found)
+ return -ENODATA;
+
+ return 0;
+}
+
+int rgw_usage_log_clear(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+
+ int ret = cls_cxx_map_clear(hctx);
+ /* if object doesn't exist all the logs are cleared anyway */
+ if (ret == -ENOENT)
+ ret = 0;
+
+ return ret;
+}
+
+/*
+ * We hold the garbage collection chain data under two different
+ * indexes: the first 'name' index keeps them under a unique tag that
+ * represents the chains, and a second 'time' index keeps them by
+ * their expiration timestamp. Each is prefixed differently (see
+ * gc_index_prefixes below).
+ *
+ * Since key-value data is listed in lexical order by keys, generally
+ * the name entries are retrieved first and then the time entries.
+ * When listing the entries via `gc_iterate_entries` one parameter is
+ * a marker, and if we were to pass "1_" (i.e.,
+ * gc_index_prefixes[GC_OBJ_TIME_INDEX]), the listing would skip over
+ * the 'name' entries and begin with the 'time' entries.
+ *
+ * Furthermore, the times are converted to strings such that lexical
+ * order correlates with chronological order, so the entries are
+ * returned chronologically from the earliest expiring to the latest
+ * expiring. This allows a scan to start at "1_" and keep retrieving
+ * chunks of entries; as long as they are prior to the current time,
+ * they're expired and processing can continue.
+ */
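+// For example, a chain tagged "mytag" (an illustrative tag) is stored under
+// "0_mytag" in the name index and under "1_" followed by the string form of
+// its expiration time (as produced by get_time_key()) in the time index.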
+#define GC_OBJ_NAME_INDEX 0
+#define GC_OBJ_TIME_INDEX 1
+
+static string gc_index_prefixes[] = { "0_",
+ "1_" };
+
+static void prepend_index_prefix(const string& src, int index, string *dest)
+{
+ *dest = gc_index_prefixes[index];
+ dest->append(src);
+}
+
+static int gc_omap_get(cls_method_context_t hctx, int type, const string& key, cls_rgw_gc_obj_info *info)
+{
+ string index;
+ prepend_index_prefix(key, type, &index);
+
+ int ret = read_omap_entry(hctx, index, info);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int gc_omap_set(cls_method_context_t hctx, int type, const string& key, const cls_rgw_gc_obj_info *info)
+{
+ bufferlist bl;
+ encode(*info, bl);
+
+ string index = gc_index_prefixes[type];
+ index.append(key);
+
+ int ret = cls_cxx_map_set_val(hctx, index, &bl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int gc_omap_remove(cls_method_context_t hctx, int type, const string& key)
+{
+ string index = gc_index_prefixes[type];
+ index.append(key);
+
+ int ret = cls_cxx_map_remove_key(hctx, index);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static bool key_in_index(const string& key, int index_type)
+{
+ const string& prefix = gc_index_prefixes[index_type];
+ return (key.compare(0, prefix.size(), prefix) == 0);
+}
+
+
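+// gc_update_entry(): (re)schedule a GC chain. Any existing time-index record
+// for the tag is removed first, the expiration is recomputed as now +
+// expiration_secs, and the entry is then written under both indexes.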
+static int gc_update_entry(cls_method_context_t hctx, uint32_t expiration_secs,
+ cls_rgw_gc_obj_info& info)
+{
+ cls_rgw_gc_obj_info old_info;
+ int ret = gc_omap_get(hctx, GC_OBJ_NAME_INDEX, info.tag, &old_info);
+ if (ret == 0) {
+ string key;
+ get_time_key(old_info.time, &key);
+ ret = gc_omap_remove(hctx, GC_OBJ_TIME_INDEX, key);
+ if (ret < 0 && ret != -ENOENT) {
+ CLS_LOG(0, "ERROR: failed to remove key=%s", key.c_str());
+ return ret;
+ }
+ }
+
+ // calculate time and time key
+ info.time = ceph::real_clock::now();
+ info.time += make_timespan(expiration_secs);
+ string time_key;
+ get_time_key(info.time, &time_key);
+
+ if (info.chain.objs.empty()) {
+ CLS_LOG(0,
+ "WARNING: %s setting GC log entry with zero-length chain, "
+ "tag='%s', timekey='%s'",
+ __func__, info.tag.c_str(), time_key.c_str());
+ }
+
+ ret = gc_omap_set(hctx, GC_OBJ_NAME_INDEX, info.tag, &info);
+ if (ret < 0)
+ return ret;
+
+ ret = gc_omap_set(hctx, GC_OBJ_TIME_INDEX, time_key, &info);
+ if (ret < 0)
+ goto done_err;
+
+ return 0;
+
+done_err:
+
+ CLS_LOG(0, "ERROR: gc_set_entry error info.tag=%s, ret=%d",
+ info.tag.c_str(), ret);
+ gc_omap_remove(hctx, GC_OBJ_NAME_INDEX, info.tag);
+
+ return ret;
+}
+
+static int gc_defer_entry(cls_method_context_t hctx, const string& tag, uint32_t expiration_secs)
+{
+ cls_rgw_gc_obj_info info;
+ int ret = gc_omap_get(hctx, GC_OBJ_NAME_INDEX, tag, &info);
+ if (ret < 0)
+ return ret;
+ return gc_update_entry(hctx, expiration_secs, info);
+}
+
+int gc_record_decode(bufferlist& bl, cls_rgw_gc_obj_info& e)
+{
+ auto iter = bl.cbegin();
+ try {
+ decode(e, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: failed to decode cls_rgw_gc_obj_info");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int rgw_cls_gc_set_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_gc_set_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_gc_set_entry(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ return gc_update_entry(hctx, op.expiration_secs, op.info);
+}
+
+static int rgw_cls_gc_defer_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_gc_defer_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_gc_defer_entry(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ return gc_defer_entry(hctx, op.tag, op.expiration_secs);
+}
+
+static int gc_iterate_entries(cls_method_context_t hctx,
+ const string& marker,
+ bool expired_only,
+ string& out_marker,
+ uint32_t max_entries,
+ bool *truncated,
+ int (*cb)(cls_method_context_t,
+ const string&,
+ cls_rgw_gc_obj_info&,
+ void *),
+ void *param)
+{
+ CLS_LOG(10, "gc_iterate_entries");
+
+ map<string, bufferlist> keys;
+ string filter_prefix, end_key;
+ string key;
+
+ if (truncated)
+ *truncated = false;
+
+ string start_key;
+ if (marker.empty()) {
+ prepend_index_prefix(marker, GC_OBJ_TIME_INDEX, &start_key);
+ } else {
+ start_key = marker;
+ }
+
+ if (expired_only) {
+ real_time now = ceph::real_clock::now();
+ string now_str;
+ get_time_key(now, &now_str);
+ prepend_index_prefix(now_str, GC_OBJ_TIME_INDEX, &end_key);
+
+ CLS_LOG(10, "gc_iterate_entries end_key=%s", end_key.c_str());
+ }
+
+ string filter;
+
+ int ret = cls_cxx_map_get_vals(hctx, start_key, filter, max_entries,
+ &keys, truncated);
+ if (ret < 0)
+ return ret;
+
+ auto iter = keys.begin();
+ if (iter == keys.end()) {
+ // if keys empty must not come back as truncated
+ ceph_assert(!truncated || !(*truncated));
+ return 0;
+ }
+
+ const string* last_key = nullptr; // last key processed, for end-marker
+ for (; iter != keys.end(); ++iter) {
+ const string& key = iter->first;
+ cls_rgw_gc_obj_info e;
+
+ CLS_LOG(10, "gc_iterate_entries key=%s", key.c_str());
+
+ if (!end_key.empty() && key.compare(end_key) >= 0) {
+ if (truncated)
+ *truncated = false;
+ return 0;
+ }
+
+ if (!key_in_index(key, GC_OBJ_TIME_INDEX)) {
+ if (truncated)
+ *truncated = false;
+ return 0;
+ }
+
+ ret = gc_record_decode(iter->second, e);
+ if (ret < 0)
+ return ret;
+
+ ret = cb(hctx, key, e, param);
+ if (ret < 0)
+ return ret;
+ last_key = &(iter->first); // update when callback successful
+ }
+
+ // set the out marker if either caller does not capture truncated or
+ // if they do capture and we are truncated
+ if (!truncated || *truncated) {
+ assert(last_key);
+ out_marker = *last_key;
+ }
+
+ return 0;
+}
+
+static int gc_list_cb(cls_method_context_t hctx, const string& key, cls_rgw_gc_obj_info& info, void *param)
+{
+ list<cls_rgw_gc_obj_info> *l = (list<cls_rgw_gc_obj_info> *)param;
+ l->push_back(info);
+ return 0;
+}
+
+static int gc_list_entries(cls_method_context_t hctx, const string& marker,
+ uint32_t max, bool expired_only,
+ list<cls_rgw_gc_obj_info>& entries, bool *truncated, string& next_marker)
+{
+ int ret = gc_iterate_entries(hctx, marker, expired_only,
+ next_marker, max, truncated,
+ gc_list_cb, &entries);
+ return ret;
+}
+
+static int rgw_cls_gc_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_gc_list_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_gc_list(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ cls_rgw_gc_list_ret op_ret;
+#define GC_LIST_ENTRIES_DEFAULT 128
+ int ret = gc_list_entries(hctx, op.marker, (op.max ? op.max : GC_LIST_ENTRIES_DEFAULT), op.expired_only,
+ op_ret.entries, &op_ret.truncated, op_ret.next_marker);
+ if (ret < 0)
+ return ret;
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+static int gc_remove(cls_method_context_t hctx, vector<string>& tags)
+{
+ for (auto iter = tags.begin(); iter != tags.end(); ++iter) {
+ string& tag = *iter;
+ cls_rgw_gc_obj_info info;
+ int ret = gc_omap_get(hctx, GC_OBJ_NAME_INDEX, tag, &info);
+ if (ret == -ENOENT) {
+ CLS_LOG(0, "couldn't find tag in name index tag=%s", tag.c_str());
+ continue;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ string time_key;
+ get_time_key(info.time, &time_key);
+ ret = gc_omap_remove(hctx, GC_OBJ_TIME_INDEX, time_key);
+ if (ret < 0 && ret != -ENOENT)
+ return ret;
+ if (ret == -ENOENT) {
+ CLS_LOG(0, "couldn't find key in time index key=%s", time_key.c_str());
+ }
+
+ ret = gc_omap_remove(hctx, GC_OBJ_NAME_INDEX, tag);
+ if (ret < 0 && ret != -ENOENT)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rgw_cls_gc_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_gc_remove_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_gc_remove(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ return gc_remove(hctx, op.tags);
+}
+
+static int rgw_cls_lc_get_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_lc_get_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_set_entry(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ cls_rgw_lc_entry lc_entry;
+ int ret = read_omap_entry(hctx, op.marker, &lc_entry);
+ if (ret < 0)
+ return ret;
+
+ cls_rgw_lc_get_entry_ret op_ret(std::move(lc_entry));
+ encode(op_ret, *out);
+ return 0;
+}
+
+
+static int rgw_cls_lc_set_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_lc_set_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_set_entry(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ bufferlist bl;
+ encode(op.entry, bl);
+
+ int ret = cls_cxx_map_set_val(hctx, op.entry.bucket, &bl);
+ return ret;
+}
+
+static int rgw_cls_lc_rm_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_lc_rm_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_rm_entry(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ int ret = cls_cxx_map_remove_key(hctx, op.entry.bucket);
+ return ret;
+}
+
+static int rgw_cls_lc_get_next_entry(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+ cls_rgw_lc_get_next_entry_ret op_ret;
+ cls_rgw_lc_get_next_entry_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_get_next_entry: failed to decode op\n");
+ return -EINVAL;
+ }
+
+ map<string, bufferlist> vals;
+ string filter_prefix;
+ bool more;
+ int ret = cls_cxx_map_get_vals(hctx, op.marker, filter_prefix, 1, &vals, &more);
+ if (ret < 0)
+ return ret;
+ cls_rgw_lc_entry entry;
+ if (!vals.empty()) {
+ auto it = vals.begin();
+ in_iter = it->second.begin();
+ try {
+ decode(entry, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_get_next_entry(): failed to decode entry\n");
+ return -EIO;
+ }
+ }
+ op_ret.entry = entry;
+ encode(op_ret, *out);
+ return 0;
+}
+
+static int rgw_cls_lc_list_entries(cls_method_context_t hctx, bufferlist *in,
+ bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ cls_rgw_lc_list_entries_op op;
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_list_entries(): failed to decode op\n");
+ return -EINVAL;
+ }
+
+ cls_rgw_lc_list_entries_ret op_ret(op.compat_v);
+ map<string, bufferlist> vals;
+ string filter_prefix;
+ int ret = cls_cxx_map_get_vals(hctx, op.marker, filter_prefix, op.max_entries,
+ &vals, &op_ret.is_truncated);
+ if (ret < 0)
+ return ret;
+ for (auto it = vals.begin(); it != vals.end(); ++it) {
+ cls_rgw_lc_entry entry;
+ auto iter = it->second.cbegin();
+ try {
+ decode(entry, iter);
+ } catch (buffer::error& err) {
+ /* try backward compat */
+ pair<string, int> oe;
+ try {
+ iter = it->second.begin();
+ decode(oe, iter);
+ entry = {oe.first, 0 /* start */, uint32_t(oe.second)};
+ } catch(buffer::error& err) {
+ CLS_LOG(
+ 1, "ERROR: rgw_cls_lc_list_entries(): failed to decode entry\n");
+ return -EIO;
+ }
+ }
+ op_ret.entries.push_back(entry);
+ }
+ encode(op_ret, *out);
+ return 0;
+}
+
+static int rgw_cls_lc_put_head(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_lc_put_head_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_lc_put_head(): failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ bufferlist bl;
+ encode(op.head, bl);
+ int ret = cls_cxx_map_write_header(hctx, &bl);
+ return ret;
+}
+
+static int rgw_cls_lc_get_head(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ bufferlist bl;
+ int ret = cls_cxx_map_read_header(hctx, &bl);
+ if (ret < 0)
+ return ret;
+ cls_rgw_lc_obj_head head;
+ if (bl.length() != 0) {
+ auto iter = bl.cbegin();
+ try {
+ decode(head, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: rgw_cls_lc_get_head(): failed to decode entry %s",err.what());
+ return -EINVAL;
+ }
+ } else {
+ head.start_date = 0;
+ head.marker.clear();
+ }
+ cls_rgw_lc_get_head_ret op_ret;
+ op_ret.head = head;
+ encode(op_ret, *out);
+ return 0;
+}
+
+static int rgw_reshard_add(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_reshard_add_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_reshard_add: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+
+ string key;
+ op.entry.get_key(&key);
+
+ bufferlist bl;
+ encode(op.entry, bl);
+ int ret = cls_cxx_map_set_val(hctx, key, &bl);
+ if (ret < 0) {
+ CLS_ERR("error adding reshard job for bucket %s with key %s",op.entry.bucket_name.c_str(), key.c_str());
+ return ret;
+ }
+
+ return ret;
+}
+
+static int rgw_reshard_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ cls_rgw_reshard_list_op op;
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n");
+ return -EINVAL;
+ }
+ cls_rgw_reshard_list_ret op_ret;
+ map<string, bufferlist> vals;
+ string filter_prefix;
+#define MAX_RESHARD_LIST_ENTRIES 1000
+ /* one extra entry for identifying truncation */
+ int32_t max = (op.max && (op.max < MAX_RESHARD_LIST_ENTRIES) ? op.max : MAX_RESHARD_LIST_ENTRIES);
+ int ret = cls_cxx_map_get_vals(hctx, op.marker, filter_prefix, max, &vals, &op_ret.is_truncated);
+ if (ret < 0)
+ return ret;
+ cls_rgw_reshard_entry entry;
+ int i = 0;
+ for (auto it = vals.begin(); i < (int)op.max && it != vals.end(); ++it, ++i) {
+ auto iter = it->second.cbegin();
+ try {
+ decode(entry, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n");
+ return -EIO;
+ }
+ op_ret.entries.push_back(entry);
+ }
+ encode(op_ret, *out);
+ return 0;
+}
+
+static int rgw_reshard_get(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_reshard_get_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_reshard_get: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ string key;
+ cls_rgw_reshard_entry entry;
+ op.entry.get_key(&key);
+ int ret = read_omap_entry(hctx, key, &entry);
+ if (ret < 0) {
+ return ret;
+ }
+
+ cls_rgw_reshard_get_ret op_ret;
+ op_ret.entry = entry;
+ encode(op_ret, *out);
+ return 0;
+}
+
+static int rgw_reshard_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ auto in_iter = in->cbegin();
+
+ cls_rgw_reshard_remove_op op;
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: rgw_cls_rehard_remove: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ string key;
+ cls_rgw_reshard_entry entry;
+ cls_rgw_reshard_entry::generate_key(op.tenant, op.bucket_name, &key);
+ int ret = read_omap_entry(hctx, key, &entry);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (!op.bucket_id.empty() &&
+ entry.bucket_id != op.bucket_id) {
+ return 0;
+ }
+
+ ret = cls_cxx_map_remove_key(hctx, key);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: failed to remove key: key=%s ret=%d", key.c_str(), ret);
+ return 0;
+ }
+ return ret;
+}
+
+static int rgw_set_bucket_resharding(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ cls_rgw_set_bucket_resharding_op op;
+
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rgw_set_bucket_resharding: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
+ return rc;
+ }
+
+ header.new_instance.set_status(op.entry.new_bucket_instance_id, op.entry.num_shards, op.entry.reshard_status);
+
+ return write_bucket_header(hctx, &header);
+}
+
+static int rgw_clear_bucket_resharding(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ cls_rgw_clear_bucket_resharding_op op;
+
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: cls_rgw_clear_bucket_resharding: failed to decode entry\n");
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
+ return rc;
+ }
+ header.new_instance.clear();
+
+ return write_bucket_header(hctx, &header);
+}
+
+static int rgw_guard_bucket_resharding(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ cls_rgw_guard_bucket_resharding_op op;
+
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: %s: failed to decode entry", __func__);
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
+ return rc;
+ }
+
+ if (header.resharding()) {
+ return op.ret_err;
+ }
+
+ return 0;
+}
+
+static int rgw_get_bucket_resharding(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ CLS_LOG(10, "entered %s()\n", __func__);
+ cls_rgw_get_bucket_resharding_op op;
+
+ auto in_iter = in->cbegin();
+ try {
+ decode(op, in_iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(1, "ERROR: %s: failed to decode entry", __func__);
+ return -EINVAL;
+ }
+
+ rgw_bucket_dir_header header;
+ int rc = read_bucket_header(hctx, &header);
+ if (rc < 0) {
+ CLS_LOG(1, "ERROR: %s: failed to read header", __func__);
+ return rc;
+ }
+
+ cls_rgw_get_bucket_resharding_ret op_ret;
+ op_ret.new_instance = header.new_instance;
+
+ encode(op_ret, *out);
+
+ return 0;
+}
+
+CLS_INIT(rgw)
+{
+ CLS_LOG(1, "Loaded rgw class!");
+
+ cls_handle_t h_class;
+ cls_method_handle_t h_rgw_bucket_init_index;
+ cls_method_handle_t h_rgw_bucket_set_tag_timeout;
+ cls_method_handle_t h_rgw_bucket_list;
+ cls_method_handle_t h_rgw_bucket_check_index;
+ cls_method_handle_t h_rgw_bucket_rebuild_index;
+ cls_method_handle_t h_rgw_bucket_update_stats;
+ cls_method_handle_t h_rgw_bucket_prepare_op;
+ cls_method_handle_t h_rgw_bucket_complete_op;
+ cls_method_handle_t h_rgw_bucket_link_olh;
+ cls_method_handle_t h_rgw_bucket_unlink_instance_op;
+ cls_method_handle_t h_rgw_bucket_read_olh_log;
+ cls_method_handle_t h_rgw_bucket_trim_olh_log;
+ cls_method_handle_t h_rgw_bucket_clear_olh;
+ cls_method_handle_t h_rgw_obj_remove;
+ cls_method_handle_t h_rgw_obj_store_pg_ver;
+ cls_method_handle_t h_rgw_obj_check_attrs_prefix;
+ cls_method_handle_t h_rgw_obj_check_mtime;
+ cls_method_handle_t h_rgw_bi_get_op;
+ cls_method_handle_t h_rgw_bi_put_op;
+ cls_method_handle_t h_rgw_bi_list_op;
+ cls_method_handle_t h_rgw_bi_log_list_op;
+ cls_method_handle_t h_rgw_bi_log_resync_op;
+ cls_method_handle_t h_rgw_bi_log_stop_op;
+ cls_method_handle_t h_rgw_dir_suggest_changes;
+ cls_method_handle_t h_rgw_user_usage_log_add;
+ cls_method_handle_t h_rgw_user_usage_log_read;
+ cls_method_handle_t h_rgw_user_usage_log_trim;
+ cls_method_handle_t h_rgw_usage_log_clear;
+ cls_method_handle_t h_rgw_gc_set_entry;
+ cls_method_handle_t h_rgw_gc_list;
+ cls_method_handle_t h_rgw_gc_remove;
+ cls_method_handle_t h_rgw_lc_get_entry;
+ cls_method_handle_t h_rgw_lc_set_entry;
+ cls_method_handle_t h_rgw_lc_rm_entry;
+ cls_method_handle_t h_rgw_lc_get_next_entry;
+ cls_method_handle_t h_rgw_lc_put_head;
+ cls_method_handle_t h_rgw_lc_get_head;
+ cls_method_handle_t h_rgw_lc_list_entries;
+ cls_method_handle_t h_rgw_reshard_add;
+ cls_method_handle_t h_rgw_reshard_list;
+ cls_method_handle_t h_rgw_reshard_get;
+ cls_method_handle_t h_rgw_reshard_remove;
+ cls_method_handle_t h_rgw_set_bucket_resharding;
+ cls_method_handle_t h_rgw_clear_bucket_resharding;
+ cls_method_handle_t h_rgw_guard_bucket_resharding;
+ cls_method_handle_t h_rgw_get_bucket_resharding;
+
+ cls_register(RGW_CLASS, &h_class);
+
+ /* bucket index */
+ cls_register_cxx_method(h_class, RGW_BUCKET_INIT_INDEX, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_init_index, &h_rgw_bucket_init_index);
+ cls_register_cxx_method(h_class, RGW_BUCKET_SET_TAG_TIMEOUT, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_set_tag_timeout, &h_rgw_bucket_set_tag_timeout);
+ cls_register_cxx_method(h_class, RGW_BUCKET_LIST, CLS_METHOD_RD, rgw_bucket_list, &h_rgw_bucket_list);
+ cls_register_cxx_method(h_class, RGW_BUCKET_CHECK_INDEX, CLS_METHOD_RD, rgw_bucket_check_index, &h_rgw_bucket_check_index);
+ cls_register_cxx_method(h_class, RGW_BUCKET_REBUILD_INDEX, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_rebuild_index, &h_rgw_bucket_rebuild_index);
+ cls_register_cxx_method(h_class, RGW_BUCKET_UPDATE_STATS, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_update_stats, &h_rgw_bucket_update_stats);
+ cls_register_cxx_method(h_class, RGW_BUCKET_PREPARE_OP, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_prepare_op, &h_rgw_bucket_prepare_op);
+ cls_register_cxx_method(h_class, RGW_BUCKET_COMPLETE_OP, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_complete_op, &h_rgw_bucket_complete_op);
+ cls_register_cxx_method(h_class, RGW_BUCKET_LINK_OLH, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_link_olh, &h_rgw_bucket_link_olh);
+ cls_register_cxx_method(h_class, RGW_BUCKET_UNLINK_INSTANCE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_unlink_instance, &h_rgw_bucket_unlink_instance_op);
+ cls_register_cxx_method(h_class, RGW_BUCKET_READ_OLH_LOG, CLS_METHOD_RD, rgw_bucket_read_olh_log, &h_rgw_bucket_read_olh_log);
+ cls_register_cxx_method(h_class, RGW_BUCKET_TRIM_OLH_LOG, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_trim_olh_log, &h_rgw_bucket_trim_olh_log);
+ cls_register_cxx_method(h_class, RGW_BUCKET_CLEAR_OLH, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bucket_clear_olh, &h_rgw_bucket_clear_olh);
+
+ cls_register_cxx_method(h_class, RGW_OBJ_REMOVE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_obj_remove, &h_rgw_obj_remove);
+ cls_register_cxx_method(h_class, RGW_OBJ_STORE_PG_VER, CLS_METHOD_WR, rgw_obj_store_pg_ver, &h_rgw_obj_store_pg_ver);
+ cls_register_cxx_method(h_class, RGW_OBJ_CHECK_ATTRS_PREFIX, CLS_METHOD_RD, rgw_obj_check_attrs_prefix, &h_rgw_obj_check_attrs_prefix);
+ cls_register_cxx_method(h_class, RGW_OBJ_CHECK_MTIME, CLS_METHOD_RD, rgw_obj_check_mtime, &h_rgw_obj_check_mtime);
+
+ cls_register_cxx_method(h_class, RGW_BI_GET, CLS_METHOD_RD, rgw_bi_get_op, &h_rgw_bi_get_op);
+ cls_register_cxx_method(h_class, RGW_BI_PUT, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_put_op, &h_rgw_bi_put_op);
+ cls_register_cxx_method(h_class, RGW_BI_LIST, CLS_METHOD_RD, rgw_bi_list_op, &h_rgw_bi_list_op);
+
+ cls_register_cxx_method(h_class, RGW_BI_LOG_LIST, CLS_METHOD_RD, rgw_bi_log_list, &h_rgw_bi_log_list_op);
+ cls_register_cxx_method(h_class, RGW_BI_LOG_TRIM, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_log_trim, &h_rgw_bi_log_list_op);
+ cls_register_cxx_method(h_class, RGW_DIR_SUGGEST_CHANGES, CLS_METHOD_RD | CLS_METHOD_WR, rgw_dir_suggest_changes, &h_rgw_dir_suggest_changes);
+
+ cls_register_cxx_method(h_class, RGW_BI_LOG_RESYNC, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_log_resync, &h_rgw_bi_log_resync_op);
+ cls_register_cxx_method(h_class, RGW_BI_LOG_STOP, CLS_METHOD_RD | CLS_METHOD_WR, rgw_bi_log_stop, &h_rgw_bi_log_stop_op);
+
+ /* usage logging */
+ cls_register_cxx_method(h_class, RGW_USER_USAGE_LOG_ADD, CLS_METHOD_RD | CLS_METHOD_WR, rgw_user_usage_log_add, &h_rgw_user_usage_log_add);
+ cls_register_cxx_method(h_class, RGW_USER_USAGE_LOG_READ, CLS_METHOD_RD, rgw_user_usage_log_read, &h_rgw_user_usage_log_read);
+ cls_register_cxx_method(h_class, RGW_USER_USAGE_LOG_TRIM, CLS_METHOD_RD | CLS_METHOD_WR, rgw_user_usage_log_trim, &h_rgw_user_usage_log_trim);
+ cls_register_cxx_method(h_class, RGW_USAGE_LOG_CLEAR, CLS_METHOD_WR, rgw_usage_log_clear, &h_rgw_usage_log_clear);
+
+ /* garbage collection */
+ cls_register_cxx_method(h_class, RGW_GC_SET_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_gc_set_entry, &h_rgw_gc_set_entry);
+ cls_register_cxx_method(h_class, RGW_GC_DEFER_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_gc_defer_entry, &h_rgw_gc_set_entry);
+ cls_register_cxx_method(h_class, RGW_GC_LIST, CLS_METHOD_RD, rgw_cls_gc_list, &h_rgw_gc_list);
+ cls_register_cxx_method(h_class, RGW_GC_REMOVE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_gc_remove, &h_rgw_gc_remove);
+
+ /* lifecycle bucket list */
+ cls_register_cxx_method(h_class, RGW_LC_GET_ENTRY, CLS_METHOD_RD, rgw_cls_lc_get_entry, &h_rgw_lc_get_entry);
+ cls_register_cxx_method(h_class, RGW_LC_SET_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_lc_set_entry, &h_rgw_lc_set_entry);
+ cls_register_cxx_method(h_class, RGW_LC_RM_ENTRY, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_lc_rm_entry, &h_rgw_lc_rm_entry);
+ cls_register_cxx_method(h_class, RGW_LC_GET_NEXT_ENTRY, CLS_METHOD_RD, rgw_cls_lc_get_next_entry, &h_rgw_lc_get_next_entry);
+ cls_register_cxx_method(h_class, RGW_LC_PUT_HEAD, CLS_METHOD_RD | CLS_METHOD_WR, rgw_cls_lc_put_head, &h_rgw_lc_put_head);
+ cls_register_cxx_method(h_class, RGW_LC_GET_HEAD, CLS_METHOD_RD, rgw_cls_lc_get_head, &h_rgw_lc_get_head);
+ cls_register_cxx_method(h_class, RGW_LC_LIST_ENTRIES, CLS_METHOD_RD, rgw_cls_lc_list_entries, &h_rgw_lc_list_entries);
+
+ /* resharding */
+ cls_register_cxx_method(h_class, RGW_RESHARD_ADD, CLS_METHOD_RD | CLS_METHOD_WR, rgw_reshard_add, &h_rgw_reshard_add);
+ cls_register_cxx_method(h_class, RGW_RESHARD_LIST, CLS_METHOD_RD, rgw_reshard_list, &h_rgw_reshard_list);
+ cls_register_cxx_method(h_class, RGW_RESHARD_GET, CLS_METHOD_RD, rgw_reshard_get, &h_rgw_reshard_get);
+ cls_register_cxx_method(h_class, RGW_RESHARD_REMOVE, CLS_METHOD_RD | CLS_METHOD_WR, rgw_reshard_remove, &h_rgw_reshard_remove);
+
+ /* resharding attribute */
+ cls_register_cxx_method(h_class, RGW_SET_BUCKET_RESHARDING, CLS_METHOD_RD | CLS_METHOD_WR,
+ rgw_set_bucket_resharding, &h_rgw_set_bucket_resharding);
+ cls_register_cxx_method(h_class, RGW_CLEAR_BUCKET_RESHARDING, CLS_METHOD_RD | CLS_METHOD_WR,
+ rgw_clear_bucket_resharding, &h_rgw_clear_bucket_resharding);
+ cls_register_cxx_method(h_class, RGW_GUARD_BUCKET_RESHARDING, CLS_METHOD_RD,
+ rgw_guard_bucket_resharding, &h_rgw_guard_bucket_resharding);
+ cls_register_cxx_method(h_class, RGW_GET_BUCKET_RESHARDING, CLS_METHOD_RD,
+ rgw_get_bucket_resharding, &h_rgw_get_bucket_resharding);
+
+ return;
+}
diff --git a/src/cls/rgw/cls_rgw_client.cc b/src/cls/rgw/cls_rgw_client.cc
new file mode 100644
index 000000000..cddd735c5
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_client.cc
@@ -0,0 +1,1207 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <errno.h>
+
+#include "cls/rgw/cls_rgw_const.h"
+#include "cls/rgw/cls_rgw_client.h"
+
+#include "common/debug.h"
+
+using std::list;
+using std::map;
+using std::pair;
+using std::string;
+using std::vector;
+
+using ceph::real_time;
+
+using namespace librados;
+
+const string BucketIndexShardsManager::KEY_VALUE_SEPARATOR = "#";
+const string BucketIndexShardsManager::SHARDS_SEPARATOR = ",";
+
+
+int CLSRGWConcurrentIO::operator()() {
+ int ret = 0;
+ iter = objs_container.begin();
+ for (; iter != objs_container.end() && max_aio-- > 0; ++iter) {
+ ret = issue_op(iter->first, iter->second);
+ if (ret < 0)
+ break;
+ }
+
+ int num_completions = 0, r = 0;
+ std::map<int, std::string> completed_objs;
+ std::map<int, std::string> retry_objs;
+ while (manager.wait_for_completions(valid_ret_code(), &num_completions, &r,
+ need_multiple_rounds() ? &completed_objs : nullptr,
+ !need_multiple_rounds() ? &retry_objs : nullptr)) {
+ if (r >= 0 && ret >= 0) {
+ for (; num_completions && iter != objs_container.end(); --num_completions, ++iter) {
+ int issue_ret = issue_op(iter->first, iter->second);
+ if (issue_ret < 0) {
+ ret = issue_ret;
+ break;
+ }
+ }
+ } else if (ret >= 0) {
+ ret = r;
+ }
+
+ // if we're at the end with this round, see if another round is needed
+ if (iter == objs_container.end()) {
+ if (need_multiple_rounds() && !completed_objs.empty()) {
+ // For those objects which need another round, use them to reset
+ // the container
+ reset_container(completed_objs);
+ iter = objs_container.begin();
+ } else if (! need_multiple_rounds() && !retry_objs.empty()) {
+ reset_container(retry_objs);
+ iter = objs_container.begin();
+ }
+
+ // re-issue ops if container was reset above (i.e., iter !=
+ // objs_container.end()); if it was not reset above (i.e., iter
+ // == objs_container.end()) the loop will exit immediately
+ // without iterating
+ for (; num_completions && iter != objs_container.end(); --num_completions, ++iter) {
+ int issue_ret = issue_op(iter->first, iter->second);
+ if (issue_ret < 0) {
+ ret = issue_ret;
+ break;
+ }
+ }
+ }
+ }
+
+ if (ret < 0) {
+ cleanup();
+ }
+ return ret;
+} // CLSRGWConcurrentIO::operator()()
+
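+// Illustrative caller-side sketch (not part of this patch): derived classes
+// such as CLSRGWIssueBucketIndexInit drive the loop above by supplying a
+// shard-id -> oid map plus an AIO bound and then invoking operator()().
+// Here io_ctx is assumed to be an open librados::IoCtx and the oid naming
+// is hypothetical.
+//
+//   std::map<int, std::string> bucket_objs = {
+//     {0, ".dir.<bucket-instance-id>.0"},
+//     {1, ".dir.<bucket-instance-id>.1"},
+//   };
+//   int r = CLSRGWIssueBucketIndexInit(io_ctx, bucket_objs, 8 /* max_aio */)();
+//   // r < 0 reports the first error seen while issuing or completing ops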
+
+/**
+ * This class represents the bucket index object operation callback context.
+ */
+template <typename T>
+class ClsBucketIndexOpCtx : public ObjectOperationCompletion {
+private:
+ T *data;
+ int *ret_code;
+public:
+ ClsBucketIndexOpCtx(T* _data, int *_ret_code) : data(_data), ret_code(_ret_code) { ceph_assert(data); }
+ ~ClsBucketIndexOpCtx() override {}
+ void handle_completion(int r, bufferlist& outbl) override {
+ // if successful, or we're asked for a retry, copy result into
+ // destination (*data)
+ if (r >= 0 || r == RGWBIAdvanceAndRetryError) {
+ try {
+ auto iter = outbl.cbegin();
+ decode((*data), iter);
+ } catch (ceph::buffer::error& err) {
+ r = -EIO;
+ }
+ }
+ if (ret_code) {
+ *ret_code = r;
+ }
+ }
+};
+
+void BucketIndexAioManager::do_completion(const int request_id) {
+ std::lock_guard l{lock};
+
+ auto iter = pendings.find(request_id);
+ ceph_assert(iter != pendings.end());
+ completions[request_id] = iter->second;
+ pendings.erase(iter);
+
+ // If the caller needs a list of finished objects, store them
+ // for further processing
+ auto miter = pending_objs.find(request_id);
+ if (miter != pending_objs.end()) {
+ completion_objs.emplace(request_id, miter->second);
+ pending_objs.erase(miter);
+ }
+
+ cond.notify_all();
+}
+
+bool BucketIndexAioManager::wait_for_completions(int valid_ret_code,
+ int *num_completions,
+ int *ret_code,
+ std::map<int, std::string> *completed_objs,
+ std::map<int, std::string> *retry_objs)
+{
+ std::unique_lock locker{lock};
+ if (pendings.empty() && completions.empty()) {
+ return false;
+ }
+
+ if (completions.empty()) {
+ // Wait for AIO completion
+ cond.wait(locker);
+ }
+
+ // Clear the completed AIOs
+ auto iter = completions.begin();
+ for (; iter != completions.end(); ++iter) {
+ int r = iter->second->get_return_value();
+
+ // see if we may need to copy completions or retries
+ if (completed_objs || retry_objs) {
+ auto liter = completion_objs.find(iter->first);
+ if (liter != completion_objs.end()) {
+ if (completed_objs && r == 0) { /* update list of successfully completed objs */
+ (*completed_objs)[liter->second.shard_id] = liter->second.oid;
+ }
+
+ if (r == RGWBIAdvanceAndRetryError) {
+ r = 0;
+ if (retry_objs) {
+ (*retry_objs)[liter->second.shard_id] = liter->second.oid;
+ }
+ }
+ } else {
+ // NB: should we log an error here; currently no logging
+ // context to use
+ }
+ }
+
+ if (ret_code && (r < 0 && r != valid_ret_code)) {
+ (*ret_code) = r;
+ }
+
+ iter->second->release();
+ }
+
+ if (num_completions) {
+ (*num_completions) = completions.size();
+ }
+
+ completions.clear();
+
+ return true;
+}
+
+// note: currently only called by testing code
+void cls_rgw_bucket_init_index(ObjectWriteOperation& o)
+{
+ bufferlist in;
+ o.exec(RGW_CLASS, RGW_BUCKET_INIT_INDEX, in);
+}
+
+static bool issue_bucket_index_init_op(librados::IoCtx& io_ctx,
+ const int shard_id,
+ const string& oid,
+ BucketIndexAioManager *manager) {
+ bufferlist in;
+ librados::ObjectWriteOperation op;
+ op.create(true);
+ op.exec(RGW_CLASS, RGW_BUCKET_INIT_INDEX, in);
+ return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+static bool issue_bucket_index_clean_op(librados::IoCtx& io_ctx,
+ const int shard_id,
+ const string& oid,
+ BucketIndexAioManager *manager) {
+ bufferlist in;
+ librados::ObjectWriteOperation op;
+ op.remove();
+ return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+static bool issue_bucket_set_tag_timeout_op(librados::IoCtx& io_ctx,
+ const int shard_id,
+ const string& oid,
+ uint64_t timeout,
+ BucketIndexAioManager *manager) {
+ bufferlist in;
+ rgw_cls_tag_timeout_op call;
+ call.tag_timeout = timeout;
+ encode(call, in);
+ ObjectWriteOperation op;
+ op.exec(RGW_CLASS, RGW_BUCKET_SET_TAG_TIMEOUT, in);
+ return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBucketIndexInit::issue_op(const int shard_id, const string& oid)
+{
+ return issue_bucket_index_init_op(io_ctx, shard_id, oid, &manager);
+}
+
+void CLSRGWIssueBucketIndexInit::cleanup()
+{
+ // Do best effort removal
+ for (auto citer = objs_container.begin(); citer != iter; ++citer) {
+ io_ctx.remove(citer->second);
+ }
+}
+
+int CLSRGWIssueBucketIndexClean::issue_op(const int shard_id, const string& oid)
+{
+ return issue_bucket_index_clean_op(io_ctx, shard_id, oid, &manager);
+}
+
+int CLSRGWIssueSetTagTimeout::issue_op(const int shard_id, const string& oid)
+{
+ return issue_bucket_set_tag_timeout_op(io_ctx, shard_id, oid, tag_timeout, &manager);
+}
+
+void cls_rgw_bucket_update_stats(librados::ObjectWriteOperation& o,
+ bool absolute,
+ const map<RGWObjCategory, rgw_bucket_category_stats>& stats)
+{
+ rgw_cls_bucket_update_stats_op call;
+ call.absolute = absolute;
+ call.stats = stats;
+ bufferlist in;
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_BUCKET_UPDATE_STATS, in);
+}
+
+void cls_rgw_bucket_prepare_op(ObjectWriteOperation& o, RGWModifyOp op, string& tag,
+ const cls_rgw_obj_key& key, const string& locator, bool log_op,
+ uint16_t bilog_flags, rgw_zone_set& zones_trace)
+{
+ rgw_cls_obj_prepare_op call;
+ call.op = op;
+ call.tag = tag;
+ call.key = key;
+ call.locator = locator;
+ call.log_op = log_op;
+ call.bilog_flags = bilog_flags;
+ call.zones_trace = zones_trace;
+ bufferlist in;
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_BUCKET_PREPARE_OP, in);
+}
+
+void cls_rgw_bucket_complete_op(ObjectWriteOperation& o, RGWModifyOp op, string& tag,
+ rgw_bucket_entry_ver& ver,
+ const cls_rgw_obj_key& key,
+ rgw_bucket_dir_entry_meta& dir_meta,
+ list<cls_rgw_obj_key> *remove_objs, bool log_op,
+ uint16_t bilog_flags,
+ rgw_zone_set *zones_trace)
+{
+
+ bufferlist in;
+ rgw_cls_obj_complete_op call;
+ call.op = op;
+ call.tag = tag;
+ call.key = key;
+ call.ver = ver;
+ call.meta = dir_meta;
+ call.log_op = log_op;
+ call.bilog_flags = bilog_flags;
+ if (remove_objs)
+ call.remove_objs = *remove_objs;
+ if (zones_trace) {
+ call.zones_trace = *zones_trace;
+ }
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_BUCKET_COMPLETE_OP, in);
+}
+
+void cls_rgw_bucket_list_op(librados::ObjectReadOperation& op,
+ const cls_rgw_obj_key& start_obj,
+ const std::string& filter_prefix,
+ const std::string& delimiter,
+ uint32_t num_entries,
+ bool list_versions,
+ rgw_cls_list_ret* result)
+{
+ bufferlist in;
+ rgw_cls_list_op call;
+ call.start_obj = start_obj;
+ call.filter_prefix = filter_prefix;
+ call.delimiter = delimiter;
+ call.num_entries = num_entries;
+ call.list_versions = list_versions;
+ encode(call, in);
+
+ op.exec(RGW_CLASS, RGW_BUCKET_LIST, in,
+ new ClsBucketIndexOpCtx<rgw_cls_list_ret>(result, NULL));
+}
+
+static bool issue_bucket_list_op(librados::IoCtx& io_ctx,
+ const int shard_id,
+ const std::string& oid,
+ const cls_rgw_obj_key& start_obj,
+ const std::string& filter_prefix,
+ const std::string& delimiter,
+ uint32_t num_entries,
+ bool list_versions,
+ BucketIndexAioManager *manager,
+ rgw_cls_list_ret *pdata)
+{
+ librados::ObjectReadOperation op;
+ cls_rgw_bucket_list_op(op,
+ start_obj, filter_prefix, delimiter,
+ num_entries, list_versions, pdata);
+ return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBucketList::issue_op(const int shard_id, const string& oid)
+{
+ // set the marker depending on whether we've already queried this
+ // shard and gotten an RGWBIAdvanceAndRetryError (defined
+ // constant) return value; if we have, use the marker in the return
+ // to advance the search, otherwise use the marker passed in by the
+ // caller
+ cls_rgw_obj_key marker;
+ auto iter = result.find(shard_id);
+ if (iter != result.end()) {
+ marker = iter->second.marker;
+ } else {
+ marker = start_obj;
+ }
+
+ return issue_bucket_list_op(io_ctx, shard_id, oid,
+ marker, filter_prefix, delimiter,
+ num_entries, list_versions, &manager,
+ &result[shard_id]);
+}
+
+
+void CLSRGWIssueBucketList::reset_container(std::map<int, std::string>& objs)
+{
+ objs_container.swap(objs);
+ iter = objs_container.begin();
+ objs.clear();
+}
+
+
+void cls_rgw_remove_obj(librados::ObjectWriteOperation& o, list<string>& keep_attr_prefixes)
+{
+ bufferlist in;
+ rgw_cls_obj_remove_op call;
+ call.keep_attr_prefixes = keep_attr_prefixes;
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_OBJ_REMOVE, in);
+}
+
+void cls_rgw_obj_store_pg_ver(librados::ObjectWriteOperation& o, const string& attr)
+{
+ bufferlist in;
+ rgw_cls_obj_store_pg_ver_op call;
+ call.attr = attr;
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_OBJ_STORE_PG_VER, in);
+}
+
+void cls_rgw_obj_check_attrs_prefix(librados::ObjectOperation& o, const string& prefix, bool fail_if_exist)
+{
+ bufferlist in;
+ rgw_cls_obj_check_attrs_prefix call;
+ call.check_prefix = prefix;
+ call.fail_if_exist = fail_if_exist;
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_OBJ_CHECK_ATTRS_PREFIX, in);
+}
+
+void cls_rgw_obj_check_mtime(librados::ObjectOperation& o, const real_time& mtime, bool high_precision_time, RGWCheckMTimeType type)
+{
+ bufferlist in;
+ rgw_cls_obj_check_mtime call;
+ call.mtime = mtime;
+ call.high_precision_time = high_precision_time;
+ call.type = type;
+ encode(call, in);
+ o.exec(RGW_CLASS, RGW_OBJ_CHECK_MTIME, in);
+}
+
+int cls_rgw_bi_get(librados::IoCtx& io_ctx, const string oid,
+ BIIndexType index_type, cls_rgw_obj_key& key,
+ rgw_cls_bi_entry *entry)
+{
+ bufferlist in, out;
+ rgw_cls_bi_get_op call;
+ call.key = key;
+ call.type = index_type;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_BI_GET, in, out);
+ if (r < 0)
+ return r;
+
+ rgw_cls_bi_get_ret op_ret;
+ auto iter = out.cbegin();
+ try {
+ decode(op_ret, iter);
+ } catch (ceph::buffer::error& err) {
+ return -EIO;
+ }
+
+ *entry = op_ret.entry;
+
+ return 0;
+}
+
+int cls_rgw_bi_put(librados::IoCtx& io_ctx, const string oid, rgw_cls_bi_entry& entry)
+{
+ bufferlist in, out;
+ rgw_cls_bi_put_op call;
+ call.entry = entry;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_BI_PUT, in, out);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+void cls_rgw_bi_put(ObjectWriteOperation& op, const string oid, rgw_cls_bi_entry& entry)
+{
+ bufferlist in, out;
+ rgw_cls_bi_put_op call;
+ call.entry = entry;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_BI_PUT, in);
+}
+
+/* nb: any entries passed in are replaced with the results of the cls
+ * call, so caller does not need to clear entries between calls
+ */
+int cls_rgw_bi_list(librados::IoCtx& io_ctx, const std::string& oid,
+ const std::string& name_filter, const std::string& marker, uint32_t max,
+ std::list<rgw_cls_bi_entry> *entries, bool *is_truncated)
+{
+ bufferlist in, out;
+ rgw_cls_bi_list_op call;
+ call.name_filter = name_filter;
+ call.marker = marker;
+ call.max = max;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_BI_LIST, in, out);
+ if (r < 0)
+ return r;
+
+ rgw_cls_bi_list_ret op_ret;
+ auto iter = out.cbegin();
+ try {
+ decode(op_ret, iter);
+ } catch (ceph::buffer::error& err) {
+ return -EIO;
+ }
+
+ entries->swap(op_ret.entries);
+ *is_truncated = op_ret.is_truncated;
+
+ return 0;
+}
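+
+// Illustrative usage sketch (caller-side code, not part of this patch): page
+// through one index shard, feeding the last returned key back in as the
+// marker until is_truncated comes back false. io_ctx and oid are assumed to
+// be in scope, and rgw_cls_bi_entry::idx is assumed to hold the raw index
+// key usable as a marker.
+//
+//   std::list<rgw_cls_bi_entry> entries;
+//   bool truncated = true;
+//   std::string marker; // empty marker starts from the beginning
+//   while (truncated) {
+//     int r = cls_rgw_bi_list(io_ctx, oid, "" /* name_filter */, marker,
+//                             128 /* max */, &entries, &truncated);
+//     if (r < 0)
+//       return r;
+//     if (!entries.empty())
+//       marker = entries.back().idx;
+//   }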
+
+int cls_rgw_bucket_link_olh(librados::IoCtx& io_ctx, const string& oid,
+ const cls_rgw_obj_key& key, bufferlist& olh_tag,
+ bool delete_marker, const string& op_tag, rgw_bucket_dir_entry_meta *meta,
+ uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, rgw_zone_set& zones_trace)
+{
+ librados::ObjectWriteOperation op;
+ cls_rgw_bucket_link_olh(op, key, olh_tag, delete_marker, op_tag, meta,
+ olh_epoch, unmod_since, high_precision_time, log_op,
+ zones_trace);
+
+ return io_ctx.operate(oid, &op);
+}
+
+
+void cls_rgw_bucket_link_olh(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& key,
+ bufferlist& olh_tag, bool delete_marker,
+ const string& op_tag, rgw_bucket_dir_entry_meta *meta,
+ uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, rgw_zone_set& zones_trace)
+{
+ bufferlist in, out;
+ rgw_cls_link_olh_op call;
+ call.key = key;
+ call.olh_tag = string(olh_tag.c_str(), olh_tag.length());
+ call.op_tag = op_tag;
+ call.delete_marker = delete_marker;
+ if (meta) {
+ call.meta = *meta;
+ }
+ call.olh_epoch = olh_epoch;
+ call.log_op = log_op;
+ call.unmod_since = unmod_since;
+ call.high_precision_time = high_precision_time;
+ call.zones_trace = zones_trace;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_BUCKET_LINK_OLH, in);
+}
+
+int cls_rgw_bucket_unlink_instance(librados::IoCtx& io_ctx, const string& oid,
+ const cls_rgw_obj_key& key, const string& op_tag,
+ const string& olh_tag, uint64_t olh_epoch, bool log_op, rgw_zone_set& zones_trace)
+{
+ librados::ObjectWriteOperation op;
+ cls_rgw_bucket_unlink_instance(op, key, op_tag, olh_tag, olh_epoch, log_op, zones_trace);
+ int r = io_ctx.operate(oid, &op);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+void cls_rgw_bucket_unlink_instance(librados::ObjectWriteOperation& op,
+ const cls_rgw_obj_key& key, const string& op_tag,
+ const string& olh_tag, uint64_t olh_epoch, bool log_op, rgw_zone_set& zones_trace)
+{
+ bufferlist in, out;
+ rgw_cls_unlink_instance_op call;
+ call.key = key;
+ call.op_tag = op_tag;
+ call.olh_epoch = olh_epoch;
+ call.olh_tag = olh_tag;
+ call.log_op = log_op;
+ call.zones_trace = zones_trace;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_BUCKET_UNLINK_INSTANCE, in);
+}
+
+void cls_rgw_get_olh_log(librados::ObjectReadOperation& op, const cls_rgw_obj_key& olh, uint64_t ver_marker, const string& olh_tag, rgw_cls_read_olh_log_ret& log_ret, int& op_ret)
+{
+ bufferlist in;
+ rgw_cls_read_olh_log_op call;
+ call.olh = olh;
+ call.ver_marker = ver_marker;
+ call.olh_tag = olh_tag;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_BUCKET_READ_OLH_LOG, in, new ClsBucketIndexOpCtx<rgw_cls_read_olh_log_ret>(&log_ret, &op_ret));
+}
+
+int cls_rgw_get_olh_log(IoCtx& io_ctx, string& oid, const cls_rgw_obj_key& olh, uint64_t ver_marker,
+ const string& olh_tag,
+ rgw_cls_read_olh_log_ret& log_ret)
+{
+ int op_ret = 0;
+ librados::ObjectReadOperation op;
+ cls_rgw_get_olh_log(op, olh, ver_marker, olh_tag, log_ret, op_ret);
+ int r = io_ctx.operate(oid, &op, NULL);
+ if (r < 0) {
+ return r;
+ }
+ if (op_ret < 0) {
+ return op_ret;
+ }
+
+ return r;
+}
+
+void cls_rgw_trim_olh_log(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, uint64_t ver, const string& olh_tag)
+{
+ bufferlist in;
+ rgw_cls_trim_olh_log_op call;
+ call.olh = olh;
+ call.ver = ver;
+ call.olh_tag = olh_tag;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_BUCKET_TRIM_OLH_LOG, in);
+}
+
+int cls_rgw_clear_olh(IoCtx& io_ctx, string& oid, const cls_rgw_obj_key& olh, const string& olh_tag)
+{
+ librados::ObjectWriteOperation op;
+ cls_rgw_clear_olh(op, olh, olh_tag);
+
+ return io_ctx.operate(oid, &op);
+}
+
+void cls_rgw_clear_olh(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, const string& olh_tag)
+{
+ bufferlist in;
+ rgw_cls_bucket_clear_olh_op call;
+ call.key = olh;
+ call.olh_tag = olh_tag;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_BUCKET_CLEAR_OLH, in);
+}
+
+void cls_rgw_bilog_list(librados::ObjectReadOperation& op,
+ const std::string& marker, uint32_t max,
+ cls_rgw_bi_log_list_ret *pdata, int *ret)
+{
+ cls_rgw_bi_log_list_op call;
+ call.marker = marker;
+ call.max = max;
+
+ bufferlist in;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_BI_LOG_LIST, in, new ClsBucketIndexOpCtx<cls_rgw_bi_log_list_ret>(pdata, ret));
+}
+
+static bool issue_bi_log_list_op(librados::IoCtx& io_ctx, const string& oid, const int shard_id,
+ BucketIndexShardsManager& marker_mgr, uint32_t max,
+ BucketIndexAioManager *manager,
+ cls_rgw_bi_log_list_ret *pdata)
+{
+ librados::ObjectReadOperation op;
+ cls_rgw_bilog_list(op, marker_mgr.get(shard_id, ""), max, pdata, nullptr);
+ return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBILogList::issue_op(const int shard_id, const string& oid)
+{
+ return issue_bi_log_list_op(io_ctx, oid, shard_id, marker_mgr, max, &manager, &result[shard_id]);
+}
+
+void cls_rgw_bilog_trim(librados::ObjectWriteOperation& op,
+ const std::string& start_marker,
+ const std::string& end_marker)
+{
+ cls_rgw_bi_log_trim_op call;
+ call.start_marker = start_marker;
+ call.end_marker = end_marker;
+
+ bufferlist in;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_BI_LOG_TRIM, in);
+}
+
+static bool issue_bi_log_trim(librados::IoCtx& io_ctx, const string& oid, const int shard_id,
+ BucketIndexShardsManager& start_marker_mgr,
+ BucketIndexShardsManager& end_marker_mgr, BucketIndexAioManager *manager) {
+ cls_rgw_bi_log_trim_op call;
+ librados::ObjectWriteOperation op;
+ cls_rgw_bilog_trim(op, start_marker_mgr.get(shard_id, ""),
+ end_marker_mgr.get(shard_id, ""));
+ return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBILogTrim::issue_op(const int shard_id, const string& oid)
+{
+ return issue_bi_log_trim(io_ctx, oid, shard_id, start_marker_mgr, end_marker_mgr, &manager);
+}
+
+static bool issue_bucket_check_index_op(IoCtx& io_ctx, const int shard_id, const string& oid, BucketIndexAioManager *manager,
+ rgw_cls_check_index_ret *pdata) {
+ bufferlist in;
+ librados::ObjectReadOperation op;
+ op.exec(RGW_CLASS, RGW_BUCKET_CHECK_INDEX, in, new ClsBucketIndexOpCtx<rgw_cls_check_index_ret>(
+ pdata, NULL));
+ return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBucketCheck::issue_op(int shard_id, const string& oid)
+{
+ return issue_bucket_check_index_op(io_ctx, shard_id, oid, &manager, &result[shard_id]);
+}
+
+static bool issue_bucket_rebuild_index_op(IoCtx& io_ctx, const int shard_id, const string& oid,
+ BucketIndexAioManager *manager) {
+ bufferlist in;
+ librados::ObjectWriteOperation op;
+ op.exec(RGW_CLASS, RGW_BUCKET_REBUILD_INDEX, in);
+ return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBucketRebuild::issue_op(const int shard_id, const string& oid)
+{
+ return issue_bucket_rebuild_index_op(io_ctx, shard_id, oid, &manager);
+}
+
+void cls_rgw_encode_suggestion(char op, rgw_bucket_dir_entry& dirent, bufferlist& updates)
+{
+ updates.append(op);
+ encode(dirent, updates);
+}
+
+void cls_rgw_suggest_changes(ObjectWriteOperation& o, bufferlist& updates)
+{
+ o.exec(RGW_CLASS, RGW_DIR_SUGGEST_CHANGES, updates);
+}
+
+int CLSRGWIssueGetDirHeader::issue_op(const int shard_id, const string& oid)
+{
+ cls_rgw_obj_key empty_key;
+ string empty_prefix;
+ string empty_delimiter;
+ return issue_bucket_list_op(io_ctx, shard_id, oid,
+ empty_key, empty_prefix, empty_delimiter,
+ 0, false, &manager, &result[shard_id]);
+}
+
+static bool issue_resync_bi_log(librados::IoCtx& io_ctx, const int shard_id, const string& oid, BucketIndexAioManager *manager)
+{
+ bufferlist in;
+ librados::ObjectWriteOperation op;
+ op.exec(RGW_CLASS, RGW_BI_LOG_RESYNC, in);
+ return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueResyncBucketBILog::issue_op(const int shard_id, const string& oid)
+{
+ return issue_resync_bi_log(io_ctx, shard_id, oid, &manager);
+}
+
+static bool issue_bi_log_stop(librados::IoCtx& io_ctx, const int shard_id, const string& oid, BucketIndexAioManager *manager)
+{
+ bufferlist in;
+ librados::ObjectWriteOperation op;
+ op.exec(RGW_CLASS, RGW_BI_LOG_STOP, in);
+ return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueBucketBILogStop::issue_op(const int shard_id, const string& oid)
+{
+ return issue_bi_log_stop(io_ctx, shard_id, oid, &manager);
+}
+
+class GetDirHeaderCompletion : public ObjectOperationCompletion {
+ RGWGetDirHeader_CB *ret_ctx;
+public:
+ explicit GetDirHeaderCompletion(RGWGetDirHeader_CB *_ctx) : ret_ctx(_ctx) {}
+ ~GetDirHeaderCompletion() override {
+ ret_ctx->put();
+ }
+ void handle_completion(int r, bufferlist& outbl) override {
+ rgw_cls_list_ret ret;
+ try {
+ auto iter = outbl.cbegin();
+ decode(ret, iter);
+ } catch (ceph::buffer::error& err) {
+ r = -EIO;
+ }
+
+ ret_ctx->handle_response(r, ret.dir.header);
+ }
+};
+
+int cls_rgw_get_dir_header_async(IoCtx& io_ctx, string& oid, RGWGetDirHeader_CB *ctx)
+{
+ bufferlist in, out;
+ rgw_cls_list_op call;
+ call.num_entries = 0;
+ encode(call, in);
+ ObjectReadOperation op;
+ GetDirHeaderCompletion *cb = new GetDirHeaderCompletion(ctx);
+ op.exec(RGW_CLASS, RGW_BUCKET_LIST, in, cb);
+ AioCompletion *c = librados::Rados::aio_create_completion(nullptr, nullptr);
+ int r = io_ctx.aio_operate(oid, c, &op, NULL);
+ c->release();
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int cls_rgw_usage_log_read(IoCtx& io_ctx, const string& oid, const string& user, const string& bucket,
+ uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
+ string& read_iter, map<rgw_user_bucket, rgw_usage_log_entry>& usage,
+ bool *is_truncated)
+{
+ if (is_truncated)
+ *is_truncated = false;
+
+ bufferlist in, out;
+ rgw_cls_usage_log_read_op call;
+ call.start_epoch = start_epoch;
+ call.end_epoch = end_epoch;
+ call.owner = user;
+ call.max_entries = max_entries;
+ call.bucket = bucket;
+ call.iter = read_iter;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_USER_USAGE_LOG_READ, in, out);
+ if (r < 0)
+ return r;
+
+ try {
+ rgw_cls_usage_log_read_ret result;
+ auto iter = out.cbegin();
+ decode(result, iter);
+ read_iter = result.next_iter;
+ if (is_truncated)
+ *is_truncated = result.truncated;
+
+ usage = result.usage;
+ } catch (ceph::buffer::error& e) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cls_rgw_usage_log_trim(IoCtx& io_ctx, const string& oid, const string& user, const string& bucket,
+ uint64_t start_epoch, uint64_t end_epoch)
+{
+ bufferlist in;
+ rgw_cls_usage_log_trim_op call;
+ call.start_epoch = start_epoch;
+ call.end_epoch = end_epoch;
+ call.user = user;
+ call.bucket = bucket;
+ encode(call, in);
+
+ bool done = false;
+ do {
+ ObjectWriteOperation op;
+ op.exec(RGW_CLASS, RGW_USER_USAGE_LOG_TRIM, in);
+ int r = io_ctx.operate(oid, &op);
+ if (r == -ENODATA)
+ done = true;
+ else if (r < 0)
+ return r;
+ } while (!done);
+
+ return 0;
+}
+
+void cls_rgw_usage_log_trim(librados::ObjectWriteOperation& op, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch)
+{
+ bufferlist in;
+ rgw_cls_usage_log_trim_op call;
+ call.start_epoch = start_epoch;
+ call.end_epoch = end_epoch;
+ call.user = user;
+ call.bucket = bucket;
+ encode(call, in);
+
+ op.exec(RGW_CLASS, RGW_USER_USAGE_LOG_TRIM, in);
+}
+
+void cls_rgw_usage_log_clear(ObjectWriteOperation& op)
+{
+ bufferlist in;
+ op.exec(RGW_CLASS, RGW_USAGE_LOG_CLEAR, in);
+}
+
+void cls_rgw_usage_log_add(ObjectWriteOperation& op, rgw_usage_log_info& info)
+{
+ bufferlist in;
+ rgw_cls_usage_log_add_op call;
+ call.info = info;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_USER_USAGE_LOG_ADD, in);
+}
+
+/* garbage collection */
+
+void cls_rgw_gc_set_entry(ObjectWriteOperation& op, uint32_t expiration_secs, cls_rgw_gc_obj_info& info)
+{
+ bufferlist in;
+ cls_rgw_gc_set_entry_op call;
+ call.expiration_secs = expiration_secs;
+ call.info = info;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_GC_SET_ENTRY, in);
+}
+
+void cls_rgw_gc_defer_entry(ObjectWriteOperation& op, uint32_t expiration_secs, const string& tag)
+{
+ bufferlist in;
+ cls_rgw_gc_defer_entry_op call;
+ call.expiration_secs = expiration_secs;
+ call.tag = tag;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_GC_DEFER_ENTRY, in);
+}
+
+int cls_rgw_gc_list(IoCtx& io_ctx, string& oid, string& marker, uint32_t max, bool expired_only,
+ list<cls_rgw_gc_obj_info>& entries, bool *truncated, string& next_marker)
+{
+ bufferlist in, out;
+ cls_rgw_gc_list_op call;
+ call.marker = marker;
+ call.max = max;
+ call.expired_only = expired_only;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_GC_LIST, in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_gc_list_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (ceph::buffer::error& err) {
+ return -EIO;
+ }
+
+ entries.swap(ret.entries);
+
+ if (truncated)
+ *truncated = ret.truncated;
+ next_marker = std::move(ret.next_marker);
+ return r;
+}
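+
+// Illustrative usage sketch (not part of this patch): callers typically page
+// through the GC entries by feeding next_marker back in until *truncated is
+// false. io_ctx and oid are assumed to be in scope; the max and expired_only
+// values below are arbitrary.
+//
+//   std::list<cls_rgw_gc_obj_info> entries;
+//   bool truncated = true;
+//   std::string marker, next_marker;
+//   while (truncated) {
+//     int r = cls_rgw_gc_list(io_ctx, oid, marker, 100 /* max */,
+//                             true /* expired_only */, entries, &truncated,
+//                             next_marker);
+//     if (r < 0)
+//       return r;
+//     marker = next_marker;
+//   }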
+
+void cls_rgw_gc_remove(librados::ObjectWriteOperation& op, const vector<string>& tags)
+{
+ bufferlist in;
+ cls_rgw_gc_remove_op call;
+ call.tags = tags;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_GC_REMOVE, in);
+}
+
+int cls_rgw_lc_get_head(IoCtx& io_ctx, const string& oid, cls_rgw_lc_obj_head& head)
+{
+ bufferlist in, out;
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_GET_HEAD, in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_lc_get_head_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (ceph::buffer::error& err) {
+ return -EIO;
+ }
+ head = ret.head;
+
+ return r;
+}
+
+int cls_rgw_lc_put_head(IoCtx& io_ctx, const string& oid, cls_rgw_lc_obj_head& head)
+{
+ bufferlist in, out;
+ cls_rgw_lc_put_head_op call;
+ call.head = head;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_PUT_HEAD, in, out);
+ return r;
+}
+
+int cls_rgw_lc_get_next_entry(IoCtx& io_ctx, const string& oid, string& marker,
+ cls_rgw_lc_entry& entry)
+{
+ bufferlist in, out;
+ cls_rgw_lc_get_next_entry_op call;
+ call.marker = marker;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_GET_NEXT_ENTRY, in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_lc_get_next_entry_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (ceph::buffer::error& err) {
+ return -EIO;
+ }
+ entry = ret.entry;
+
+ return r;
+}
+
+int cls_rgw_lc_rm_entry(IoCtx& io_ctx, const string& oid,
+ const cls_rgw_lc_entry& entry)
+{
+ bufferlist in, out;
+ cls_rgw_lc_rm_entry_op call;
+ call.entry = entry;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_RM_ENTRY, in, out);
+ return r;
+}
+
+int cls_rgw_lc_set_entry(IoCtx& io_ctx, const string& oid,
+ const cls_rgw_lc_entry& entry)
+{
+ bufferlist in, out;
+ cls_rgw_lc_set_entry_op call;
+ call.entry = entry;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_SET_ENTRY, in, out);
+ return r;
+}
+
+int cls_rgw_lc_get_entry(IoCtx& io_ctx, const string& oid,
+ const std::string& marker, cls_rgw_lc_entry& entry)
+{
+ bufferlist in, out;
+ cls_rgw_lc_get_entry_op call{marker};
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_GET_ENTRY, in, out);
+
+ if (r < 0) {
+ return r;
+ }
+
+ cls_rgw_lc_get_entry_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (ceph::buffer::error& err) {
+ return -EIO;
+ }
+
+ entry = std::move(ret.entry);
+ return r;
+}
+
+int cls_rgw_lc_list(IoCtx& io_ctx, const string& oid,
+ const string& marker,
+ uint32_t max_entries,
+ vector<cls_rgw_lc_entry>& entries)
+{
+ bufferlist in, out;
+ cls_rgw_lc_list_entries_op op;
+
+ entries.clear();
+
+ op.marker = marker;
+ op.max_entries = max_entries;
+
+ encode(op, in);
+
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_LC_LIST_ENTRIES, in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_lc_list_entries_ret ret;
+ try {
+ auto iter = out.cbegin();
+ decode(ret, iter);
+ } catch (ceph::buffer::error& err) {
+ return -EIO;
+ }
+
+ std::sort(std::begin(ret.entries), std::end(ret.entries),
+ [](const cls_rgw_lc_entry& a, const cls_rgw_lc_entry& b)
+ { return a.bucket < b.bucket; });
+ entries = std::move(ret.entries);
+ return r;
+}
+
+void cls_rgw_reshard_add(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry)
+{
+ bufferlist in;
+ cls_rgw_reshard_add_op call;
+ call.entry = entry;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_RESHARD_ADD, in);
+}
+
+int cls_rgw_reshard_list(librados::IoCtx& io_ctx, const string& oid, string& marker, uint32_t max,
+ list<cls_rgw_reshard_entry>& entries, bool* is_truncated)
+{
+ bufferlist in, out;
+ cls_rgw_reshard_list_op call;
+ call.marker = marker;
+ call.max = max;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_RESHARD_LIST, in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_reshard_list_ret op_ret;
+ auto iter = out.cbegin();
+ try {
+ decode(op_ret, iter);
+ } catch (ceph::buffer::error& err) {
+ return -EIO;
+ }
+
+ entries.swap(op_ret.entries);
+ *is_truncated = op_ret.is_truncated;
+
+ return 0;
+}
+
+int cls_rgw_reshard_get(librados::IoCtx& io_ctx, const string& oid, cls_rgw_reshard_entry& entry)
+{
+ bufferlist in, out;
+ cls_rgw_reshard_get_op call;
+ call.entry = entry;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_RESHARD_GET, in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_reshard_get_ret op_ret;
+ auto iter = out.cbegin();
+ try {
+ decode(op_ret, iter);
+ } catch (ceph::buffer::error& err) {
+ return -EIO;
+ }
+
+ entry = op_ret.entry;
+
+ return 0;
+}
+
+void cls_rgw_reshard_remove(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry)
+{
+ bufferlist in;
+ cls_rgw_reshard_remove_op call;
+ call.tenant = entry.tenant;
+ call.bucket_name = entry.bucket_name;
+ call.bucket_id = entry.bucket_id;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_RESHARD_REMOVE, in);
+}
+
+int cls_rgw_set_bucket_resharding(librados::IoCtx& io_ctx, const string& oid,
+ const cls_rgw_bucket_instance_entry& entry)
+{
+ bufferlist in, out;
+ cls_rgw_set_bucket_resharding_op call;
+ call.entry = entry;
+ encode(call, in);
+ return io_ctx.exec(oid, RGW_CLASS, RGW_SET_BUCKET_RESHARDING, in, out);
+}
+
+int cls_rgw_clear_bucket_resharding(librados::IoCtx& io_ctx, const string& oid)
+{
+ bufferlist in, out;
+ cls_rgw_clear_bucket_resharding_op call;
+ encode(call, in);
+ return io_ctx.exec(oid, RGW_CLASS, RGW_CLEAR_BUCKET_RESHARDING, in, out);
+}
+
+int cls_rgw_get_bucket_resharding(librados::IoCtx& io_ctx, const string& oid,
+ cls_rgw_bucket_instance_entry *entry)
+{
+ bufferlist in, out;
+ cls_rgw_get_bucket_resharding_op call;
+ encode(call, in);
+ int r = io_ctx.exec(oid, RGW_CLASS, RGW_GET_BUCKET_RESHARDING, in, out);
+ if (r < 0)
+ return r;
+
+ cls_rgw_get_bucket_resharding_ret op_ret;
+ auto iter = out.cbegin();
+ try {
+ decode(op_ret, iter);
+ } catch (ceph::buffer::error& err) {
+ return -EIO;
+ }
+
+ *entry = op_ret.new_instance;
+
+ return 0;
+}
+
+void cls_rgw_guard_bucket_resharding(librados::ObjectOperation& op, int ret_err)
+{
+ bufferlist in, out;
+ cls_rgw_guard_bucket_resharding_op call;
+ call.ret_err = ret_err;
+ encode(call, in);
+ op.exec(RGW_CLASS, RGW_GUARD_BUCKET_RESHARDING, in);
+}
+
+static bool issue_set_bucket_resharding(librados::IoCtx& io_ctx,
+ const int shard_id, const string& oid,
+ const cls_rgw_bucket_instance_entry& entry,
+ BucketIndexAioManager *manager) {
+ bufferlist in;
+ cls_rgw_set_bucket_resharding_op call;
+ call.entry = entry;
+ encode(call, in);
+ librados::ObjectWriteOperation op;
+ op.assert_exists(); // the shard must exist; if not fail rather than recreate
+ op.exec(RGW_CLASS, RGW_SET_BUCKET_RESHARDING, in);
+ return manager->aio_operate(io_ctx, shard_id, oid, &op);
+}
+
+int CLSRGWIssueSetBucketResharding::issue_op(const int shard_id, const string& oid)
+{
+ return issue_set_bucket_resharding(io_ctx, shard_id, oid, entry, &manager);
+}
diff --git a/src/cls/rgw/cls_rgw_client.h b/src/cls/rgw/cls_rgw_client.h
new file mode 100644
index 000000000..78c559d71
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_client.h
@@ -0,0 +1,635 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_RGW_CLIENT_H
+#define CEPH_CLS_RGW_CLIENT_H
+
+#include "include/str_list.h"
+#include "include/rados/librados.hpp"
+#include "cls_rgw_ops.h"
+#include "cls_rgw_const.h"
+#include "common/RefCountedObj.h"
+#include "common/strtol.h"
+#include "include/compat.h"
+#include "common/ceph_time.h"
+#include "common/ceph_mutex.h"
+
+
+// Forward declaration
+class BucketIndexAioManager;
+/*
+ * Bucket index AIO request argument; this is used to pass an argument
+ * to the callback.
+ */
+struct BucketIndexAioArg : public RefCountedObject {
+ BucketIndexAioArg(int _id, BucketIndexAioManager* _manager) :
+ id(_id), manager(_manager) {}
+ int id;
+ BucketIndexAioManager* manager;
+};
+
+/*
+ * This class manages AIO completions. It is not completely
+ * thread-safe; methods like *get_next_request_id* are not thread-safe
+ * and are expected to be called from within one thread.
+ */
+class BucketIndexAioManager {
+public:
+
+ // allows us to reaccess the shard id and shard's oid during and
+ // after the asynchronous call is made
+ struct RequestObj {
+ int shard_id;
+ std::string oid;
+
+ RequestObj(int _shard_id, const std::string& _oid) :
+ shard_id(_shard_id), oid(_oid)
+ {/* empty */}
+ };
+
+
+private:
+ // NB: the following 4 maps use the request_id as the key; this
+ // is not the same as the shard_id!
+ std::map<int, librados::AioCompletion*> pendings;
+ std::map<int, librados::AioCompletion*> completions;
+ std::map<int, const RequestObj> pending_objs;
+ std::map<int, const RequestObj> completion_objs;
+
+ int next = 0;
+ ceph::mutex lock = ceph::make_mutex("BucketIndexAioManager::lock");
+ ceph::condition_variable cond;
+ /*
+ * Callback implementation for AIO request.
+ */
+ static void bucket_index_op_completion_cb(void* cb, void* arg) {
+ BucketIndexAioArg* cb_arg = (BucketIndexAioArg*) arg;
+ cb_arg->manager->do_completion(cb_arg->id);
+ cb_arg->put();
+ }
+
+ /*
+ * Get next request ID. This method is not thread-safe.
+ *
+ * Return next request ID.
+ */
+ int get_next_request_id() { return next++; }
+
+ /*
+ * Add a new pending AIO completion instance.
+ *
+ * @param request_id - the request ID.
+ * @param completion - the AIO completion instance.
+ * @param shard_id - the bucket index shard the request targets.
+ * @param oid - the object id the request operates on; tracked so that
+ * completed and retried objects can be reported back per shard.
+ */
+ void add_pending(int request_id, librados::AioCompletion* completion, const int shard_id, const std::string& oid) {
+ pendings[request_id] = completion;
+ pending_objs.emplace(request_id, RequestObj(shard_id, oid));
+ }
+
+public:
+ /*
+ * Create a new instance.
+ */
+ BucketIndexAioManager() = default;
+
+ /*
+ * Do completion for the given AIO request.
+ */
+ void do_completion(int request_id);
+
+ /*
+ * Wait for AIO completions.
+ *
+ * valid_ret_code - an AIO return code that is not treated as an error.
+ * num_completions - number of completions processed in this round.
+ * ret_code - return code of a failed AIO.
+ * completed_objs - optional map of shard id to oid for requests that completed successfully.
+ * retry_objs - optional map of shard id to oid for requests that need to be retried.
+ *
+ * Return false if there is no pending AIO, true otherwise.
+ */
+ bool wait_for_completions(int valid_ret_code,
+ int *num_completions = nullptr,
+ int *ret_code = nullptr,
+ std::map<int, std::string> *completed_objs = nullptr,
+ std::map<int, std::string> *retry_objs = nullptr);
+
+ /**
+ * Do aio read operation.
+ */
+ bool aio_operate(librados::IoCtx& io_ctx, const int shard_id, const std::string& oid, librados::ObjectReadOperation *op) {
+ std::lock_guard l{lock};
+ const int request_id = get_next_request_id();
+ BucketIndexAioArg *arg = new BucketIndexAioArg(request_id, this);
+ librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, bucket_index_op_completion_cb);
+ int r = io_ctx.aio_operate(oid, c, (librados::ObjectReadOperation*)op, NULL);
+ if (r >= 0) {
+ add_pending(arg->id, c, shard_id, oid);
+ } else {
+ arg->put();
+ c->release();
+ }
+ return r;
+ }
+
+ /**
+ * Do aio write operation.
+ */
+ bool aio_operate(librados::IoCtx& io_ctx, const int shard_id, const std::string& oid, librados::ObjectWriteOperation *op) {
+ std::lock_guard l{lock};
+ const int request_id = get_next_request_id();
+ BucketIndexAioArg *arg = new BucketIndexAioArg(request_id, this);
+ librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, bucket_index_op_completion_cb);
+ int r = io_ctx.aio_operate(oid, c, (librados::ObjectWriteOperation*)op);
+ if (r >= 0) {
+ add_pending(arg->id, c, shard_id, oid);
+ } else {
+ arg->put();
+ c->release();
+ }
+ return r;
+ }
+};
+
+class RGWGetDirHeader_CB : public RefCountedObject {
+public:
+ ~RGWGetDirHeader_CB() override {}
+ virtual void handle_response(int r, rgw_bucket_dir_header& header) = 0;
+};
+
+class BucketIndexShardsManager {
+private:
+ // Per shard setting manager, for example, marker.
+ std::map<int, std::string> value_by_shards;
+public:
+ const static std::string KEY_VALUE_SEPARATOR;
+ const static std::string SHARDS_SEPARATOR;
+
+ void add(int shard, const std::string& value) {
+ value_by_shards[shard] = value;
+ }
+
+ const std::string& get(int shard, const std::string& default_value) const {
+ auto iter = value_by_shards.find(shard);
+ return (iter == value_by_shards.end() ? default_value : iter->second);
+ }
+
+ const std::map<int, std::string>& get() const {
+ return value_by_shards;
+ }
+ std::map<int, std::string>& get() {
+ return value_by_shards;
+ }
+
+ bool empty() const {
+ return value_by_shards.empty();
+ }
+
+ void to_string(std::string *out) const {
+ if (!out) {
+ return;
+ }
+ out->clear();
+ for (auto iter = value_by_shards.begin();
+ iter != value_by_shards.end(); ++iter) {
+ if (out->length()) {
+ // Not the first item, append a separator first
+ out->append(SHARDS_SEPARATOR);
+ }
+ char buf[16];
+ snprintf(buf, sizeof(buf), "%d", iter->first);
+ out->append(buf);
+ out->append(KEY_VALUE_SEPARATOR);
+ out->append(iter->second);
+ }
+ }
+
+ static bool is_shards_marker(const std::string& marker) {
+ return marker.find(KEY_VALUE_SEPARATOR) != std::string::npos;
+ }
+
+ /*
+ * convert from std::string. There are two forms the std::string can take:
+ *
+ * 1. Single shard, no shard id specified, e.g. 000001.23.1
+ *
+ * for this case, if passed shard_id >= 0, use this shard id, otherwise assume that it's a
+ * bucket with no shards.
+ *
+ * 2. One or more shards, shard id specified for each shard, e.g., 0#00002.12,1#00003.23.2
+ *
+ */
+ int from_string(const std::string& composed_marker, int shard_id) {
+ value_by_shards.clear();
+ std::vector<std::string> shards;
+ get_str_vec(composed_marker, SHARDS_SEPARATOR.c_str(), shards);
+ if (shards.size() > 1 && shard_id >= 0) {
+ return -EINVAL;
+ }
+ for (auto iter = shards.begin(); iter != shards.end(); ++iter) {
+ size_t pos = iter->find(KEY_VALUE_SEPARATOR);
+ if (pos == std::string::npos) {
+ if (!value_by_shards.empty()) {
+ return -EINVAL;
+ }
+ if (shard_id < 0) {
+ add(0, *iter);
+ } else {
+ add(shard_id, *iter);
+ }
+ return 0;
+ }
+ std::string shard_str = iter->substr(0, pos);
+ std::string err;
+ int shard = (int)strict_strtol(shard_str.c_str(), 10, &err);
+ if (!err.empty()) {
+ return -EINVAL;
+ }
+ add(shard, iter->substr(pos + 1));
+ }
+ return 0;
+ }
+
+ // trim the '<shard-id>#' prefix from a single shard marker if present
+ static std::string get_shard_marker(const std::string& marker) {
+ auto p = marker.find(KEY_VALUE_SEPARATOR);
+ if (p == marker.npos) {
+ return marker;
+ }
+ return marker.substr(p + 1);
+ }
+};
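+
+// Usage sketch (illustrative only, not part of the upstream header): round-tripping
+// a composed multi-shard marker with BucketIndexShardsManager, using the two string
+// forms documented above. The marker values are made-up placeholders.
+//
+//   BucketIndexShardsManager mgr;
+//   mgr.add(0, "00002.12");
+//   mgr.add(1, "00003.23.2");
+//   std::string composed;
+//   mgr.to_string(&composed);                  // e.g. "0#00002.12,1#00003.23.2"
+//
+//   BucketIndexShardsManager parsed;
+//   int r = parsed.from_string(composed, -1);  // shard_id < 0: per-shard form
+//   // on success, parsed.get(1, "") == "00003.23.2"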
+
+/* bucket index */
+void cls_rgw_bucket_init_index(librados::ObjectWriteOperation& o);
+
+class CLSRGWConcurrentIO {
+protected:
+ librados::IoCtx& io_ctx;
+
+ // map of shard # to oid; the shards remaining to be processed
+ std::map<int, std::string>& objs_container;
+ // iterator to work through objs_container
+ std::map<int, std::string>::iterator iter;
+
+ uint32_t max_aio;
+ BucketIndexAioManager manager;
+
+ virtual int issue_op(int shard_id, const std::string& oid) = 0;
+
+ virtual void cleanup() {}
+ virtual int valid_ret_code() { return 0; }
+ // Return true if multiple rounds of ops might be needed; this happens when
+ // an op needs to be re-sent until a certain return code is received.
+ virtual bool need_multiple_rounds() { return false; }
+ // Add a new object to the container.
+ virtual void add_object(int shard, const std::string& oid) {}
+ virtual void reset_container(std::map<int, std::string>& objs) {}
+
+public:
+
+ CLSRGWConcurrentIO(librados::IoCtx& ioc,
+ std::map<int, std::string>& _objs_container,
+ uint32_t _max_aio) :
+ io_ctx(ioc), objs_container(_objs_container), max_aio(_max_aio)
+ {}
+
+ virtual ~CLSRGWConcurrentIO()
+ {}
+
+ int operator()();
+}; // class CLSRGWConcurrentIO
+
+
+class CLSRGWIssueBucketIndexInit : public CLSRGWConcurrentIO {
+protected:
+ int issue_op(int shard_id, const std::string& oid) override;
+ int valid_ret_code() override { return -EEXIST; }
+ void cleanup() override;
+public:
+ CLSRGWIssueBucketIndexInit(librados::IoCtx& ioc,
+ std::map<int, std::string>& _bucket_objs,
+ uint32_t _max_aio) :
+ CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio) {}
+};
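+
+// Usage sketch (illustrative only): driving the concurrent-IO helper. The caller
+// builds the shard -> oid map, constructs a concrete issuer such as
+// CLSRGWIssueBucketIndexInit above, and invokes operator()() to fan the op out to
+// all shards with at most max_aio requests in flight (-EEXIST from a shard is
+// treated as success via valid_ret_code()). The oids are placeholders.
+//
+//   std::map<int, std::string> bucket_objs = {
+//     {0, "<bucket index shard oid 0>"},
+//     {1, "<bucket index shard oid 1>"},
+//   };
+//   int r = CLSRGWIssueBucketIndexInit(io_ctx, bucket_objs, 8 /* max_aio */)();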
+
+
+class CLSRGWIssueBucketIndexClean : public CLSRGWConcurrentIO {
+protected:
+ int issue_op(int shard_id, const std::string& oid) override;
+ int valid_ret_code() override {
+ return -ENOENT;
+ }
+
+public:
+ CLSRGWIssueBucketIndexClean(librados::IoCtx& ioc,
+ std::map<int, std::string>& _bucket_objs,
+ uint32_t _max_aio) :
+ CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio)
+ {}
+};
+
+
+class CLSRGWIssueSetTagTimeout : public CLSRGWConcurrentIO {
+ uint64_t tag_timeout;
+protected:
+ int issue_op(int shard_id, const std::string& oid) override;
+public:
+ CLSRGWIssueSetTagTimeout(librados::IoCtx& ioc, std::map<int, std::string>& _bucket_objs,
+ uint32_t _max_aio, uint64_t _tag_timeout) :
+ CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), tag_timeout(_tag_timeout) {}
+};
+
+void cls_rgw_bucket_update_stats(librados::ObjectWriteOperation& o,
+ bool absolute,
+ const std::map<RGWObjCategory, rgw_bucket_category_stats>& stats);
+
+void cls_rgw_bucket_prepare_op(librados::ObjectWriteOperation& o, RGWModifyOp op, std::string& tag,
+ const cls_rgw_obj_key& key, const std::string& locator, bool log_op,
+ uint16_t bilog_op, rgw_zone_set& zones_trace);
+
+void cls_rgw_bucket_complete_op(librados::ObjectWriteOperation& o, RGWModifyOp op, std::string& tag,
+ rgw_bucket_entry_ver& ver,
+ const cls_rgw_obj_key& key,
+ rgw_bucket_dir_entry_meta& dir_meta,
+ std::list<cls_rgw_obj_key> *remove_objs, bool log_op,
+ uint16_t bilog_op, rgw_zone_set *zones_trace);
+
+void cls_rgw_remove_obj(librados::ObjectWriteOperation& o, std::list<std::string>& keep_attr_prefixes);
+void cls_rgw_obj_store_pg_ver(librados::ObjectWriteOperation& o, const std::string& attr);
+void cls_rgw_obj_check_attrs_prefix(librados::ObjectOperation& o, const std::string& prefix, bool fail_if_exist);
+void cls_rgw_obj_check_mtime(librados::ObjectOperation& o, const ceph::real_time& mtime, bool high_precision_time, RGWCheckMTimeType type);
+
+int cls_rgw_bi_get(librados::IoCtx& io_ctx, const std::string oid,
+ BIIndexType index_type, cls_rgw_obj_key& key,
+ rgw_cls_bi_entry *entry);
+int cls_rgw_bi_put(librados::IoCtx& io_ctx, const std::string oid, rgw_cls_bi_entry& entry);
+void cls_rgw_bi_put(librados::ObjectWriteOperation& op, const std::string oid, rgw_cls_bi_entry& entry);
+int cls_rgw_bi_list(librados::IoCtx& io_ctx, const std::string& oid,
+ const std::string& name, const std::string& marker, uint32_t max,
+ std::list<rgw_cls_bi_entry> *entries, bool *is_truncated);
+
+
+void cls_rgw_bucket_link_olh(librados::ObjectWriteOperation& op,
+ const cls_rgw_obj_key& key, ceph::buffer::list& olh_tag,
+ bool delete_marker, const std::string& op_tag, rgw_bucket_dir_entry_meta *meta,
+ uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, rgw_zone_set& zones_trace);
+void cls_rgw_bucket_unlink_instance(librados::ObjectWriteOperation& op,
+ const cls_rgw_obj_key& key, const std::string& op_tag,
+ const std::string& olh_tag, uint64_t olh_epoch, bool log_op, rgw_zone_set& zones_trace);
+void cls_rgw_get_olh_log(librados::ObjectReadOperation& op, const cls_rgw_obj_key& olh, uint64_t ver_marker, const std::string& olh_tag, rgw_cls_read_olh_log_ret& log_ret, int& op_ret);
+void cls_rgw_trim_olh_log(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, uint64_t ver, const std::string& olh_tag);
+void cls_rgw_clear_olh(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, const std::string& olh_tag);
+
+// These overloads, which call io_ctx.operate() directly, should not be used from within
+// rgw; there, use the overloads without io_ctx.operate() and submit them via rgw_rados_operate().
+#ifndef CLS_CLIENT_HIDE_IOCTX
+int cls_rgw_bucket_link_olh(librados::IoCtx& io_ctx, const std::string& oid,
+ const cls_rgw_obj_key& key, ceph::buffer::list& olh_tag,
+ bool delete_marker, const std::string& op_tag, rgw_bucket_dir_entry_meta *meta,
+ uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, rgw_zone_set& zones_trace);
+int cls_rgw_bucket_unlink_instance(librados::IoCtx& io_ctx, const std::string& oid,
+ const cls_rgw_obj_key& key, const std::string& op_tag,
+ const std::string& olh_tag, uint64_t olh_epoch, bool log_op, rgw_zone_set& zones_trace);
+int cls_rgw_get_olh_log(librados::IoCtx& io_ctx, std::string& oid, const cls_rgw_obj_key& olh, uint64_t ver_marker,
+ const std::string& olh_tag, rgw_cls_read_olh_log_ret& log_ret);
+int cls_rgw_clear_olh(librados::IoCtx& io_ctx, std::string& oid, const cls_rgw_obj_key& olh, const std::string& olh_tag);
+int cls_rgw_usage_log_trim(librados::IoCtx& io_ctx, const std::string& oid, const std::string& user, const std::string& bucket,
+ uint64_t start_epoch, uint64_t end_epoch);
+#endif
+
+
+/**
+ * List the bucket starting from the given object and filter prefix.
+ * NOTE: this class issues a listing request against each bucket index shard
+ * given in the *oids* std::map (shard id -> bucket index object id), which must
+ * be populated by the caller; the per-shard results are collected in
+ * *list_results*, keyed by shard id.
+ *
+ * io_ctx - IO context for rados.
+ * start_obj - marker for the listing.
+ * filter_prefix - filter prefix.
+ * num_entries - number of entries to request from each shard object (note the
+ * total number of entries returned depends on the number of shards).
+ * list_results - the list results, keyed by shard id.
+ * max_aio - the maximum number of AIO (for throttling).
+ *
+ * Return 0 on success, a failure code otherwise.
+ * A usage sketch follows the class declaration below.
+*/
+
+class CLSRGWIssueBucketList : public CLSRGWConcurrentIO {
+ cls_rgw_obj_key start_obj;
+ std::string filter_prefix;
+ std::string delimiter;
+ uint32_t num_entries;
+ bool list_versions;
+ std::map<int, rgw_cls_list_ret>& result; // shard_id -> return value
+
+protected:
+ int issue_op(int shard_id, const std::string& oid) override;
+ void reset_container(std::map<int, std::string>& objs) override;
+
+public:
+ CLSRGWIssueBucketList(librados::IoCtx& io_ctx,
+ const cls_rgw_obj_key& _start_obj,
+ const std::string& _filter_prefix,
+ const std::string& _delimiter,
+ uint32_t _num_entries,
+ bool _list_versions,
+ std::map<int, std::string>& oids, // shard_id -> shard_oid
+ // shard_id -> return value
+ std::map<int, rgw_cls_list_ret>& list_results,
+ uint32_t max_aio) :
+ CLSRGWConcurrentIO(io_ctx, oids, max_aio),
+ start_obj(_start_obj), filter_prefix(_filter_prefix), delimiter(_delimiter),
+ num_entries(_num_entries), list_versions(_list_versions),
+ result(list_results)
+ {}
+};
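+
+// Usage sketch (illustrative only): listing across shards with CLSRGWIssueBucketList.
+// The caller pre-populates the shard oid map and gets one rgw_cls_list_ret per shard;
+// merging the per-shard results into a single listing is left to the caller. All
+// values below are placeholders.
+//
+//   std::map<int, std::string> oids = { /* shard_id -> shard oid */ };
+//   std::map<int, rgw_cls_list_ret> results;
+//   cls_rgw_obj_key start;                     // default (empty) key: list from the beginning
+//   int r = CLSRGWIssueBucketList(io_ctx, start, "" /* filter_prefix */,
+//                                 "" /* delimiter */, 100 /* num_entries */,
+//                                 false /* list_versions */, oids, results,
+//                                 8 /* max_aio */)();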
+
+void cls_rgw_bucket_list_op(librados::ObjectReadOperation& op,
+ const cls_rgw_obj_key& start_obj,
+ const std::string& filter_prefix,
+ const std::string& delimiter,
+ uint32_t num_entries,
+ bool list_versions,
+ rgw_cls_list_ret* result);
+
+void cls_rgw_bilog_list(librados::ObjectReadOperation& op,
+ const std::string& marker, uint32_t max,
+ cls_rgw_bi_log_list_ret *pdata, int *ret = nullptr);
+
+class CLSRGWIssueBILogList : public CLSRGWConcurrentIO {
+ std::map<int, cls_rgw_bi_log_list_ret>& result;
+ BucketIndexShardsManager& marker_mgr;
+ uint32_t max;
+protected:
+ int issue_op(int shard_id, const std::string& oid) override;
+public:
+ CLSRGWIssueBILogList(librados::IoCtx& io_ctx, BucketIndexShardsManager& _marker_mgr, uint32_t _max,
+ std::map<int, std::string>& oids,
+ std::map<int, cls_rgw_bi_log_list_ret>& bi_log_lists, uint32_t max_aio) :
+ CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(bi_log_lists),
+ marker_mgr(_marker_mgr), max(_max) {}
+};
+
+void cls_rgw_bilog_trim(librados::ObjectWriteOperation& op,
+ const std::string& start_marker,
+ const std::string& end_marker);
+
+class CLSRGWIssueBILogTrim : public CLSRGWConcurrentIO {
+ BucketIndexShardsManager& start_marker_mgr;
+ BucketIndexShardsManager& end_marker_mgr;
+protected:
+ int issue_op(int shard_id, const std::string& oid) override;
+ // Trim until -ENODATA is returned.
+ int valid_ret_code() override { return -ENODATA; }
+ bool need_multiple_rounds() override { return true; }
+ void add_object(int shard, const std::string& oid) override { objs_container[shard] = oid; }
+ void reset_container(std::map<int, std::string>& objs) override {
+ objs_container.swap(objs);
+ iter = objs_container.begin();
+ objs.clear();
+ }
+public:
+ CLSRGWIssueBILogTrim(librados::IoCtx& io_ctx, BucketIndexShardsManager& _start_marker_mgr,
+ BucketIndexShardsManager& _end_marker_mgr, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
+ CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio),
+ start_marker_mgr(_start_marker_mgr), end_marker_mgr(_end_marker_mgr) {}
+};
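+
+// Usage sketch (illustrative only): trimming the bilog on every shard. Start/end
+// markers are carried per shard by BucketIndexShardsManager; the marker strings and
+// oids below are placeholders. Because need_multiple_rounds() is true, operator()()
+// keeps re-issuing the trim on each shard until that shard returns -ENODATA.
+//
+//   BucketIndexShardsManager start_mgr, end_mgr;
+//   start_mgr.from_string("0#00001.1,1#00001.7", -1);
+//   end_mgr.from_string("0#00009.5,1#00008.2", -1);
+//   std::map<int, std::string> bucket_objs = { /* shard_id -> shard oid */ };
+//   int r = CLSRGWIssueBILogTrim(io_ctx, start_mgr, end_mgr, bucket_objs,
+//                                8 /* max_aio */)();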
+
+/**
+ * Check the bucket index.
+ *
+ * io_ctx - IO context for rados.
+ * bucket_objs_ret - check result for all shards.
+ * max_aio - the maximum number of AIO (for throttling).
+ *
+ * Return 0 on success, a failure code otherwise.
+ */
+class CLSRGWIssueBucketCheck : public CLSRGWConcurrentIO {
+ std::map<int, rgw_cls_check_index_ret>& result;
+protected:
+ int issue_op(int shard_id, const std::string& oid) override;
+public:
+ CLSRGWIssueBucketCheck(librados::IoCtx& ioc, std::map<int, std::string>& oids,
+ std::map<int, rgw_cls_check_index_ret>& bucket_objs_ret,
+ uint32_t _max_aio) :
+ CLSRGWConcurrentIO(ioc, oids, _max_aio), result(bucket_objs_ret) {}
+};
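+
+// Usage sketch (illustrative only): checking every index shard and comparing the
+// stored header against the header recalculated by the cls backend.
+//
+//   std::map<int, std::string> oids = { /* shard_id -> shard oid */ };
+//   std::map<int, rgw_cls_check_index_ret> check_results;
+//   int r = CLSRGWIssueBucketCheck(io_ctx, oids, check_results, 8 /* max_aio */)();
+//   // on success, each check_results[shard] holds existing_header and calculated_header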
+
+class CLSRGWIssueBucketRebuild : public CLSRGWConcurrentIO {
+protected:
+ int issue_op(int shard_id, const std::string& oid) override;
+public:
+ CLSRGWIssueBucketRebuild(librados::IoCtx& io_ctx, std::map<int, std::string>& bucket_objs,
+ uint32_t max_aio) : CLSRGWConcurrentIO(io_ctx, bucket_objs, max_aio) {}
+};
+
+class CLSRGWIssueGetDirHeader : public CLSRGWConcurrentIO {
+ std::map<int, rgw_cls_list_ret>& result;
+protected:
+ int issue_op(int shard_id, const std::string& oid) override;
+public:
+ CLSRGWIssueGetDirHeader(librados::IoCtx& io_ctx, std::map<int, std::string>& oids, std::map<int, rgw_cls_list_ret>& dir_headers,
+ uint32_t max_aio) :
+ CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(dir_headers) {}
+};
+
+class CLSRGWIssueSetBucketResharding : public CLSRGWConcurrentIO {
+ cls_rgw_bucket_instance_entry entry;
+protected:
+ int issue_op(int shard_id, const std::string& oid) override;
+public:
+ CLSRGWIssueSetBucketResharding(librados::IoCtx& ioc, std::map<int, std::string>& _bucket_objs,
+ const cls_rgw_bucket_instance_entry& _entry,
+ uint32_t _max_aio) : CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), entry(_entry) {}
+};
+
+class CLSRGWIssueResyncBucketBILog : public CLSRGWConcurrentIO {
+protected:
+ int issue_op(int shard_id, const std::string& oid) override;
+public:
+ CLSRGWIssueResyncBucketBILog(librados::IoCtx& io_ctx, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
+ CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
+};
+
+class CLSRGWIssueBucketBILogStop : public CLSRGWConcurrentIO {
+protected:
+ int issue_op(int shard_id, const std::string& oid) override;
+public:
+ CLSRGWIssueBucketBILogStop(librados::IoCtx& io_ctx, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
+ CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
+};
+
+int cls_rgw_get_dir_header_async(librados::IoCtx& io_ctx, std::string& oid, RGWGetDirHeader_CB *ctx);
+
+void cls_rgw_encode_suggestion(char op, rgw_bucket_dir_entry& dirent, ceph::buffer::list& updates);
+
+void cls_rgw_suggest_changes(librados::ObjectWriteOperation& o, ceph::buffer::list& updates);
+
+/* usage logging */
+// These overloads, which call io_ctx.operate() directly, should not be used from within
+// rgw; there, use the overloads without io_ctx.operate() and submit them via rgw_rados_operate().
+#ifndef CLS_CLIENT_HIDE_IOCTX
+int cls_rgw_usage_log_read(librados::IoCtx& io_ctx, const std::string& oid, const std::string& user, const std::string& bucket,
+ uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, std::string& read_iter,
+ std::map<rgw_user_bucket, rgw_usage_log_entry>& usage, bool *is_truncated);
+#endif
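+
+// Usage sketch (illustrative only, assuming the declaration above and that the helper
+// advances read_iter from the reply's next_iter): paging through usage records by
+// feeding read_iter back into each subsequent call until is_truncated is false.
+// io_ctx, oid, user, bucket and the epoch bounds are assumed to be in scope.
+//
+//   std::map<rgw_user_bucket, rgw_usage_log_entry> usage;
+//   std::string iter;                          // empty on the first call
+//   bool truncated = true;
+//   while (truncated) {
+//     std::map<rgw_user_bucket, rgw_usage_log_entry> chunk;
+//     int r = cls_rgw_usage_log_read(io_ctx, oid, user, bucket, start_epoch, end_epoch,
+//                                    1000 /* max_entries */, iter, chunk, &truncated);
+//     if (r < 0) break;
+//     usage.insert(chunk.begin(), chunk.end());
+//   }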
+
+void cls_rgw_usage_log_trim(librados::ObjectWriteOperation& op, const std::string& user, const std::string& bucket, uint64_t start_epoch, uint64_t end_epoch);
+
+void cls_rgw_usage_log_clear(librados::ObjectWriteOperation& op);
+void cls_rgw_usage_log_add(librados::ObjectWriteOperation& op, rgw_usage_log_info& info);
+
+/* garbage collection */
+void cls_rgw_gc_set_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, cls_rgw_gc_obj_info& info);
+void cls_rgw_gc_defer_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, const std::string& tag);
+void cls_rgw_gc_remove(librados::ObjectWriteOperation& op, const std::vector<std::string>& tags);
+
+// These overloads, which call io_ctx.operate() directly, should not be used from within
+// rgw; there, use the overloads without io_ctx.operate() and submit them via rgw_rados_operate().
+#ifndef CLS_CLIENT_HIDE_IOCTX
+int cls_rgw_gc_list(librados::IoCtx& io_ctx, std::string& oid, std::string& marker, uint32_t max, bool expired_only,
+ std::list<cls_rgw_gc_obj_info>& entries, bool *truncated, std::string& next_marker);
+#endif
+
+/* lifecycle */
+// These overloads, which call io_ctx.operate() directly, should not be used from within
+// rgw; there, use the overloads without io_ctx.operate() and submit them via rgw_rados_operate().
+#ifndef CLS_CLIENT_HIDE_IOCTX
+int cls_rgw_lc_get_head(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_lc_obj_head& head);
+int cls_rgw_lc_put_head(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_lc_obj_head& head);
+int cls_rgw_lc_get_next_entry(librados::IoCtx& io_ctx, const std::string& oid, string& marker, cls_rgw_lc_entry& entry);
+int cls_rgw_lc_rm_entry(librados::IoCtx& io_ctx, const std::string& oid, const cls_rgw_lc_entry& entry);
+int cls_rgw_lc_set_entry(librados::IoCtx& io_ctx, const std::string& oid, const cls_rgw_lc_entry& entry);
+int cls_rgw_lc_get_entry(librados::IoCtx& io_ctx, const std::string& oid, const std::string& marker, cls_rgw_lc_entry& entry);
+int cls_rgw_lc_list(librados::IoCtx& io_ctx, const std::string& oid,
+ const std::string& marker, uint32_t max_entries,
+ vector<cls_rgw_lc_entry>& entries);
+#endif
+
+/* resharding */
+void cls_rgw_reshard_add(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry);
+void cls_rgw_reshard_remove(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry);
+// These overloads, which call io_ctx.operate() directly, should not be used from within
+// rgw; there, use the overloads without io_ctx.operate() and submit them via rgw_rados_operate().
+#ifndef CLS_CLIENT_HIDE_IOCTX
+int cls_rgw_reshard_list(librados::IoCtx& io_ctx, const std::string& oid, std::string& marker, uint32_t max,
+ std::list<cls_rgw_reshard_entry>& entries, bool* is_truncated);
+int cls_rgw_reshard_get(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_reshard_entry& entry);
+#endif
+
+/* resharding attribute on bucket index shard headers */
+void cls_rgw_guard_bucket_resharding(librados::ObjectOperation& op, int ret_err);
+// These overloads, which call io_ctx.operate() directly, should not be used from within
+// rgw; there, use the overloads without io_ctx.operate() and submit them via rgw_rados_operate().
+#ifndef CLS_CLIENT_HIDE_IOCTX
+int cls_rgw_set_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid,
+ const cls_rgw_bucket_instance_entry& entry);
+int cls_rgw_clear_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid);
+int cls_rgw_get_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid,
+ cls_rgw_bucket_instance_entry *entry);
+#endif
+
+#endif
diff --git a/src/cls/rgw/cls_rgw_const.h b/src/cls/rgw/cls_rgw_const.h
new file mode 100644
index 000000000..61f06eac9
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_const.h
@@ -0,0 +1,80 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_RGW_CONST_H
+#define CEPH_CLS_RGW_CONST_H
+
+#define RGW_CLASS "rgw"
+
+/* Special error code returned by cls bucket list operation if it was
+ * unable to skip past enough not-visible entries to return any
+ * entries in the call. */
+constexpr int RGWBIAdvanceAndRetryError = -EFBIG;
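+
+/* Caller-side note (illustrative, not upstream code): as the name suggests, this error
+ * is not meant to be fatal; the caller is expected to advance its listing position and
+ * retry rather than surface the error, roughly:
+ *
+ *   int r = <issue bucket_list op>;            // hypothetical call
+ *   while (r == RGWBIAdvanceAndRetryError) {
+ *     r = <retry the bucket_list op>;          // continue from the advanced position
+ *   }
+ */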
+
+/* bucket index */
+#define RGW_BUCKET_INIT_INDEX "bucket_init_index"
+
+
+#define RGW_BUCKET_SET_TAG_TIMEOUT "bucket_set_tag_timeout"
+#define RGW_BUCKET_LIST "bucket_list"
+#define RGW_BUCKET_CHECK_INDEX "bucket_check_index"
+#define RGW_BUCKET_REBUILD_INDEX "bucket_rebuild_index"
+#define RGW_BUCKET_UPDATE_STATS "bucket_update_stats"
+#define RGW_BUCKET_PREPARE_OP "bucket_prepare_op"
+#define RGW_BUCKET_COMPLETE_OP "bucket_complete_op"
+#define RGW_BUCKET_LINK_OLH "bucket_link_olh"
+#define RGW_BUCKET_UNLINK_INSTANCE "bucket_unlink_instance"
+#define RGW_BUCKET_READ_OLH_LOG "bucket_read_olh_log"
+#define RGW_BUCKET_TRIM_OLH_LOG "bucket_trim_olh_log"
+#define RGW_BUCKET_CLEAR_OLH "bucket_clear_olh"
+
+#define RGW_OBJ_REMOVE "obj_remove"
+#define RGW_OBJ_STORE_PG_VER "obj_store_pg_ver"
+#define RGW_OBJ_CHECK_ATTRS_PREFIX "obj_check_attrs_prefix"
+#define RGW_OBJ_CHECK_MTIME "obj_check_mtime"
+
+#define RGW_BI_GET "bi_get"
+#define RGW_BI_PUT "bi_put"
+#define RGW_BI_LIST "bi_list"
+
+#define RGW_BI_LOG_LIST "bi_log_list"
+#define RGW_BI_LOG_TRIM "bi_log_trim"
+#define RGW_DIR_SUGGEST_CHANGES "dir_suggest_changes"
+
+#define RGW_BI_LOG_RESYNC "bi_log_resync"
+#define RGW_BI_LOG_STOP "bi_log_stop"
+
+/* usage logging */
+#define RGW_USER_USAGE_LOG_ADD "user_usage_log_add"
+#define RGW_USER_USAGE_LOG_READ "user_usage_log_read"
+#define RGW_USER_USAGE_LOG_TRIM "user_usage_log_trim"
+#define RGW_USAGE_LOG_CLEAR "usage_log_clear"
+
+/* garbage collection */
+#define RGW_GC_SET_ENTRY "gc_set_entry"
+#define RGW_GC_DEFER_ENTRY "gc_defer_entry"
+#define RGW_GC_LIST "gc_list"
+#define RGW_GC_REMOVE "gc_remove"
+
+/* lifecycle bucket list */
+#define RGW_LC_GET_ENTRY "lc_get_entry"
+#define RGW_LC_SET_ENTRY "lc_set_entry"
+#define RGW_LC_RM_ENTRY "lc_rm_entry"
+#define RGW_LC_GET_NEXT_ENTRY "lc_get_next_entry"
+#define RGW_LC_PUT_HEAD "lc_put_head"
+#define RGW_LC_GET_HEAD "lc_get_head"
+#define RGW_LC_LIST_ENTRIES "lc_list_entries"
+
+/* resharding */
+#define RGW_RESHARD_ADD "reshard_add"
+#define RGW_RESHARD_LIST "reshard_list"
+#define RGW_RESHARD_GET "reshard_get"
+#define RGW_RESHARD_REMOVE "reshard_remove"
+
+/* resharding attribute */
+#define RGW_SET_BUCKET_RESHARDING "set_bucket_resharding"
+#define RGW_CLEAR_BUCKET_RESHARDING "clear_bucket_resharding"
+#define RGW_GUARD_BUCKET_RESHARDING "guard_bucket_resharding"
+#define RGW_GET_BUCKET_RESHARDING "get_bucket_resharding"
+
+#endif
diff --git a/src/cls/rgw/cls_rgw_ops.cc b/src/cls/rgw/cls_rgw_ops.cc
new file mode 100644
index 000000000..d779ea567
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_ops.cc
@@ -0,0 +1,547 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "cls/rgw/cls_rgw_ops.h"
+
+#include "common/Formatter.h"
+#include "common/ceph_json.h"
+#include "include/utime.h"
+
+using std::list;
+using std::map;
+
+using ceph::Formatter;
+
+void rgw_cls_tag_timeout_op::dump(Formatter *f) const
+{
+ f->dump_int("tag_timeout", tag_timeout);
+}
+
+void rgw_cls_tag_timeout_op::generate_test_instances(list<rgw_cls_tag_timeout_op*>& ls)
+{
+ ls.push_back(new rgw_cls_tag_timeout_op);
+ ls.push_back(new rgw_cls_tag_timeout_op);
+ ls.back()->tag_timeout = 23323;
+}
+
+void cls_rgw_gc_set_entry_op::dump(Formatter *f) const
+{
+ f->dump_unsigned("expiration_secs", expiration_secs);
+ f->open_object_section("obj_info");
+ info.dump(f);
+ f->close_section();
+}
+
+void cls_rgw_gc_set_entry_op::generate_test_instances(list<cls_rgw_gc_set_entry_op*>& ls)
+{
+ ls.push_back(new cls_rgw_gc_set_entry_op);
+ ls.push_back(new cls_rgw_gc_set_entry_op);
+ ls.back()->expiration_secs = 123;
+}
+
+void cls_rgw_gc_defer_entry_op::dump(Formatter *f) const
+{
+ f->dump_unsigned("expiration_secs", expiration_secs);
+ f->dump_string("tag", tag);
+}
+
+void cls_rgw_gc_defer_entry_op::generate_test_instances(list<cls_rgw_gc_defer_entry_op*>& ls)
+{
+ ls.push_back(new cls_rgw_gc_defer_entry_op);
+ ls.push_back(new cls_rgw_gc_defer_entry_op);
+ ls.back()->expiration_secs = 123;
+ ls.back()->tag = "footag";
+}
+
+void cls_rgw_gc_list_op::dump(Formatter *f) const
+{
+ f->dump_string("marker", marker);
+ f->dump_unsigned("max", max);
+ f->dump_bool("expired_only", expired_only);
+}
+
+void cls_rgw_gc_list_op::generate_test_instances(list<cls_rgw_gc_list_op*>& ls)
+{
+ ls.push_back(new cls_rgw_gc_list_op);
+ ls.push_back(new cls_rgw_gc_list_op);
+ ls.back()->marker = "mymarker";
+ ls.back()->max = 2312;
+}
+
+void cls_rgw_gc_list_ret::dump(Formatter *f) const
+{
+ encode_json("entries", entries, f);
+ f->dump_string("next_marker", next_marker);
+ f->dump_int("truncated", (int)truncated);
+}
+
+void cls_rgw_gc_list_ret::generate_test_instances(list<cls_rgw_gc_list_ret*>& ls)
+{
+ ls.push_back(new cls_rgw_gc_list_ret);
+ ls.push_back(new cls_rgw_gc_list_ret);
+ ls.back()->entries.push_back(cls_rgw_gc_obj_info());
+ ls.back()->truncated = true;
+}
+
+
+void cls_rgw_gc_remove_op::dump(Formatter *f) const
+{
+ encode_json("tags", tags, f);
+}
+
+void cls_rgw_gc_remove_op::generate_test_instances(list<cls_rgw_gc_remove_op*>& ls)
+{
+ ls.push_back(new cls_rgw_gc_remove_op);
+ ls.push_back(new cls_rgw_gc_remove_op);
+ ls.back()->tags.push_back("tag1");
+ ls.back()->tags.push_back("tag2");
+}
+
+void rgw_cls_obj_prepare_op::generate_test_instances(list<rgw_cls_obj_prepare_op*>& o)
+{
+ rgw_cls_obj_prepare_op *op = new rgw_cls_obj_prepare_op;
+ op->op = CLS_RGW_OP_ADD;
+ op->key.name = "name";
+ op->tag = "tag";
+ op->locator = "locator";
+ o.push_back(op);
+ o.push_back(new rgw_cls_obj_prepare_op);
+}
+
+void rgw_cls_obj_prepare_op::dump(Formatter *f) const
+{
+ f->dump_int("op", op);
+ f->dump_string("name", key.name);
+ f->dump_string("tag", tag);
+ f->dump_string("locator", locator);
+ f->dump_bool("log_op", log_op);
+ f->dump_int("bilog_flags", bilog_flags);
+ encode_json("zones_trace", zones_trace, f);
+}
+
+void rgw_cls_obj_complete_op::generate_test_instances(list<rgw_cls_obj_complete_op*>& o)
+{
+ rgw_cls_obj_complete_op *op = new rgw_cls_obj_complete_op;
+ op->op = CLS_RGW_OP_DEL;
+ op->key.name = "name";
+ op->locator = "locator";
+ op->ver.pool = 2;
+ op->ver.epoch = 100;
+ op->tag = "tag";
+
+ list<rgw_bucket_dir_entry_meta *> l;
+ rgw_bucket_dir_entry_meta::generate_test_instances(l);
+ auto iter = l.begin();
+ op->meta = *(*iter);
+
+ o.push_back(op);
+
+ o.push_back(new rgw_cls_obj_complete_op);
+}
+
+void rgw_cls_obj_complete_op::dump(Formatter *f) const
+{
+ f->dump_int("op", (int)op);
+ f->dump_string("name", key.name);
+ f->dump_string("instance", key.instance);
+ f->dump_string("locator", locator);
+ f->open_object_section("ver");
+ ver.dump(f);
+ f->close_section();
+ f->open_object_section("meta");
+ meta.dump(f);
+ f->close_section();
+ f->dump_string("tag", tag);
+ f->dump_bool("log_op", log_op);
+ f->dump_int("bilog_flags", bilog_flags);
+ encode_json("zones_trace", zones_trace, f);
+}
+
+void rgw_cls_link_olh_op::generate_test_instances(list<rgw_cls_link_olh_op*>& o)
+{
+ rgw_cls_link_olh_op *op = new rgw_cls_link_olh_op;
+ op->key.name = "name";
+ op->olh_tag = "olh_tag";
+ op->delete_marker = true;
+ op->op_tag = "op_tag";
+ op->olh_epoch = 123;
+ list<rgw_bucket_dir_entry_meta *> l;
+ rgw_bucket_dir_entry_meta::generate_test_instances(l);
+ auto iter = l.begin();
+ op->meta = *(*iter);
+ op->log_op = true;
+
+ o.push_back(op);
+
+ o.push_back(new rgw_cls_link_olh_op);
+}
+
+void rgw_cls_link_olh_op::dump(Formatter *f) const
+{
+ encode_json("key", key, f);
+ encode_json("olh_tag", olh_tag, f);
+ encode_json("delete_marker", delete_marker, f);
+ encode_json("op_tag", op_tag, f);
+ encode_json("meta", meta, f);
+ encode_json("olh_epoch", olh_epoch, f);
+ encode_json("log_op", log_op, f);
+ encode_json("bilog_flags", (uint32_t)bilog_flags, f);
+ utime_t ut(unmod_since);
+ encode_json("unmod_since", ut, f);
+ encode_json("high_precision_time", high_precision_time, f);
+ encode_json("zones_trace", zones_trace, f);
+}
+
+void rgw_cls_unlink_instance_op::generate_test_instances(list<rgw_cls_unlink_instance_op*>& o)
+{
+ rgw_cls_unlink_instance_op *op = new rgw_cls_unlink_instance_op;
+ op->key.name = "name";
+ op->op_tag = "op_tag";
+ op->olh_epoch = 124;
+ op->log_op = true;
+
+ o.push_back(op);
+
+ o.push_back(new rgw_cls_unlink_instance_op);
+}
+
+void rgw_cls_unlink_instance_op::dump(Formatter *f) const
+{
+ encode_json("key", key, f);
+ encode_json("op_tag", op_tag, f);
+ encode_json("olh_epoch", olh_epoch, f);
+ encode_json("log_op", log_op, f);
+ encode_json("bilog_flags", (uint32_t)bilog_flags, f);
+ encode_json("zones_trace", zones_trace, f);
+}
+
+void rgw_cls_read_olh_log_op::generate_test_instances(list<rgw_cls_read_olh_log_op*>& o)
+{
+ rgw_cls_read_olh_log_op *op = new rgw_cls_read_olh_log_op;
+ op->olh.name = "name";
+ op->ver_marker = 123;
+ op->olh_tag = "olh_tag";
+
+ o.push_back(op);
+
+ o.push_back(new rgw_cls_read_olh_log_op);
+}
+
+void rgw_cls_read_olh_log_op::dump(Formatter *f) const
+{
+ encode_json("olh", olh, f);
+ encode_json("ver_marker", ver_marker, f);
+ encode_json("olh_tag", olh_tag, f);
+}
+
+void rgw_cls_read_olh_log_ret::generate_test_instances(list<rgw_cls_read_olh_log_ret*>& o)
+{
+ rgw_cls_read_olh_log_ret *r = new rgw_cls_read_olh_log_ret;
+ r->is_truncated = true;
+ list<rgw_bucket_olh_log_entry *> l;
+ rgw_bucket_olh_log_entry::generate_test_instances(l);
+ auto iter = l.begin();
+ r->log[1].push_back(*(*iter));
+
+ o.push_back(r);
+
+ o.push_back(new rgw_cls_read_olh_log_ret);
+}
+
+void rgw_cls_read_olh_log_ret::dump(Formatter *f) const
+{
+ encode_json("log", log, f);
+ encode_json("is_truncated", is_truncated, f);
+}
+
+void rgw_cls_trim_olh_log_op::generate_test_instances(list<rgw_cls_trim_olh_log_op*>& o)
+{
+ rgw_cls_trim_olh_log_op *op = new rgw_cls_trim_olh_log_op;
+ op->olh.name = "olh.name";
+ op->ver = 100;
+ op->olh_tag = "olh_tag";
+
+ o.push_back(op);
+
+ o.push_back(new rgw_cls_trim_olh_log_op);
+}
+
+void rgw_cls_trim_olh_log_op::dump(Formatter *f) const
+{
+ encode_json("olh", olh, f);
+ encode_json("ver", ver, f);
+ encode_json("olh_tag", olh_tag, f);
+}
+
+void rgw_cls_bucket_clear_olh_op::generate_test_instances(list<rgw_cls_bucket_clear_olh_op *>& o)
+{
+
+ rgw_cls_bucket_clear_olh_op *op = new rgw_cls_bucket_clear_olh_op;
+ op->key.name = "key.name";
+ op->olh_tag = "olh_tag";
+
+ o.push_back(op);
+ o.push_back(new rgw_cls_bucket_clear_olh_op);
+}
+
+void rgw_cls_bucket_clear_olh_op::dump(Formatter *f) const
+{
+ encode_json("key", key, f);
+ encode_json("olh_tag", olh_tag, f);
+}
+
+void rgw_cls_list_op::generate_test_instances(list<rgw_cls_list_op*>& o)
+{
+ rgw_cls_list_op *op = new rgw_cls_list_op;
+ op->start_obj.name = "start_obj";
+ op->num_entries = 100;
+ op->filter_prefix = "filter_prefix";
+ o.push_back(op);
+ o.push_back(new rgw_cls_list_op);
+}
+
+void rgw_cls_list_op::dump(Formatter *f) const
+{
+ f->dump_string("start_obj", start_obj.name);
+ f->dump_unsigned("num_entries", num_entries);
+}
+
+void rgw_cls_list_ret::generate_test_instances(list<rgw_cls_list_ret*>& o)
+{
+ list<rgw_bucket_dir *> l;
+ rgw_bucket_dir::generate_test_instances(l);
+ for (auto iter = l.begin(); iter != l.end(); ++iter) {
+ rgw_bucket_dir *d = *iter;
+
+ rgw_cls_list_ret *ret = new rgw_cls_list_ret;
+ ret->dir = *d;
+ ret->is_truncated = true;
+
+ o.push_back(ret);
+
+ delete d;
+ }
+
+ o.push_back(new rgw_cls_list_ret);
+}
+
+void rgw_cls_list_ret::dump(Formatter *f) const
+{
+ f->open_object_section("dir");
+ dir.dump(f);
+ f->close_section();
+ f->dump_int("is_truncated", (int)is_truncated);
+}
+
+void rgw_cls_check_index_ret::generate_test_instances(list<rgw_cls_check_index_ret*>& o)
+{
+ list<rgw_bucket_dir_header *> h;
+ rgw_bucket_dir_header::generate_test_instances(h);
+ rgw_cls_check_index_ret *r = new rgw_cls_check_index_ret;
+ r->existing_header = *(h.front());
+ r->calculated_header = *(h.front());
+ o.push_back(r);
+
+ for (auto iter = h.begin(); iter != h.end(); ++iter) {
+ delete *iter;
+ }
+ o.push_back(new rgw_cls_check_index_ret);
+}
+
+void rgw_cls_check_index_ret::dump(Formatter *f) const
+{
+ encode_json("existing_header", existing_header, f);
+ encode_json("calculated_header", calculated_header, f);
+}
+
+void rgw_cls_bucket_update_stats_op::generate_test_instances(list<rgw_cls_bucket_update_stats_op*>& o)
+{
+ rgw_cls_bucket_update_stats_op *r = new rgw_cls_bucket_update_stats_op;
+ r->absolute = true;
+ rgw_bucket_category_stats& s = r->stats[RGWObjCategory::None];
+ s.total_size = 1;
+ s.total_size_rounded = 4096;
+ s.num_entries = 1;
+ o.push_back(r);
+
+ o.push_back(new rgw_cls_bucket_update_stats_op);
+}
+
+void rgw_cls_bucket_update_stats_op::dump(Formatter *f) const
+{
+ encode_json("absolute", absolute, f);
+ map<int, rgw_bucket_category_stats> s;
+ for (auto& entry : stats) {
+ s[(int)entry.first] = entry.second;
+ }
+ encode_json("stats", s, f);
+}
+
+void cls_rgw_bi_log_list_op::dump(Formatter *f) const
+{
+ f->dump_string("marker", marker);
+ f->dump_unsigned("max", max);
+}
+
+void cls_rgw_bi_log_list_op::generate_test_instances(list<cls_rgw_bi_log_list_op*>& ls)
+{
+ ls.push_back(new cls_rgw_bi_log_list_op);
+ ls.push_back(new cls_rgw_bi_log_list_op);
+ ls.back()->marker = "mark";
+ ls.back()->max = 123;
+}
+
+void cls_rgw_bi_log_trim_op::dump(Formatter *f) const
+{
+ f->dump_string("start_marker", start_marker);
+ f->dump_string("end_marker", end_marker);
+}
+
+void cls_rgw_bi_log_trim_op::generate_test_instances(list<cls_rgw_bi_log_trim_op*>& ls)
+{
+ ls.push_back(new cls_rgw_bi_log_trim_op);
+ ls.push_back(new cls_rgw_bi_log_trim_op);
+ ls.back()->start_marker = "foo";
+ ls.back()->end_marker = "bar";
+}
+
+void cls_rgw_bi_log_list_ret::dump(Formatter *f) const
+{
+ encode_json("entries", entries, f);
+ f->dump_unsigned("truncated", (int)truncated);
+}
+
+void cls_rgw_bi_log_list_ret::generate_test_instances(list<cls_rgw_bi_log_list_ret*>& ls)
+{
+ ls.push_back(new cls_rgw_bi_log_list_ret);
+ ls.push_back(new cls_rgw_bi_log_list_ret);
+ ls.back()->entries.push_back(rgw_bi_log_entry());
+ ls.back()->truncated = true;
+}
+
+void cls_rgw_reshard_add_op::generate_test_instances(list<cls_rgw_reshard_add_op*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_add_op);
+ ls.push_back(new cls_rgw_reshard_add_op);
+ list<cls_rgw_reshard_entry *> l;
+ cls_rgw_reshard_entry::generate_test_instances(l);
+ auto iter = l.begin();
+ ls.back()->entry = *(*iter);
+}
+
+void cls_rgw_reshard_add_op::dump(Formatter *f) const
+{
+ encode_json("entry", entry, f);
+}
+
+void cls_rgw_reshard_list_op::generate_test_instances(list<cls_rgw_reshard_list_op*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_list_op);
+ ls.push_back(new cls_rgw_reshard_list_op);
+ ls.back()->max = 1000;
+ ls.back()->marker = "foo";
+}
+
+void cls_rgw_reshard_list_op::dump(Formatter *f) const
+{
+ encode_json("max", max, f);
+ encode_json("marker", marker, f);
+}
+
+void cls_rgw_reshard_list_ret::generate_test_instances(list<cls_rgw_reshard_list_ret*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_list_ret);
+ ls.push_back(new cls_rgw_reshard_list_ret);
+ ls.back()->entries.push_back(cls_rgw_reshard_entry());
+ ls.back()->is_truncated = true;
+}
+
+void cls_rgw_reshard_list_ret::dump(Formatter *f) const
+{
+ encode_json("entries", entries, f);
+ encode_json("is_truncated", is_truncated, f);
+}
+
+void cls_rgw_reshard_get_op::generate_test_instances(list<cls_rgw_reshard_get_op*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_get_op);
+ ls.push_back(new cls_rgw_reshard_get_op);
+}
+
+void cls_rgw_reshard_get_op::dump(Formatter *f) const
+{
+ encode_json("entry", entry, f);
+}
+
+void cls_rgw_reshard_get_ret::generate_test_instances(list<cls_rgw_reshard_get_ret*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_get_ret);
+ ls.push_back(new cls_rgw_reshard_get_ret);
+}
+
+void cls_rgw_reshard_get_ret::dump(Formatter *f) const
+{
+ encode_json("entry", entry, f);
+}
+
+void cls_rgw_reshard_remove_op::generate_test_instances(list<cls_rgw_reshard_remove_op*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_remove_op);
+ ls.push_back(new cls_rgw_reshard_remove_op);
+ ls.back()->bucket_name = "foo";
+ ls.back()->bucket_id = "bucket_id";
+}
+
+void cls_rgw_reshard_remove_op::dump(Formatter *f) const
+{
+ encode_json("bucket_name", bucket_name, f);
+ encode_json("bucket_id", bucket_name, f);
+}
+
+
+void cls_rgw_set_bucket_resharding_op::generate_test_instances(
+ list<cls_rgw_set_bucket_resharding_op*>& ls)
+{
+ ls.push_back(new cls_rgw_set_bucket_resharding_op);
+ ls.push_back(new cls_rgw_set_bucket_resharding_op);
+}
+
+void cls_rgw_set_bucket_resharding_op::dump(Formatter *f) const
+{
+ encode_json("entry", entry, f);
+}
+
+void cls_rgw_clear_bucket_resharding_op::generate_test_instances(
+ list<cls_rgw_clear_bucket_resharding_op*>& ls)
+{
+ ls.push_back(new cls_rgw_clear_bucket_resharding_op);
+ ls.push_back(new cls_rgw_clear_bucket_resharding_op);
+}
+
+void cls_rgw_clear_bucket_resharding_op::dump(Formatter *f) const
+{
+}
+
+void cls_rgw_guard_bucket_resharding_op::generate_test_instances(
+ list<cls_rgw_guard_bucket_resharding_op*>& ls)
+{
+ ls.push_back(new cls_rgw_guard_bucket_resharding_op);
+ ls.push_back(new cls_rgw_guard_bucket_resharding_op);
+}
+
+void cls_rgw_guard_bucket_resharding_op::dump(Formatter *f) const
+{
+ encode_json("ret_err", ret_err, f);
+}
+
+
+void cls_rgw_get_bucket_resharding_op::generate_test_instances(
+ list<cls_rgw_get_bucket_resharding_op*>& ls)
+{
+ ls.push_back(new cls_rgw_get_bucket_resharding_op);
+ ls.push_back(new cls_rgw_get_bucket_resharding_op);
+}
+
+void cls_rgw_get_bucket_resharding_op::dump(Formatter *f) const
+{
+}
diff --git a/src/cls/rgw/cls_rgw_ops.h b/src/cls/rgw/cls_rgw_ops.h
new file mode 100644
index 000000000..a25483bb8
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_ops.h
@@ -0,0 +1,1504 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_CLS_RGW_OPS_H
+#define CEPH_CLS_RGW_OPS_H
+
+#include "cls/rgw/cls_rgw_types.h"
+
+struct rgw_cls_tag_timeout_op
+{
+ uint64_t tag_timeout;
+
+ rgw_cls_tag_timeout_op() : tag_timeout(0) {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(tag_timeout, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(tag_timeout, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<rgw_cls_tag_timeout_op*>& ls);
+};
+WRITE_CLASS_ENCODER(rgw_cls_tag_timeout_op)
+
+struct rgw_cls_obj_prepare_op
+{
+ RGWModifyOp op;
+ cls_rgw_obj_key key;
+ std::string tag;
+ std::string locator;
+ bool log_op;
+ uint16_t bilog_flags;
+ rgw_zone_set zones_trace;
+
+ rgw_cls_obj_prepare_op() : op(CLS_RGW_OP_UNKNOWN), log_op(false), bilog_flags(0) {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(7, 5, bl);
+ uint8_t c = (uint8_t)op;
+ encode(c, bl);
+ encode(tag, bl);
+ encode(locator, bl);
+ encode(log_op, bl);
+ encode(key, bl);
+ encode(bilog_flags, bl);
+ encode(zones_trace, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(7, 3, 3, bl);
+ uint8_t c;
+ decode(c, bl);
+ op = (RGWModifyOp)c;
+ if (struct_v < 5) {
+ decode(key.name, bl);
+ }
+ decode(tag, bl);
+ if (struct_v >= 2) {
+ decode(locator, bl);
+ }
+ if (struct_v >= 4) {
+ decode(log_op, bl);
+ }
+ if (struct_v >= 5) {
+ decode(key, bl);
+ }
+ if (struct_v >= 6) {
+ decode(bilog_flags, bl);
+ }
+ if (struct_v >= 7) {
+ decode(zones_trace, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<rgw_cls_obj_prepare_op*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_prepare_op)
+
+struct rgw_cls_obj_complete_op
+{
+ RGWModifyOp op;
+ cls_rgw_obj_key key;
+ std::string locator;
+ rgw_bucket_entry_ver ver;
+ rgw_bucket_dir_entry_meta meta;
+ std::string tag;
+ bool log_op;
+ uint16_t bilog_flags;
+
+ std::list<cls_rgw_obj_key> remove_objs;
+ rgw_zone_set zones_trace;
+
+ rgw_cls_obj_complete_op() : op(CLS_RGW_OP_ADD), log_op(false), bilog_flags(0) {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(9, 7, bl);
+ uint8_t c = (uint8_t)op;
+ encode(c, bl);
+ encode(ver.epoch, bl);
+ encode(meta, bl);
+ encode(tag, bl);
+ encode(locator, bl);
+ encode(remove_objs, bl);
+ encode(ver, bl);
+ encode(log_op, bl);
+ encode(key, bl);
+ encode(bilog_flags, bl);
+ encode(zones_trace, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(9, 3, 3, bl);
+ uint8_t c;
+ decode(c, bl);
+ op = (RGWModifyOp)c;
+ if (struct_v < 7) {
+ decode(key.name, bl);
+ }
+ decode(ver.epoch, bl);
+ decode(meta, bl);
+ decode(tag, bl);
+ if (struct_v >= 2) {
+ decode(locator, bl);
+ }
+ if (struct_v >= 4 && struct_v < 7) {
+ std::list<std::string> old_remove_objs;
+ decode(old_remove_objs, bl);
+
+ for (auto iter = old_remove_objs.begin();
+ iter != old_remove_objs.end(); ++iter) {
+ cls_rgw_obj_key k;
+ k.name = *iter;
+ remove_objs.push_back(k);
+ }
+ } else {
+ decode(remove_objs, bl);
+ }
+ if (struct_v >= 5) {
+ decode(ver, bl);
+ } else {
+ ver.pool = -1;
+ }
+ if (struct_v >= 6) {
+ decode(log_op, bl);
+ }
+ if (struct_v >= 7) {
+ decode(key, bl);
+ }
+ if (struct_v >= 8) {
+ decode(bilog_flags, bl);
+ }
+ if (struct_v >= 9) {
+ decode(zones_trace, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<rgw_cls_obj_complete_op*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_complete_op)
+
+struct rgw_cls_link_olh_op {
+ cls_rgw_obj_key key;
+ std::string olh_tag;
+ bool delete_marker;
+ std::string op_tag;
+ rgw_bucket_dir_entry_meta meta;
+ uint64_t olh_epoch;
+ bool log_op;
+ uint16_t bilog_flags;
+ ceph::real_time unmod_since; /* only create delete marker if newer than this */
+ bool high_precision_time;
+ rgw_zone_set zones_trace;
+
+ rgw_cls_link_olh_op() : delete_marker(false), olh_epoch(0), log_op(false), bilog_flags(0), high_precision_time(false) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(5, 1, bl);
+ encode(key, bl);
+ encode(olh_tag, bl);
+ encode(delete_marker, bl);
+ encode(op_tag, bl);
+ encode(meta, bl);
+ encode(olh_epoch, bl);
+ encode(log_op, bl);
+ encode(bilog_flags, bl);
+ uint64_t t = ceph::real_clock::to_time_t(unmod_since);
+ encode(t, bl);
+ encode(unmod_since, bl);
+ encode(high_precision_time, bl);
+ encode(zones_trace, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(5, bl);
+ decode(key, bl);
+ decode(olh_tag, bl);
+ decode(delete_marker, bl);
+ decode(op_tag, bl);
+ decode(meta, bl);
+ decode(olh_epoch, bl);
+ decode(log_op, bl);
+ decode(bilog_flags, bl);
+ if (struct_v == 2) {
+ uint64_t t;
+ decode(t, bl);
+ unmod_since = ceph::real_clock::from_time_t(static_cast<time_t>(t));
+ }
+ if (struct_v >= 3) {
+ uint64_t t;
+ decode(t, bl);
+ decode(unmod_since, bl);
+ }
+ if (struct_v >= 4) {
+ decode(high_precision_time, bl);
+ }
+ if (struct_v >= 5) {
+ decode(zones_trace, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ static void generate_test_instances(std::list<rgw_cls_link_olh_op *>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(rgw_cls_link_olh_op)
+
+struct rgw_cls_unlink_instance_op {
+ cls_rgw_obj_key key;
+ std::string op_tag;
+ uint64_t olh_epoch;
+ bool log_op;
+ uint16_t bilog_flags;
+ std::string olh_tag;
+ rgw_zone_set zones_trace;
+
+ rgw_cls_unlink_instance_op() : olh_epoch(0), log_op(false), bilog_flags(0) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(3, 1, bl);
+ encode(key, bl);
+ encode(op_tag, bl);
+ encode(olh_epoch, bl);
+ encode(log_op, bl);
+ encode(bilog_flags, bl);
+ encode(olh_tag, bl);
+ encode(zones_trace, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(3, bl);
+ decode(key, bl);
+ decode(op_tag, bl);
+ decode(olh_epoch, bl);
+ decode(log_op, bl);
+ decode(bilog_flags, bl);
+ if (struct_v >= 2) {
+ decode(olh_tag, bl);
+ }
+ if (struct_v >= 3) {
+ decode(zones_trace, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ static void generate_test_instances(std::list<rgw_cls_unlink_instance_op *>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(rgw_cls_unlink_instance_op)
+
+struct rgw_cls_read_olh_log_op
+{
+ cls_rgw_obj_key olh;
+ uint64_t ver_marker;
+ std::string olh_tag;
+
+ rgw_cls_read_olh_log_op() : ver_marker(0) {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(olh, bl);
+ encode(ver_marker, bl);
+ encode(olh_tag, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(olh, bl);
+ decode(ver_marker, bl);
+ decode(olh_tag, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(std::list<rgw_cls_read_olh_log_op *>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(rgw_cls_read_olh_log_op)
+
+
+struct rgw_cls_read_olh_log_ret
+{
+ std::map<uint64_t, std::vector<rgw_bucket_olh_log_entry> > log;
+ bool is_truncated;
+
+ rgw_cls_read_olh_log_ret() : is_truncated(false) {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(log, bl);
+ encode(is_truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(log, bl);
+ decode(is_truncated, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(std::list<rgw_cls_read_olh_log_ret *>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(rgw_cls_read_olh_log_ret)
+
+struct rgw_cls_trim_olh_log_op
+{
+ cls_rgw_obj_key olh;
+ uint64_t ver;
+ std::string olh_tag;
+
+ rgw_cls_trim_olh_log_op() : ver(0) {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(olh, bl);
+ encode(ver, bl);
+ encode(olh_tag, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(olh, bl);
+ decode(ver, bl);
+ decode(olh_tag, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(std::list<rgw_cls_trim_olh_log_op *>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(rgw_cls_trim_olh_log_op)
+
+struct rgw_cls_bucket_clear_olh_op {
+ cls_rgw_obj_key key;
+ std::string olh_tag;
+
+ rgw_cls_bucket_clear_olh_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(key, bl);
+ encode(olh_tag, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(key, bl);
+ decode(olh_tag, bl);
+ DECODE_FINISH(bl);
+ }
+
+ static void generate_test_instances(std::list<rgw_cls_bucket_clear_olh_op *>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(rgw_cls_bucket_clear_olh_op)
+
+struct rgw_cls_list_op
+{
+ cls_rgw_obj_key start_obj;
+ uint32_t num_entries;
+ std::string filter_prefix;
+ bool list_versions;
+ std::string delimiter;
+
+ rgw_cls_list_op() : num_entries(0), list_versions(false) {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(6, 4, bl);
+ encode(num_entries, bl);
+ encode(filter_prefix, bl);
+ encode(start_obj, bl);
+ encode(list_versions, bl);
+ encode(delimiter, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(6, 2, 2, bl);
+ if (struct_v < 4) {
+ decode(start_obj.name, bl);
+ }
+ decode(num_entries, bl);
+ if (struct_v >= 3) {
+ decode(filter_prefix, bl);
+ }
+ if (struct_v >= 4) {
+ decode(start_obj, bl);
+ }
+ if (struct_v >= 5) {
+ decode(list_versions, bl);
+ }
+ if (struct_v >= 6) {
+ decode(delimiter, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<rgw_cls_list_op*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_list_op)
+
+struct rgw_cls_list_ret {
+ rgw_bucket_dir dir;
+ bool is_truncated;
+
+ // if is_truncated is true, this is the starting marker for the next
+ // iteration; this is necessary because it's possible that, after the
+ // maximum number of tries, we still have zero entries to return, in
+ // which case we have to at least move the marker forward
+ cls_rgw_obj_key marker;
+
+ // cls_filtered is not transmitted; it is assumed true for versions
+ // on/after 3 and false for prior versions; this allows the rgw
+ // layer to know when an older osd (cls) does not do the filtering
+ bool cls_filtered;
+
+ rgw_cls_list_ret() :
+ is_truncated(false),
+ cls_filtered(true)
+ {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(4, 2, bl);
+ encode(dir, bl);
+ encode(is_truncated, bl);
+ encode(marker, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(4, 2, 2, bl);
+ decode(dir, bl);
+ decode(is_truncated, bl);
+ cls_filtered = struct_v >= 3;
+ if (struct_v >= 4) {
+ decode(marker, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<rgw_cls_list_ret*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_list_ret)
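+
+// Caller-side sketch (illustrative only): interpreting an rgw_cls_list_ret. cls_filtered
+// tells newer callers whether the OSD already applied prefix/delimiter filtering, and
+// marker is only meaningful when is_truncated is set.
+//
+//   rgw_cls_list_ret ret;                      // decoded reply
+//   if (!ret.cls_filtered) {
+//     // older cls did not filter; the caller must filter ret.dir entries itself
+//   }
+//   if (ret.is_truncated) {
+//     // continue the listing from ret.marker, even if ret.dir came back empty
+//   }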
+
+struct rgw_cls_check_index_ret
+{
+ rgw_bucket_dir_header existing_header;
+ rgw_bucket_dir_header calculated_header;
+
+ rgw_cls_check_index_ret() {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(existing_header, bl);
+ encode(calculated_header, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(existing_header, bl);
+ decode(calculated_header, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<rgw_cls_check_index_ret *>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_check_index_ret)
+
+struct rgw_cls_bucket_update_stats_op
+{
+ bool absolute{false};
+ std::map<RGWObjCategory, rgw_bucket_category_stats> stats;
+
+ rgw_cls_bucket_update_stats_op() {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(absolute, bl);
+ encode(stats, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(absolute, bl);
+ decode(stats, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<rgw_cls_bucket_update_stats_op *>& o);
+};
+WRITE_CLASS_ENCODER(rgw_cls_bucket_update_stats_op)
+
+struct rgw_cls_obj_remove_op {
+ std::list<std::string> keep_attr_prefixes;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(keep_attr_prefixes, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(keep_attr_prefixes, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_remove_op)
+
+struct rgw_cls_obj_store_pg_ver_op {
+ std::string attr;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(attr, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(attr, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_store_pg_ver_op)
+
+struct rgw_cls_obj_check_attrs_prefix {
+ std::string check_prefix;
+ bool fail_if_exist;
+
+ rgw_cls_obj_check_attrs_prefix() : fail_if_exist(false) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(check_prefix, bl);
+ encode(fail_if_exist, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(check_prefix, bl);
+ decode(fail_if_exist, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_check_attrs_prefix)
+
+struct rgw_cls_obj_check_mtime {
+ ceph::real_time mtime;
+ RGWCheckMTimeType type;
+ bool high_precision_time;
+
+ rgw_cls_obj_check_mtime() : type(CLS_RGW_CHECK_TIME_MTIME_EQ), high_precision_time(false) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(mtime, bl);
+ encode((uint8_t)type, bl);
+ encode(high_precision_time, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(mtime, bl);
+ uint8_t c;
+ decode(c, bl);
+ type = (RGWCheckMTimeType)c;
+ if (struct_v >= 2) {
+ decode(high_precision_time, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_obj_check_mtime)
+
+struct rgw_cls_usage_log_add_op {
+ rgw_usage_log_info info;
+ rgw_user user;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(info, bl);
+ encode(user.to_str(), bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(info, bl);
+ if (struct_v >= 2) {
+ std::string s;
+ decode(s, bl);
+ user.from_str(s);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_usage_log_add_op)
+
+struct rgw_cls_bi_get_op {
+ cls_rgw_obj_key key;
+ BIIndexType type; /* namespace: plain, instance, olh */
+
+ rgw_cls_bi_get_op() : type(BIIndexType::Plain) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(key, bl);
+ encode((uint8_t)type, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(key, bl);
+ uint8_t c;
+ decode(c, bl);
+ type = (BIIndexType)c;
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_get_op)
+
+struct rgw_cls_bi_get_ret {
+ rgw_cls_bi_entry entry;
+
+ rgw_cls_bi_get_ret() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_get_ret)
+
+struct rgw_cls_bi_put_op {
+ rgw_cls_bi_entry entry;
+
+ rgw_cls_bi_put_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_put_op)
+
+struct rgw_cls_bi_list_op {
+ uint32_t max;
+ std::string name_filter; // limit result to one object and its instances
+ std::string marker;
+
+ rgw_cls_bi_list_op() : max(0) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(max, bl);
+ encode(name_filter, bl);
+ encode(marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(max, bl);
+ decode(name_filter, bl);
+ decode(marker, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_list_op)
+
+struct rgw_cls_bi_list_ret {
+ std::list<rgw_cls_bi_entry> entries;
+ bool is_truncated;
+
+ rgw_cls_bi_list_ret() : is_truncated(false) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ encode(is_truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ decode(is_truncated, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_list_ret)
+
+struct rgw_cls_usage_log_read_op {
+ uint64_t start_epoch;
+ uint64_t end_epoch;
+ std::string owner;
+ std::string bucket;
+
+ std::string iter; // should be empty for the first call, non-empty for subsequent calls
+ uint32_t max_entries;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(start_epoch, bl);
+ encode(end_epoch, bl);
+ encode(owner, bl);
+ encode(iter, bl);
+ encode(max_entries, bl);
+ encode(bucket, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(start_epoch, bl);
+ decode(end_epoch, bl);
+ decode(owner, bl);
+ decode(iter, bl);
+ decode(max_entries, bl);
+ if (struct_v >= 2) {
+ decode(bucket, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_usage_log_read_op)
+
+struct rgw_cls_usage_log_read_ret {
+ std::map<rgw_user_bucket, rgw_usage_log_entry> usage;
+ bool truncated;
+ std::string next_iter;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(usage, bl);
+ encode(truncated, bl);
+ encode(next_iter, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(usage, bl);
+ decode(truncated, bl);
+ decode(next_iter, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_usage_log_read_ret)
+
+struct rgw_cls_usage_log_trim_op {
+ uint64_t start_epoch;
+ uint64_t end_epoch;
+ std::string user;
+ std::string bucket;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(3, 2, bl);
+ encode(start_epoch, bl);
+ encode(end_epoch, bl);
+ encode(user, bl);
+ encode(bucket, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(3, bl);
+ decode(start_epoch, bl);
+ decode(end_epoch, bl);
+ decode(user, bl);
+ if (struct_v >= 3) {
+ decode(bucket, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_cls_usage_log_trim_op)
+
+struct cls_rgw_gc_set_entry_op {
+ uint32_t expiration_secs;
+ cls_rgw_gc_obj_info info;
+ cls_rgw_gc_set_entry_op() : expiration_secs(0) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(expiration_secs, bl);
+ encode(info, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(expiration_secs, bl);
+ decode(info, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<cls_rgw_gc_set_entry_op*>& ls);
+
+ size_t estimate_encoded_size() const {
+ constexpr size_t start_overhead = sizeof(__u8) + sizeof(__u8) + sizeof(ceph_le32); // struct_v, struct_compat, and length prefix
+ constexpr size_t expr_secs_overhead = sizeof(__u32); // expiration_secs
+ return start_overhead + expr_secs_overhead + info.estimate_encoded_size();
+ }
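+  // Size note (informational): ENCODE_START writes struct_v (u8), struct_compat (u8) and a
+  // u32 length prefix, so start_overhead is 1 + 1 + 4 = 6 bytes; with the 4-byte
+  // expiration_secs this op adds 10 bytes on top of info.estimate_encoded_size().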
+};
+WRITE_CLASS_ENCODER(cls_rgw_gc_set_entry_op)
+
+struct cls_rgw_gc_defer_entry_op {
+ uint32_t expiration_secs;
+ std::string tag;
+ cls_rgw_gc_defer_entry_op() : expiration_secs(0) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(expiration_secs, bl);
+ encode(tag, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(expiration_secs, bl);
+ decode(tag, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<cls_rgw_gc_defer_entry_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_gc_defer_entry_op)
+
+struct cls_rgw_gc_list_op {
+ std::string marker;
+ uint32_t max;
+ bool expired_only;
+
+ cls_rgw_gc_list_op() : max(0), expired_only(true) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(marker, bl);
+ encode(max, bl);
+ encode(expired_only, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(marker, bl);
+ decode(max, bl);
+ if (struct_v >= 2) {
+ decode(expired_only, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<cls_rgw_gc_list_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_gc_list_op)
+
+struct cls_rgw_gc_list_ret {
+ std::list<cls_rgw_gc_obj_info> entries;
+ std::string next_marker;
+ bool truncated;
+
+ cls_rgw_gc_list_ret() : truncated(false) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(entries, bl);
+ encode(next_marker, bl);
+ encode(truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(entries, bl);
+ if (struct_v >= 2)
+ decode(next_marker, bl);
+ decode(truncated, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<cls_rgw_gc_list_ret*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_gc_list_ret)
+
+struct cls_rgw_gc_remove_op {
+ std::vector<std::string> tags;
+
+ cls_rgw_gc_remove_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(tags, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(tags, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<cls_rgw_gc_remove_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_gc_remove_op)
+
+struct cls_rgw_bi_log_list_op {
+ std::string marker;
+ uint32_t max;
+
+ cls_rgw_bi_log_list_op() : max(0) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(marker, bl);
+ encode(max, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(marker, bl);
+ decode(max, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<cls_rgw_bi_log_list_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_bi_log_list_op)
+
+struct cls_rgw_bi_log_trim_op {
+ std::string start_marker;
+ std::string end_marker;
+
+ cls_rgw_bi_log_trim_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(start_marker, bl);
+ encode(end_marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(start_marker, bl);
+ decode(end_marker, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<cls_rgw_bi_log_trim_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_bi_log_trim_op)
+
+struct cls_rgw_bi_log_list_ret {
+ std::list<rgw_bi_log_entry> entries;
+ bool truncated;
+
+ cls_rgw_bi_log_list_ret() : truncated(false) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ encode(truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ decode(truncated, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<cls_rgw_bi_log_list_ret*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_bi_log_list_ret)
+
+struct cls_rgw_lc_get_next_entry_op {
+ std::string marker;
+ cls_rgw_lc_get_next_entry_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(marker, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_get_next_entry_op)
+
+struct cls_rgw_lc_get_next_entry_ret {
+ cls_rgw_lc_entry entry;
+
+ cls_rgw_lc_get_next_entry_ret() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(2, 2, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(2, bl);
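+    // entries encoded before struct v2 were a (bucket, status) pair;
+    // rebuild a cls_rgw_lc_entry from that legacy form with a start time of 0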
+ if (struct_v < 2) {
+ std::pair<std::string, int> oe;
+ decode(oe, bl);
+ entry = {oe.first, 0 /* start */, uint32_t(oe.second)};
+ } else {
+ decode(entry, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_get_next_entry_ret)
+
+struct cls_rgw_lc_get_entry_op {
+ std::string marker;
+ cls_rgw_lc_get_entry_op() {}
+ cls_rgw_lc_get_entry_op(const std::string& _marker) : marker(_marker) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(marker, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_get_entry_op)
+
+struct cls_rgw_lc_get_entry_ret {
+ cls_rgw_lc_entry entry;
+
+ cls_rgw_lc_get_entry_ret() {}
+ cls_rgw_lc_get_entry_ret(cls_rgw_lc_entry&& _entry)
+ : entry(std::move(_entry)) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_get_entry_ret)
+
+struct cls_rgw_lc_rm_entry_op {
+ cls_rgw_lc_entry entry;
+ cls_rgw_lc_rm_entry_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(2, 2, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(2, bl);
+ if (struct_v < 2) {
+ std::pair<std::string, int> oe;
+ decode(oe, bl);
+ entry = {oe.first, 0 /* start */, uint32_t(oe.second)};
+ } else {
+ decode(entry, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_rm_entry_op)
+
+struct cls_rgw_lc_set_entry_op {
+ cls_rgw_lc_entry entry;
+ cls_rgw_lc_set_entry_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(2, 2, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(2, bl);
+ if (struct_v < 2) {
+ std::pair<std::string, int> oe;
+ decode(oe, bl);
+ entry = {oe.first, 0 /* start */, uint32_t(oe.second)};
+ } else {
+ decode(entry, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_set_entry_op)
+
+struct cls_rgw_lc_put_head_op {
+ cls_rgw_lc_obj_head head;
+
+
+ cls_rgw_lc_put_head_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(head, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(head, bl);
+ DECODE_FINISH(bl);
+ }
+
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_put_head_op)
+
+struct cls_rgw_lc_get_head_ret {
+ cls_rgw_lc_obj_head head;
+
+ cls_rgw_lc_get_head_ret() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(head, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(head, bl);
+ DECODE_FINISH(bl);
+ }
+
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_get_head_ret)
+
+struct cls_rgw_lc_list_entries_op {
+ std::string marker;
+ uint32_t max_entries = 0;
+ uint8_t compat_v{0};
+
+ cls_rgw_lc_list_entries_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(3, 1, bl);
+ encode(marker, bl);
+ encode(max_entries, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(3, bl);
+ compat_v = struct_v;
+ decode(marker, bl);
+ decode(max_entries, bl);
+ DECODE_FINISH(bl);
+ }
+
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_list_entries_op)
+
+struct cls_rgw_lc_list_entries_ret {
+ vector<cls_rgw_lc_entry> entries;
+ bool is_truncated{false};
+ uint8_t compat_v;
+
+  cls_rgw_lc_list_entries_ret(uint8_t compat_v = 3)
+ : compat_v(compat_v) {}
+
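+  // compat_v mirrors the struct version observed on decode (3 by default);
+  // encode() uses it to emit either the legacy map<bucket, status> form
+  // (compat_v <= 2) or the full cls_rgw_lc_entry vector (v3 and later)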
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(compat_v, 1, bl);
+ if (compat_v <= 2) {
+ map<string, int> oes;
+ std::for_each(entries.begin(), entries.end(),
+ [&oes](const cls_rgw_lc_entry& elt)
+ {oes.insert({elt.bucket, elt.status});});
+ encode(oes, bl);
+ } else {
+ encode(entries, bl);
+ }
+ encode(is_truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(3, bl);
+ compat_v = struct_v;
+ if (struct_v <= 2) {
+ map<string, int> oes;
+ decode(oes, bl);
+ std::for_each(oes.begin(), oes.end(),
+ [this](const std::pair<string, int>& oe)
+ {entries.push_back({oe.first, 0 /* start */,
+ uint32_t(oe.second)});});
+ } else {
+ decode(entries, bl);
+ }
+ if (struct_v >= 2) {
+ decode(is_truncated, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_list_entries_ret)
+
+struct cls_rgw_reshard_add_op {
+ cls_rgw_reshard_entry entry;
+
+ cls_rgw_reshard_add_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(std::list<cls_rgw_reshard_add_op*>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_add_op)
+
+struct cls_rgw_reshard_list_op {
+ uint32_t max{0};
+ std::string marker;
+
+ cls_rgw_reshard_list_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(max, bl);
+ encode(marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(max, bl);
+ decode(marker, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(std::list<cls_rgw_reshard_list_op*>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_list_op)
+
+
+struct cls_rgw_reshard_list_ret {
+ std::list<cls_rgw_reshard_entry> entries;
+ bool is_truncated{false};
+
+ cls_rgw_reshard_list_ret() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ encode(is_truncated, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ decode(is_truncated, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(std::list<cls_rgw_reshard_list_ret*>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_list_ret)
+
+struct cls_rgw_reshard_get_op {
+ cls_rgw_reshard_entry entry;
+
+ cls_rgw_reshard_get_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(std::list<cls_rgw_reshard_get_op*>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_get_op)
+
+struct cls_rgw_reshard_get_ret {
+ cls_rgw_reshard_entry entry;
+
+ cls_rgw_reshard_get_ret() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(std::list<cls_rgw_reshard_get_ret*>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_get_ret)
+
+struct cls_rgw_reshard_remove_op {
+ std::string tenant;
+ std::string bucket_name;
+ std::string bucket_id;
+
+ cls_rgw_reshard_remove_op() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(tenant, bl);
+ encode(bucket_name, bl);
+ encode(bucket_id, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(tenant, bl);
+ decode(bucket_name, bl);
+ decode(bucket_id, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(std::list<cls_rgw_reshard_remove_op*>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_remove_op)
+
+struct cls_rgw_set_bucket_resharding_op {
+ cls_rgw_bucket_instance_entry entry;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(std::list<cls_rgw_set_bucket_resharding_op*>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_set_bucket_resharding_op)
+
+struct cls_rgw_clear_bucket_resharding_op {
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(std::list<cls_rgw_clear_bucket_resharding_op*>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_clear_bucket_resharding_op)
+
+struct cls_rgw_guard_bucket_resharding_op {
+ int ret_err{0};
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(ret_err, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(ret_err, bl);
+ DECODE_FINISH(bl);
+ }
+
+ static void generate_test_instances(std::list<cls_rgw_guard_bucket_resharding_op*>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_guard_bucket_resharding_op)
+
+struct cls_rgw_get_bucket_resharding_op {
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ DECODE_FINISH(bl);
+ }
+
+ static void generate_test_instances(std::list<cls_rgw_get_bucket_resharding_op*>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_get_bucket_resharding_op)
+
+struct cls_rgw_get_bucket_resharding_ret {
+ cls_rgw_bucket_instance_entry new_instance;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(new_instance, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(new_instance, bl);
+ DECODE_FINISH(bl);
+ }
+
+ static void generate_test_instances(std::list<cls_rgw_get_bucket_resharding_ret*>& o);
+ void dump(ceph::Formatter *f) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_get_bucket_resharding_ret)
+
+#endif /* CEPH_CLS_RGW_OPS_H */
diff --git a/src/cls/rgw/cls_rgw_types.cc b/src/cls/rgw/cls_rgw_types.cc
new file mode 100644
index 000000000..4a982eccb
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_types.cc
@@ -0,0 +1,784 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "cls/rgw/cls_rgw_types.h"
+#include "common/ceph_json.h"
+#include "include/utime.h"
+
+using std::list;
+using std::string;
+
+using ceph::bufferlist;
+using ceph::Formatter;
+
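+// a zone-set entry serializes as "zone" or "zone:location_key";
+// from_str()/to_str() convert between the two forms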
+void rgw_zone_set_entry::from_str(const string& s)
+{
+ auto pos = s.find(':');
+ if (pos == string::npos) {
+ zone = s;
+ location_key.reset();
+ } else {
+ zone = s.substr(0, pos);
+ location_key = s.substr(pos + 1);
+ }
+}
+
+string rgw_zone_set_entry::to_str() const
+{
+ string s = zone;
+ if (location_key) {
+ s = s + ":" + *location_key;
+ }
+ return s;
+}
+
+void rgw_zone_set_entry::encode(bufferlist &bl) const
+{
+ /* no ENCODE_START, ENCODE_END for backward compatibility */
+ ceph::encode(to_str(), bl);
+}
+
+void rgw_zone_set_entry::decode(bufferlist::const_iterator &bl)
+{
+ /* no DECODE_START, DECODE_END for backward compatibility */
+ string s;
+ ceph::decode(s, bl);
+ from_str(s);
+}
+
+void rgw_zone_set_entry::dump(Formatter *f) const
+{
+ encode_json("entry", to_str(), f);
+}
+
+void rgw_zone_set_entry::decode_json(JSONObj *obj) {
+ string s;
+ JSONDecoder::decode_json("entry", s, obj);
+ from_str(s);
+}
+
+void rgw_zone_set::insert(const string& zone, std::optional<string> location_key)
+{
+ entries.insert(rgw_zone_set_entry(zone, location_key));
+}
+
+bool rgw_zone_set::exists(const string& zone, std::optional<string> location_key) const
+{
+ return entries.find(rgw_zone_set_entry(zone, location_key)) != entries.end();
+}
+
+void encode_json(const char *name, const rgw_zone_set& zs, ceph::Formatter *f)
+{
+ encode_json(name, zs.entries, f);
+}
+
+void decode_json_obj(rgw_zone_set& zs, JSONObj *obj)
+{
+ decode_json_obj(zs.entries, obj);
+}
+
+void rgw_bucket_pending_info::generate_test_instances(list<rgw_bucket_pending_info*>& o)
+{
+ rgw_bucket_pending_info *i = new rgw_bucket_pending_info;
+ i->state = CLS_RGW_STATE_COMPLETE;
+ i->op = CLS_RGW_OP_DEL;
+ o.push_back(i);
+ o.push_back(new rgw_bucket_pending_info);
+}
+
+void rgw_bucket_pending_info::dump(Formatter *f) const
+{
+ encode_json("state", (int)state, f);
+ utime_t ut(timestamp);
+ encode_json("timestamp", ut, f);
+ encode_json("op", (int)op, f);
+}
+
+void rgw_bucket_pending_info::decode_json(JSONObj *obj) {
+ int val;
+ JSONDecoder::decode_json("state", val, obj);
+ state = (RGWPendingState)val;
+ utime_t ut(timestamp);
+ JSONDecoder::decode_json("timestamp", ut, obj);
+ JSONDecoder::decode_json("op", val, obj);
+ op = (uint8_t)val;
+}
+
+void cls_rgw_obj_key::decode_json(JSONObj *obj) {
+ JSONDecoder::decode_json("name", name, obj);
+ JSONDecoder::decode_json("instance", instance, obj);
+}
+
+void rgw_bucket_dir_entry_meta::generate_test_instances(list<rgw_bucket_dir_entry_meta*>& o)
+{
+ rgw_bucket_dir_entry_meta *m = new rgw_bucket_dir_entry_meta;
+ m->category = RGWObjCategory::Main;
+ m->size = 100;
+ m->etag = "etag";
+ m->owner = "owner";
+ m->owner_display_name = "display name";
+ m->content_type = "content/type";
+ o.push_back(m);
+ o.push_back(new rgw_bucket_dir_entry_meta);
+}
+
+void rgw_bucket_dir_entry_meta::dump(Formatter *f) const
+{
+ encode_json("category", (int)category, f);
+ encode_json("size", size, f);
+ utime_t ut(mtime);
+ encode_json("mtime", ut, f);
+ encode_json("etag", etag, f);
+ encode_json("storage_class", storage_class, f);
+ encode_json("owner", owner, f);
+ encode_json("owner_display_name", owner_display_name, f);
+ encode_json("content_type", content_type, f);
+ encode_json("accounted_size", accounted_size, f);
+ encode_json("user_data", user_data, f);
+ encode_json("appendable", appendable, f);
+}
+
+void rgw_bucket_dir_entry_meta::decode_json(JSONObj *obj) {
+ int val;
+ JSONDecoder::decode_json("category", val, obj);
+ category = static_cast<RGWObjCategory>(val);
+ JSONDecoder::decode_json("size", size, obj);
+ utime_t ut;
+ JSONDecoder::decode_json("mtime", ut, obj);
+ mtime = ut.to_real_time();
+ JSONDecoder::decode_json("etag", etag, obj);
+ JSONDecoder::decode_json("storage_class", storage_class, obj);
+ JSONDecoder::decode_json("owner", owner, obj);
+ JSONDecoder::decode_json("owner_display_name", owner_display_name, obj);
+ JSONDecoder::decode_json("content_type", content_type, obj);
+ JSONDecoder::decode_json("accounted_size", accounted_size, obj);
+ JSONDecoder::decode_json("user_data", user_data, obj);
+ JSONDecoder::decode_json("appendable", appendable, obj);
+}
+
+void rgw_bucket_dir_entry::generate_test_instances(list<rgw_bucket_dir_entry*>& o)
+{
+ list<rgw_bucket_dir_entry_meta *> l;
+ rgw_bucket_dir_entry_meta::generate_test_instances(l);
+
+ for (auto iter = l.begin(); iter != l.end(); ++iter) {
+ rgw_bucket_dir_entry_meta *m = *iter;
+ rgw_bucket_dir_entry *e = new rgw_bucket_dir_entry;
+ e->key.name = "name";
+ e->ver.pool = 1;
+ e->ver.epoch = 1234;
+ e->locator = "locator";
+ e->exists = true;
+ e->meta = *m;
+ e->tag = "tag";
+
+ o.push_back(e);
+
+ delete m;
+ }
+ o.push_back(new rgw_bucket_dir_entry);
+}
+
+void rgw_bucket_entry_ver::dump(Formatter *f) const
+{
+ encode_json("pool", pool, f);
+ encode_json("epoch", epoch, f);
+}
+
+void rgw_bucket_entry_ver::decode_json(JSONObj *obj) {
+ JSONDecoder::decode_json("pool", pool, obj);
+ JSONDecoder::decode_json("epoch", epoch, obj);
+}
+
+void rgw_bucket_entry_ver::generate_test_instances(list<rgw_bucket_entry_ver*>& ls)
+{
+ ls.push_back(new rgw_bucket_entry_ver);
+ ls.push_back(new rgw_bucket_entry_ver);
+ ls.back()->pool = 123;
+ ls.back()->epoch = 12322;
+}
+
+
+void rgw_bucket_dir_entry::dump(Formatter *f) const
+{
+ encode_json("name", key.name, f);
+ encode_json("instance", key.instance , f);
+ encode_json("ver", ver , f);
+ encode_json("locator", locator , f);
+ encode_json("exists", exists , f);
+ encode_json("meta", meta , f);
+ encode_json("tag", tag , f);
+ encode_json("flags", (int)flags , f);
+ encode_json("pending_map", pending_map, f);
+ encode_json("versioned_epoch", versioned_epoch , f);
+}
+
+void rgw_bucket_dir_entry::decode_json(JSONObj *obj) {
+ JSONDecoder::decode_json("name", key.name, obj);
+ JSONDecoder::decode_json("instance", key.instance , obj);
+ JSONDecoder::decode_json("ver", ver , obj);
+ JSONDecoder::decode_json("locator", locator , obj);
+ JSONDecoder::decode_json("exists", exists , obj);
+ JSONDecoder::decode_json("meta", meta , obj);
+ JSONDecoder::decode_json("tag", tag , obj);
+ int val;
+ JSONDecoder::decode_json("flags", val , obj);
+ flags = (uint16_t)val;
+ JSONDecoder::decode_json("pending_map", pending_map, obj);
+ JSONDecoder::decode_json("versioned_epoch", versioned_epoch, obj);
+}
+
+static void dump_bi_entry(bufferlist bl, BIIndexType index_type, Formatter *formatter)
+{
+ auto iter = bl.cbegin();
+ switch (index_type) {
+ case BIIndexType::Plain:
+ case BIIndexType::Instance:
+ {
+ rgw_bucket_dir_entry entry;
+ decode(entry, iter);
+ encode_json("entry", entry, formatter);
+ }
+ break;
+ case BIIndexType::OLH:
+ {
+ rgw_bucket_olh_entry entry;
+ decode(entry, iter);
+ encode_json("entry", entry, formatter);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void rgw_cls_bi_entry::decode_json(JSONObj *obj, cls_rgw_obj_key *effective_key) {
+ JSONDecoder::decode_json("idx", idx, obj);
+ string s;
+ JSONDecoder::decode_json("type", s, obj);
+ if (s == "plain") {
+ type = BIIndexType::Plain;
+ } else if (s == "instance") {
+ type = BIIndexType::Instance;
+ } else if (s == "olh") {
+ type = BIIndexType::OLH;
+ } else {
+ type = BIIndexType::Invalid;
+ }
+ using ceph::encode;
+ switch (type) {
+ case BIIndexType::Plain:
+ case BIIndexType::Instance:
+ {
+ rgw_bucket_dir_entry entry;
+ JSONDecoder::decode_json("entry", entry, obj);
+ encode(entry, data);
+
+ if (effective_key) {
+ *effective_key = entry.key;
+ }
+ }
+ break;
+ case BIIndexType::OLH:
+ {
+ rgw_bucket_olh_entry entry;
+ JSONDecoder::decode_json("entry", entry, obj);
+ encode(entry, data);
+
+ if (effective_key) {
+ *effective_key = entry.key;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void rgw_cls_bi_entry::dump(Formatter *f) const
+{
+ string type_str;
+ switch (type) {
+ case BIIndexType::Plain:
+ type_str = "plain";
+ break;
+ case BIIndexType::Instance:
+ type_str = "instance";
+ break;
+ case BIIndexType::OLH:
+ type_str = "olh";
+ break;
+ default:
+ type_str = "invalid";
+ }
+ encode_json("type", type_str, f);
+ encode_json("idx", idx, f);
+ dump_bi_entry(data, type, f);
+}
+
+bool rgw_cls_bi_entry::get_info(cls_rgw_obj_key *key,
+ RGWObjCategory *category,
+ rgw_bucket_category_stats *accounted_stats)
+{
+ bool account = false;
+ auto iter = data.cbegin();
+ using ceph::decode;
+ switch (type) {
+ case BIIndexType::Plain:
+ account = true;
+    // NO BREAK; falls through to case BIIndexType::Instance:
+ case BIIndexType::Instance:
+ {
+ rgw_bucket_dir_entry entry;
+ decode(entry, iter);
+ account = (account && entry.exists);
+ *key = entry.key;
+ *category = entry.meta.category;
+ accounted_stats->num_entries++;
+ accounted_stats->total_size += entry.meta.accounted_size;
+ accounted_stats->total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size);
+ accounted_stats->actual_size += entry.meta.size;
+ }
+ break;
+ case BIIndexType::OLH:
+ {
+ rgw_bucket_olh_entry entry;
+ decode(entry, iter);
+ *key = entry.key;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return account;
+}
+
+void rgw_bucket_olh_entry::dump(Formatter *f) const
+{
+ encode_json("key", key, f);
+ encode_json("delete_marker", delete_marker, f);
+ encode_json("epoch", epoch, f);
+ encode_json("pending_log", pending_log, f);
+ encode_json("tag", tag, f);
+ encode_json("exists", exists, f);
+ encode_json("pending_removal", pending_removal, f);
+}
+
+void rgw_bucket_olh_entry::decode_json(JSONObj *obj)
+{
+ JSONDecoder::decode_json("key", key, obj);
+ JSONDecoder::decode_json("delete_marker", delete_marker, obj);
+ JSONDecoder::decode_json("epoch", epoch, obj);
+ JSONDecoder::decode_json("pending_log", pending_log, obj);
+ JSONDecoder::decode_json("tag", tag, obj);
+ JSONDecoder::decode_json("exists", exists, obj);
+ JSONDecoder::decode_json("pending_removal", pending_removal, obj);
+}
+
+void rgw_bucket_olh_log_entry::generate_test_instances(list<rgw_bucket_olh_log_entry*>& o)
+{
+ rgw_bucket_olh_log_entry *entry = new rgw_bucket_olh_log_entry;
+ entry->epoch = 1234;
+ entry->op = CLS_RGW_OLH_OP_LINK_OLH;
+ entry->op_tag = "op_tag";
+ entry->key.name = "key.name";
+ entry->key.instance = "key.instance";
+ entry->delete_marker = true;
+ o.push_back(entry);
+ o.push_back(new rgw_bucket_olh_log_entry);
+}
+
+void rgw_bucket_olh_log_entry::dump(Formatter *f) const
+{
+ encode_json("epoch", epoch, f);
+ const char *op_str;
+ switch (op) {
+ case CLS_RGW_OLH_OP_LINK_OLH:
+ op_str = "link_olh";
+ break;
+ case CLS_RGW_OLH_OP_UNLINK_OLH:
+ op_str = "unlink_olh";
+ break;
+ case CLS_RGW_OLH_OP_REMOVE_INSTANCE:
+ op_str = "remove_instance";
+ break;
+ default:
+ op_str = "unknown";
+ }
+ encode_json("op", op_str, f);
+ encode_json("op_tag", op_tag, f);
+ encode_json("key", key, f);
+ encode_json("delete_marker", delete_marker, f);
+}
+
+void rgw_bucket_olh_log_entry::decode_json(JSONObj *obj)
+{
+ JSONDecoder::decode_json("epoch", epoch, obj);
+ string op_str;
+ JSONDecoder::decode_json("op", op_str, obj);
+ if (op_str == "link_olh") {
+ op = CLS_RGW_OLH_OP_LINK_OLH;
+ } else if (op_str == "unlink_olh") {
+ op = CLS_RGW_OLH_OP_UNLINK_OLH;
+ } else if (op_str == "remove_instance") {
+ op = CLS_RGW_OLH_OP_REMOVE_INSTANCE;
+ } else {
+ op = CLS_RGW_OLH_OP_UNKNOWN;
+ }
+ JSONDecoder::decode_json("op_tag", op_tag, obj);
+ JSONDecoder::decode_json("key", key, obj);
+ JSONDecoder::decode_json("delete_marker", delete_marker, obj);
+}
+void rgw_bi_log_entry::decode_json(JSONObj *obj)
+{
+ JSONDecoder::decode_json("op_id", id, obj);
+ JSONDecoder::decode_json("op_tag", tag, obj);
+ string op_str;
+ JSONDecoder::decode_json("op", op_str, obj);
+ if (op_str == "write") {
+ op = CLS_RGW_OP_ADD;
+ } else if (op_str == "del") {
+ op = CLS_RGW_OP_DEL;
+ } else if (op_str == "cancel") {
+ op = CLS_RGW_OP_CANCEL;
+ } else if (op_str == "unknown") {
+ op = CLS_RGW_OP_UNKNOWN;
+ } else if (op_str == "link_olh") {
+ op = CLS_RGW_OP_LINK_OLH;
+ } else if (op_str == "link_olh_del") {
+ op = CLS_RGW_OP_LINK_OLH_DM;
+ } else if (op_str == "unlink_instance") {
+ op = CLS_RGW_OP_UNLINK_INSTANCE;
+ } else if (op_str == "syncstop") {
+ op = CLS_RGW_OP_SYNCSTOP;
+ } else if (op_str == "resync") {
+ op = CLS_RGW_OP_RESYNC;
+ } else {
+ op = CLS_RGW_OP_UNKNOWN;
+ }
+ JSONDecoder::decode_json("object", object, obj);
+ JSONDecoder::decode_json("instance", instance, obj);
+ string state_str;
+ JSONDecoder::decode_json("state", state_str, obj);
+ if (state_str == "pending") {
+ state = CLS_RGW_STATE_PENDING_MODIFY;
+ } else if (state_str == "complete") {
+ state = CLS_RGW_STATE_COMPLETE;
+ } else {
+ state = CLS_RGW_STATE_UNKNOWN;
+ }
+ JSONDecoder::decode_json("index_ver", index_ver, obj);
+ utime_t ut;
+ JSONDecoder::decode_json("timestamp", ut, obj);
+ timestamp = ut.to_real_time();
+ uint32_t f;
+ JSONDecoder::decode_json("bilog_flags", f, obj);
+ JSONDecoder::decode_json("ver", ver, obj);
+ bilog_flags = (uint16_t)f;
+ JSONDecoder::decode_json("owner", owner, obj);
+ JSONDecoder::decode_json("owner_display_name", owner_display_name, obj);
+ JSONDecoder::decode_json("zones_trace", zones_trace, obj);
+}
+
+void rgw_bi_log_entry::dump(Formatter *f) const
+{
+ f->dump_string("op_id", id);
+ f->dump_string("op_tag", tag);
+ switch (op) {
+ case CLS_RGW_OP_ADD:
+ f->dump_string("op", "write");
+ break;
+ case CLS_RGW_OP_DEL:
+ f->dump_string("op", "del");
+ break;
+ case CLS_RGW_OP_CANCEL:
+ f->dump_string("op", "cancel");
+ break;
+ case CLS_RGW_OP_UNKNOWN:
+ f->dump_string("op", "unknown");
+ break;
+ case CLS_RGW_OP_LINK_OLH:
+ f->dump_string("op", "link_olh");
+ break;
+ case CLS_RGW_OP_LINK_OLH_DM:
+ f->dump_string("op", "link_olh_del");
+ break;
+ case CLS_RGW_OP_UNLINK_INSTANCE:
+ f->dump_string("op", "unlink_instance");
+ break;
+ case CLS_RGW_OP_SYNCSTOP:
+ f->dump_string("op", "syncstop");
+ break;
+ case CLS_RGW_OP_RESYNC:
+ f->dump_string("op", "resync");
+ break;
+ default:
+ f->dump_string("op", "invalid");
+ break;
+ }
+
+ f->dump_string("object", object);
+ f->dump_string("instance", instance);
+
+ switch (state) {
+ case CLS_RGW_STATE_PENDING_MODIFY:
+ f->dump_string("state", "pending");
+ break;
+ case CLS_RGW_STATE_COMPLETE:
+ f->dump_string("state", "complete");
+ break;
+ default:
+ f->dump_string("state", "invalid");
+ break;
+ }
+
+ f->dump_int("index_ver", index_ver);
+ utime_t ut(timestamp);
+ ut.gmtime_nsec(f->dump_stream("timestamp"));
+ f->open_object_section("ver");
+ ver.dump(f);
+ f->close_section();
+ f->dump_int("bilog_flags", bilog_flags);
+ f->dump_bool("versioned", (bilog_flags & RGW_BILOG_FLAG_VERSIONED_OP) != 0);
+ f->dump_string("owner", owner);
+ f->dump_string("owner_display_name", owner_display_name);
+ encode_json("zones_trace", zones_trace, f);
+}
+
+void rgw_bi_log_entry::generate_test_instances(list<rgw_bi_log_entry*>& ls)
+{
+ ls.push_back(new rgw_bi_log_entry);
+ ls.push_back(new rgw_bi_log_entry);
+ ls.back()->id = "midf";
+ ls.back()->object = "obj";
+ ls.back()->timestamp = ceph::real_clock::from_ceph_timespec({init_le32(2), init_le32(3)});
+ ls.back()->index_ver = 4323;
+ ls.back()->tag = "tagasdfds";
+ ls.back()->op = CLS_RGW_OP_DEL;
+ ls.back()->state = CLS_RGW_STATE_PENDING_MODIFY;
+}
+
+void rgw_bucket_category_stats::generate_test_instances(list<rgw_bucket_category_stats*>& o)
+{
+ rgw_bucket_category_stats *s = new rgw_bucket_category_stats;
+ s->total_size = 1024;
+ s->total_size_rounded = 4096;
+ s->num_entries = 2;
+ s->actual_size = 1024;
+ o.push_back(s);
+ o.push_back(new rgw_bucket_category_stats);
+}
+
+void rgw_bucket_category_stats::dump(Formatter *f) const
+{
+ f->dump_unsigned("total_size", total_size);
+ f->dump_unsigned("total_size_rounded", total_size_rounded);
+ f->dump_unsigned("num_entries", num_entries);
+ f->dump_unsigned("actual_size", actual_size);
+}
+
+void rgw_bucket_dir_header::generate_test_instances(list<rgw_bucket_dir_header*>& o)
+{
+ list<rgw_bucket_category_stats *> l;
+ rgw_bucket_category_stats::generate_test_instances(l);
+
+ uint8_t i = 0;
+ for (auto iter = l.begin(); iter != l.end(); ++iter, ++i) {
+ RGWObjCategory c = static_cast<RGWObjCategory>(i);
+ rgw_bucket_dir_header *h = new rgw_bucket_dir_header;
+ rgw_bucket_category_stats *s = *iter;
+ h->stats[c] = *s;
+
+ o.push_back(h);
+
+ delete s;
+ }
+
+ o.push_back(new rgw_bucket_dir_header);
+}
+
+void rgw_bucket_dir_header::dump(Formatter *f) const
+{
+ f->dump_int("ver", ver);
+ f->dump_int("master_ver", master_ver);
+ f->open_array_section("stats");
+ for (auto iter = stats.begin(); iter != stats.end(); ++iter) {
+ f->dump_int("category", int(iter->first));
+ f->open_object_section("category_stats");
+ iter->second.dump(f);
+ f->close_section();
+ }
+ f->close_section();
+ ::encode_json("new_instance", new_instance, f);
+}
+
+void rgw_bucket_dir::generate_test_instances(list<rgw_bucket_dir*>& o)
+{
+ list<rgw_bucket_dir_header *> l;
+ rgw_bucket_dir_header::generate_test_instances(l);
+
+ uint8_t i = 0;
+ for (auto iter = l.begin(); iter != l.end(); ++iter, ++i) {
+ rgw_bucket_dir *d = new rgw_bucket_dir;
+ rgw_bucket_dir_header *h = *iter;
+ d->header = *h;
+
+    list<rgw_bucket_dir_entry *> el;
+    rgw_bucket_dir_entry::generate_test_instances(el);
+ for (auto eiter = el.begin(); eiter != el.end(); ++eiter) {
+ rgw_bucket_dir_entry *e = *eiter;
+ d->m[e->key.name] = *e;
+
+ delete e;
+ }
+
+ o.push_back(d);
+
+ delete h;
+ }
+
+ o.push_back(new rgw_bucket_dir);
+}
+
+void rgw_bucket_dir::dump(Formatter *f) const
+{
+ f->open_object_section("header");
+ header.dump(f);
+ f->close_section();
+ auto iter = m.cbegin();
+ f->open_array_section("map");
+ for (; iter != m.cend(); ++iter) {
+ f->dump_string("key", iter->first);
+ f->open_object_section("dir_entry");
+ iter->second.dump(f);
+ f->close_section();
+ }
+ f->close_section();
+}
+
+void rgw_usage_log_entry::dump(Formatter *f) const
+{
+ f->dump_string("owner", owner.to_str());
+ f->dump_string("payer", payer.to_str());
+ f->dump_string("bucket", bucket);
+ f->dump_unsigned("epoch", epoch);
+
+ f->open_object_section("total_usage");
+ f->dump_unsigned("bytes_sent", total_usage.bytes_sent);
+ f->dump_unsigned("bytes_received", total_usage.bytes_received);
+ f->dump_unsigned("ops", total_usage.ops);
+ f->dump_unsigned("successful_ops", total_usage.successful_ops);
+ f->close_section();
+
+ f->open_array_section("categories");
+ if (usage_map.size() > 0) {
+ for (auto it = usage_map.begin(); it != usage_map.end(); it++) {
+ const rgw_usage_data& total_usage = it->second;
+ f->open_object_section("entry");
+ f->dump_string("category", it->first.c_str());
+ f->dump_unsigned("bytes_sent", total_usage.bytes_sent);
+ f->dump_unsigned("bytes_received", total_usage.bytes_received);
+ f->dump_unsigned("ops", total_usage.ops);
+ f->dump_unsigned("successful_ops", total_usage.successful_ops);
+ f->close_section();
+ }
+ }
+ f->close_section();
+}
+
+void rgw_usage_log_entry::generate_test_instances(list<rgw_usage_log_entry *> &o)
+{
+ rgw_usage_log_entry *entry = new rgw_usage_log_entry;
+ rgw_usage_data usage_data{1024, 2048};
+ entry->owner = rgw_user("owner");
+ entry->payer = rgw_user("payer");
+ entry->bucket = "bucket";
+ entry->epoch = 1234;
+ entry->total_usage.bytes_sent = usage_data.bytes_sent;
+ entry->total_usage.bytes_received = usage_data.bytes_received;
+ entry->total_usage.ops = usage_data.ops;
+ entry->total_usage.successful_ops = usage_data.successful_ops;
+ entry->usage_map["get_obj"] = usage_data;
+ o.push_back(entry);
+ o.push_back(new rgw_usage_log_entry);
+}
+
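+// reshard queue entries are keyed by "<tenant>:<bucket_name>"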
+void cls_rgw_reshard_entry::generate_key(const string& tenant, const string& bucket_name, string *key)
+{
+ *key = tenant + ":" + bucket_name;
+}
+
+void cls_rgw_reshard_entry::get_key(string *key) const
+{
+ generate_key(tenant, bucket_name, key);
+}
+
+void cls_rgw_reshard_entry::dump(Formatter *f) const
+{
+ utime_t ut(time);
+  encode_json("time", ut, f);
+ encode_json("tenant", tenant, f);
+ encode_json("bucket_name", bucket_name, f);
+ encode_json("bucket_id", bucket_id, f);
+ encode_json("new_instance_id", new_instance_id, f);
+ encode_json("old_num_shards", old_num_shards, f);
+ encode_json("tentative_new_num_shards", new_num_shards, f);
+}
+
+void cls_rgw_reshard_entry::generate_test_instances(list<cls_rgw_reshard_entry*>& ls)
+{
+ ls.push_back(new cls_rgw_reshard_entry);
+ ls.push_back(new cls_rgw_reshard_entry);
+ ls.back()->time = ceph::real_clock::from_ceph_timespec({init_le32(2), init_le32(3)});
+ ls.back()->tenant = "tenant";
+  ls.back()->bucket_name = "bucket1";
+ ls.back()->bucket_id = "bucket_id";
+ ls.back()->new_instance_id = "new_instance_id";
+ ls.back()->old_num_shards = 8;
+ ls.back()->new_num_shards = 64;
+}
+
+void cls_rgw_bucket_instance_entry::dump(Formatter *f) const
+{
+ encode_json("reshard_status", to_string(reshard_status), f);
+ encode_json("new_bucket_instance_id", new_bucket_instance_id, f);
+ encode_json("num_shards", num_shards, f);
+
+}
+
+void cls_rgw_bucket_instance_entry::generate_test_instances(
+ list<cls_rgw_bucket_instance_entry*>& ls)
+{
+ ls.push_back(new cls_rgw_bucket_instance_entry);
+ ls.push_back(new cls_rgw_bucket_instance_entry);
+ ls.back()->reshard_status = RESHARD_STATUS::IN_PROGRESS;
+ ls.back()->new_bucket_instance_id = "new_instance_id";
+}
+
+void cls_rgw_lc_obj_head::dump(Formatter *f) const
+{
+ encode_json("start_date", start_date, f);
+ encode_json("marker", marker, f);
+}
+
+void cls_rgw_lc_obj_head::generate_test_instances(list<cls_rgw_lc_obj_head*>& ls)
+{
+}
+
+std::ostream& operator<<(std::ostream& out, cls_rgw_reshard_status status) {
+ switch (status) {
+ case cls_rgw_reshard_status::NOT_RESHARDING:
+ out << "NOT_RESHARDING";
+ break;
+ case cls_rgw_reshard_status::IN_PROGRESS:
+ out << "IN_PROGRESS";
+ break;
+ case cls_rgw_reshard_status::DONE:
+ out << "DONE";
+ break;
+ default:
+ out << "UNKNOWN_STATUS";
+ }
+
+ return out;
+}
diff --git a/src/cls/rgw/cls_rgw_types.h b/src/cls/rgw/cls_rgw_types.h
new file mode 100644
index 000000000..5b0155584
--- /dev/null
+++ b/src/cls/rgw/cls_rgw_types.h
@@ -0,0 +1,1347 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#include <boost/container/flat_map.hpp>
+#include "common/ceph_time.h"
+#include "common/Formatter.h"
+
+#undef FMT_HEADER_ONLY
+#define FMT_HEADER_ONLY 1
+#include <fmt/format.h>
+
+#include "rgw/rgw_basic_types.h"
+
+#define CEPH_RGW_REMOVE 'r'
+#define CEPH_RGW_UPDATE 'u'
+#define CEPH_RGW_TAG_TIMEOUT 120
+#define CEPH_RGW_DIR_SUGGEST_LOG_OP 0x80
+#define CEPH_RGW_DIR_SUGGEST_OP_MASK 0x7f
+
+class JSONObj;
+
+using ceph::operator <<;
+
+struct rgw_zone_set_entry {
+ std::string zone;
+ std::optional<std::string> location_key;
+
+ bool operator<(const rgw_zone_set_entry& e) const {
+ if (zone < e.zone) {
+ return true;
+ }
+ if (zone > e.zone) {
+ return false;
+ }
+ return (location_key < e.location_key);
+ }
+
+ rgw_zone_set_entry() {}
+ rgw_zone_set_entry(const std::string& _zone,
+ std::optional<std::string> _location_key) : zone(_zone),
+ location_key(_location_key) {}
+ rgw_zone_set_entry(const std::string& s) {
+ from_str(s);
+ }
+
+ void from_str(const std::string& s);
+ std::string to_str() const;
+
+ void encode(ceph::buffer::list &bl) const;
+ void decode(ceph::buffer::list::const_iterator &bl);
+
+ void dump(ceph::Formatter *f) const;
+ void decode_json(JSONObj *obj);
+};
+WRITE_CLASS_ENCODER(rgw_zone_set_entry)
+
+struct rgw_zone_set {
+ std::set<rgw_zone_set_entry> entries;
+
+ void encode(ceph::buffer::list &bl) const {
+ /* no ENCODE_START, ENCODE_END for backward compatibility */
+ ceph::encode(entries, bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ /* no DECODE_START, DECODE_END for backward compatibility */
+ ceph::decode(entries, bl);
+ }
+
+ void insert(const std::string& zone, std::optional<std::string> location_key);
+ bool exists(const std::string& zone, std::optional<std::string> location_key) const;
+};
+WRITE_CLASS_ENCODER(rgw_zone_set)
+
+/* backward compatibility, rgw_zone_set needs to encode/decode the same as std::set */
+void encode_json(const char *name, const rgw_zone_set& zs, ceph::Formatter *f);
+void decode_json_obj(rgw_zone_set& zs, JSONObj *obj);
+
+
+enum RGWPendingState {
+ CLS_RGW_STATE_PENDING_MODIFY = 0,
+ CLS_RGW_STATE_COMPLETE = 1,
+ CLS_RGW_STATE_UNKNOWN = 2,
+};
+
+enum RGWModifyOp {
+ CLS_RGW_OP_ADD = 0,
+ CLS_RGW_OP_DEL = 1,
+ CLS_RGW_OP_CANCEL = 2,
+ CLS_RGW_OP_UNKNOWN = 3,
+ CLS_RGW_OP_LINK_OLH = 4,
+ CLS_RGW_OP_LINK_OLH_DM = 5, /* creation of delete marker */
+ CLS_RGW_OP_UNLINK_INSTANCE = 6,
+ CLS_RGW_OP_SYNCSTOP = 7,
+ CLS_RGW_OP_RESYNC = 8,
+};
+
+enum RGWBILogFlags {
+ RGW_BILOG_FLAG_VERSIONED_OP = 0x1,
+};
+
+enum RGWCheckMTimeType {
+ CLS_RGW_CHECK_TIME_MTIME_EQ = 0,
+ CLS_RGW_CHECK_TIME_MTIME_LT = 1,
+ CLS_RGW_CHECK_TIME_MTIME_LE = 2,
+ CLS_RGW_CHECK_TIME_MTIME_GT = 3,
+ CLS_RGW_CHECK_TIME_MTIME_GE = 4,
+};
+
+#define ROUND_BLOCK_SIZE 4096
+
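+// round a byte count up to the 4 KiB accounting block used for the
+// total_size_rounded bucket statistics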
+inline uint64_t cls_rgw_get_rounded_size(uint64_t size) {
+ return (size + ROUND_BLOCK_SIZE - 1) & ~(ROUND_BLOCK_SIZE - 1);
+}
+
+/*
+ * This takes a std::string that either wholly contains a delimiter or is a
+ * path that ends with a delimiter and appends a new character to the
+ * end such that when we request bucket-index entries *after* this,
+ * we'll get the next object after the "subdirectory". This works
+ * because we append a '\xFF' character, and no valid UTF-8 character
+ * can contain that byte, so no valid entries can be skipped.
+ */
+inline std::string cls_rgw_after_delim(const std::string& path) {
+ // assert: ! path.empty()
+ return path + '\xFF';
+}
+
+struct rgw_bucket_pending_info {
+ RGWPendingState state;
+ ceph::real_time timestamp;
+ uint8_t op;
+
+ rgw_bucket_pending_info() : state(CLS_RGW_STATE_PENDING_MODIFY), op(0) {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(2, 2, bl);
+ uint8_t s = (uint8_t)state;
+ encode(s, bl);
+ encode(timestamp, bl);
+ encode(op, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
+ uint8_t s;
+ decode(s, bl);
+ state = (RGWPendingState)s;
+ decode(timestamp, bl);
+ decode(op, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ void decode_json(JSONObj *obj);
+ static void generate_test_instances(std::list<rgw_bucket_pending_info*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_pending_info)
+
+
+// categories of objects stored in a bucket index (b-i) and used to
+// differentiate their associated statistics (bucket stats, and in
+// some cases user stats)
+enum class RGWObjCategory : uint8_t {
+ None = 0, // b-i entries for delete markers; also used in
+ // testing and for default values in default
+ // constructors
+
+ Main = 1, // b-i entries for standard objs
+
+  Shadow = 2, // presumably intended for multipart shadow
+ // uploads; not currently used in the codebase
+
+ MultiMeta = 3, // b-i entries for multipart upload metadata objs
+};
+
+
+struct rgw_bucket_dir_entry_meta {
+ RGWObjCategory category;
+ uint64_t size;
+ ceph::real_time mtime;
+ std::string etag;
+ std::string owner;
+ std::string owner_display_name;
+ std::string content_type;
+ uint64_t accounted_size;
+ std::string user_data;
+ std::string storage_class;
+ bool appendable;
+
+ rgw_bucket_dir_entry_meta() :
+ category(RGWObjCategory::None), size(0), accounted_size(0), appendable(false) { }
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(7, 3, bl);
+ encode(category, bl);
+ encode(size, bl);
+ encode(mtime, bl);
+ encode(etag, bl);
+ encode(owner, bl);
+ encode(owner_display_name, bl);
+ encode(content_type, bl);
+ encode(accounted_size, bl);
+ encode(user_data, bl);
+ encode(storage_class, bl);
+ encode(appendable, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(6, 3, 3, bl);
+ decode(category, bl);
+ decode(size, bl);
+ decode(mtime, bl);
+ decode(etag, bl);
+ decode(owner, bl);
+ decode(owner_display_name, bl);
+ if (struct_v >= 2)
+ decode(content_type, bl);
+ if (struct_v >= 4)
+ decode(accounted_size, bl);
+ else
+ accounted_size = size;
+ if (struct_v >= 5)
+ decode(user_data, bl);
+ if (struct_v >= 6)
+ decode(storage_class, bl);
+ if (struct_v >= 7)
+ decode(appendable, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ void decode_json(JSONObj *obj);
+ static void generate_test_instances(std::list<rgw_bucket_dir_entry_meta*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_dir_entry_meta)
+
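+// variable-width integer encoding used by several bucket-index structures:
+// values below 0x80 are written as a single byte; otherwise a one-byte
+// header has the top bit set and its low bits give the payload width in
+// bytes (1, 2, 4 or 8), followed by the value in the usual little-endian
+// encoding. decode_packed_val() reverses the scheme.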
+template<class T>
+void encode_packed_val(T val, ceph::buffer::list& bl)
+{
+ using ceph::encode;
+ if ((uint64_t)val < 0x80) {
+ encode((uint8_t)val, bl);
+ } else {
+ unsigned char c = 0x80;
+
+ if ((uint64_t)val < 0x100) {
+ c |= 1;
+ encode(c, bl);
+ encode((uint8_t)val, bl);
+ } else if ((uint64_t)val <= 0x10000) {
+ c |= 2;
+ encode(c, bl);
+ encode((uint16_t)val, bl);
+ } else if ((uint64_t)val <= 0x1000000) {
+ c |= 4;
+ encode(c, bl);
+ encode((uint32_t)val, bl);
+ } else {
+ c |= 8;
+ encode(c, bl);
+ encode((uint64_t)val, bl);
+ }
+ }
+}
+
+template<class T>
+void decode_packed_val(T& val, ceph::buffer::list::const_iterator& bl)
+{
+ using ceph::decode;
+ unsigned char c;
+ decode(c, bl);
+ if (c < 0x80) {
+ val = c;
+ return;
+ }
+
+ c &= ~0x80;
+
+ switch (c) {
+ case 1:
+ {
+ uint8_t v;
+ decode(v, bl);
+ val = v;
+ }
+ break;
+ case 2:
+ {
+ uint16_t v;
+ decode(v, bl);
+ val = v;
+ }
+ break;
+ case 4:
+ {
+ uint32_t v;
+ decode(v, bl);
+ val = v;
+ }
+ break;
+ case 8:
+ {
+ uint64_t v;
+ decode(v, bl);
+ val = v;
+ }
+ break;
+ default:
+ throw ceph::buffer::malformed_input();
+ }
+}
+
+struct rgw_bucket_entry_ver {
+ int64_t pool;
+ uint64_t epoch;
+
+ rgw_bucket_entry_ver() : pool(-1), epoch(0) {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode_packed_val(pool, bl);
+ encode_packed_val(epoch, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode_packed_val(pool, bl);
+ decode_packed_val(epoch, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ void decode_json(JSONObj *obj);
+ static void generate_test_instances(std::list<rgw_bucket_entry_ver*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_entry_ver)
+
+
+struct cls_rgw_obj_key {
+ std::string name;
+ std::string instance;
+
+ cls_rgw_obj_key() {}
+ cls_rgw_obj_key(const std::string &_name) : name(_name) {}
+ cls_rgw_obj_key(const std::string& n, const std::string& i) : name(n), instance(i) {}
+
+ std::string to_string() const {
+ return fmt::format("{}({})", name, instance);
+ }
+
+ bool empty() const {
+ return name.empty();
+ }
+
+ void set(const std::string& _name) {
+ name = _name;
+ instance.clear();
+ }
+
+ bool operator==(const cls_rgw_obj_key& k) const {
+ return (name.compare(k.name) == 0) &&
+ (instance.compare(k.instance) == 0);
+ }
+
+ bool operator<(const cls_rgw_obj_key& k) const {
+ int r = name.compare(k.name);
+ if (r == 0) {
+ r = instance.compare(k.instance);
+ }
+ return (r < 0);
+ }
+
+ bool operator<=(const cls_rgw_obj_key& k) const {
+ return !(k < *this);
+ }
+
+ std::ostream& operator<<(std::ostream& out) const {
+ out << to_string();
+ return out;
+ }
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(name, bl);
+ encode(instance, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(name, bl);
+ decode(instance, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const {
+ f->dump_string("name", name);
+ f->dump_string("instance", instance);
+ }
+ void decode_json(JSONObj *obj);
+ static void generate_test_instances(std::list<cls_rgw_obj_key*>& ls) {
+ ls.push_back(new cls_rgw_obj_key);
+ ls.push_back(new cls_rgw_obj_key);
+ ls.back()->name = "name";
+ ls.back()->instance = "instance";
+ }
+
+ size_t estimate_encoded_size() const {
+ constexpr size_t start_overhead = sizeof(__u8) + sizeof(__u8) + sizeof(ceph_le32); // version and length prefix
+ constexpr size_t string_overhead = sizeof(__u32); // strings are encoded with 32-bit length prefix
+ return start_overhead +
+ string_overhead + name.size() +
+ string_overhead + instance.size();
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_obj_key)
+
+
+struct rgw_bucket_dir_entry {
+ /* a versioned object instance */
+ static constexpr uint16_t FLAG_VER = 0x1;
+ /* the last object instance of a versioned object */
+ static constexpr uint16_t FLAG_CURRENT = 0x2;
+ /* delete marker */
+ static constexpr uint16_t FLAG_DELETE_MARKER = 0x4;
+ /* object is versioned, a placeholder for the plain entry */
+ static constexpr uint16_t FLAG_VER_MARKER = 0x8;
+ /* object is a proxy; it is not listed in the bucket index but is a
+ * prefix ending with a delimiter, perhaps common to multiple
+ * entries; it is only useful when a delimiter is used and
+ * represents a "subdirectory" (again, ending in a delimiter) that
+ * may contain one or more actual entries/objects */
+ static constexpr uint16_t FLAG_COMMON_PREFIX = 0x8000;
+
+ cls_rgw_obj_key key;
+ rgw_bucket_entry_ver ver;
+ std::string locator;
+ bool exists;
+ rgw_bucket_dir_entry_meta meta;
+ std::multimap<std::string, rgw_bucket_pending_info> pending_map;
+ uint64_t index_ver;
+ std::string tag;
+ uint16_t flags;
+ uint64_t versioned_epoch;
+
+ rgw_bucket_dir_entry() :
+ exists(false), index_ver(0), flags(0), versioned_epoch(0) {}
+
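+  // ver.epoch is encoded both in its original front position and again as
+  // part of the full 'ver' field, so decoders of older struct versions
+  // still read a valid epoch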
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(8, 3, bl);
+ encode(key.name, bl);
+ encode(ver.epoch, bl);
+ encode(exists, bl);
+ encode(meta, bl);
+ encode(pending_map, bl);
+ encode(locator, bl);
+ encode(ver, bl);
+ encode_packed_val(index_ver, bl);
+ encode(tag, bl);
+ encode(key.instance, bl);
+ encode(flags, bl);
+ encode(versioned_epoch, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(8, 3, 3, bl);
+ decode(key.name, bl);
+ decode(ver.epoch, bl);
+ decode(exists, bl);
+ decode(meta, bl);
+ decode(pending_map, bl);
+ if (struct_v >= 2) {
+ decode(locator, bl);
+ }
+ if (struct_v >= 4) {
+ decode(ver, bl);
+ } else {
+ ver.pool = -1;
+ }
+ if (struct_v >= 5) {
+ decode_packed_val(index_ver, bl);
+ decode(tag, bl);
+ }
+ if (struct_v >= 6) {
+ decode(key.instance, bl);
+ }
+ if (struct_v >= 7) {
+ decode(flags, bl);
+ }
+ if (struct_v >= 8) {
+ decode(versioned_epoch, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
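+  // a non-versioned entry is always current; a versioned instance is
+  // current only when FLAG_CURRENT is also set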
+ bool is_current() const {
+ int test_flags =
+ rgw_bucket_dir_entry::FLAG_VER | rgw_bucket_dir_entry::FLAG_CURRENT;
+ return (flags & rgw_bucket_dir_entry::FLAG_VER) == 0 ||
+ (flags & test_flags) == test_flags;
+ }
+ bool is_delete_marker() const {
+ return (flags & rgw_bucket_dir_entry::FLAG_DELETE_MARKER) != 0;
+ }
+ bool is_visible() const {
+ return is_current() && !is_delete_marker();
+ }
+ bool is_valid() const {
+ return (flags & rgw_bucket_dir_entry::FLAG_VER_MARKER) == 0;
+ }
+ bool is_common_prefix() const {
+ return flags & rgw_bucket_dir_entry::FLAG_COMMON_PREFIX;
+ }
+
+ void dump(ceph::Formatter *f) const;
+ void decode_json(JSONObj *obj);
+ static void generate_test_instances(std::list<rgw_bucket_dir_entry*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_dir_entry)
+
+enum class BIIndexType : uint8_t {
+ Invalid = 0,
+ Plain = 1,
+ Instance = 2,
+ OLH = 3,
+};
+
+struct rgw_bucket_category_stats;
+
+struct rgw_cls_bi_entry {
+ BIIndexType type;
+ std::string idx;
+ ceph::buffer::list data;
+
+ rgw_cls_bi_entry() : type(BIIndexType::Invalid) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(type, bl);
+ encode(idx, bl);
+ encode(data, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ uint8_t c;
+ decode(c, bl);
+ type = (BIIndexType)c;
+ decode(idx, bl);
+ decode(data, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ void decode_json(JSONObj *obj, cls_rgw_obj_key *effective_key = NULL);
+
+ bool get_info(cls_rgw_obj_key *key, RGWObjCategory *category,
+ rgw_bucket_category_stats *accounted_stats);
+};
+WRITE_CLASS_ENCODER(rgw_cls_bi_entry)
+
+enum OLHLogOp {
+ CLS_RGW_OLH_OP_UNKNOWN = 0,
+ CLS_RGW_OLH_OP_LINK_OLH = 1,
+ CLS_RGW_OLH_OP_UNLINK_OLH = 2, /* object does not exist */
+ CLS_RGW_OLH_OP_REMOVE_INSTANCE = 3,
+};
+
+struct rgw_bucket_olh_log_entry {
+ uint64_t epoch;
+ OLHLogOp op;
+ std::string op_tag;
+ cls_rgw_obj_key key;
+ bool delete_marker;
+
+ rgw_bucket_olh_log_entry() : epoch(0), op(CLS_RGW_OLH_OP_UNKNOWN), delete_marker(false) {}
+
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(epoch, bl);
+ encode((__u8)op, bl);
+ encode(op_tag, bl);
+ encode(key, bl);
+ encode(delete_marker, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(epoch, bl);
+ uint8_t c;
+ decode(c, bl);
+ op = (OLHLogOp)c;
+ decode(op_tag, bl);
+ decode(key, bl);
+ decode(delete_marker, bl);
+ DECODE_FINISH(bl);
+ }
+ static void generate_test_instances(std::list<rgw_bucket_olh_log_entry*>& o);
+ void dump(ceph::Formatter *f) const;
+ void decode_json(JSONObj *obj);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_olh_log_entry)
+
+struct rgw_bucket_olh_entry {
+ cls_rgw_obj_key key;
+ bool delete_marker;
+ uint64_t epoch;
+ std::map<uint64_t, std::vector<struct rgw_bucket_olh_log_entry> > pending_log;
+ std::string tag;
+ bool exists;
+ bool pending_removal;
+
+ rgw_bucket_olh_entry() : delete_marker(false), epoch(0), exists(false), pending_removal(false) {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(key, bl);
+ encode(delete_marker, bl);
+ encode(epoch, bl);
+ encode(pending_log, bl);
+ encode(tag, bl);
+ encode(exists, bl);
+ encode(pending_removal, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START(1, bl);
+ decode(key, bl);
+ decode(delete_marker, bl);
+ decode(epoch, bl);
+ decode(pending_log, bl);
+ decode(tag, bl);
+ decode(exists, bl);
+ decode(pending_removal, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ void decode_json(JSONObj *obj);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_olh_entry)
+
+struct rgw_bi_log_entry {
+ std::string id;
+ std::string object;
+ std::string instance;
+ ceph::real_time timestamp;
+ rgw_bucket_entry_ver ver;
+ RGWModifyOp op;
+ RGWPendingState state;
+ uint64_t index_ver;
+ std::string tag;
+ uint16_t bilog_flags;
+ std::string owner; /* only being set if it's a delete marker */
+ std::string owner_display_name; /* only being set if it's a delete marker */
+ rgw_zone_set zones_trace;
+
+ rgw_bi_log_entry() : op(CLS_RGW_OP_UNKNOWN), state(CLS_RGW_STATE_PENDING_MODIFY), index_ver(0), bilog_flags(0) {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(4, 1, bl);
+ encode(id, bl);
+ encode(object, bl);
+ encode(timestamp, bl);
+ encode(ver, bl);
+ encode(tag, bl);
+ uint8_t c = (uint8_t)op;
+ encode(c, bl);
+ c = (uint8_t)state;
+ encode(c, bl);
+ encode_packed_val(index_ver, bl);
+ encode(instance, bl);
+ encode(bilog_flags, bl);
+ encode(owner, bl);
+ encode(owner_display_name, bl);
+ encode(zones_trace, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START(4, bl);
+ decode(id, bl);
+ decode(object, bl);
+ decode(timestamp, bl);
+ decode(ver, bl);
+ decode(tag, bl);
+ uint8_t c;
+ decode(c, bl);
+ op = (RGWModifyOp)c;
+ decode(c, bl);
+ state = (RGWPendingState)c;
+ decode_packed_val(index_ver, bl);
+ if (struct_v >= 2) {
+ decode(instance, bl);
+ decode(bilog_flags, bl);
+ }
+ if (struct_v >= 3) {
+ decode(owner, bl);
+ decode(owner_display_name, bl);
+ }
+ if (struct_v >= 4) {
+ decode(zones_trace, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ void decode_json(JSONObj *obj);
+ static void generate_test_instances(std::list<rgw_bi_log_entry*>& o);
+
+ bool is_versioned() {
+ return ((bilog_flags & RGW_BILOG_FLAG_VERSIONED_OP) != 0);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_bi_log_entry)
+
+struct rgw_bucket_category_stats {
+ uint64_t total_size;
+ uint64_t total_size_rounded;
+ uint64_t num_entries;
+ uint64_t actual_size{0}; //< account for compression, encryption
+
+ rgw_bucket_category_stats() : total_size(0), total_size_rounded(0), num_entries(0) {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(3, 2, bl);
+ encode(total_size, bl);
+ encode(total_size_rounded, bl);
+ encode(num_entries, bl);
+ encode(actual_size, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(3, 2, 2, bl);
+ decode(total_size, bl);
+ decode(total_size_rounded, bl);
+ decode(num_entries, bl);
+ if (struct_v >= 3) {
+ decode(actual_size, bl);
+ } else {
+ actual_size = total_size;
+ }
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<rgw_bucket_category_stats*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_category_stats)
+
+enum class cls_rgw_reshard_status : uint8_t {
+ NOT_RESHARDING = 0,
+ IN_PROGRESS = 1,
+ DONE = 2
+};
+std::ostream& operator<<(std::ostream&, cls_rgw_reshard_status);
+
+inline std::string to_string(const cls_rgw_reshard_status status)
+{
+ switch (status) {
+ case cls_rgw_reshard_status::NOT_RESHARDING:
+ return "not-resharding";
+ case cls_rgw_reshard_status::IN_PROGRESS:
+ return "in-progress";
+ case cls_rgw_reshard_status::DONE:
+ return "done";
+ };
+ return "Unknown reshard status";
+}
+
+struct cls_rgw_bucket_instance_entry {
+ using RESHARD_STATUS = cls_rgw_reshard_status;
+
+ cls_rgw_reshard_status reshard_status{RESHARD_STATUS::NOT_RESHARDING};
+ std::string new_bucket_instance_id;
+ int32_t num_shards{-1};
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode((uint8_t)reshard_status, bl);
+ encode(new_bucket_instance_id, bl);
+ encode(num_shards, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ uint8_t s;
+ decode(s, bl);
+ reshard_status = (cls_rgw_reshard_status)s;
+ decode(new_bucket_instance_id, bl);
+ decode(num_shards, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<cls_rgw_bucket_instance_entry*>& o);
+
+ void clear() {
+ reshard_status = RESHARD_STATUS::NOT_RESHARDING;
+ new_bucket_instance_id.clear();
+ }
+
+ void set_status(const std::string& new_instance_id,
+ int32_t new_num_shards,
+ cls_rgw_reshard_status s) {
+ reshard_status = s;
+ new_bucket_instance_id = new_instance_id;
+ num_shards = new_num_shards;
+ }
+
+ bool resharding() const {
+ return reshard_status != RESHARD_STATUS::NOT_RESHARDING;
+ }
+
+ bool resharding_in_progress() const {
+ return reshard_status == RESHARD_STATUS::IN_PROGRESS;
+ }
+
+ friend std::ostream& operator<<(std::ostream& out, const cls_rgw_bucket_instance_entry& v) {
+ out << "cls_rgw_bucket_instance_entry:{ " << v.reshard_status <<
+ ", \"" << v.new_bucket_instance_id << "\", " << v.num_shards << " }";
+ return out;
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_bucket_instance_entry)
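+
+// Illustrative sketch (not part of the upstream header): how reshard state on
+// the bucket index header might be recorded and later cleared. The instance
+// id string and shard count below are made up.
+//
+//   cls_rgw_bucket_instance_entry e;
+//   e.set_status("new-instance-id", 17, cls_rgw_reshard_status::IN_PROGRESS);
+//   // e.resharding() and e.resharding_in_progress() are now both true
+//   e.clear();     // back to NOT_RESHARDING; note num_shards is not reset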
+
+struct rgw_bucket_dir_header {
+ std::map<RGWObjCategory, rgw_bucket_category_stats> stats;
+ uint64_t tag_timeout;
+ uint64_t ver;
+ uint64_t master_ver;
+ std::string max_marker;
+ cls_rgw_bucket_instance_entry new_instance;
+ bool syncstopped;
+
+ rgw_bucket_dir_header() : tag_timeout(0), ver(0), master_ver(0), syncstopped(false) {}
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(7, 2, bl);
+ encode(stats, bl);
+ encode(tag_timeout, bl);
+ encode(ver, bl);
+ encode(master_ver, bl);
+ encode(max_marker, bl);
+ encode(new_instance, bl);
+ encode(syncstopped, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(6, 2, 2, bl);
+ decode(stats, bl);
+ if (struct_v > 2) {
+ decode(tag_timeout, bl);
+ } else {
+ tag_timeout = 0;
+ }
+ if (struct_v >= 4) {
+ decode(ver, bl);
+ decode(master_ver, bl);
+ } else {
+ ver = 0;
+ }
+ if (struct_v >= 5) {
+ decode(max_marker, bl);
+ }
+ if (struct_v >= 6) {
+ decode(new_instance, bl);
+ } else {
+ new_instance = cls_rgw_bucket_instance_entry();
+ }
+ if (struct_v >= 7) {
+ decode(syncstopped, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<rgw_bucket_dir_header*>& o);
+
+ bool resharding() const {
+ return new_instance.resharding();
+ }
+ bool resharding_in_progress() const {
+ return new_instance.resharding_in_progress();
+ }
+};
+WRITE_CLASS_ENCODER(rgw_bucket_dir_header)
+
+struct rgw_bucket_dir {
+ rgw_bucket_dir_header header;
+ boost::container::flat_map<std::string, rgw_bucket_dir_entry> m;
+
+ void encode(ceph::buffer::list &bl) const {
+ ENCODE_START(2, 2, bl);
+ encode(header, bl);
+ encode(m, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
+ decode(header, bl);
+ decode(m, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<rgw_bucket_dir*>& o);
+};
+WRITE_CLASS_ENCODER(rgw_bucket_dir)
+
+struct rgw_usage_data {
+ uint64_t bytes_sent;
+ uint64_t bytes_received;
+ uint64_t ops;
+ uint64_t successful_ops;
+
+ rgw_usage_data() : bytes_sent(0), bytes_received(0), ops(0), successful_ops(0) {}
+ rgw_usage_data(uint64_t sent, uint64_t received) : bytes_sent(sent), bytes_received(received), ops(0), successful_ops(0) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(bytes_sent, bl);
+ encode(bytes_received, bl);
+ encode(ops, bl);
+ encode(successful_ops, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(bytes_sent, bl);
+ decode(bytes_received, bl);
+ decode(ops, bl);
+ decode(successful_ops, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void aggregate(const rgw_usage_data& usage) {
+ bytes_sent += usage.bytes_sent;
+ bytes_received += usage.bytes_received;
+ ops += usage.ops;
+ successful_ops += usage.successful_ops;
+ }
+};
+WRITE_CLASS_ENCODER(rgw_usage_data)
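+
+// Illustrative sketch (not part of the upstream header): aggregate() adds the
+// counters of another sample into this one, so per-request samples can be
+// folded into a running total. Values are examples only.
+//
+//   rgw_usage_data total;
+//   rgw_usage_data sample(4096 /* sent */, 512 /* received */);
+//   sample.ops = 1;
+//   sample.successful_ops = 1;
+//   total.aggregate(sample);         // total now reflects the sample's counters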
+
+
+struct rgw_usage_log_entry {
+ rgw_user owner;
+ rgw_user payer; /* if empty, same as owner */
+ std::string bucket;
+ uint64_t epoch;
+ rgw_usage_data total_usage; /* this one is kept for backwards compatibility */
+ std::map<std::string, rgw_usage_data> usage_map;
+
+ rgw_usage_log_entry() : epoch(0) {}
+ rgw_usage_log_entry(std::string& o, std::string& b) : owner(o), bucket(b), epoch(0) {}
+ rgw_usage_log_entry(std::string& o, std::string& p, std::string& b) : owner(o), payer(p), bucket(b), epoch(0) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(3, 1, bl);
+ encode(owner.to_str(), bl);
+ encode(bucket, bl);
+ encode(epoch, bl);
+ encode(total_usage.bytes_sent, bl);
+ encode(total_usage.bytes_received, bl);
+ encode(total_usage.ops, bl);
+ encode(total_usage.successful_ops, bl);
+ encode(usage_map, bl);
+ encode(payer.to_str(), bl);
+ ENCODE_FINISH(bl);
+ }
+
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(3, bl);
+ std::string s;
+ decode(s, bl);
+ owner.from_str(s);
+ decode(bucket, bl);
+ decode(epoch, bl);
+ decode(total_usage.bytes_sent, bl);
+ decode(total_usage.bytes_received, bl);
+ decode(total_usage.ops, bl);
+ decode(total_usage.successful_ops, bl);
+ if (struct_v < 2) {
+ usage_map[""] = total_usage;
+ } else {
+ decode(usage_map, bl);
+ }
+ if (struct_v >= 3) {
+ std::string p;
+ decode(p, bl);
+ payer.from_str(p);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ void aggregate(const rgw_usage_log_entry& e,
+ std::map<std::string, bool> *categories = NULL) {
+ if (owner.empty()) {
+ owner = e.owner;
+ bucket = e.bucket;
+ epoch = e.epoch;
+ payer = e.payer;
+ }
+
+ for (auto iter = e.usage_map.begin(); iter != e.usage_map.end(); ++iter) {
+ if (!categories || !categories->size() || categories->count(iter->first)) {
+ add(iter->first, iter->second);
+ }
+ }
+ }
+
+ void sum(rgw_usage_data& usage,
+ std::map<std::string, bool>& categories) const {
+ usage = rgw_usage_data();
+ for (auto iter = usage_map.begin(); iter != usage_map.end(); ++iter) {
+ if (!categories.size() || categories.count(iter->first)) {
+ usage.aggregate(iter->second);
+ }
+ }
+ }
+
+ void add(const std::string& category, const rgw_usage_data& data) {
+ usage_map[category].aggregate(data);
+ total_usage.aggregate(data);
+ }
+
+ void dump(ceph::Formatter* f) const;
+ static void generate_test_instances(std::list<rgw_usage_log_entry*>& o);
+
+};
+WRITE_CLASS_ENCODER(rgw_usage_log_entry)
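+
+// Illustrative sketch (not part of the upstream header): aggregate() merges
+// another entry's per-category usage into this one, optionally filtered by a
+// category set, and sum() collapses the per-category map back into a single
+// rgw_usage_data. entry_a/entry_b and the category name are hypothetical.
+//
+//   rgw_usage_log_entry merged;
+//   merged.aggregate(entry_a);       // owner/bucket/epoch taken from the first entry
+//   merged.aggregate(entry_b);
+//   std::map<std::string, bool> cats{{"get_obj", true}};
+//   rgw_usage_data get_only;
+//   merged.sum(get_only, cats);      // only the "get_obj" category is summed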
+
+struct rgw_usage_log_info {
+ std::vector<rgw_usage_log_entry> entries;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ DECODE_FINISH(bl);
+ }
+
+ rgw_usage_log_info() {}
+};
+WRITE_CLASS_ENCODER(rgw_usage_log_info)
+
+struct rgw_user_bucket {
+ std::string user;
+ std::string bucket;
+
+ rgw_user_bucket() {}
+ rgw_user_bucket(const std::string& u, const std::string& b) : user(u), bucket(b) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(user, bl);
+ encode(bucket, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(user, bl);
+ decode(bucket, bl);
+ DECODE_FINISH(bl);
+ }
+
+ bool operator<(const rgw_user_bucket& ub2) const {
+ int comp = user.compare(ub2.user);
+ if (comp < 0)
+ return true;
+ else if (!comp)
+ return bucket.compare(ub2.bucket) < 0;
+
+ return false;
+ }
+};
+WRITE_CLASS_ENCODER(rgw_user_bucket)
+
+enum cls_rgw_gc_op {
+ CLS_RGW_GC_DEL_OBJ,
+ CLS_RGW_GC_DEL_BUCKET,
+};
+
+struct cls_rgw_obj {
+ std::string pool;
+ cls_rgw_obj_key key;
+ std::string loc;
+
+ cls_rgw_obj() {}
+ cls_rgw_obj(std::string& _p, cls_rgw_obj_key& _k) : pool(_p), key(_k) {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(pool, bl);
+ encode(key.name, bl);
+ encode(loc, bl);
+ encode(key, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(pool, bl);
+ decode(key.name, bl);
+ decode(loc, bl);
+ if (struct_v >= 2) {
+ decode(key, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const {
+ f->dump_string("pool", pool);
+ f->dump_string("oid", key.name);
+ f->dump_string("key", loc);
+ f->dump_string("instance", key.instance);
+ }
+ static void generate_test_instances(std::list<cls_rgw_obj*>& ls) {
+ ls.push_back(new cls_rgw_obj);
+ ls.push_back(new cls_rgw_obj);
+ ls.back()->pool = "mypool";
+ ls.back()->key.name = "myoid";
+ ls.back()->loc = "mykey";
+ }
+
+ size_t estimate_encoded_size() const {
+ constexpr size_t start_overhead = sizeof(__u8) + sizeof(__u8) + sizeof(ceph_le32); // version and length prefix
+ constexpr size_t string_overhead = sizeof(__u32); // strings are encoded with 32-bit length prefix
+ return start_overhead +
+ string_overhead + pool.size() +
+ string_overhead + key.name.size() +
+ string_overhead + loc.size() +
+ key.estimate_encoded_size();
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_obj)
+
+struct cls_rgw_obj_chain {
+ std::list<cls_rgw_obj> objs;
+
+ cls_rgw_obj_chain() {}
+
+ void push_obj(const std::string& pool, const cls_rgw_obj_key& key, const std::string& loc) {
+ cls_rgw_obj obj;
+ obj.pool = pool;
+ obj.key = key;
+ obj.loc = loc;
+ objs.push_back(obj);
+ }
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(objs, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(objs, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const {
+ f->open_array_section("objs");
+ for (std::list<cls_rgw_obj>::const_iterator p = objs.begin(); p != objs.end(); ++p) {
+ f->open_object_section("obj");
+ p->dump(f);
+ f->close_section();
+ }
+ f->close_section();
+ }
+ static void generate_test_instances(std::list<cls_rgw_obj_chain*>& ls) {
+ ls.push_back(new cls_rgw_obj_chain);
+ }
+
+ bool empty() {
+ return objs.empty();
+ }
+
+ size_t estimate_encoded_size() const {
+ constexpr size_t start_overhead = sizeof(__u8) + sizeof(__u8) + sizeof(ceph_le32);
+ constexpr size_t size_overhead = sizeof(__u32); // size of the chain
+ size_t chain_overhead = 0;
+ for (auto& it : objs) {
+ chain_overhead += it.estimate_encoded_size();
+ }
+ return (start_overhead + size_overhead + chain_overhead);
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_obj_chain)
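+
+// Illustrative sketch (not part of the upstream header): a GC chain is built
+// with push_obj(), and estimate_encoded_size() gives an approximate on-wire
+// size (fixed header plus per-object string overheads) that callers can use
+// to budget against per-entry size limits. Pool/object names are made up.
+//
+//   cls_rgw_obj_chain chain;
+//   chain.push_obj("default.rgw.buckets.data",
+//                  cls_rgw_obj_key("obj.multipart.part.1"),
+//                  "loc_key");
+//   size_t approx = chain.estimate_encoded_size();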
+
+struct cls_rgw_gc_obj_info
+{
+ std::string tag;
+ cls_rgw_obj_chain chain;
+ ceph::real_time time;
+
+ cls_rgw_gc_obj_info() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(tag, bl);
+ encode(chain, bl);
+ encode(time, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(tag, bl);
+ decode(chain, bl);
+ decode(time, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const {
+ f->dump_string("tag", tag);
+ f->open_object_section("chain");
+ chain.dump(f);
+ f->close_section();
+ f->dump_stream("time") << time;
+ }
+ static void generate_test_instances(std::list<cls_rgw_gc_obj_info*>& ls) {
+ ls.push_back(new cls_rgw_gc_obj_info);
+ ls.push_back(new cls_rgw_gc_obj_info);
+ ls.back()->tag = "footag";
+ ceph_timespec ts{init_le32(21), init_le32(32)};
+ ls.back()->time = ceph::real_clock::from_ceph_timespec(ts);
+ }
+
+ size_t estimate_encoded_size() const {
+ constexpr size_t start_overhead = sizeof(__u8) + sizeof(__u8) + sizeof(ceph_le32); // version and length prefix
+ constexpr size_t string_overhead = sizeof(__u32); // strings are encoded with 32-bit length prefix
+ constexpr size_t time_overhead = 2 * sizeof(ceph_le32); // time is stored as tv_sec and tv_nsec
+ return start_overhead + string_overhead + tag.size() +
+ time_overhead + chain.estimate_encoded_size();
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_gc_obj_info)
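+
+// Illustrative sketch (not part of the upstream header): a GC record ties a
+// tag to the chain of objects to delete, stamped with a time, and its
+// estimate_encoded_size() sums the header, string, and timestamp overheads
+// declared above. The tag value is made up.
+//
+//   cls_rgw_gc_obj_info info;
+//   info.tag = "gc-tag-1234";
+//   info.chain = chain;              // e.g. built with push_obj() as above
+//   info.time = ceph::real_clock::now();
+//   size_t approx = info.estimate_encoded_size();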
+
+struct cls_rgw_lc_obj_head
+{
+ time_t start_date = 0;
+ std::string marker;
+
+ cls_rgw_lc_obj_head() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ uint64_t t = start_date;
+ encode(t, bl);
+ encode(marker, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ uint64_t t;
+ decode(t, bl);
+ start_date = static_cast<time_t>(t);
+ decode(marker, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<cls_rgw_lc_obj_head*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_obj_head)
+
+struct cls_rgw_lc_entry {
+ std::string bucket;
+ uint64_t start_time; // valid only while the entry is in progress
+ uint32_t status;
+
+ cls_rgw_lc_entry()
+ : start_time(0), status(0) {}
+
+ cls_rgw_lc_entry(const cls_rgw_lc_entry& rhs) = default;
+
+ cls_rgw_lc_entry(const std::string& b, uint64_t t, uint32_t s)
+ : bucket(b), start_time(t), status(s) {}
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(bucket, bl);
+ encode(start_time, bl);
+ encode(status, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(bucket, bl);
+ decode(start_time, bl);
+ decode(status, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(cls_rgw_lc_entry)
+
+struct cls_rgw_reshard_entry
+{
+ ceph::real_time time;
+ std::string tenant;
+ std::string bucket_name;
+ std::string bucket_id;
+ std::string new_instance_id;
+ uint32_t old_num_shards{0};
+ uint32_t new_num_shards{0};
+
+ cls_rgw_reshard_entry() {}
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(time, bl);
+ encode(tenant, bl);
+ encode(bucket_name, bl);
+ encode(bucket_id, bl);
+ encode(new_instance_id, bl);
+ encode(old_num_shards, bl);
+ encode(new_num_shards, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(time, bl);
+ decode(tenant, bl);
+ decode(bucket_name, bl);
+ decode(bucket_id, bl);
+ decode(new_instance_id, bl);
+ decode(old_num_shards, bl);
+ decode(new_num_shards, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter *f) const;
+ static void generate_test_instances(std::list<cls_rgw_reshard_entry*>& o);
+
+ static void generate_key(const std::string& tenant, const std::string& bucket_name, std::string *key);
+ void get_key(std::string *key) const;
+};
+WRITE_CLASS_ENCODER(cls_rgw_reshard_entry)