// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <iostream>
#include <limits>

#include "include/buffer.h"

#include "test/crimson/seastore/test_block.h" // TODO

#include "crimson/os/seastore/onode.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/transaction.h"

namespace crimson::os::seastore {

struct ObjectDataBlock : crimson::os::seastore::LogicalCachedExtent {
  using Ref = TCachedExtentRef<ObjectDataBlock>;

  explicit ObjectDataBlock(ceph::bufferptr &&ptr)
    : LogicalCachedExtent(std::move(ptr)) {}
  explicit ObjectDataBlock(const ObjectDataBlock &other)
    : LogicalCachedExtent(other) {}
  explicit ObjectDataBlock(extent_len_t length)
    : LogicalCachedExtent(length) {}

  CachedExtentRef duplicate_for_write(Transaction&) final {
    return CachedExtentRef(new ObjectDataBlock(*this));
  };

  static constexpr extent_types_t TYPE = extent_types_t::OBJECT_DATA_BLOCK;
  extent_types_t get_type() const final {
    return TYPE;
  }

  ceph::bufferlist get_delta() final {
    /* Currently, we always allocate fresh ObjectDataBlocks rather than
     * mutating existing ones. */
    ceph_assert(0 == "Should be impossible");
  }

  void apply_delta(const ceph::bufferlist &bl) final {
    // See get_delta()
    ceph_assert(0 == "Should be impossible");
  }
};
using ObjectDataBlockRef = TCachedExtentRef<ObjectDataBlock>;

class ObjectDataHandler {
public:
  using base_iertr = TransactionManager::base_iertr;

  ObjectDataHandler(uint32_t mos) : max_object_size(mos) {}

  struct context_t {
    TransactionManager &tm;
    Transaction &t;
    Onode &onode;
    Onode *d_onode = nullptr; // The destination onode in case of clone
  };

  /// Writes bl to [offset, offset + bl.length())
  using write_iertr = base_iertr;
  using write_ret = write_iertr::future<>;
  write_ret write(
    context_t ctx,
    objaddr_t offset,
    const bufferlist &bl);

  /// Zeroes data in [offset, offset + len)
  using zero_iertr = base_iertr;
  using zero_ret = zero_iertr::future<>;
  zero_ret zero(
    context_t ctx,
    objaddr_t offset,
    extent_len_t len);

  /// Reads data in [offset, offset + len)
  using read_iertr = base_iertr;
  using read_ret = read_iertr::future<bufferlist>;
  read_ret read(
    context_t ctx,
    objaddr_t offset,
    extent_len_t len);

  /// Sparse read: returns the mapped (offset, length) intervals within
  /// [offset, offset + len)
  using fiemap_iertr = base_iertr;
  using fiemap_ret = fiemap_iertr::future<std::map<uint64_t, uint64_t>>;
  fiemap_ret fiemap(
    context_t ctx,
    objaddr_t offset,
    extent_len_t len);

  /// Clears data past offset
  using truncate_iertr = base_iertr;
  using truncate_ret = truncate_iertr::future<>;
  truncate_ret truncate(
    context_t ctx,
    objaddr_t offset);

  /// Clears data and reservation
  using clear_iertr = base_iertr;
  using clear_ret = clear_iertr::future<>;
  clear_ret clear(context_t ctx);

  /// Clones data of an Onode
  using clone_iertr = base_iertr;
  using clone_ret = clone_iertr::future<>;
  clone_ret clone(context_t ctx);

private:
  /// Updates region [offset, offset + len) with bl, or with zeros if bl is empty
  write_ret overwrite(
    context_t ctx,        ///< [in] ctx
    laddr_t offset,       ///< [in] write offset
    extent_len_t len,     ///< [in] len to write, len == bl->length() if bl
    std::optional<bufferlist> &&bl, ///< [in] buffer to write, empty for zeros
    lba_pin_list_t &&pins ///< [in] set of pins overlapping above region
  );

  /// Ensures object_data reserved region is prepared
  write_ret prepare_data_reservation(
    context_t ctx,
    object_data_t &object_data,
    extent_len_t size);

  /// Trims data past size
  clear_ret trim_data_reservation(
    context_t ctx,
    object_data_t &object_data,
    extent_len_t size);

  /// Clones the extents referenced by pins into the destination region
  /// based at data_base
  clone_ret clone_extents(
    context_t ctx,
    object_data_t &object_data,
    lba_pin_list_t &pins,
    laddr_t data_base);

private:
  /**
   * max_object_size
   *
   * For now, we allocate a fixed region of laddr space of size
   * max_object_size for any object.  In the future, once we have the
   * ability to remap logical mappings (necessary for clone), we'll add
   * the ability to grow and shrink these regions and remove this
   * assumption.
   */
  const uint32_t max_object_size = 0;
};

}

#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::ObjectDataBlock>
  : fmt::ostream_formatter {};
#endif
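/*
 * Usage sketch: a minimal illustration of how a caller might drive
 * ObjectDataHandler within a seastore transaction.  The names `tm`, `t`
 * and `onode` are assumed to be supplied by the surrounding SeaStore /
 * TransactionManager code, and `16 << 20` is an arbitrary example choice
 * for max_object_size rather than a value defined in this header.
 *
 *   crimson::os::seastore::ObjectDataHandler odh{16 << 20};
 *   crimson::os::seastore::ObjectDataHandler::context_t ctx{tm, t, onode};
 *
 *   ceph::bufferlist bl;
 *   bl.append("payload");
 *   return odh.write(ctx, 0, bl).si_then([ctx, &odh] {
 *     // read back the seven bytes just written
 *     return odh.read(ctx, 0, 7);
 *   });
 */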