path: root/src/rocksdb/util/aligned_buffer.h
//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once

#include <algorithm>
#include "port/port.h"

namespace ROCKSDB_NAMESPACE {

// This file contains utilities to handle the alignment of pages and buffers.

// Truncate to a multiple of page_size, which is also a page boundary. This
// helps figure out the right alignment.
// Example:
//   TruncateToPageBoundary(4096, 5000)  => 4096
//   TruncateToPageBoundary(4096, 10000) => 8192
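// Note: page_size must be a power of two, since the truncation below is done
// with a bit mask rather than a division.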
inline size_t TruncateToPageBoundary(size_t page_size, size_t s) {
  s -= (s & (page_size - 1));
  assert((s % page_size) == 0);
  return s;
}

// Round up x to a multiple of y.
// Example:
//   Roundup(13, 5)   => 15
//   Roundup(201, 16) => 208
inline size_t Roundup(size_t x, size_t y) {
  return ((x + y - 1) / y) * y;
}

// Round down x to a multiple of y.
// Example:
//   Rounddown(13, 5)   => 10
//   Rounddown(201, 16) => 192
inline size_t Rounddown(size_t x, size_t y) { return (x / y) * y; }

// AlignedBuffer manages a buffer by taking alignment into consideration, and
// aligns the buffer start and end positions. It is mainly used for direct I/O,
// though it can be used for other purposes as well.
// It also supports expanding the managed buffer, and copying whole or part of
// the data from the old buffer into the new expanded buffer. Such a copy is
// especially useful for avoiding an I/O to re-fetch the data from disk.
//
// Example:
//   AlignedBuffer buf;
//   buf.Alignment(alignment);
//   buf.AllocateNewBuffer(user_requested_buf_size);
//   ...
//   buf.AllocateNewBuffer(2*user_requested_buf_size, /*copy_data*/ true,
//                         copy_offset, copy_len);
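//
// A further minimal sketch of the read/write helpers (illustrative only;
// src, src_len, dest and n are placeholders):
//   buf.Append(src, src_len);          // copy bytes into the aligned buffer
//   buf.Read(dest, /*offset=*/0, n);   // copy bytes back out
//   buf.PadToAlignmentWith(0);         // zero-pad up to the next aligned
//                                      // boundary, e.g. before a direct-I/O
//                                      // write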
class AlignedBuffer {
  size_t alignment_;
  std::unique_ptr<char[]> buf_;
  size_t capacity_;
  size_t cursize_;
  char* bufstart_;

public:
  AlignedBuffer()
    : alignment_(),
      capacity_(0),
      cursize_(0),
      bufstart_(nullptr) {
  }

  AlignedBuffer(AlignedBuffer&& o) ROCKSDB_NOEXCEPT {
    *this = std::move(o);
  }

  AlignedBuffer& operator=(AlignedBuffer&& o) ROCKSDB_NOEXCEPT {
    alignment_ = std::move(o.alignment_);
    buf_ = std::move(o.buf_);
    capacity_ = std::move(o.capacity_);
    cursize_ = std::move(o.cursize_);
    bufstart_ = std::move(o.bufstart_);
    return *this;
  }

  AlignedBuffer(const AlignedBuffer&) = delete;

  AlignedBuffer& operator=(const AlignedBuffer&) = delete;

  static bool isAligned(const void* ptr, size_t alignment) {
    return reinterpret_cast<uintptr_t>(ptr) % alignment == 0;
  }

  static bool isAligned(size_t n, size_t alignment) {
    return n % alignment == 0;
  }

  size_t Alignment() const {
    return alignment_;
  }

  size_t Capacity() const {
    return capacity_;
  }

  size_t CurrentSize() const {
    return cursize_;
  }

  const char* BufferStart() const {
    return bufstart_;
  }

  char* BufferStart() { return bufstart_; }

  void Clear() {
    cursize_ = 0;
  }

  void Alignment(size_t alignment) {
    assert(alignment > 0);
    assert((alignment & (alignment - 1)) == 0);
    alignment_ = alignment;
  }

  // Allocates a new buffer and sets the start position to the first aligned
  // byte.
  //
  // requested_capacity: requested new buffer capacity. This capacity will be
  //     rounded up based on alignment.
  // copy_data: Copy data from the old buffer to the new buffer. If copy_offset
  //     and copy_len are not passed in, all of the data currently in the
  //     existing buffer is copied over to the new buffer.
  // copy_offset: Copy data from this offset in the old buffer.
  // copy_len: Number of bytes to copy.
  //
  // The function does nothing if copy_data is true and requested_capacity is
  // smaller than the number of bytes to copy (copy_len, or the current data
  // size when copy_len is 0), i.e. the old buffer is retained as is.
  void AllocateNewBuffer(size_t requested_capacity, bool copy_data = false,
                         uint64_t copy_offset = 0, size_t copy_len = 0) {
    assert(alignment_ > 0);
    assert((alignment_ & (alignment_ - 1)) == 0);

    copy_len = copy_len > 0 ? copy_len : cursize_;
    if (copy_data && requested_capacity < copy_len) {
      // If we are downsizing to a capacity that is smaller than the current
      // data in the buffer -- Ignore the request.
      return;
    }

    size_t new_capacity = Roundup(requested_capacity, alignment_);
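    // Over-allocate by alignment_ extra bytes so that the usable start of the
    // buffer can be rounded up to the next alignment_ boundary below.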
    char* new_buf = new char[new_capacity + alignment_];
    char* new_bufstart = reinterpret_cast<char*>(
        (reinterpret_cast<uintptr_t>(new_buf) + (alignment_ - 1)) &
        ~static_cast<uintptr_t>(alignment_ - 1));

    if (copy_data) {
      assert(bufstart_ + copy_offset + copy_len <= bufstart_ + cursize_);
      memcpy(new_bufstart, bufstart_ + copy_offset, copy_len);
      cursize_ = copy_len;
    } else {
      cursize_ = 0;
    }

    bufstart_ = new_bufstart;
    capacity_ = new_capacity;
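    // Release the old allocation only after the copy above has completed;
    // reset() frees the previous buffer and takes ownership of new_buf.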
    buf_.reset(new_buf);
  }

  // Append to the buffer.
  //
  // src         : source to copy the data from.
  // append_size : number of bytes to copy from src.
  // Returns the number of bytes appended.
  //
  // If append_size is more than the remaining buffer size, only the
  // remaining-size worth of bytes is copied.
  size_t Append(const char* src, size_t append_size) {
    size_t buffer_remaining = capacity_ - cursize_;
    size_t to_copy = std::min(append_size, buffer_remaining);

    if (to_copy > 0) {
      memcpy(bufstart_ + cursize_, src, to_copy);
      cursize_ += to_copy;
    }
    return to_copy;
  }

  // Read from the buffer.
  //
  // dest      : destination buffer to copy the data to.
  // offset    : the buffer offset to start reading from.
  // read_size : the number of bytes to copy from the buffer to dest.
  // Returns the number of bytes read/copied to dest.
  size_t Read(char* dest, size_t offset, size_t read_size) const {
    assert(offset < cursize_);
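    // In release builds (with the assert compiled out), an out-of-range offset
    // simply results in 0 bytes being read.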

    size_t to_read = 0;
    if (offset < cursize_) {
      to_read = std::min(cursize_ - offset, read_size);
    }
    if (to_read > 0) {
      memcpy(dest, bufstart_ + offset, to_read);
    }
    return to_read;
  }

  // Pad from the current size up to the next alignment boundary with "padding".
  void PadToAlignmentWith(int padding) {
    size_t total_size = Roundup(cursize_, alignment_);
    size_t pad_size = total_size - cursize_;

    if (pad_size > 0) {
      assert((pad_size + cursize_) <= capacity_);
      memset(bufstart_ + cursize_, padding, pad_size);
      cursize_ += pad_size;
    }
  }

  void PadWith(size_t pad_size, int padding) {
    assert((pad_size + cursize_) <= capacity_);
    memset(bufstart_ + cursize_, padding, pad_size);
    cursize_ += pad_size;
  }

  // After a partial flush, move the tail to the beginning of the buffer.
  void RefitTail(size_t tail_offset, size_t tail_size) {
    if (tail_size > 0) {
      memmove(bufstart_, bufstart_ + tail_offset, tail_size);
    }
    cursize_ = tail_size;
  }

  // Returns a place to start appending.
  // WARNING: Note that it is possible to write past the end of the buffer if
  // the buffer is modified without using the write APIs or encapsulation
  // offered by AlignedBuffer. It is up to the user to guard against such
  // errors.
  char* Destination() {
    return bufstart_ + cursize_;
  }

  void Size(size_t cursize) {
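    // Directly set the current data size, e.g. after bytes have been written
    // through the pointer returned by Destination().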
    cursize_ = cursize;
  }
};
}  // namespace ROCKSDB_NAMESPACE