// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef CEPH_BUFFER_RAW_H
#define CEPH_BUFFER_RAW_H

#include <atomic>
#include <cstring>      // memcpy() in raw::clone()
#include <limits>       // std::numeric_limits<> for the CRC cache sentinel
#include <map>
#include <utility>
#include <type_traits>

#include "include/buffer.h"
#include "include/mempool.h"
#include "include/spinlock.h"

namespace ceph::buffer {
inline namespace v14_2_0 {
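
  // raw is the reference-counted backing store behind buffer::ptr/ptr_node
  // (and, through them, bufferlist).  Concrete subclasses defined elsewhere
  // provide the actual memory and implement clone_empty(); this base class
  // tracks the length, the shared refcount, and per-mempool accounting.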
  class raw {
  public:
    // In the future we might want to have a slab allocator here with a few
    // embedded slots.  This would let us avoid the "if" in the dtor of ptr_node.
    std::aligned_storage<sizeof(ptr_node),
                         alignof(ptr_node)>::type bptr_storage;
    char *data;
    unsigned len;
    std::atomic<unsigned> nref { 0 };
    int mempool;
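
    // One-entry CRC cache: last_crc_offset keys the cached (from, to) range
    // and last_crc_val holds whatever the caller stored for it via set_crc().
    // Both are guarded by crc_spinlock; size_t max in the offsets means
    // "nothing cached" (see invalidate_crc()).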
    std::pair<size_t, size_t> last_crc_offset {
      std::numeric_limits<size_t>::max(),
      std::numeric_limits<size_t>::max()
    };
    std::pair<uint32_t, uint32_t> last_crc_val;

    mutable ceph::spinlock crc_spinlock;
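
    // Both constructors charge one buffer of `len` bytes to the chosen
    // mempool; the destructor (and _set_len/reassign_to_mempool below)
    // keeps that accounting balanced.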
    explicit raw(unsigned l, int mempool=mempool::mempool_buffer_anon)
      : data(nullptr), len(l), nref(0), mempool(mempool) {
      mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(1, len);
    }
    raw(char *c, unsigned l, int mempool=mempool::mempool_buffer_anon)
      : data(c), len(l), nref(0), mempool(mempool) {
      mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(1, len);
    }
    virtual ~raw() {
      mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(
        -1, -(int)len);
    }
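
    // Re-book the byte count in the current mempool when the logical
    // length changes: subtract the old length, then add the new one.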
    void _set_len(unsigned l) {
      mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(
        -1, -(int)len);
      len = l;
      mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(1, len);
    }
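
    // Move this buffer's accounting from its current mempool to `pool`.
    // try_assign_to_mempool() only does so for buffers still in the default
    // anonymous pool, so an explicit earlier assignment is not overridden.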
    void reassign_to_mempool(int pool) {
      if (pool == mempool) {
        return;
      }
      mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(
        -1, -(int)len);
      mempool = pool;
      mempool::get_pool(mempool::pool_index_t(pool)).adjust_count(1, len);
    }
    void try_assign_to_mempool(int pool) {
      if (mempool == mempool::mempool_buffer_anon) {
        reassign_to_mempool(pool);
      }
    }
  private:
    // no copying.
    // cppcheck-suppress noExplicitConstructor
    raw(const raw &other) = delete;
    const raw& operator=(const raw &other) = delete;
  public:
    char *get_data() {
      return data;
    }
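
    // clone_empty() is implemented by each concrete raw subclass and must
    // return a fresh, uninitialized buffer with room for `len` bytes;
    // clone() then memcpy()s the payload into it.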
    virtual raw* clone_empty() = 0;
    ceph::unique_leakable_ptr<raw> clone() {
      raw* const c = clone_empty();
      memcpy(c->data, data, len);
      return ceph::unique_leakable_ptr<raw>(c);
    }
    virtual bool is_shareable() const {
      // true if safe to reference/share the existing buffer copy
      // false if it is not safe to share the buffer, e.g., due to special
      // and/or registered memory that is scarce
      return true;
    }
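
    // CRC cache accessors: get_crc() only hits when the requested range
    // matches the single cached entry exactly, set_crc() overwrites that
    // entry, and invalidate_crc() resets the range to the size_t max sentinel.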
    bool get_crc(const std::pair<size_t, size_t> &fromto,
                 std::pair<uint32_t, uint32_t> *crc) const {
      std::lock_guard lg(crc_spinlock);
      if (last_crc_offset == fromto) {
        *crc = last_crc_val;
        return true;
      }
      return false;
    }
    void set_crc(const std::pair<size_t, size_t> &fromto,
                 const std::pair<uint32_t, uint32_t> &crc) {
      std::lock_guard lg(crc_spinlock);
      last_crc_offset = fromto;
      last_crc_val = crc;
    }
    void invalidate_crc() {
      std::lock_guard lg(crc_spinlock);
      last_crc_offset.first = std::numeric_limits<size_t>::max();
      last_crc_offset.second = std::numeric_limits<size_t>::max();
    }
  };
} // inline namespace v14_2_0
} // namespace ceph::buffer
#endif // CEPH_BUFFER_RAW_H