// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Allen Samuels <allen.samuels@sandisk.com>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/

#include "include/mempool.h"
#include "include/demangle.h"

// Thread local variables should save index, not &shard[index],
// because shard[] is defined in the class
static thread_local size_t thread_shard_index = mempool::num_shards;

// default to debug_mode off
bool mempool::debug_mode = false;

// --------------------------------------------------------------
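// access the data for one pool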
mempool::pool_t& mempool::get_pool(mempool::pool_index_t ix)
{
  // We rely on this array being initialized before any invocation of
  // this function, even if it is called by ctors in other compilation
  // units that are being initialized before this compilation unit.
  static mempool::pool_t table[num_pools];
  return table[ix];
}
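
// Map a pool index to its human-readable name.  The P() macro stringifies
// each entry of DEFINE_MEMORY_POOLS_HELPER into a static name table.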
const char *mempool::get_pool_name(mempool::pool_index_t ix) {
#define P(x) #x,
  static const char *names[num_pools] = {
    DEFINE_MEMORY_POOLS_HELPER(P)
  };
#undef P
  return names[ix];
}
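
// Dump per-pool statistics plus a grand total to the Formatter.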
void mempool::dump(ceph::Formatter *f)
{
  stats_t total;
  f->open_object_section("mempool"); // we need (dummy?) topmost section for
                                     // JSON Formatter to print pool names. It omits them otherwise.
  f->open_object_section("by_pool");
  for (size_t i = 0; i < num_pools; ++i) {
    const pool_t &pool = mempool::get_pool((pool_index_t)i);
    f->open_object_section(get_pool_name((pool_index_t)i));
    pool.dump(f, &total);
    f->close_section();
  }
  f->close_section();
  f->dump_object("total", total);
  f->close_section();
}
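
// Debug mode enables the per-type accounting that get_stats() reports
// in its by_type map.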
void mempool::set_debug_mode(bool d)
{
  debug_mode = d;
}

// --------------------------------------------------------------
// pool_t
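
// Sum the per-shard byte counters.  Shards are updated without locking,
// so a racing update can make the transient sum negative; clamp to zero.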
size_t mempool::pool_t::allocated_bytes() const
{
  ssize_t result = 0;
  for (size_t i = 0; i < num_shards; ++i) {
    result += shard[i].bytes;
  }
  if (result < 0) {
    // we raced with some unbalanced allocations/deallocations
    result = 0;
  }
  return (size_t) result;
}
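
// Same as allocated_bytes(), but summing item counts.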
size_t mempool::pool_t::allocated_items() const
{
  ssize_t result = 0;
  for (size_t i = 0; i < num_shards; ++i) {
    result += shard[i].items;
  }
  if (result < 0) {
    // we raced with some unbalanced allocations/deallocations
    result = 0;
  }
  return (size_t) result;
}
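
// Each thread picks a shard once and caches the index in a thread-local,
// so repeated updates from the same thread land on the same shard,
// reducing contention between cores.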
void mempool::pool_t::adjust_count(ssize_t items, ssize_t bytes)
{
  thread_shard_index = (thread_shard_index == num_shards) ?
    pick_a_shard_int() : thread_shard_index;
  shard[thread_shard_index].items += items;
  shard[thread_shard_index].bytes += bytes;
}
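
// Fill in *total from the per-shard counters; the per-type breakdown is
// only collected (under the pool lock) when debug_mode is enabled.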
void mempool::pool_t::get_stats(
  stats_t *total,
  std::map<std::string, stats_t> *by_type) const
{
  for (size_t i = 0; i < num_shards; ++i) {
    total->items += shard[i].items;
    total->bytes += shard[i].bytes;
  }
  if (debug_mode) {
    std::lock_guard shard_lock(lock);
    for (auto &p : type_map) {
      std::string n = ceph_demangle(p.second.type_name);
      stats_t &s = (*by_type)[n];
      s.bytes = p.second.items * p.second.item_size;
      s.items = p.second.items;
    }
  }
}
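
// Dump this pool's stats to the Formatter and, if ptotal is given,
// accumulate them into the caller's running total.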
void mempool::pool_t::dump(ceph::Formatter *f, stats_t *ptotal) const
{
  stats_t total;
  std::map<std::string, stats_t> by_type;
  get_stats(&total, &by_type);
  if (ptotal) {
    *ptotal += total;
  }
  total.dump(f);
  if (!by_type.empty()) {
    f->open_object_section("by_type");
    for (auto &i : by_type) {
      f->open_object_section(i.first.c_str());
      i.second.dump(f);
      f->close_section();
    }
    f->close_section();
  }
}