/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef mozilla_ArenaAllocator_h
#define mozilla_ArenaAllocator_h

#include <algorithm>
#include <cstdint>
#include "mozilla/Assertions.h"
#include "mozilla/fallible.h"
#include "mozilla/Likely.h"
#include "mozilla/MemoryChecking.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/OperatorNewExtensions.h"
#include "mozilla/Poison.h"
#include "mozilla/TemplateLib.h"
#include "nsDebug.h"
namespace mozilla {

/**
* A very simple arena allocator based on NSPR's PLArena.
*
* The arena allocator only provides allocation; all memory is retained
* until the allocator is destroyed. It's useful for situations where a
* large number of small, transient allocations are expected.
*
* Example usage:
*
* // Define an allocator that is page sized and returns allocations that are
* // 8-byte aligned.
* ArenaAllocator<4096, 8> a;
* for (int i = 1; i <= 1000; i++) {
* DoSomething(a.Allocate(i));
* }
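*
* // A sketch of the fallible path: Allocate(aSize, fallible) returns
* // null on OOM instead of aborting the process. Note that aSize must be
* // non-zero. HandleAllocationFailure() is a hypothetical caller-provided
* // fallback, not part of this class.
* void* p = a.Allocate(128, mozilla::fallible);
* if (!p) {
*   HandleAllocationFailure();
* }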
*/
template <size_t ArenaSize, size_t Alignment = 1>
class ArenaAllocator {
public:
constexpr ArenaAllocator() : mHead(), mCurrent(nullptr) {
static_assert(mozilla::tl::FloorLog2<Alignment>::value ==
mozilla::tl::CeilingLog2<Alignment>::value,
"ArenaAllocator alignment must be a power of two");
}

ArenaAllocator(const ArenaAllocator&) = delete;
ArenaAllocator& operator=(const ArenaAllocator&) = delete;

/**
* Frees all internal arenas but does not call destructors for objects
* allocated out of the arena.
*/
~ArenaAllocator() { Clear(); }

/**
* Fallibly allocates a chunk of memory with the given size from the internal
* arenas. If the allocation size is larger than the chosen arena size, an
* entire arena is allocated and used.
*/
MOZ_ALWAYS_INLINE void* Allocate(size_t aSize, const fallible_t&) {
MOZ_RELEASE_ASSERT(aSize, "Allocation size must be non-zero");
return InternalAllocate(AlignedSize(aSize));
}

void* Allocate(size_t aSize) {
void* p = Allocate(aSize, fallible);
if (MOZ_UNLIKELY(!p)) {
NS_ABORT_OOM(std::max(aSize, ArenaSize));
}
return p;
}

/**
* Frees all entries. The allocator can be reused after this is called.
*
* NB: This will not run destructors of any objects that were allocated from
* the arena.
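*
* A sketch of reuse after clearing (only safe when the allocated objects
* need no destructor calls):
*
*   ArenaAllocator<4096, 8> a;
*   a.Allocate(64);
*   a.Clear();
*   a.Allocate(64);  // Allocates out of a brand-new arena.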
*/
void Clear() {
// Free all chunks.
auto a = mHead.next;
while (a) {
auto tmp = a;
a = a->next;
free(tmp);
}
// Reset the list head.
mHead.next = nullptr;
mCurrent = nullptr;
}

/**
* Adjusts the given size to the required alignment.
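*
* For example, with Alignment == 8, AlignedSize(13) == 16 and
* AlignedSize(16) == 16. The mask arithmetic below is only correct when
* Alignment is a power of two, which the constructor's static_assert
* enforces.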
*/
static constexpr size_t AlignedSize(size_t aSize) {
return (aSize + (Alignment - 1)) & ~(Alignment - 1);
}

size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
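// Note: only heap-allocated chunks are measured here. mHead is embedded
// in the allocator object itself and is therefore excluded.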
size_t s = 0;
for (auto arena = mHead.next; arena; arena = arena->next) {
s += aMallocSizeOf(arena);
}
return s;
}

void Check() {
if (mCurrent) {
mCurrent->canary.Check();
}
}

private:
struct ArenaHeader {
/**
* The next free location in the data portion of the arena.
*/
uintptr_t offset;
/**
* The location in memory of the end of the data portion of the arena.
*/
uintptr_t tail;
};

struct ArenaChunk {
constexpr ArenaChunk() : header{0, 0}, next(nullptr) {}
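// Sets up a chunk that manages |aSize| bytes starting at |this|: the data
// region begins just past this header, rounded up to Alignment, and ends
// at |this| + aSize.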
explicit ArenaChunk(size_t aSize)
: header{AlignedSize(uintptr_t(this + 1)), uintptr_t(this) + aSize},
next(nullptr) {}
CorruptionCanary canary;
ArenaHeader header;
ArenaChunk* next;

/**
* Allocates a chunk of memory out of the arena and advances the offset.
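*
* This is a simple bump allocator: the returned pointer is the current
* offset, which then advances by |aSize| (already rounded to Alignment by
* the caller).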
*/
void* Allocate(size_t aSize) {
MOZ_ASSERT(aSize <= Available());
char* p = reinterpret_cast<char*>(header.offset);
MOZ_RELEASE_ASSERT(p);
header.offset += aSize;
canary.Check();
MOZ_MAKE_MEM_UNDEFINED(p, aSize);
return p;
}

/**
* Calculates the amount of space available for allocation in this chunk.
*/
size_t Available() const { return header.tail - header.offset; }
};

/**
* Allocates an arena chunk of the given size and initializes its header.
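*
* The chunk is laid out as [ArenaChunk | data]: |aSize| is the requested
* data capacity, so the underlying malloc is |aSize| plus the aligned
* header size.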
*/
ArenaChunk* AllocateChunk(size_t aSize) {
static const size_t kOffset = AlignedSize(sizeof(ArenaChunk));
MOZ_ASSERT(kOffset < aSize);
const size_t chunkSize = aSize + kOffset;
void* p = malloc(chunkSize);
if (!p) {
return nullptr;
}
ArenaChunk* arena = new (KnownNotNull, p) ArenaChunk(chunkSize);
MOZ_MAKE_MEM_NOACCESS((void*)arena->header.offset,
arena->header.tail - arena->header.offset);
// Insert into the head of the list.
arena->next = mHead.next;
mHead.next = arena;
// Only update |mCurrent| if this is a standard allocation; large
// allocations will always end up full, so there's no point in updating
// |mCurrent| in that case.
if (aSize == ArenaSize - kOffset) {
mCurrent = arena;
}
return arena;
}

MOZ_ALWAYS_INLINE void* InternalAllocate(size_t aSize) {
static_assert(ArenaSize > AlignedSize(sizeof(ArenaChunk)),
"Arena size must be greater than the header size");
static const size_t kMaxArenaCapacity =
ArenaSize - AlignedSize(sizeof(ArenaChunk));
if (mCurrent && aSize <= mCurrent->Available()) {
return mCurrent->Allocate(aSize);
}
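// Slow path: either there's no current chunk or it doesn't have enough
// room. Requests larger than kMaxArenaCapacity get a dedicated,
// oversized chunk of exactly aSize.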
ArenaChunk* arena = AllocateChunk(std::max(kMaxArenaCapacity, aSize));
return arena ? arena->Allocate(aSize) : nullptr;
}

ArenaChunk mHead;
ArenaChunk* mCurrent;
};

}  // namespace mozilla

#endif // mozilla_ArenaAllocator_h