//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PoolAlloc.h:
// Defines the class interface for PoolAllocator and the Allocation
// class that it uses internally.
//
#ifndef COMMON_POOLALLOC_H_
#define COMMON_POOLALLOC_H_
#if !defined(NDEBUG)
# define ANGLE_POOL_ALLOC_GUARD_BLOCKS // define to enable guard block checking
#endif

//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
//   typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
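//
// A minimal sketch of that pattern (MyNode is hypothetical, not part of this
// header):
//
//   class MyNode
//   {
//     public:
//       void *operator new(size_t size, angle::PoolAllocator &pool)
//       {
//           return pool.allocate(size);
//       }
//       // Matching placement delete; a no-op, since pop() reclaims the pool.
//       void operator delete(void *, angle::PoolAllocator &) {}
//   };
//
//   angle::PoolAllocator pool;
//   MyNode *node = new (pool) MyNode;  // drawn from the pool
//   pool.pop();                        // reclaims 'node' and everything else
//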
#include <stddef.h>
#include <string.h>
#include <memory>
#include <vector>
#include "angleutils.h"
#include "common/debug.h"
namespace angle
{
// If we are using guard blocks, we must track each individual allocation.
// If we aren't using guard blocks, these objects are never instantiated,
// so they won't have any impact.
//
class Allocation
{
public:
Allocation(size_t size, unsigned char *mem, Allocation *prev = nullptr)
: mSize(size), mMem(mem), mPrevAlloc(prev)
{
// Allocations are bracketed:
// [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
// This would be cleaner with if (kGuardBlockSize)..., but that
// makes the compiler print warnings about 0 length memsets,
// even with the if() protecting them.
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
memset(preGuard(), kGuardBlockBeginVal, kGuardBlockSize);
memset(data(), kUserDataFill, mSize);
memset(postGuard(), kGuardBlockEndVal, kGuardBlockSize);
#endif
}
void check() const
{
checkGuardBlock(preGuard(), kGuardBlockBeginVal, "before");
checkGuardBlock(postGuard(), kGuardBlockEndVal, "after");
}
void checkAllocList() const;
// Return total size needed to accommodate user buffer of 'size',
// plus our tracking data.
static size_t AllocationSize(size_t size) { return size + 2 * kGuardBlockSize + HeaderSize(); }
// Offset from surrounding buffer to get to user data buffer.
static unsigned char *OffsetAllocation(unsigned char *m)
{
return m + kGuardBlockSize + HeaderSize();
}
private:
void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;
// Find offsets to pre and post guard blocks, and user data buffer
unsigned char *preGuard() const { return mMem + HeaderSize(); }
unsigned char *data() const { return preGuard() + kGuardBlockSize; }
unsigned char *postGuard() const { return data() + mSize; }
size_t mSize; // size of the user data area
unsigned char *mMem; // beginning of our allocation (pts to header)
Allocation *mPrevAlloc; // prior allocation in the chain
static constexpr unsigned char kGuardBlockBeginVal = 0xfb;
static constexpr unsigned char kGuardBlockEndVal = 0xfe;
static constexpr unsigned char kUserDataFill = 0xcd;
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
static constexpr size_t kGuardBlockSize = 16;
static constexpr size_t HeaderSize() { return sizeof(Allocation); }
#else
static constexpr size_t kGuardBlockSize = 0;
static constexpr size_t HeaderSize() { return 0; }
#endif
};
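//
// Worked offset arithmetic for the bracketing above (assumes guard blocks
// are enabled and a hypothetical 8-byte user request):
//
//   [Allocation header][16-byte pre-guard][8 bytes user data][16-byte post-guard]
//
//   AllocationSize(8)   == 8 + 2 * 16 + sizeof(Allocation)
//   OffsetAllocation(m) == m + 16 + sizeof(Allocation)  // start of user data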
//
// There are several stacks. One tracks the user's pushing and popping, and
// is not yet implemented. The others are simply repositories of free pages
// or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS. Multi-page allocations
// are returned to the OS. Individual page allocations are kept for future
// re-use.
//
// The "page size" used is not, nor must it match, the underlying OS
// page size. But, having it be about that size or equal to a set of
// pages is likely most optimal.
//
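//
// Page layout sketch (one page obtained from the OS; widths are illustrative):
//
//   [Header][padding up to mHeaderSkip][allocation][allocation]...[free space]
//
// mCurrentPageOffset advances through the free space of the page at the top
// of the in-use list.
//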
class PoolAllocator : angle::NonCopyable
{
public:
static const int kDefaultAlignment = 16;
//
// Create PoolAllocator. If alignment is set to 1 byte then the fastAllocate()
// function can be used to make allocations with less overhead.
//
PoolAllocator(int growthIncrement = 8 * 1024, int allocationAlignment = kDefaultAlignment);
//
// Don't call the destructor just to free up the memory; call pop() instead.
//
~PoolAllocator();
//
// Initialize page size and alignment after construction
//
void initialize(int pageSize, int alignment);
//
// Call push() to establish a new marker for pop() to free back to. It does
// not have to be called to get things started.
//
void push();
//
// Call pop() to free all memory allocated since the last call to push(),
// or, if push() was never called, to free all memory allocated since the
// first allocation.
//
void pop();
//
// Call popAll() to free all memory allocated.
//
void popAll();
//
// Call allocate() to actually acquire memory. Returns nullptr if no memory
// is available, otherwise a properly aligned pointer to 'numBytes' of memory.
//
void *allocate(size_t numBytes);
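//
// A minimal sketch of the intended push()/pop() pattern (sizes are arbitrary):
//
//   PoolAllocator pool;
//   void *a = pool.allocate(64);   // freed only by a final pop()/popAll()
//   pool.push();
//   void *b = pool.allocate(128);  // freed by the next pop()
//   pool.pop();                    // frees everything allocated since push()
//   pool.popAll();                 // frees all remaining memory, including 'a'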
//
// Call fastAllocate() for a faster allocation path that does minimal
// bookkeeping.
// Precondition: the allocator must have been created with an alignment of 1.
ANGLE_INLINE uint8_t *fastAllocate(size_t numBytes)
{
#if defined(ANGLE_DISABLE_POOL_ALLOC)
return reinterpret_cast<uint8_t *>(allocate(numBytes));
#else
ASSERT(mAlignment == 1);
// No multi-page allocations
ASSERT(numBytes <= (mPageSize - mHeaderSkip));
//
// Do the allocation, most likely case inline first, for efficiency.
//
if (numBytes <= mPageSize - mCurrentPageOffset)
{
//
// Safe to allocate from mCurrentPageOffset.
//
uint8_t *memory = reinterpret_cast<uint8_t *>(mInUseList) + mCurrentPageOffset;
mCurrentPageOffset += numBytes;
return memory;
}
return reinterpret_cast<uint8_t *>(allocateNewPage(numBytes, numBytes));
#endif
}
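//
// A minimal usage sketch for the fast path (the sizes are arbitrary):
//
//   PoolAllocator pool(8 * 1024, 1);  // alignment of 1 enables fastAllocate()
//   uint8_t *bytes = pool.fastAllocate(32);
//   pool.pop();  // reclaims 'bytes' along with everything else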
//
// There is no deallocate. The point of this class is that its users can
// skip deallocation entirely: everything is deallocated at once by calling
// pop(), which also removes the need to track down a large class of leaks.
//
// Catch unwanted allocations.
// TODO(jmadill): Remove this when we remove the global allocator.
void lock();
void unlock();
private:
size_t mAlignment; // all returned allocations will be aligned at
// this granularity, which will be a power of 2
size_t mAlignmentMask;
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
friend struct Header;
struct Header
{
Header(Header *nextPage, size_t pageCount)
: nextPage(nextPage),
pageCount(pageCount)
# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
,
lastAllocation(nullptr)
# endif
{}
~Header()
{
# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
if (lastAllocation)
lastAllocation->checkAllocList();
# endif
}
Header *nextPage;
size_t pageCount;
# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
Allocation *lastAllocation;
# endif
};
struct AllocState
{
size_t offset;
Header *page;
};
using AllocStack = std::vector<AllocState>;
// Slow path of allocation when we have to get a new page.
void *allocateNewPage(size_t numBytes, size_t allocationSize);
// Track allocations if and only if we're using guard blocks
void *initializeAllocation(Header *block, unsigned char *memory, size_t numBytes)
{
# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
new (memory) Allocation(numBytes + mAlignment, memory, block->lastAllocation);
block->lastAllocation = reinterpret_cast<Allocation *>(memory);
# endif
// The OffsetAllocation() call is optimized away if !defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
void *unalignedPtr = Allocation::OffsetAllocation(memory);
size_t alignedBytes = numBytes + mAlignment;
return std::align(mAlignment, numBytes, unalignedPtr, alignedBytes);
}
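// Worked example of the alignment fix-up above (addresses are hypothetical):
// with mAlignment == 16 and 'unalignedPtr' at 0x1004, std::align() advances
// the pointer to 0x1010 and shrinks 'alignedBytes' by 12; requesting
// mAlignment extra bytes up front guarantees 'numBytes' still fit afterward.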
size_t mPageSize; // granularity of allocation from the OS
size_t mHeaderSkip; // amount of memory to skip to make room for the
// header (basically, the size of the header,
// rounded up to keep allocations aligned)
size_t mCurrentPageOffset; // next offset in top of inUseList to allocate from
Header *mFreeList; // list of popped memory
Header *mInUseList; // list of all memory currently being used
AllocStack mStack; // stack of where to allocate from, to partition pool
int mNumCalls; // just an interesting statistic
size_t mTotalBytes; // just an interesting statistic
#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
std::vector<std::vector<void *>> mStack;
#endif
bool mLocked;
};
} // namespace angle
#endif // COMMON_POOLALLOC_H_