/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "SharedBufferMLGPU.h"
#include "BufferCache.h"
#include "MLGDevice.h"
namespace mozilla {
namespace layers {

SharedBufferMLGPU::SharedBufferMLGPU(MLGDevice* aDevice, MLGBufferType aType,
size_t aDefaultSize)
: mDevice(aDevice),
mType(aType),
mDefaultSize(aDefaultSize),
mCanUseOffsetAllocation(true),
mCurrentPosition(0),
mMaxSize(0),
mMap(),
mMapped(false),
mBytesUsedThisFrame(0),
mNumSmallFrames(0) {
MOZ_COUNT_CTOR(SharedBufferMLGPU);
}

SharedBufferMLGPU::~SharedBufferMLGPU() {
MOZ_COUNT_DTOR(SharedBufferMLGPU);
Unmap();
}

bool SharedBufferMLGPU::Init() {
  // If we can't use buffer offset binding, we never allocate shared buffers.
if (!mCanUseOffsetAllocation) {
return true;
}
// If we can use offset binding, allocate an initial shared buffer now.
if (!GrowBuffer(mDefaultSize)) {
return false;
}
return true;
}

void SharedBufferMLGPU::Reset() {
// We shouldn't be mapped here, but just in case, unmap now.
Unmap();
  mBytesUsedThisFrame = 0;

  // If we allocated a large buffer for a particularly heavy layer tree, but
  // have not used most of it again for many frames, we discard the buffer.
  // This prevents large, pointless uploads after visiting a single heavy
  // page - it also lessens ping-ponging between large and small buffers.
if (mBuffer && (mBuffer->GetSize() > mDefaultSize * 4) &&
mNumSmallFrames >= 10) {
mBuffer = nullptr;
}
// Note that we do not aggressively map a new buffer. There's no reason to,
// and it'd cause unnecessary uploads when painting empty frames.
}
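
// A worked example of the heuristic above (the default size here is
// hypothetical): with mDefaultSize == 16 KB, a buffer larger than 64 KB
// (mDefaultSize * 4) is discarded once ten consecutive frames have each
// used at most 16 KB.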
bool SharedBufferMLGPU::EnsureMappedBuffer(size_t aBytes) {
if (!mBuffer || (mMaxSize - mCurrentPosition < aBytes)) {
if (!GrowBuffer(aBytes)) {
return false;
}
}
if (!mMapped && !Map()) {
return false;
}
return true;
}

// We don't want to cache large buffers, since it results in larger uploads
// that might not be needed.
static const size_t kMaxCachedBufferSize = 128 * 1024;

bool SharedBufferMLGPU::GrowBuffer(size_t aBytes) {
// We only pre-allocate buffers if we can use offset allocation.
MOZ_ASSERT(mCanUseOffsetAllocation);
// Unmap the previous buffer. This will retain mBuffer, but free up the
// address space used by its mapping.
Unmap();
size_t maybeSize = mDefaultSize;
if (mBuffer) {
    // First try doubling the previous allocation size, capped at
    // kMaxCachedBufferSize.
maybeSize = std::min(kMaxCachedBufferSize, mBuffer->GetSize() * 2);
}
size_t bytes = std::max(aBytes, maybeSize);
mBuffer = mDevice->CreateBuffer(mType, bytes, MLGUsage::Dynamic);
if (!mBuffer) {
return false;
}
mCurrentPosition = 0;
mMaxSize = mBuffer->GetSize();
return true;
}
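
// Growth arithmetic sketch (sizes are illustrative, not from this file): a
// 48 KB buffer doubles to 96 KB, and a 96 KB buffer is clamped to
// kMaxCachedBufferSize (128 KB). If a single request exceeds the clamp - say
// aBytes is 200 KB - the std::max above lets the request win, and a 200 KB
// buffer is created anyway.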
void SharedBufferMLGPU::PrepareForUsage() {
Unmap();
if (mBytesUsedThisFrame <= mDefaultSize) {
mNumSmallFrames++;
} else {
mNumSmallFrames = 0;
}
}
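
// Frame lifecycle sketch (a plausible ordering; the actual call sites live
// outside this file):
//
//   buffer->Reset();              // frame start: clear usage counters
//   ...                           // Allocate() once per draw batch
//   buffer->PrepareForUsage();    // frame end, before drawing: unmap so
//                                 // the GPU can safely consume the data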
bool SharedBufferMLGPU::Map() {
MOZ_ASSERT(mBuffer);
MOZ_ASSERT(!mMapped);
if (!mDevice->Map(mBuffer, MLGMapType::WRITE_DISCARD, &mMap)) {
    // Don't retain the buffer; it's useless if we can't map it.
mBuffer = nullptr;
return false;
}
mCurrentPosition = 0;
mMapped = true;
return true;
}

void SharedBufferMLGPU::Unmap() {
if (!mMapped) {
return;
}
mBytesUsedThisFrame += mCurrentPosition;
mDevice->Unmap(mBuffer);
mMap = MLGMappedResource();
mMapped = false;
}

uint8_t* SharedBufferMLGPU::GetBufferPointer(size_t aBytes,
ptrdiff_t* aOutOffset,
RefPtr<MLGBuffer>* aOutBuffer) {
if (!EnsureMappedBuffer(aBytes)) {
return nullptr;
}
ptrdiff_t newPos = mCurrentPosition + aBytes;
MOZ_ASSERT(size_t(newPos) <= mMaxSize);
*aOutOffset = mCurrentPosition;
*aOutBuffer = mBuffer;
uint8_t* ptr = reinterpret_cast<uint8_t*>(mMap.mData) + mCurrentPosition;
mCurrentPosition = newPos;
return ptr;
}
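
// Usage sketch for this raw path (|shared| and |data| are hypothetical; the
// typed Allocate() helpers below are the usual entry points):
//
//   ptrdiff_t offset;
//   RefPtr<MLGBuffer> buffer;
//   if (uint8_t* ptr = shared->GetBufferPointer(64, &offset, &buffer)) {
//     memcpy(ptr, data, 64);  // write into the mapped region
//     // Bind |buffer| at |offset| when issuing the draw.
//   }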
VertexBufferSection::VertexBufferSection()
    : mOffset(-1), mNumVertices(0), mStride(0) {}

void VertexBufferSection::Init(MLGBuffer* aBuffer, ptrdiff_t aOffset,
size_t aNumVertices, size_t aStride) {
mBuffer = aBuffer;
mOffset = aOffset;
mNumVertices = aNumVertices;
mStride = aStride;
}

ConstantBufferSection::ConstantBufferSection()
    : mOffset(-1), mNumBytes(0), mNumItems(0) {}

void ConstantBufferSection::Init(MLGBuffer* aBuffer, ptrdiff_t aOffset,
size_t aBytes, size_t aNumItems) {
mBuffer = aBuffer;
mOffset = aOffset;
mNumBytes = aBytes;
mNumItems = aNumItems;
}

SharedVertexBuffer::SharedVertexBuffer(MLGDevice* aDevice, size_t aDefaultSize)
    : SharedBufferMLGPU(aDevice, MLGBufferType::Vertex, aDefaultSize) {}

bool SharedVertexBuffer::Allocate(VertexBufferSection* aHolder,
size_t aNumItems, size_t aSizeOfItem,
const void* aData) {
RefPtr<MLGBuffer> buffer;
ptrdiff_t offset;
size_t bytes = aSizeOfItem * aNumItems;
uint8_t* ptr = GetBufferPointer(bytes, &offset, &buffer);
if (!ptr) {
return false;
}
memcpy(ptr, aData, bytes);
aHolder->Init(buffer, offset, aNumItems, aSizeOfItem);
return true;
}
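
// Example (the caller and vertex type are hypothetical, for illustration):
//
//   struct SimpleVertex {
//     float x, y;
//   };
//   SimpleVertex quad[4] = {{0, 0}, {1, 0}, {0, 1}, {1, 1}};
//   VertexBufferSection section;
//   if (!vertices->Allocate(&section, 4, sizeof(SimpleVertex), quad)) {
//     return;  // allocation or mapping failed
//   }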
AutoBufferUploadBase::AutoBufferUploadBase() : mPtr(nullptr) {}
AutoBufferUploadBase::~AutoBufferUploadBase() {
if (mBuffer) {
UnmapBuffer();
}
}

void AutoBufferUploadBase::Init(void* aPtr, MLGDevice* aDevice,
MLGBuffer* aBuffer) {
MOZ_ASSERT(!mPtr && aPtr);
mPtr = aPtr;
mDevice = aDevice;
mBuffer = aBuffer;
}

SharedConstantBuffer::SharedConstantBuffer(MLGDevice* aDevice,
size_t aDefaultSize)
: SharedBufferMLGPU(aDevice, MLGBufferType::Constant, aDefaultSize) {
mMaxConstantBufferBindSize = aDevice->GetMaxConstantBufferBindSize();
mCanUseOffsetAllocation = aDevice->CanUseConstantBufferOffsetBinding();
}

bool SharedConstantBuffer::Allocate(ConstantBufferSection* aHolder,
AutoBufferUploadBase* aPtr,
size_t aNumItems, size_t aSizeOfItem) {
MOZ_ASSERT(aSizeOfItem % 16 == 0, "Items must be padded to 16 bytes");
size_t bytes = aNumItems * aSizeOfItem;
if (bytes > mMaxConstantBufferBindSize) {
gfxWarning()
<< "Attempted to allocate too many bytes into a constant buffer";
return false;
}
RefPtr<MLGBuffer> buffer;
ptrdiff_t offset;
if (!GetBufferPointer(aPtr, bytes, &offset, &buffer)) {
return false;
}
aHolder->Init(buffer, offset, bytes, aNumItems);
return true;
}
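
// Example (the constants struct is hypothetical; AutoBufferUpload<T> is
// assumed to be the typed wrapper over AutoBufferUploadBase declared in the
// header). Note the struct is padded to a multiple of 16 bytes, as the
// assertion above requires:
//
//   struct LayerConstants {
//     float transform[16];  // 64 bytes
//     float opacity;        // 4 bytes
//     float padding[3];     // pad to 80 bytes, a multiple of 16
//   };
//
//   ConstantBufferSection section;
//   AutoBufferUpload<LayerConstants> upload;
//   if (constants->Allocate(&section, &upload, 1, sizeof(LayerConstants))) {
//     upload->opacity = 1.0f;  // writes through the mapped pointer
//   }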
uint8_t* SharedConstantBuffer::AllocateNewBuffer(
size_t aBytes, ptrdiff_t* aOutOffset, RefPtr<MLGBuffer>* aOutBuffer) {
RefPtr<MLGBuffer> buffer;
if (BufferCache* cache = mDevice->GetConstantBufferCache()) {
buffer = cache->GetOrCreateBuffer(aBytes);
} else {
buffer = mDevice->CreateBuffer(MLGBufferType::Constant, aBytes,
MLGUsage::Dynamic);
}
if (!buffer) {
return nullptr;
}
MLGMappedResource map;
if (!mDevice->Map(buffer, MLGMapType::WRITE_DISCARD, &map)) {
return nullptr;
}
// Signal that offsetting is not supported.
*aOutOffset = -1;
*aOutBuffer = buffer;
return reinterpret_cast<uint8_t*>(map.mData);
}
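
// The path above serves devices that cannot bind constant buffers at an
// offset (for example, Direct3D 11 without the 11.1 *SetConstantBuffers1
// entry points): each allocation gets a whole buffer, and the -1 offset
// tells the consumer to bind the entire buffer rather than a sub-range.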
void AutoBufferUploadBase::UnmapBuffer() { mDevice->Unmap(mBuffer); }

} // namespace layers
} // namespace mozilla