1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
|
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef BASEPROFILEJSONWRITER_H
#define BASEPROFILEJSONWRITER_H
#include "mozilla/FailureLatch.h"
#include "mozilla/HashFunctions.h"
#include "mozilla/HashTable.h"
#include "mozilla/JSONWriter.h"
#include "mozilla/Maybe.h"
#include "mozilla/NotNull.h"
#include "mozilla/ProgressLogger.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/UniquePtrExtensions.h"
#include <functional>
#include <ostream>
#include <string_view>
namespace mozilla {
namespace baseprofiler {
class SpliceableJSONWriter;
// On average, profile JSONs are large enough such that we want to avoid
// reallocating its buffer when expanding. Additionally, the contents of the
// profile are not accessed until the profile is entirely written. For these
// reasons we use a chunked writer that keeps an array of chunks, which is
// concatenated together after writing is finished.
class ChunkedJSONWriteFunc final : public JSONWriteFunc, public FailureLatch {
 public:
  friend class SpliceableJSONWriter;
  // The given failure latch is shared: failures noticed here (e.g., OOM when
  // allocating a chunk) are forwarded to it, and a failure set elsewhere makes
  // all further writes here no-ops. The latch must outlive this object.
  explicit ChunkedJSONWriteFunc(FailureLatch& aFailureLatch)
      : mFailureLatch(WrapNotNullUnchecked(&aFailureLatch)) {
    // Pre-allocate the first chunk; on failure the error is latched and all
    // later writes become no-ops.
    (void)AllocChunk(kChunkSize);
  }
  // True when no chunk is allocated: either nothing was ever written, or
  // everything was cleared (after a failure, or after Take() moved it away).
  [[nodiscard]] bool IsEmpty() const {
    MOZ_ASSERT_IF(!mChunkPtr, !mChunkEnd && mChunkList.length() == 0 &&
                                  mChunkLengths.length() == 0);
    return !mChunkPtr;
  }
  // Length of data written so far, excluding null terminator.
  [[nodiscard]] size_t Length() const {
    MOZ_ASSERT(mChunkLengths.length() == mChunkList.length());
    size_t totalLen = 0;
    for (size_t i = 0; i < mChunkLengths.length(); i++) {
      MOZ_ASSERT(strlen(mChunkList[i].get()) == mChunkLengths[i]);
      totalLen += mChunkLengths[i];
    }
    return totalLen;
  }
  // Append aStr to the current chunk, allocating a new chunk if needed.
  // No-op after a latched failure. The buffered data is kept null-terminated
  // at all times (see the Length() assertion above).
  void Write(const Span<const char>& aStr) final {
    if (Failed()) {
      return;
    }
    MOZ_ASSERT(mChunkPtr >= mChunkList.back().get() && mChunkPtr <= mChunkEnd);
    MOZ_ASSERT(mChunkEnd >= mChunkList.back().get() + mChunkLengths.back());
    MOZ_ASSERT(*mChunkPtr == '\0');
    // Most strings to be written are small, but subprocess profiles (e.g.,
    // from the content process in e10s) may be huge. If the string is larger
    // than a chunk, allocate its own chunk.
    char* newPtr;
    if (aStr.size() >= kChunkSize) {
      // Oversized string: give it a dedicated chunk with room for the
      // terminator.
      if (!AllocChunk(aStr.size() + 1)) {
        return;
      }
      newPtr = mChunkPtr + aStr.size();
    } else {
      newPtr = mChunkPtr + aStr.size();
      if (newPtr >= mChunkEnd) {
        // Not enough room left in the current chunk (the '\0' must also fit):
        // start a fresh standard-sized chunk.
        if (!AllocChunk(kChunkSize)) {
          return;
        }
        newPtr = mChunkPtr + aStr.size();
      }
    }
    memcpy(mChunkPtr, aStr.data(), aStr.size());
    *newPtr = '\0';
    mChunkPtr = newPtr;
    mChunkLengths.back() += aStr.size();
  }
  // Concatenate all chunks into one buffer obtained from aAllocator, which is
  // asked for Length() + 1 bytes so a null terminator fits.
  // Returns false if a failure was already latched, or aAllocator returns
  // null.
  [[nodiscard]] bool CopyDataIntoLazilyAllocatedBuffer(
      const std::function<char*(size_t)>& aAllocator) const {
    // Request a buffer for the full content plus a null terminator.
    if (Failed()) {
      return false;
    }
    char* ptr = aAllocator(Length() + 1);
    if (!ptr) {
      // Failed to allocate memory.
      return false;
    }
    for (size_t i = 0; i < mChunkList.length(); i++) {
      size_t len = mChunkLengths[i];
      memcpy(ptr, mChunkList[i].get(), len);
      ptr += len;
    }
    *ptr = '\0';
    return true;
  }
  // Concatenate all chunks into a newly-allocated null-terminated buffer.
  // Returns nullptr on any failure (previous write failure, or OOM here).
  [[nodiscard]] UniquePtr<char[]> CopyData() const {
    UniquePtr<char[]> c;
    if (!CopyDataIntoLazilyAllocatedBuffer([&](size_t allocationSize) {
          c = MakeUnique<char[]>(allocationSize);
          return c.get();
        })) {
      // Something went wrong, make sure the returned pointer is null even if
      // the allocation happened.
      c = nullptr;
    }
    return c;
  }
  // Move all of aOther's chunks to the end of this list, leaving aOther
  // empty. aOther's failure state is forwarded first; if either side has
  // failed, nothing is moved.
  void Take(ChunkedJSONWriteFunc&& aOther) {
    SetFailureFrom(aOther);
    if (Failed()) {
      return;
    }
    for (size_t i = 0; i < aOther.mChunkList.length(); i++) {
      MOZ_ALWAYS_TRUE(mChunkLengths.append(aOther.mChunkLengths[i]));
      MOZ_ALWAYS_TRUE(mChunkList.append(std::move(aOther.mChunkList[i])));
    }
    // Treat the adopted last chunk as full: cursor and end coincide at its
    // last written byte, so the next Write() will allocate a fresh chunk.
    mChunkPtr = mChunkList.back().get() + mChunkLengths.back();
    mChunkEnd = mChunkPtr;
    aOther.Clear();
  }
  FAILURELATCH_IMPL_PROXY(*mFailureLatch)
  // Change the failure latch to be used here, and if the previous latch was
  // already in failure state, set that failure in the new latch.
  // This allows using this WriteFunc in isolation, before attempting to bring
  // it into another operation group with its own FailureLatch.
  void ChangeFailureLatchAndForwardState(FailureLatch& aFailureLatch) {
    aFailureLatch.SetFailureFrom(*this);
    mFailureLatch = WrapNotNullUnchecked(&aFailureLatch);
  }
 private:
  // Release all chunks and reset cursors; IsEmpty() becomes true.
  void Clear() {
    mChunkPtr = nullptr;
    mChunkEnd = nullptr;
    mChunkList.clear();
    mChunkLengths.clear();
  }
  void ClearAndSetFailure(std::string aFailure) {
    Clear();
    SetFailure(std::move(aFailure));
  }
  // Convenience for error paths that must report failure and return false.
  [[nodiscard]] bool ClearAndSetFailureAndFalse(std::string aFailure) {
    ClearAndSetFailure(std::move(aFailure));
    return false;
  }
  // Append a new chunk of aChunkSize bytes and make it the current one.
  // On OOM, clears everything, latches a failure, and returns false.
  [[nodiscard]] bool AllocChunk(size_t aChunkSize) {
    if (Failed()) {
      if (mChunkPtr) {
        // FailureLatch is in failed state, but chunks have not been cleared yet
        // (error must have happened elsewhere).
        Clear();
      }
      return false;
    }
    MOZ_ASSERT(mChunkLengths.length() == mChunkList.length());
    UniquePtr<char[]> newChunk = MakeUniqueFallible<char[]>(aChunkSize);
    if (!newChunk) {
      return ClearAndSetFailureAndFalse(
          "OOM in ChunkedJSONWriteFunc::AllocChunk allocating new chunk");
    }
    mChunkPtr = newChunk.get();
    mChunkEnd = mChunkPtr + aChunkSize;
    *mChunkPtr = '\0';
    if (!mChunkLengths.append(0)) {
      return ClearAndSetFailureAndFalse(
          "OOM in ChunkedJSONWriteFunc::AllocChunk appending length");
    }
    if (!mChunkList.append(std::move(newChunk))) {
      return ClearAndSetFailureAndFalse(
          "OOM in ChunkedJSONWriteFunc::AllocChunk appending new chunk");
    }
    return true;
  }
  // Standard chunk size: 2 MiB.
  static const size_t kChunkSize = 4096 * 512;
  // Pointer for writing inside the current chunk.
  //
  // The current chunk is always at the back of mChunkList, i.e.,
  // mChunkList.back() <= mChunkPtr <= mChunkEnd.
  char* mChunkPtr = nullptr;
  // Pointer to the end of the current chunk.
  //
  // The current chunk is always at the back of mChunkList, i.e.,
  // mChunkEnd >= mChunkList.back() + mChunkLengths.back().
  char* mChunkEnd = nullptr;
  // List of chunks and their lengths.
  //
  // For all i, the length of the string in mChunkList[i] is
  // mChunkLengths[i].
  Vector<UniquePtr<char[]>> mChunkList;
  Vector<size_t> mChunkLengths;
  // Where failures are reported; never null.
  NotNull<FailureLatch*> mFailureLatch;
};
struct OStreamJSONWriteFunc final : public JSONWriteFunc {
explicit OStreamJSONWriteFunc(std::ostream& aStream) : mStream(aStream) {}
void Write(const Span<const char>& aStr) final {
std::string_view sv(aStr.data(), aStr.size());
mStream << sv;
}
std::ostream& mStream;
};
class UniqueJSONStrings;
// Single-line JSONWriter that can also splice pre-formatted JSON text (and
// whole ChunkedJSONWriteFunc buffers) directly into its output, and can
// optionally emit indices into a shared UniqueJSONStrings table.
class SpliceableJSONWriter : public JSONWriter, public FailureLatch {
 public:
  SpliceableJSONWriter(JSONWriteFunc& aWriter, FailureLatch& aFailureLatch)
      : JSONWriter(aWriter, JSONWriter::SingleLineStyle),
        mFailureLatch(WrapNotNullUnchecked(&aFailureLatch)) {}
  SpliceableJSONWriter(UniquePtr<JSONWriteFunc> aWriter,
                       FailureLatch& aFailureLatch)
      : JSONWriter(std::move(aWriter), JSONWriter::SingleLineStyle),
        mFailureLatch(WrapNotNullUnchecked(&aFailureLatch)) {}
  // Start/end an unnamed, undelimited collection, for writing bare elements.
  void StartBareList() { StartCollection(scEmptyString, scEmptyString); }
  void EndBareList() { EndCollection(scEmptyString); }
  // Output a time (int64_t given in nanoseconds) in milliseconds. trim zeroes.
  // E.g.: 1'234'567'890 -> "1234.56789"
  void TimeI64NsProperty(const Span<const char>& aMaybePropertyName,
                         int64_t aTime_ns) {
    if (aTime_ns == 0) {
      Scalar(aMaybePropertyName, MakeStringSpan("0"));
      return;
    }
    // NOTE(review): std::abs(INT64_MIN) is undefined behavior; this assumes
    // callers never pass INT64_MIN (TimeDoubleMsProperty below asserts its
    // inputs stay away from the extremes) — confirm for other callers.
    static constexpr int64_t million = 1'000'000;
    const int64_t absNanos = std::abs(aTime_ns);
    // Whole milliseconds, and the sub-millisecond remainder in nanoseconds.
    const int64_t integerMilliseconds = absNanos / million;
    auto remainderNanoseconds = static_cast<uint32_t>(absNanos % million);
    // Plenty enough to fit INT64_MIN (-9223372036854775808).
    static constexpr size_t DIGITS_MAX = 23;
    char buf[DIGITS_MAX + 1];
    // Write sign and integral part; `len` tracks the output length.
    int len =
        snprintf(buf, DIGITS_MAX, (aTime_ns >= 0) ? "%" PRIu64 : "-%" PRIu64,
                 integerMilliseconds);
    if (remainderNanoseconds != 0) {
      buf[len++] = '.';
      // Output up to 6 fractional digits. Exit early if the rest would
      // be trailing zeros.
      uint32_t powerOfTen = static_cast<uint32_t>(million / 10);
      for (;;) {
        auto digit = remainderNanoseconds / powerOfTen;
        buf[len++] = '0' + static_cast<char>(digit);
        remainderNanoseconds %= powerOfTen;
        if (remainderNanoseconds == 0) {
          break;
        }
        powerOfTen /= 10;
        if (powerOfTen == 0) {
          break;
        }
      }
    }
    // Unquoted number output (Scalar writes the span verbatim).
    Scalar(aMaybePropertyName, Span<const char>(buf, len));
  }
  // Output a (double) time in milliseconds, with at best nanosecond precision.
  void TimeDoubleMsProperty(const Span<const char>& aMaybePropertyName,
                            double aTime_ms) {
    const double dTime_ns = aTime_ms * 1'000'000.0;
    // Make sure it's well within int64_t range.
    // 2^63 nanoseconds is almost 300 years; these times are relative to
    // firefox startup, this should be enough for most uses.
    if (dTime_ns >= 0.0) {
      MOZ_RELEASE_ASSERT(dTime_ns < double(INT64_MAX - 1));
    } else {
      MOZ_RELEASE_ASSERT(dTime_ns > double(INT64_MIN + 2));
    }
    // Round to nearest integer nanosecond. The conversion to integer truncates
    // the fractional part, so first we need to push it 0.5 away from zero.
    const int64_t iTime_ns =
        (dTime_ns >= 0.0) ? int64_t(dTime_ns + 0.5) : int64_t(dTime_ns - 0.5);
    TimeI64NsProperty(aMaybePropertyName, iTime_ns);
  }
  // Output a (double) time in milliseconds, with at best nanosecond precision.
  void TimeDoubleMsElement(double aTime_ms) {
    TimeDoubleMsProperty(nullptr, aTime_ms);
  }
  // This function must be used to correctly stream timestamps in profiles.
  // Null timestamps don't output anything.
  void TimeProperty(const Span<const char>& aMaybePropertyName,
                    const TimeStamp& aTime) {
    if (!aTime.IsNull()) {
      TimeDoubleMsProperty(
          aMaybePropertyName,
          (aTime - TimeStamp::ProcessCreation()).ToMilliseconds());
    }
  }
  // Write aCount null array elements.
  void NullElements(uint32_t aCount) {
    for (uint32_t i = 0; i < aCount; i++) {
      NullElement();
    }
  }
  // Write raw pre-formatted JSON text as the next value, with no quoting or
  // escaping; the caller is responsible for its validity.
  void Splice(const Span<const char>& aStr) {
    Separator();
    WriteFunc().Write(aStr);
    // Tell the base writer a value was emitted at this depth, so the next
    // value gets a comma.
    mNeedComma[mDepth] = true;
  }
  void Splice(const char* aStr, size_t aLen) {
    Separator();
    WriteFunc().Write(Span<const char>(aStr, aLen));
    mNeedComma[mDepth] = true;
  }
  // Splice the given JSON directly in, without quoting.
  void SplicedJSONProperty(const Span<const char>& aMaybePropertyName,
                           const Span<const char>& aJsonValue) {
    Scalar(aMaybePropertyName, aJsonValue);
  }
  // Copy all of aFunc's chunks into this writer's output; aFunc is left
  // untouched.
  void CopyAndSplice(const ChunkedJSONWriteFunc& aFunc) {
    Separator();
    for (size_t i = 0; i < aFunc.mChunkList.length(); i++) {
      WriteFunc().Write(
          Span<const char>(aFunc.mChunkList[i].get(), aFunc.mChunkLengths[i]));
    }
    mNeedComma[mDepth] = true;
  }
  // Takes the chunks from aFunc and write them. If move is not possible
  // (e.g., using OStreamJSONWriteFunc), aFunc's chunks are copied and its
  // storage cleared.
  virtual void TakeAndSplice(ChunkedJSONWriteFunc&& aFunc) {
    Separator();
    for (size_t i = 0; i < aFunc.mChunkList.length(); i++) {
      WriteFunc().Write(
          Span<const char>(aFunc.mChunkList[i].get(), aFunc.mChunkLengths[i]));
    }
    // Leave aFunc empty, matching the move-based override's post-condition.
    aFunc.mChunkPtr = nullptr;
    aFunc.mChunkEnd = nullptr;
    aFunc.mChunkList.clear();
    aFunc.mChunkLengths.clear();
    mNeedComma[mDepth] = true;
  }
  // Set the pointer to a UniqueJSONStrings (must not already be set).
  void SetUniqueStrings(UniqueJSONStrings& aUniqueStrings) {
    MOZ_RELEASE_ASSERT(!mUniqueStrings);
    mUniqueStrings = &aUniqueStrings;
  }
  // Reset the pointer to a UniqueJSONStrings (must currently be set).
  void ResetUniqueStrings() {
    MOZ_RELEASE_ASSERT(mUniqueStrings);
    mUniqueStrings = nullptr;
  }
  // Add `aStr` to the unique-strings list (if not already there), and write its
  // index as a named object property.
  inline void UniqueStringProperty(const Span<const char>& aName,
                                   const Span<const char>& aStr);
  // Add `aStr` to the unique-strings list (if not already there), and write its
  // index as an array element.
  inline void UniqueStringElement(const Span<const char>& aStr);
  // The following functions override JSONWriter functions non-virtually. The
  // goal is to try and prevent calls that specify a style, which would be
  // ignored anyway because the whole thing is single-lined. It's fine if some
  // calls still make it through a `JSONWriter&`, no big deal.
  void Start() { JSONWriter::Start(); }
  void StartArrayProperty(const Span<const char>& aName) {
    JSONWriter::StartArrayProperty(aName);
  }
  template <size_t N>
  void StartArrayProperty(const char (&aName)[N]) {
    JSONWriter::StartArrayProperty(Span<const char>(aName, N));
  }
  void StartArrayElement() { JSONWriter::StartArrayElement(); }
  void StartObjectProperty(const Span<const char>& aName) {
    JSONWriter::StartObjectProperty(aName);
  }
  template <size_t N>
  void StartObjectProperty(const char (&aName)[N]) {
    JSONWriter::StartObjectProperty(Span<const char>(aName, N));
  }
  void StartObjectElement() { JSONWriter::StartObjectElement(); }
  FAILURELATCH_IMPL_PROXY(*mFailureLatch)
 protected:
  // Failure latch proxied by FAILURELATCH_IMPL_PROXY above; never null.
  NotNull<FailureLatch*> mFailureLatch;
 private:
  // Optional table used by UniqueStringProperty/UniqueStringElement; owned by
  // the caller of SetUniqueStrings().
  UniqueJSONStrings* mUniqueStrings = nullptr;
};
// SpliceableJSONWriter backed by a ChunkedJSONWriteFunc, so its buffered
// output can be copied out, or taken wholesale (by move) into another writer.
class SpliceableChunkedJSONWriter final : public SpliceableJSONWriter {
 public:
  explicit SpliceableChunkedJSONWriter(FailureLatch& aFailureLatch)
      : SpliceableJSONWriter(MakeUnique<ChunkedJSONWriteFunc>(aFailureLatch),
                             aFailureLatch) {}
  // Access the ChunkedJSONWriteFunc as reference-to-const, usually to copy data
  // out.
  const ChunkedJSONWriteFunc& ChunkedWriteFunc() const {
    return ChunkedWriteFuncRef();
  }
  // Access the ChunkedJSONWriteFunc as rvalue-reference, usually to take its
  // data out. This writer shouldn't be used anymore after this.
  ChunkedJSONWriteFunc&& TakeChunkedWriteFunc() {
    ChunkedJSONWriteFunc& ref = ChunkedWriteFuncRef();
#ifdef DEBUG
    // Record the hand-over so debug builds assert on any further access.
    mTaken = true;
#endif  // DEBUG
    return std::move(ref);
  }
  // Adopts the chunks from aFunc without copying.
  void TakeAndSplice(ChunkedJSONWriteFunc&& aFunc) override {
    MOZ_ASSERT(!mTaken);
    Separator();
    ChunkedWriteFuncRef().Take(std::move(aFunc));
    // A value was spliced at this depth; the next one needs a comma.
    mNeedComma[mDepth] = true;
  }
  // Re-target this writer's failure latch and the underlying write func's,
  // forwarding any already-latched failure to the new latch.
  void ChangeFailureLatchAndForwardState(FailureLatch& aFailureLatch) {
    mFailureLatch = WrapNotNullUnchecked(&aFailureLatch);
    return ChunkedWriteFuncRef().ChangeFailureLatchAndForwardState(
        aFailureLatch);
  }
 private:
  const ChunkedJSONWriteFunc& ChunkedWriteFuncRef() const {
    MOZ_ASSERT(!mTaken);
    // The WriteFunc was non-fallibly allocated as a ChunkedJSONWriteFunc in the
    // only constructor above, so it's safe to cast to ChunkedJSONWriteFunc&.
    return static_cast<const ChunkedJSONWriteFunc&>(WriteFunc());
  }
  ChunkedJSONWriteFunc& ChunkedWriteFuncRef() {
    MOZ_ASSERT(!mTaken);
    // The WriteFunc was non-fallibly allocated as a ChunkedJSONWriteFunc in the
    // only constructor above, so it's safe to cast to ChunkedJSONWriteFunc&.
    return static_cast<ChunkedJSONWriteFunc&>(WriteFunc());
  }
#ifdef DEBUG
  // True once TakeChunkedWriteFunc() has run; guards against further use.
  bool mTaken = false;
#endif
};
// RAII helper that writes a single-line "schema" object mapping field names
// to consecutive indices (0, 1, 2, ...). The object is opened on
// construction and closed when this helper goes out of scope.
class JSONSchemaWriter {
  JSONWriter& mWriter;
  uint32_t mNextFieldIndex = 0;
 public:
  explicit JSONSchemaWriter(JSONWriter& aWriter) : mWriter(aWriter) {
    aWriter.StartObjectProperty("schema",
                                SpliceableJSONWriter::SingleLineStyle);
  }
  // Emit `aName: <next index>` and advance the index.
  void WriteField(const Span<const char>& aName) {
    mWriter.IntProperty(aName, mNextFieldIndex);
    ++mNextFieldIndex;
  }
  // Literal overload; Np1 includes the trailing '\0', which is dropped.
  template <size_t Np1>
  void WriteField(const char (&aName)[Np1]) {
    WriteField(Span<const char>(aName, Np1 - 1));
  }
  ~JSONSchemaWriter() { mWriter.EndObject(); }
};
// This class helps create an indexed list of unique strings, and inserts the
// index as a JSON value. The collected list of unique strings can later be
// inserted as a JSON array.
// This can be useful for elements/properties with many repeated strings.
//
// With only JSONWriter w,
// `w.WriteElement("a"); w.WriteElement("b"); w.WriteElement("a");`
// when done inside a JSON array, will generate:
// `["a", "b", "c"]`
//
// With UniqueStrings u,
// `u.WriteElement(w, "a"); u.WriteElement(w, "b"); u.WriteElement(w, "a");`
// when done inside a JSON array, will generate:
// `[0, 1, 0]`
// and later, `u.SpliceStringTableElements(w)` (inside a JSON array), will
// output the corresponding indexed list of unique strings:
// `["a", "b"]`
// Indexed list of unique strings (see block comment above). Writes indices in
// place of strings; the table itself is spliced out once at the end via
// SpliceStringTableElements(). Acts as a FailureLatch proxy for its internal
// string-table writer.
class UniqueJSONStrings final : public FailureLatch {
 public:
  // Start an empty list of unique strings.
  MFBT_API explicit UniqueJSONStrings(FailureLatch& aFailureLatch);
  // Start with a copy of the strings from another list.
  MFBT_API UniqueJSONStrings(FailureLatch& aFailureLatch,
                             const UniqueJSONStrings& aOther,
                             ProgressLogger aProgressLogger);
  MFBT_API ~UniqueJSONStrings();
  // Add `aStr` to the list (if not already there), and write its index as a
  // named object property.
  void WriteProperty(SpliceableJSONWriter& aWriter,
                     const Span<const char>& aName,
                     const Span<const char>& aStr) {
    if (const Maybe<uint32_t> maybeIndex = GetOrAddIndex(aStr); maybeIndex) {
      aWriter.IntProperty(aName, *maybeIndex);
    } else if (!aWriter.Failed()) {
      // Only forward this list's failure if the writer hasn't already failed,
      // so an earlier failure message is not overwritten. (Keeps this
      // consistent with WriteElement below.)
      aWriter.SetFailureFrom(*this);
    }
  }
  // Add `aStr` to the list (if not already there), and write its index as an
  // array element.
  void WriteElement(SpliceableJSONWriter& aWriter,
                    const Span<const char>& aStr) {
    if (const Maybe<uint32_t> maybeIndex = GetOrAddIndex(aStr); maybeIndex) {
      aWriter.IntElement(*maybeIndex);
    } else if (!aWriter.Failed()) {
      // As above: don't clobber a pre-existing failure in the writer.
      aWriter.SetFailureFrom(*this);
    }
  }
  // Splice all collected unique strings into an array. This should only be done
  // once, and then this UniqueStrings shouldn't be used anymore.
  MFBT_API void SpliceStringTableElements(SpliceableJSONWriter& aWriter);
  FAILURELATCH_IMPL_PROXY(mStringTableWriter)
  // Re-target the string-table writer's failure latch, forwarding any
  // already-latched failure.
  void ChangeFailureLatchAndForwardState(FailureLatch& aFailureLatch) {
    mStringTableWriter.ChangeFailureLatchAndForwardState(aFailureLatch);
  }
 private:
  MFBT_API void ClearAndSetFailure(std::string aFailure);
  // If `aStr` is already listed, return its index.
  // Otherwise add it to the list and return the new index.
  // Returns Nothing on failure (e.g., OOM growing the table).
  MFBT_API Maybe<uint32_t> GetOrAddIndex(const Span<const char>& aStr);
  // Pre-serialized JSON array of the unique strings, in index order.
  SpliceableChunkedJSONWriter mStringTableWriter;
  // Maps a string's hash to its index in the table.
  HashMap<HashNumber, uint32_t> mStringHashToIndexMap;
};
// Add `aStr` to the unique-strings list (if not already there), and write its
// index as a named object property. A UniqueJSONStrings must have been set
// via SetUniqueStrings() beforehand.
void SpliceableJSONWriter::UniqueStringProperty(const Span<const char>& aName,
                                                const Span<const char>& aStr) {
  MOZ_RELEASE_ASSERT(mUniqueStrings);
  mUniqueStrings->WriteProperty(*this, aName, aStr);
}
// Add `aStr` to the unique-strings list (if not already there), and write its
// index as an array element. A UniqueJSONStrings must have been set via
// SetUniqueStrings() beforehand.
void SpliceableJSONWriter::UniqueStringElement(const Span<const char>& aStr) {
  MOZ_RELEASE_ASSERT(mUniqueStrings);
  mUniqueStrings->WriteElement(*this, aStr);
}
} // namespace baseprofiler
} // namespace mozilla
#endif // BASEPROFILEJSONWRITER_H
|