/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef gc_StoreBuffer_h
#define gc_StoreBuffer_h
#include "mozilla/Attributes.h"
#include "mozilla/HashFunctions.h"
#include "mozilla/ReentrancyGuard.h"
#include <algorithm>
#include <type_traits>
#include "ds/BitArray.h"
#include "ds/LifoAlloc.h"
#include "gc/Nursery.h"
#include "gc/TraceKind.h"
#include "js/AllocPolicy.h"
#include "js/UniquePtr.h"
#include "threading/Mutex.h"
namespace JS {
struct GCSizes;
}
namespace js {
#ifdef DEBUG
extern bool CurrentThreadIsGCMarking();
#endif
namespace gc {
class Arena;
class ArenaCellSet;
#ifdef DEBUG
extern bool CurrentThreadHasLockedGC();
#endif
/*
* BufferableRef represents an abstract reference for use in the generational
* GC's remembered set. Entries in the store buffer that cannot be represented
* with the simple pointer-to-a-pointer scheme must derive from this class and
* use the generic store buffer interface.
*
* A single BufferableRef entry in the generic buffer can represent many entries
* in the remembered set. For example js::OrderedHashTableRef represents all
* the incoming edges corresponding to keys in an ordered hash table.
*/
class BufferableRef {
public:
virtual void trace(JSTracer* trc) = 0;
bool maybeInRememberedSet(const Nursery&) const { return true; }
};
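/*
 * Illustrative sketch (not part of this header): a subclass that buffers the
 * nursery edges owned by some external table. |MyTable| and its
 * traceNurseryEdges() method are hypothetical names used only for this
 * example; see js::OrderedHashTableRef for a real subclass.
 *
 *   class MyTableRef : public BufferableRef {
 *     MyTable* table_;  // hypothetical owner of the buffered edges
 *
 *    public:
 *     explicit MyTableRef(MyTable* table) : table_(table) {}
 *     void trace(JSTracer* trc) override { table_->traceNurseryEdges(trc); }
 *   };
 *
 * Such an entry is inserted with StoreBuffer::putGeneric(MyTableRef(table)),
 * which copies it into the generic buffer.
 */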
using EdgeSet = HashSet<void*, PointerHasher<void*>, SystemAllocPolicy>;
/* The size of a single block of store buffer storage space. */
static const size_t LifoAllocBlockSize = 8 * 1024;
/*
* The StoreBuffer observes all writes that occur in the system and performs
* efficient filtering of them to derive a remembered set for nursery GC.
*/
class StoreBuffer {
friend class mozilla::ReentrancyGuard;
  /* Available space in the current chunk below which the generic buffer is
     considered about to overflow. */
  static const size_t GenericBufferLowAvailableThreshold =
      LifoAllocBlockSize / 2;
  /* Used space above which the other store buffers are about to overflow. */
  static const size_t BufferOverflowThresholdBytes = 128 * 1024;
/*
* This buffer holds only a single type of edge. Using this buffer is more
* efficient than the generic buffer when many writes will be to the same
* type of edge: e.g. Value or Cell*.
*/
template <typename T>
struct MonoTypeBuffer {
/* The canonical set of stores. */
    using StoreSet = HashSet<T, typename T::Hasher, SystemAllocPolicy>;
StoreSet stores_;
    /*
     * A one-element cache in front of the canonical set that absorbs
     * repeated writes to the same edge, e.g. from temporary instances of
     * HeapPtr.
     */
T last_;
StoreBuffer* owner_;
JS::GCReason gcReason_;
/* Maximum number of entries before we request a minor GC. */
    static const size_t MaxEntries = BufferOverflowThresholdBytes / sizeof(T);
explicit MonoTypeBuffer(StoreBuffer* owner, JS::GCReason reason)
: last_(T()), owner_(owner), gcReason_(reason) {}
void clear() {
last_ = T();
stores_.clear();
}
/* Add one item to the buffer. */
void put(const T& t) {
sinkStore();
last_ = t;
}
/* Remove an item from the store buffer. */
void unput(const T& v) {
// Fast, hashless remove of last put.
if (last_ == v) {
last_ = T();
return;
}
stores_.remove(v);
}
/* Move any buffered stores to the canonical store set. */
void sinkStore() {
if (last_) {
AutoEnterOOMUnsafeRegion oomUnsafe;
if (!stores_.put(last_)) {
oomUnsafe.crash("Failed to allocate for MonoTypeBuffer::put.");
}
}
last_ = T();
if (MOZ_UNLIKELY(stores_.count() > MaxEntries)) {
owner_->setAboutToOverflow(gcReason_);
}
}
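    /*
     * Illustrative behavior of the cache (a sketch, not a test): a
     * put()/unput() pair for the same edge never touches |stores_|, so
     * short-lived writes stay cheap:
     *
     *   buffer.put(edge);    // cached in |last_|; |stores_| untouched
     *   buffer.unput(edge);  // clears |last_|; no hash-set lookup needed
     *   buffer.put(edge);    // cached again
     *   buffer.put(other);   // sinks |edge| into |stores_|, caches |other|
     */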
/* Trace the source of all edges in the store buffer. */
void trace(TenuringTracer& mover);
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
return stores_.shallowSizeOfExcludingThis(mallocSizeOf);
}
bool isEmpty() const { return last_ == T() && stores_.empty(); }
private:
MonoTypeBuffer(const MonoTypeBuffer& other) = delete;
MonoTypeBuffer& operator=(const MonoTypeBuffer& other) = delete;
};
struct WholeCellBuffer {
UniquePtr<LifoAlloc> storage_;
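    // Heads of linked lists of ArenaCellSets. Sets for arenas holding
    // strings are kept separately from those for all other cells so the
    // tracer can treat strings specially (e.g. for deduplication during
    // minor GC).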
ArenaCellSet* stringHead_;
ArenaCellSet* nonStringHead_;
StoreBuffer* owner_;
explicit WholeCellBuffer(StoreBuffer* owner)
: storage_(nullptr),
stringHead_(nullptr),
nonStringHead_(nullptr),
owner_(owner) {}
[[nodiscard]] bool init();
void clear();
bool isAboutToOverflow() const {
return !storage_->isEmpty() &&
storage_->used() > BufferOverflowThresholdBytes;
}
void trace(TenuringTracer& mover);
inline void put(const Cell* cell);
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
return storage_ ? storage_->sizeOfIncludingThis(mallocSizeOf) : 0;
}
bool isEmpty() const {
MOZ_ASSERT_IF(!stringHead_ && !nonStringHead_,
!storage_ || storage_->isEmpty());
return !stringHead_ && !nonStringHead_;
}
private:
ArenaCellSet* allocateCellSet(Arena* arena);
WholeCellBuffer(const WholeCellBuffer& other) = delete;
WholeCellBuffer& operator=(const WholeCellBuffer& other) = delete;
};
struct GenericBuffer {
UniquePtr<LifoAlloc> storage_;
StoreBuffer* owner_;
explicit GenericBuffer(StoreBuffer* owner)
: storage_(nullptr), owner_(owner) {}
[[nodiscard]] bool init();
void clear() {
if (storage_) {
storage_->used() ? storage_->releaseAll() : storage_->freeAll();
}
}
bool isAboutToOverflow() const {
return !storage_->isEmpty() && storage_->availableInCurrentChunk() <
GenericBufferLowAvailableThreshold;
}
/* Trace all generic edges. */
void trace(JSTracer* trc);
template <typename T>
void put(const T& t) {
MOZ_ASSERT(storage_);
/* Ensure T is derived from BufferableRef. */
(void)static_cast<const BufferableRef*>(&t);
AutoEnterOOMUnsafeRegion oomUnsafe;
unsigned size = sizeof(T);
unsigned* sizep = storage_->pod_malloc<unsigned>();
if (!sizep) {
oomUnsafe.crash("Failed to allocate for GenericBuffer::put.");
}
*sizep = size;
T* tp = storage_->new_<T>(t);
if (!tp) {
oomUnsafe.crash("Failed to allocate for GenericBuffer::put.");
}
if (isAboutToOverflow()) {
owner_->setAboutToOverflow(JS::GCReason::FULL_GENERIC_BUFFER);
}
}
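    /*
     * Layout note (descriptive only): put() appends a size word followed by
     * a copy of |t|, so the buffer is a sequence of
     * [unsigned size][T bytes] records that trace() can walk in order.
     */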
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
return storage_ ? storage_->sizeOfIncludingThis(mallocSizeOf) : 0;
}
bool isEmpty() const { return !storage_ || storage_->isEmpty(); }
private:
GenericBuffer(const GenericBuffer& other) = delete;
GenericBuffer& operator=(const GenericBuffer& other) = delete;
};
template <typename Edge>
struct PointerEdgeHasher {
using Lookup = Edge;
static HashNumber hash(const Lookup& l) {
return mozilla::HashGeneric(l.edge);
}
static bool match(const Edge& k, const Lookup& l) { return k == l; }
};
template <typename T>
struct CellPtrEdge {
T** edge = nullptr;
CellPtrEdge() = default;
explicit CellPtrEdge(T** v) : edge(v) {}
bool operator==(const CellPtrEdge& other) const {
return edge == other.edge;
}
bool operator!=(const CellPtrEdge& other) const {
return edge != other.edge;
}
bool maybeInRememberedSet(const Nursery& nursery) const {
MOZ_ASSERT(IsInsideNursery(*edge));
return !nursery.isInside(edge);
}
void trace(TenuringTracer& mover) const;
explicit operator bool() const { return edge != nullptr; }
using Hasher = PointerEdgeHasher<CellPtrEdge<T>>;
};
using ObjectPtrEdge = CellPtrEdge<JSObject>;
using StringPtrEdge = CellPtrEdge<JSString>;
using BigIntPtrEdge = CellPtrEdge<JS::BigInt>;
struct ValueEdge {
JS::Value* edge;
ValueEdge() : edge(nullptr) {}
explicit ValueEdge(JS::Value* v) : edge(v) {}
bool operator==(const ValueEdge& other) const { return edge == other.edge; }
bool operator!=(const ValueEdge& other) const { return edge != other.edge; }
Cell* deref() const {
return edge->isGCThing() ? static_cast<Cell*>(edge->toGCThing())
: nullptr;
}
bool maybeInRememberedSet(const Nursery& nursery) const {
MOZ_ASSERT(IsInsideNursery(deref()));
return !nursery.isInside(edge);
}
void trace(TenuringTracer& mover) const;
explicit operator bool() const { return edge != nullptr; }
using Hasher = PointerEdgeHasher<ValueEdge>;
};
struct SlotsEdge {
// These definitions must match those in HeapSlot::Kind.
    static const int SlotKind = 0;
    static const int ElementKind = 1;
uintptr_t objectAndKind_; // NativeObject* | Kind
uint32_t start_;
uint32_t count_;
SlotsEdge() : objectAndKind_(0), start_(0), count_(0) {}
SlotsEdge(NativeObject* object, int kind, uint32_t start, uint32_t count)
: objectAndKind_(uintptr_t(object) | kind),
start_(start),
count_(count) {
MOZ_ASSERT((uintptr_t(object) & 1) == 0);
MOZ_ASSERT(kind <= 1);
MOZ_ASSERT(count > 0);
MOZ_ASSERT(start + count > start);
}
NativeObject* object() const {
return reinterpret_cast<NativeObject*>(objectAndKind_ & ~1);
}
int kind() const { return (int)(objectAndKind_ & 1); }
bool operator==(const SlotsEdge& other) const {
return objectAndKind_ == other.objectAndKind_ && start_ == other.start_ &&
count_ == other.count_;
}
bool operator!=(const SlotsEdge& other) const { return !(*this == other); }
    // Returns true if this SlotsEdge range overlaps or directly abuts the
    // other SlotsEdge range (adjacent ranges are treated as overlapping so
    // that they coalesce), and false otherwise.
bool overlaps(const SlotsEdge& other) const {
if (objectAndKind_ != other.objectAndKind_) {
return false;
}
// Widen our range by one on each side so that we consider
// adjacent-but-not-actually-overlapping ranges as overlapping. This
// is particularly useful for coalescing a series of increasing or
// decreasing single index writes 0, 1, 2, ..., N into a SlotsEdge
// range of elements [0, N].
uint32_t end = start_ + count_ + 1;
uint32_t start = start_ > 0 ? start_ - 1 : 0;
MOZ_ASSERT(start < end);
uint32_t otherEnd = other.start_ + other.count_;
MOZ_ASSERT(other.start_ <= otherEnd);
return (start <= other.start_ && other.start_ <= end) ||
(start <= otherEnd && otherEnd <= end);
}
// Destructively make this SlotsEdge range the union of the other
// SlotsEdge range and this one. A precondition is that the ranges must
// overlap.
void merge(const SlotsEdge& other) {
MOZ_ASSERT(overlaps(other));
uint32_t end = std::max(start_ + count_, other.start_ + other.count_);
start_ = std::min(start_, other.start_);
count_ = end - start_;
}
    bool maybeInRememberedSet(const Nursery&) const {
return !IsInsideNursery(reinterpret_cast<Cell*>(object()));
}
void trace(TenuringTracer& mover) const;
explicit operator bool() const { return objectAndKind_ != 0; }
    struct Hasher {
      using Lookup = SlotsEdge;
      static HashNumber hash(const Lookup& l) {
        return mozilla::HashGeneric(l.objectAndKind_, l.start_, l.count_);
      }
      static bool match(const SlotsEdge& k, const Lookup& l) { return k == l; }
    };
};
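  /*
   * Worked example of coalescing (illustrative only): consecutive writes to
   * elements 0 and 1 of the same object produce abutting one-element
   * ranges. Because overlaps() widens each range by one on both sides, the
   * two count as overlapping and merge() unions them:
   *
   *   SlotsEdge a(obj, SlotsEdge::ElementKind, 0, 1);  // elements [0, 1)
   *   SlotsEdge b(obj, SlotsEdge::ElementKind, 1, 1);  // elements [1, 2)
   *   MOZ_ASSERT(a.overlaps(b));
   *   a.merge(b);  // a now has start_ == 0, count_ == 2
   */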
#ifdef DEBUG
void checkAccess() const;
#else
void checkAccess() const {}
#endif
template <typename Buffer, typename Edge>
void unput(Buffer& buffer, const Edge& edge) {
checkAccess();
if (!isEnabled()) {
return;
}
mozilla::ReentrancyGuard g(*this);
buffer.unput(edge);
}
template <typename Buffer, typename Edge>
void put(Buffer& buffer, const Edge& edge) {
checkAccess();
if (!isEnabled()) {
return;
}
mozilla::ReentrancyGuard g(*this);
if (edge.maybeInRememberedSet(nursery_)) {
buffer.put(edge);
}
}
Mutex lock_ MOZ_UNANNOTATED;
MonoTypeBuffer<ValueEdge> bufferVal;
MonoTypeBuffer<StringPtrEdge> bufStrCell;
MonoTypeBuffer<BigIntPtrEdge> bufBigIntCell;
MonoTypeBuffer<ObjectPtrEdge> bufObjCell;
MonoTypeBuffer<SlotsEdge> bufferSlot;
WholeCellBuffer bufferWholeCell;
GenericBuffer bufferGeneric;
JSRuntime* runtime_;
const Nursery& nursery_;
bool aboutToOverflow_;
bool enabled_;
bool mayHavePointersToDeadCells_;
#ifdef DEBUG
bool mEntered; /* For ReentrancyGuard. */
#endif
public:
#ifdef DEBUG
bool markingNondeduplicatable;
#endif
explicit StoreBuffer(JSRuntime* rt, const Nursery& nursery);
[[nodiscard]] bool enable();
void disable();
bool isEnabled() const { return enabled_; }
bool isEmpty() const;
void clear();
const Nursery& nursery() const { return nursery_; }
/* Get the overflowed status. */
bool isAboutToOverflow() const { return aboutToOverflow_; }
/*
 * Brain transplants may add whole cell buffer entries for dead cells. We must
* evict the nursery prior to sweeping arenas if any such entries are present.
*/
bool mayHavePointersToDeadCells() const {
return mayHavePointersToDeadCells_;
}
/* Insert a single edge into the buffer/remembered set. */
void putValue(JS::Value* vp) { put(bufferVal, ValueEdge(vp)); }
void unputValue(JS::Value* vp) { unput(bufferVal, ValueEdge(vp)); }
void putCell(JSString** strp) { put(bufStrCell, StringPtrEdge(strp)); }
void unputCell(JSString** strp) { unput(bufStrCell, StringPtrEdge(strp)); }
void putCell(JS::BigInt** bip) { put(bufBigIntCell, BigIntPtrEdge(bip)); }
void unputCell(JS::BigInt** bip) { unput(bufBigIntCell, BigIntPtrEdge(bip)); }
  void putCell(JSObject** objp) { put(bufObjCell, ObjectPtrEdge(objp)); }
  void unputCell(JSObject** objp) { unput(bufObjCell, ObjectPtrEdge(objp)); }
void putSlot(NativeObject* obj, int kind, uint32_t start, uint32_t count) {
SlotsEdge edge(obj, kind, start, count);
if (bufferSlot.last_.overlaps(edge)) {
bufferSlot.last_.merge(edge);
} else {
put(bufferSlot, edge);
}
}
inline void putWholeCell(Cell* cell);
/* Insert an entry into the generic buffer. */
template <typename T>
void putGeneric(const T& t) {
put(bufferGeneric, t);
}
void setMayHavePointersToDeadCells() { mayHavePointersToDeadCells_ = true; }
/* Methods to trace the source of all edges in the store buffer. */
void traceValues(TenuringTracer& mover) { bufferVal.trace(mover); }
void traceCells(TenuringTracer& mover) {
bufStrCell.trace(mover);
bufBigIntCell.trace(mover);
bufObjCell.trace(mover);
}
void traceSlots(TenuringTracer& mover) { bufferSlot.trace(mover); }
void traceWholeCells(TenuringTracer& mover) { bufferWholeCell.trace(mover); }
void traceGenericEntries(JSTracer* trc) { bufferGeneric.trace(trc); }
/* For use by our owned buffers and for testing. */
void setAboutToOverflow(JS::GCReason);
void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
JS::GCSizes* sizes);
void checkEmpty() const;
// For use by the GC only.
void lock() { lock_.lock(); }
void unlock() { lock_.unlock(); }
};
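/*
 * Typical use (a sketch under assumed surroundings, not a prescription):
 * write barriers insert edges after a tenured cell is mutated. A barrier
 * for a Value field might look like this, where |sb| is however the caller
 * reaches the runtime's StoreBuffer:
 *
 *   void postBarrierValue(StoreBuffer* sb, JS::Value* vp) {
 *     if (vp->isGCThing() && IsInsideNursery(vp->toGCThing())) {
 *       sb->putValue(vp);  // filtered further by maybeInRememberedSet()
 *     }
 *   }
 *
 * Note the nursery check before the call: ValueEdge::maybeInRememberedSet()
 * asserts that the target is in the nursery, and put() then discards edges
 * whose slot itself lives in the nursery.
 */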
// A set of cells in an arena used to implement the whole cell store buffer.
class ArenaCellSet {
friend class StoreBuffer;
using ArenaCellBits = BitArray<MaxArenaCellIndex>;
// The arena this relates to.
Arena* arena;
// Pointer to next set forming a linked list.
ArenaCellSet* next;
// Bit vector for each possible cell start position.
ArenaCellBits bits;
#ifdef DEBUG
// The minor GC number when this was created. This object should not survive
// past the next minor collection.
const uint64_t minorGCNumberAtCreation;
#endif
// Construct the empty sentinel object.
constexpr ArenaCellSet()
: arena(nullptr),
next(nullptr)
#ifdef DEBUG
,
minorGCNumberAtCreation(0)
#endif
{
}
public:
using WordT = ArenaCellBits::WordT;
const size_t BitsPerWord = ArenaCellBits::bitsPerElement;
const size_t NumWords = ArenaCellBits::numSlots;
ArenaCellSet(Arena* arena, ArenaCellSet* next);
bool hasCell(const TenuredCell* cell) const {
return hasCell(getCellIndex(cell));
}
void putCell(const TenuredCell* cell) { putCell(getCellIndex(cell)); }
bool isEmpty() const { return this == &Empty; }
bool hasCell(size_t cellIndex) const;
void putCell(size_t cellIndex);
void check() const;
WordT getWord(size_t wordIndex) const { return bits.getWord(wordIndex); }
void trace(TenuringTracer& mover);
// Sentinel object used for all empty sets.
//
// We use a sentinel because it simplifies the JIT code slightly as we can
// assume all arenas have a cell set.
static ArenaCellSet Empty;
static size_t getCellIndex(const TenuredCell* cell);
static void getWordIndexAndMask(size_t cellIndex, size_t* wordp,
uint32_t* maskp);
// Attempt to trigger a minor GC if free space in the nursery (where these
// objects are allocated) falls below this threshold.
static const size_t NurseryFreeThresholdBytes = 64 * 1024;
static size_t offsetOfArena() { return offsetof(ArenaCellSet, arena); }
static size_t offsetOfBits() { return offsetof(ArenaCellSet, bits); }
};
// Post-write barrier implementation for GC cells.
// Implement the post-write barrier for nursery allocatable cell type |T|. Call
// this from |T::postWriteBarrier|.
template <typename T>
MOZ_ALWAYS_INLINE void PostWriteBarrierImpl(void* cellp, T* prev, T* next) {
MOZ_ASSERT(cellp);
// If the target needs an entry, add it.
StoreBuffer* buffer;
if (next && (buffer = next->storeBuffer())) {
// If we know that the prev has already inserted an entry, we can skip
// doing the lookup to add the new entry. Note that we cannot safely
// assert the presence of the entry because it may have been added
// via a different store buffer.
if (prev && prev->storeBuffer()) {
return;
}
buffer->putCell(static_cast<T**>(cellp));
return;
}
// Remove the prev entry if the new value does not need it. There will only
// be a prev entry if the prev value was in the nursery.
if (prev && (buffer = prev->storeBuffer())) {
buffer->unputCell(static_cast<T**>(cellp));
}
}
template <typename T>
MOZ_ALWAYS_INLINE void PostWriteBarrier(T** vp, T* prev, T* next) {
static_assert(std::is_base_of_v<Cell, T>);
static_assert(!std::is_same_v<Cell, T> && !std::is_same_v<TenuredCell, T>);
if constexpr (!std::is_base_of_v<TenuredCell, T>) {
using BaseT = typename BaseGCType<T>::type;
PostWriteBarrierImpl<BaseT>(vp, prev, next);
return;
}
MOZ_ASSERT_IF(next, !IsInsideNursery(next));
}
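/*
 * Example call site (a sketch; |setTarget| and |slot| are hypothetical): a
 * setter for a field holding a nursery-allocatable cell stores the new
 * value and then barriers with the previous and next values:
 *
 *   void setTarget(JSObject** slot, JSObject* next) {
 *     JSObject* prev = *slot;
 *     *slot = next;
 *     gc::PostWriteBarrier(slot, prev, next);
 *   }
 *
 * PostWriteBarrier() then adds, keeps, or removes the store buffer entry for
 * |slot| depending on whether |prev| and |next| are in the nursery.
 */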
// Used when we don't have a specific edge to put in the store buffer.
void PostWriteBarrierCell(Cell* cell, Cell* prev, Cell* next);
} /* namespace gc */
} /* namespace js */
#endif /* gc_StoreBuffer_h */