author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-15 03:34:50 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-15 03:34:50 +0000
commit     def92d1b8e9d373e2f6f27c366d578d97d8960c6 (patch)
tree       2ef34b9ad8bb9a9220e05d60352558b15f513894 /js/src/gc
parent     Adding debian version 125.0.3-1. (diff)
download   firefox-def92d1b8e9d373e2f6f27c366d578d97d8960c6.tar.xz
           firefox-def92d1b8e9d373e2f6f27c366d578d97d8960c6.zip

Merging upstream version 126.0.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>

Diffstat (limited to 'js/src/gc')
-rw-r--r--  js/src/gc/Allocator.cpp             13
-rw-r--r--  js/src/gc/Allocator.h                2
-rw-r--r--  js/src/gc/AtomMarking.cpp           57
-rw-r--r--  js/src/gc/AtomMarking.h             17
-rw-r--r--  js/src/gc/Cell.h                     3
-rw-r--r--  js/src/gc/Compacting.cpp             2
-rw-r--r--  js/src/gc/GC.cpp                   138
-rw-r--r--  js/src/gc/GC.h                       3
-rw-r--r--  js/src/gc/GCAPI.cpp                 12
-rw-r--r--  js/src/gc/GCInternals.h              2
-rw-r--r--  js/src/gc/GCRuntime.h                3
-rw-r--r--  js/src/gc/Heap-inl.h                 4
-rw-r--r--  js/src/gc/MallocedBlockCache.h      23
-rw-r--r--  js/src/gc/Nursery-inl.h             92
-rw-r--r--  js/src/gc/Nursery.cpp              876
-rw-r--r--  js/src/gc/Nursery.h                296
-rw-r--r--  js/src/gc/NurseryAwareHashMap.h     64
-rw-r--r--  js/src/gc/Pretenuring.cpp           22
-rw-r--r--  js/src/gc/Pretenuring.h             33
-rw-r--r--  js/src/gc/PublicIterators.h          2
-rw-r--r--  js/src/gc/Scheduling.h               3
-rw-r--r--  js/src/gc/StableCellHasher-inl.h     1
-rw-r--r--  js/src/gc/StoreBuffer-inl.h          5
-rw-r--r--  js/src/gc/StoreBuffer.cpp           32
-rw-r--r--  js/src/gc/StoreBuffer.h             54
-rw-r--r--  js/src/gc/Sweeping.cpp              25
-rw-r--r--  js/src/gc/Tenuring.cpp             710
-rw-r--r--  js/src/gc/Tenuring.h                87
-rw-r--r--  js/src/gc/TraceMethods-inl.h        17
-rw-r--r--  js/src/gc/WeakMap-inl.h             54
-rw-r--r--  js/src/gc/Zone.cpp                  19
-rw-r--r--  js/src/gc/Zone.h                   170
32 files changed, 1953 insertions, 888 deletions
diff --git a/js/src/gc/Allocator.cpp b/js/src/gc/Allocator.cpp
index c070ac1eef..9e7f16cc3f 100644
--- a/js/src/gc/Allocator.cpp
+++ b/js/src/gc/Allocator.cpp
@@ -77,9 +77,10 @@ MOZ_NEVER_INLINE void* CellAllocator::RetryNurseryAlloc(JSContext* cx,
size_t thingSize,
AllocSite* site) {
MOZ_ASSERT(cx->isNurseryAllocAllowed());
- MOZ_ASSERT(cx->zone() == site->zone());
- MOZ_ASSERT(!cx->zone()->isAtomsZone());
- MOZ_ASSERT(cx->zone()->allocKindInNursery(traceKind));
+
+ Zone* zone = site->zone();
+ MOZ_ASSERT(!zone->isAtomsZone());
+ MOZ_ASSERT(zone->allocKindInNursery(traceKind));
Nursery& nursery = cx->nursery();
JS::GCReason reason = nursery.handleAllocationFailure();
@@ -102,7 +103,7 @@ MOZ_NEVER_INLINE void* CellAllocator::RetryNurseryAlloc(JSContext* cx,
cx->runtime()->gc.minorGC(reason);
// Exceeding gcMaxBytes while tenuring can disable the Nursery.
- if (cx->zone()->allocKindInNursery(traceKind)) {
+ if (zone->allocKindInNursery(traceKind)) {
void* ptr = cx->nursery().allocateCell(site, thingSize, traceKind);
if (ptr) {
return ptr;
@@ -291,7 +292,7 @@ void CellAllocator::CheckIncrementalZoneState(JSContext* cx, void* ptr) {
}
#endif
-void* js::gc::AllocateCellInGC(Zone* zone, AllocKind thingKind) {
+void* js::gc::AllocateTenuredCellInGC(Zone* zone, AllocKind thingKind) {
void* ptr = zone->arenas.allocateFromFreeList(thingKind);
if (!ptr) {
AutoEnterOOMUnsafeRegion oomUnsafe;
@@ -541,7 +542,7 @@ TenuredChunk* GCRuntime::getOrAllocChunk(AutoLockGCBgAlloc& lock) {
// Reinitialize ChunkBase; arenas are all free and may or may not be
// committed.
SetMemCheckKind(chunk, sizeof(ChunkBase), MemCheckKind::MakeUndefined);
- chunk->initBase(rt, nullptr);
+ chunk->initBaseForTenuredChunk(rt);
MOZ_ASSERT(chunk->unused());
} else {
void* ptr = TenuredChunk::allocate(this);
diff --git a/js/src/gc/Allocator.h b/js/src/gc/Allocator.h
index 3b1566e7f5..c317bfb10b 100644
--- a/js/src/gc/Allocator.h
+++ b/js/src/gc/Allocator.h
@@ -21,6 +21,7 @@ namespace gc {
class AllocSite;
struct Cell;
class TenuredCell;
+class TenuringTracer;
// Allocator implementation functions. SpiderMonkey code outside this file
// should use:
@@ -82,6 +83,7 @@ class CellAllocator {
static void* AllocNurseryOrTenuredCell(JSContext* cx, gc::AllocKind allocKind,
size_t thingSize, gc::Heap heap,
AllocSite* site);
+ friend class TenuringTracer;
// Allocate a cell in the tenured heap.
template <AllowGC allowGC>
diff --git a/js/src/gc/AtomMarking.cpp b/js/src/gc/AtomMarking.cpp
index eb30758263..a5b7eb1acf 100644
--- a/js/src/gc/AtomMarking.cpp
+++ b/js/src/gc/AtomMarking.cpp
@@ -12,6 +12,7 @@
#include "gc/GC-inl.h"
#include "gc/Heap-inl.h"
+#include "gc/PrivateIterators-inl.h"
namespace js {
namespace gc {
@@ -71,7 +72,38 @@ void AtomMarkingRuntime::unregisterArena(Arena* arena, const AutoLockGC& lock) {
(void)freeArenaIndexes.ref().emplaceBack(arena->atomBitmapStart());
}
-bool AtomMarkingRuntime::computeBitmapFromChunkMarkBits(JSRuntime* runtime,
+void AtomMarkingRuntime::refineZoneBitmapsForCollectedZones(
+ GCRuntime* gc, size_t collectedZones) {
+ // If there is more than one zone to update, copy the chunk mark bits into a
+ // bitmap and AND that into the atom marking bitmap for each zone.
+ DenseBitmap marked;
+ if (collectedZones > 1 && computeBitmapFromChunkMarkBits(gc, marked)) {
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ refineZoneBitmapForCollectedZone(zone, marked);
+ }
+ return;
+ }
+
+ // If there's only one zone (or on OOM), AND the mark bits for each arena into
+ // the zones' atom marking bitmaps directly.
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ if (zone->isAtomsZone()) {
+ continue;
+ }
+
+ for (auto thingKind : AllAllocKinds()) {
+ for (ArenaIterInGC aiter(gc->atomsZone(), thingKind); !aiter.done();
+ aiter.next()) {
+ Arena* arena = aiter.get();
+ MarkBitmapWord* chunkWords = arena->chunk()->markBits.arenaBits(arena);
+ zone->markedAtoms().bitwiseAndRangeWith(arena->atomBitmapStart(),
+ ArenaBitmapWords, chunkWords);
+ }
+ }
+ }
+}
+
+bool AtomMarkingRuntime::computeBitmapFromChunkMarkBits(GCRuntime* gc,
DenseBitmap& bitmap) {
MOZ_ASSERT(CurrentThreadIsPerformingGC());
@@ -79,7 +111,7 @@ bool AtomMarkingRuntime::computeBitmapFromChunkMarkBits(JSRuntime* runtime,
return false;
}
- Zone* atomsZone = runtime->unsafeAtomsZone();
+ Zone* atomsZone = gc->atomsZone();
for (auto thingKind : AllAllocKinds()) {
for (ArenaIterInGC aiter(atomsZone, thingKind); !aiter.done();
aiter.next()) {
@@ -109,13 +141,12 @@ void AtomMarkingRuntime::refineZoneBitmapForCollectedZone(
// Set any bits in the chunk mark bitmaps for atoms which are marked in bitmap.
template <typename Bitmap>
-static void BitwiseOrIntoChunkMarkBits(JSRuntime* runtime, Bitmap& bitmap) {
+static void BitwiseOrIntoChunkMarkBits(Zone* atomsZone, Bitmap& bitmap) {
// Make sure that by copying the mark bits for one arena in word sizes we
// do not affect the mark bits for other arenas.
static_assert(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
"ArenaBitmapWords must evenly divide ArenaBitmapBits");
- Zone* atomsZone = runtime->unsafeAtomsZone();
for (auto thingKind : AllAllocKinds()) {
for (ArenaIterInGC aiter(atomsZone, thingKind); !aiter.done();
aiter.next()) {
@@ -127,16 +158,10 @@ static void BitwiseOrIntoChunkMarkBits(JSRuntime* runtime, Bitmap& bitmap) {
}
}
-void AtomMarkingRuntime::markAtomsUsedByUncollectedZones(JSRuntime* runtime) {
+void AtomMarkingRuntime::markAtomsUsedByUncollectedZones(
+ GCRuntime* gc, size_t uncollectedZones) {
MOZ_ASSERT(CurrentThreadIsPerformingGC());
- size_t uncollectedZones = 0;
- for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
- if (!zone->isCollecting()) {
- uncollectedZones++;
- }
- }
-
// If there are no uncollected non-atom zones then there's no work to do.
if (uncollectedZones == 0) {
return;
@@ -149,15 +174,15 @@ void AtomMarkingRuntime::markAtomsUsedByUncollectedZones(JSRuntime* runtime) {
DenseBitmap markedUnion;
if (uncollectedZones == 1 || !markedUnion.ensureSpace(allocatedWords)) {
- for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
+ for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
if (!zone->isCollecting()) {
- BitwiseOrIntoChunkMarkBits(runtime, zone->markedAtoms());
+ BitwiseOrIntoChunkMarkBits(gc->atomsZone(), zone->markedAtoms());
}
}
return;
}
- for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
+ for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
// We only need to update the chunk mark bits for zones which were
// not collected in the current GC. Atoms which are referenced by
// collected zones have already been marked.
@@ -166,7 +191,7 @@ void AtomMarkingRuntime::markAtomsUsedByUncollectedZones(JSRuntime* runtime) {
}
}
- BitwiseOrIntoChunkMarkBits(runtime, markedUnion);
+ BitwiseOrIntoChunkMarkBits(gc->atomsZone(), markedUnion);
}
template <typename T>
diff --git a/js/src/gc/AtomMarking.h b/js/src/gc/AtomMarking.h
index e7e97fb389..e5d8ef8418 100644
--- a/js/src/gc/AtomMarking.h
+++ b/js/src/gc/AtomMarking.h
@@ -19,6 +19,7 @@ class DenseBitmap;
namespace gc {
class Arena;
+class GCRuntime;
// This class manages state used for marking atoms during GCs.
// See AtomMarking.cpp for details.
@@ -42,19 +43,25 @@ class AtomMarkingRuntime {
// Mark an arena as no longer holding things in the atoms zone.
void unregisterArena(Arena* arena, const AutoLockGC& lock);
+ // Update the atom marking bitmaps in all collected zones according to the
+ // atoms zone mark bits.
+ void refineZoneBitmapsForCollectedZones(GCRuntime* gc, size_t collectedZones);
+
+ // Set any bits in the chunk mark bitmaps for atoms which are marked in any
+ // uncollected zone in the runtime.
+ void markAtomsUsedByUncollectedZones(GCRuntime* gc, size_t uncollectedZones);
+
+ private:
// Fill |bitmap| with an atom marking bitmap based on the things that are
// currently marked in the chunks used by atoms zone arenas. This returns
// false on an allocation failure (but does not report an exception).
- bool computeBitmapFromChunkMarkBits(JSRuntime* runtime, DenseBitmap& bitmap);
+ bool computeBitmapFromChunkMarkBits(GCRuntime* gc, DenseBitmap& bitmap);
// Update the atom marking bitmap in |zone| according to another
// overapproximation of the reachable atoms in |bitmap|.
void refineZoneBitmapForCollectedZone(Zone* zone, const DenseBitmap& bitmap);
- // Set any bits in the chunk mark bitmaps for atoms which are marked in any
- // uncollected zone in the runtime.
- void markAtomsUsedByUncollectedZones(JSRuntime* runtime);
-
+ public:
// Mark an atom or id as being newly reachable by the context's zone.
template <typename T>
void markAtom(JSContext* cx, T* thing);
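The two new public entry points fold the zone counting that markAtomsUsedByUncollectedZones previously did internally into the caller. A minimal caller sketch, assuming the counts are gathered the way the removed loop did and that the AtomMarkingRuntime instance is reachable as gc->atomMarking (member name assumed; the real call sites are in GC code outside this excerpt):

  // Hypothetical caller, for illustration only.
  size_t collectedZones = 0;
  size_t uncollectedZones = 0;
  for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
    if (zone->isCollecting()) {
      collectedZones++;
    } else {
      uncollectedZones++;
    }
  }

  // After marking: AND the atoms-zone mark bits into each collected zone's
  // atom bitmap (the DenseBitmap fast path is used only when several zones
  // need updating).
  gc->atomMarking.refineZoneBitmapsForCollectedZones(gc, collectedZones);

  // Before sweeping the atoms zone: OR the atom bitmaps of uncollected zones
  // into the chunk mark bits so the atoms they still use survive.
  gc->atomMarking.markAtomsUsedByUncollectedZones(gc, uncollectedZones);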
diff --git a/js/src/gc/Cell.h b/js/src/gc/Cell.h
index 5a89ad794d..f91163e2f5 100644
--- a/js/src/gc/Cell.h
+++ b/js/src/gc/Cell.h
@@ -209,6 +209,8 @@ struct Cell {
inline JS::Zone* nurseryZone() const;
inline JS::Zone* nurseryZoneFromAnyThread() const;
+ inline ChunkBase* chunk() const;
+
// Default implementation for kinds that cannot be permanent. This may be
// overriden by derived classes.
MOZ_ALWAYS_INLINE bool isPermanentAndMayBeShared() const { return false; }
@@ -222,7 +224,6 @@ struct Cell {
protected:
uintptr_t address() const;
- inline ChunkBase* chunk() const;
private:
// Cells are destroyed by the GC. Do not delete them directly.
diff --git a/js/src/gc/Compacting.cpp b/js/src/gc/Compacting.cpp
index 442fa3fe47..79e8e0b71d 100644
--- a/js/src/gc/Compacting.cpp
+++ b/js/src/gc/Compacting.cpp
@@ -215,7 +215,7 @@ static void RelocateCell(Zone* zone, TenuredCell* src, AllocKind thingKind,
// Allocate a new cell.
MOZ_ASSERT(zone == src->zone());
TenuredCell* dst =
- reinterpret_cast<TenuredCell*>(AllocateCellInGC(zone, thingKind));
+ reinterpret_cast<TenuredCell*>(AllocateTenuredCellInGC(zone, thingKind));
// Copy source cell contents to destination.
memcpy(dst, src, thingSize);
diff --git a/js/src/gc/GC.cpp b/js/src/gc/GC.cpp
index c01dfe3660..68dd66898c 100644
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -444,7 +444,9 @@ GCRuntime::GCRuntime(JSRuntime* rt)
#endif
requestSliceAfterBackgroundTask(false),
lifoBlocksToFree((size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
- lifoBlocksToFreeAfterMinorGC(
+ lifoBlocksToFreeAfterFullMinorGC(
+ (size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
+ lifoBlocksToFreeAfterNextMinorGC(
(size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
sweepGroupIndex(0),
sweepGroups(nullptr),
@@ -658,7 +660,7 @@ void GCRuntime::setZeal(uint8_t zeal, uint32_t frequency) {
if (zeal == 0) {
if (hasZealMode(ZealMode::GenerationalGC)) {
- evictNursery(JS::GCReason::DEBUG_GC);
+ evictNursery();
nursery().leaveZealMode();
}
@@ -669,7 +671,7 @@ void GCRuntime::setZeal(uint8_t zeal, uint32_t frequency) {
ZealMode zealMode = ZealMode(zeal);
if (zealMode == ZealMode::GenerationalGC) {
- evictNursery(JS::GCReason::DEBUG_GC);
+ evictNursery(JS::GCReason::EVICT_NURSERY);
nursery().enterZealMode();
}
@@ -704,7 +706,7 @@ void GCRuntime::unsetZeal(uint8_t zeal) {
}
if (zealMode == ZealMode::GenerationalGC) {
- evictNursery(JS::GCReason::DEBUG_GC);
+ evictNursery();
nursery().leaveZealMode();
}
@@ -1073,6 +1075,11 @@ bool GCRuntime::setParameter(JSGCParamKey key, uint32_t value,
marker->incrementalWeakMapMarkingEnabled = value != 0;
}
break;
+ case JSGC_SEMISPACE_NURSERY_ENABLED: {
+ AutoUnlockGC unlock(lock);
+ nursery().setSemispaceEnabled(value);
+ break;
+ }
case JSGC_MIN_EMPTY_CHUNK_COUNT:
setMinEmptyChunkCount(value, lock);
break;
@@ -1160,6 +1167,11 @@ void GCRuntime::resetParameter(JSGCParamKey key, AutoLockGC& lock) {
TuningDefaults::IncrementalWeakMapMarkingEnabled;
}
break;
+ case JSGC_SEMISPACE_NURSERY_ENABLED: {
+ AutoUnlockGC unlock(lock);
+ nursery().setSemispaceEnabled(TuningDefaults::SemispaceNurseryEnabled);
+ break;
+ }
case JSGC_MIN_EMPTY_CHUNK_COUNT:
setMinEmptyChunkCount(TuningDefaults::MinEmptyChunkCount, lock);
break;
@@ -1241,6 +1253,8 @@ uint32_t GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC& lock) {
return parallelMarkingEnabled;
case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
return marker().incrementalWeakMapMarkingEnabled;
+ case JSGC_SEMISPACE_NURSERY_ENABLED:
+ return nursery().semispaceEnabled();
case JSGC_CHUNK_BYTES:
return ChunkSize;
case JSGC_HELPER_THREAD_RATIO:
@@ -2135,7 +2149,7 @@ void GCRuntime::queueUnusedLifoBlocksForFree(LifoAlloc* lifo) {
}
void GCRuntime::queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc* lifo) {
- lifoBlocksToFreeAfterMinorGC.ref().transferFrom(lifo);
+ lifoBlocksToFreeAfterFullMinorGC.ref().transferFrom(lifo);
}
void GCRuntime::queueBuffersForFreeAfterMinorGC(Nursery::BufferSet& buffers) {
@@ -2741,24 +2755,7 @@ void GCRuntime::endPreparePhase(JS::GCReason reason) {
MOZ_ASSERT(unmarkTask.isIdle());
for (GCZonesIter zone(this); !zone.done(); zone.next()) {
- /*
- * In an incremental GC, clear the area free lists to ensure that subsequent
- * allocations refill them and end up marking new cells back. See
- * arenaAllocatedDuringGC().
- */
- zone->arenas.clearFreeLists();
-
zone->setPreservingCode(false);
-
-#ifdef JS_GC_ZEAL
- if (hasZealMode(ZealMode::YieldBeforeRootMarking)) {
- for (auto kind : AllAllocKinds()) {
- for (ArenaIterInGC arena(zone, kind); !arena.done(); arena.next()) {
- arena->checkNoMarkedCells();
- }
- }
- }
-#endif
}
// Discard JIT code more aggressively if the process is approaching its
@@ -2800,7 +2797,7 @@ void GCRuntime::endPreparePhase(JS::GCReason reason) {
*/
{
- gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::PREPARE);
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PREPARE);
AutoLockHelperThreadState helperLock;
@@ -2817,20 +2814,6 @@ void GCRuntime::endPreparePhase(JS::GCReason reason) {
haveDiscardedJITCodeThisSlice = true;
/*
- * Relazify functions after discarding JIT code (we can't relazify
- * functions with JIT code) and before the actual mark phase, so that
- * the current GC can collect the JSScripts we're unlinking here. We do
- * this only when we're performing a shrinking GC, as too much
- * relazification can cause performance issues when we have to reparse
- * the same functions over and over.
- */
- if (isShrinkingGC()) {
- relazifyFunctionsForShrinkingGC();
- purgePropMapTablesForShrinkingGC();
- purgeSourceURLsForShrinkingGC();
- }
-
- /*
* We must purge the runtime at the beginning of an incremental GC. The
* danger if we purge later is that the snapshot invariant of
* incremental GC will be broken, as follows. If some object is
@@ -2840,8 +2823,25 @@ void GCRuntime::endPreparePhase(JS::GCReason reason) {
* it. This object might never be marked, so a GC hazard would exist.
*/
purgeRuntime();
+ }
+
+ // This will start background free for lifo blocks queued by purgeRuntime,
+ // even if there's nothing in the nursery.
+ collectNurseryFromMajorGC(reason);
- startBackgroundFreeAfterMinorGC();
+ {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PREPARE);
+ // Relazify functions after discarding JIT code (we can't relazify functions
+ // with JIT code) and before the actual mark phase, so that the current GC
+ // can collect the JSScripts we're unlinking here. We do this only when
+ // we're performing a shrinking GC, as too much relazification can cause
+ // performance issues when we have to reparse the same functions over and
+ // over.
+ if (isShrinkingGC()) {
+ relazifyFunctionsForShrinkingGC();
+ purgePropMapTablesForShrinkingGC();
+ purgeSourceURLsForShrinkingGC();
+ }
if (isShutdownGC()) {
/* Clear any engine roots that may hold external data live. */
@@ -2903,6 +2903,21 @@ void GCRuntime::beginMarkPhase(AutoGCSession& session) {
#endif
for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ // In an incremental GC, clear the arena free lists to ensure that
+ // subsequent allocations refill them and end up marking new cells black.
+ // See arenaAllocatedDuringGC().
+ zone->arenas.clearFreeLists();
+
+#ifdef JS_GC_ZEAL
+ if (hasZealMode(ZealMode::YieldBeforeRootMarking)) {
+ for (auto kind : AllAllocKinds()) {
+ for (ArenaIter arena(zone, kind); !arena.done(); arena.next()) {
+ arena->checkNoMarkedCells();
+ }
+ }
+ }
+#endif
+
// Incremental marking barriers are enabled at this point.
zone->changeGCState(Zone::Prepare, zone->initialMarkingState());
@@ -3715,11 +3730,8 @@ void GCRuntime::incrementalSlice(SliceBudget& budget, JS::GCReason reason,
[[fallthrough]];
case State::MarkRoots:
- if (NeedToCollectNursery(this)) {
- collectNurseryFromMajorGC(reason);
- }
-
endPreparePhase(reason);
+
beginMarkPhase(session);
incrementalState = State::Mark;
@@ -3878,8 +3890,11 @@ void GCRuntime::incrementalSlice(SliceBudget& budget, JS::GCReason reason,
}
void GCRuntime::collectNurseryFromMajorGC(JS::GCReason reason) {
- collectNursery(gcOptions(), reason,
+ collectNursery(gcOptions(), JS::GCReason::EVICT_NURSERY,
gcstats::PhaseKind::EVICT_NURSERY_FOR_MAJOR_GC);
+
+ MOZ_ASSERT(nursery().isEmpty());
+ MOZ_ASSERT(storeBuffer().isEmpty());
}
bool GCRuntime::hasForegroundWork() const {
@@ -4733,26 +4748,43 @@ void GCRuntime::collectNursery(JS::GCOptions options, JS::GCReason reason,
gcstats::AutoPhase ap(stats(), phase);
nursery().collect(options, reason);
- MOZ_ASSERT(nursery().isEmpty());
startBackgroundFreeAfterMinorGC();
+
+ // We ignore gcMaxBytes when allocating for minor collection. However, if we
+ // overflowed, we disable the nursery. The next time we allocate, we'll fail
+ // because bytes >= gcMaxBytes.
+ if (heapSize.bytes() >= tunables.gcMaxBytes()) {
+ if (!nursery().isEmpty()) {
+ nursery().collect(options, JS::GCReason::DISABLE_GENERATIONAL_GC);
+ MOZ_ASSERT(nursery().isEmpty());
+ startBackgroundFreeAfterMinorGC();
+ }
+ nursery().disable();
+ }
}
void GCRuntime::startBackgroundFreeAfterMinorGC() {
- MOZ_ASSERT(nursery().isEmpty());
+ // Called after nursery collection. Free whatever blocks are safe to free now.
- {
- AutoLockHelperThreadState lock;
+ AutoLockHelperThreadState lock;
- lifoBlocksToFree.ref().transferFrom(&lifoBlocksToFreeAfterMinorGC.ref());
+ lifoBlocksToFree.ref().transferFrom(&lifoBlocksToFreeAfterNextMinorGC.ref());
- if (lifoBlocksToFree.ref().isEmpty() &&
- buffersToFreeAfterMinorGC.ref().empty()) {
- return;
- }
+ if (nursery().tenuredEverything) {
+ lifoBlocksToFree.ref().transferFrom(
+ &lifoBlocksToFreeAfterFullMinorGC.ref());
+ } else {
+ lifoBlocksToFreeAfterNextMinorGC.ref().transferFrom(
+ &lifoBlocksToFreeAfterFullMinorGC.ref());
+ }
+
+ if (lifoBlocksToFree.ref().isEmpty() &&
+ buffersToFreeAfterMinorGC.ref().empty()) {
+ return;
}
- startBackgroundFree();
+ freeTask.startOrRunIfIdle(lock);
}
bool GCRuntime::gcIfRequestedImpl(bool eagerOk) {
diff --git a/js/src/gc/GC.h b/js/src/gc/GC.h
index 4e4634d804..7f603b066f 100644
--- a/js/src/gc/GC.h
+++ b/js/src/gc/GC.h
@@ -83,7 +83,8 @@ class TenuredChunk;
_("maxHelperThreads", JSGC_MAX_HELPER_THREADS, true) \
_("helperThreadCount", JSGC_HELPER_THREAD_COUNT, false) \
_("markingThreadCount", JSGC_MARKING_THREAD_COUNT, true) \
- _("systemPageSizeKB", JSGC_SYSTEM_PAGE_SIZE_KB, false)
+ _("systemPageSizeKB", JSGC_SYSTEM_PAGE_SIZE_KB, false) \
+ _("semispaceNurseryEnabled", JSGC_SEMISPACE_NURSERY_ENABLED, true)
// Get the key and writability give a GC parameter name.
extern bool GetGCParameterInfo(const char* name, JSGCParamKey* keyOut,
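The new parameter is writable (the trailing "true" in the table), so the semispace nursery can be toggled at runtime through the public GC parameter API. A minimal embedder-side sketch, assuming a live JSContext; the helper names are illustrative, not part of the patch:

  #include "js/GCAPI.h"

  // JS_SetGCParameter/JS_GetGCParameter are the existing public API;
  // JSGC_SEMISPACE_NURSERY_ENABLED is the key this patch wires up.
  void SetSemispaceNurseryEnabled(JSContext* cx, bool enable) {
    JS_SetGCParameter(cx, JSGC_SEMISPACE_NURSERY_ENABLED, enable ? 1 : 0);
  }

  bool GetSemispaceNurseryEnabled(JSContext* cx) {
    return JS_GetGCParameter(cx, JSGC_SEMISPACE_NURSERY_ENABLED) != 0;
  }

Since this name/key table also backs GetGCParameterInfo, the JS shell's gcparam("semispaceNurseryEnabled", 1) should reach the same setting.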
diff --git a/js/src/gc/GCAPI.cpp b/js/src/gc/GCAPI.cpp
index 293bfce80d..d2eab44dea 100644
--- a/js/src/gc/GCAPI.cpp
+++ b/js/src/gc/GCAPI.cpp
@@ -487,8 +487,7 @@ JS_PUBLIC_API bool JS::WasIncrementalGC(JSRuntime* rt) {
bool js::gc::CreateUniqueIdForNativeObject(NativeObject* nobj, uint64_t* uidp) {
JSRuntime* runtime = nobj->runtimeFromMainThread();
*uidp = NextCellUniqueId(runtime);
- JSContext* cx = runtime->mainContextFromOwnThread();
- return nobj->setUniqueId(cx, *uidp);
+ return nobj->setUniqueId(runtime, *uidp);
}
bool js::gc::CreateUniqueIdForNonNativeObject(Cell* cell,
@@ -793,6 +792,15 @@ const char* CellColorName(CellColor color) {
} /* namespace gc */
} /* namespace js */
+JS_PUBLIC_API bool js::gc::IsDeadNurseryObject(JSObject* obj) {
+ MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());
+ MOZ_ASSERT(obj);
+ MOZ_ASSERT(IsInsideNursery(obj));
+ MOZ_ASSERT(!IsForwarded(obj));
+
+ return obj->runtimeFromMainThread()->gc.nursery().inCollectedRegion(obj);
+}
+
JS_PUBLIC_API void js::gc::FinalizeDeadNurseryObject(JSContext* cx,
JSObject* obj) {
CHECK_THREAD(cx);
diff --git a/js/src/gc/GCInternals.h b/js/src/gc/GCInternals.h
index c234ad4b2b..9a3edc2c9d 100644
--- a/js/src/gc/GCInternals.h
+++ b/js/src/gc/GCInternals.h
@@ -329,7 +329,7 @@ inline bool IsOOMReason(JS::GCReason reason) {
reason == JS::GCReason::MEM_PRESSURE;
}
-void* AllocateCellInGC(JS::Zone* zone, AllocKind thingKind);
+void* AllocateTenuredCellInGC(JS::Zone* zone, AllocKind thingKind);
void ReadProfileEnv(const char* envName, const char* helpText, bool* enableOut,
bool* workersOut, mozilla::TimeDuration* thresholdOut);
diff --git a/js/src/gc/GCRuntime.h b/js/src/gc/GCRuntime.h
index c9f660b4d7..851e477359 100644
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -1205,7 +1205,8 @@ class GCRuntime {
* a background thread.
*/
HelperThreadLockData<LifoAlloc> lifoBlocksToFree;
- MainThreadData<LifoAlloc> lifoBlocksToFreeAfterMinorGC;
+ MainThreadData<LifoAlloc> lifoBlocksToFreeAfterFullMinorGC;
+ MainThreadData<LifoAlloc> lifoBlocksToFreeAfterNextMinorGC;
HelperThreadLockData<Nursery::BufferSet> buffersToFreeAfterMinorGC;
/* Index of current sweep group (for stats). */
diff --git a/js/src/gc/Heap-inl.h b/js/src/gc/Heap-inl.h
index 95bd841cae..30937a7236 100644
--- a/js/src/gc/Heap-inl.h
+++ b/js/src/gc/Heap-inl.h
@@ -44,6 +44,10 @@ inline void js::gc::Arena::init(JS::Zone* zoneArg, AllocKind kind,
}
setAsFullyUnused();
+
+#ifdef DEBUG
+ checkNoMarkedCells();
+#endif
}
inline void js::gc::Arena::release(const AutoLockGC& lock) {
diff --git a/js/src/gc/MallocedBlockCache.h b/js/src/gc/MallocedBlockCache.h
index cd7d1e1064..6fc577044e 100644
--- a/js/src/gc/MallocedBlockCache.h
+++ b/js/src/gc/MallocedBlockCache.h
@@ -70,6 +70,8 @@ class MallocedBlockCache {
~MallocedBlockCache();
+ static inline size_t listIDForSize(size_t size);
+
// Allocation and freeing. Use `alloc` to allocate. `allocSlow` is
// `alloc`s fallback path. Do not call it directly, since it doesn't handle
// all cases by itself.
@@ -89,7 +91,8 @@ class MallocedBlockCache {
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
};
-inline PointerAndUint7 MallocedBlockCache::alloc(size_t size) {
+/* static */
+inline size_t MallocedBlockCache::listIDForSize(size_t size) {
// Figure out which free list can give us a block of size `size`, after it
// has been rounded up to a multiple of `step`.
//
@@ -122,11 +125,23 @@ inline PointerAndUint7 MallocedBlockCache::alloc(size_t size) {
size_t i = size / STEP;
MOZ_ASSERT(i > 0);
+ if (i >= NUM_LISTS) {
+ return OVERSIZE_BLOCK_LIST_ID;
+ }
+
+ return i;
+}
+
+inline PointerAndUint7 MallocedBlockCache::alloc(size_t size) {
+ size_t i = listIDForSize(size);
+ MOZ_ASSERT(i < NUM_LISTS);
+
// Fast path: try to pull a block from the relevant list.
- if (MOZ_LIKELY(i < NUM_LISTS && // "block is small enough to cache"
- !lists[i].empty())) { // "a cached block is available"
+ if (MOZ_LIKELY(
+ i != OVERSIZE_BLOCK_LIST_ID && // "block is small enough to cache"
+ !lists[i].empty())) { // "a cached block is available"
// Check that i is the right list
- MOZ_ASSERT(i * STEP == size);
+ MOZ_ASSERT(i * STEP == js::RoundUp(size, STEP));
void* block = lists[i].popCopy();
return PointerAndUint7(block, i);
}
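Splitting listIDForSize out of alloc makes the size-class arithmetic callable on its own, presumably so other code can find a block's list without allocating. A self-contained sketch of that arithmetic; STEP, NUM_LISTS and OVERSIZE_BLOCK_LIST_ID are assumed placeholder values, and the reading that index 0 doubles as the oversize ID is an inference from the MOZ_ASSERT(i > 0) / MOZ_ASSERT(i < NUM_LISTS) pair, not stated in the patch:

  #include <cstddef>

  constexpr size_t STEP = 16;                   // assumed granule size
  constexpr size_t NUM_LISTS = 8;               // assumed number of size classes
  constexpr size_t OVERSIZE_BLOCK_LIST_ID = 0;  // assumed: the division below
                                                // never yields 0, so 0 is free
                                                // to mean "too big to cache"

  constexpr size_t RoundUpTo(size_t size, size_t step) {
    return (size + step - 1) / step * step;
  }

  constexpr size_t ListIDForSize(size_t size) {
    size_t i = RoundUpTo(size, STEP) / STEP;  // >= 1 for any non-zero size
    return i >= NUM_LISTS ? OVERSIZE_BLOCK_LIST_ID : i;
  }

  static_assert(ListIDForSize(1) == 1);    // 1..16 bytes  -> list 1
  static_assert(ListIDForSize(17) == 2);   // 17..32 bytes -> list 2
  static_assert(ListIDForSize(STEP * NUM_LISTS) == OVERSIZE_BLOCK_LIST_ID);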
diff --git a/js/src/gc/Nursery-inl.h b/js/src/gc/Nursery-inl.h
index 3a29d76c07..3be063697a 100644
--- a/js/src/gc/Nursery-inl.h
+++ b/js/src/gc/Nursery-inl.h
@@ -16,6 +16,12 @@
#include "vm/JSContext.h"
#include "vm/NativeObject.h"
+namespace js {
+namespace gc {
+struct Cell;
+} // namespace gc
+} // namespace js
+
inline JSRuntime* js::Nursery::runtime() const { return gc->rt; }
template <typename T>
@@ -23,6 +29,50 @@ bool js::Nursery::isInside(const SharedMem<T>& p) const {
return isInside(p.unwrap(/*safe - used for value in comparison above*/));
}
+inline bool js::Nursery::shouldTenure(gc::Cell* cell) {
+ MOZ_ASSERT(semispaceEnabled());
+ MOZ_ASSERT(inCollectedRegion(cell));
+
+ size_t offset = fromSpace.offsetFromAddress(uintptr_t(cell));
+ MOZ_ASSERT(offset >=
+ fromSpace.offsetFromExclusiveAddress(fromSpace.startPosition_));
+ return offset <= tenureThreshold_;
+}
+
+inline bool js::Nursery::inCollectedRegion(gc::Cell* cell) const {
+ gc::ChunkBase* chunk = gc::detail::GetCellChunkBase(cell);
+ return chunk->getKind() == gc::ChunkKind::NurseryFromSpace;
+}
+
+inline bool js::Nursery::inCollectedRegion(void* ptr) const {
+ if (!semispaceEnabled()) {
+ return toSpace.isInside(ptr);
+ }
+
+ return fromSpace.isInside(ptr);
+}
+
+inline size_t js::Nursery::Space::offsetFromExclusiveAddress(
+ uintptr_t addr) const {
+ if ((addr & gc::ChunkMask) == 0) {
+ // |addr| points one past the end of the previous chunk.
+ return offsetFromAddress(addr - 1) + 1;
+ }
+
+ return offsetFromAddress(addr);
+}
+
+inline size_t js::Nursery::Space::offsetFromAddress(uintptr_t addr) const {
+ gc::ChunkBase* chunk =
+ gc::detail::GetCellChunkBase(reinterpret_cast<gc::Cell*>(addr));
+ MOZ_ASSERT(chunk->getKind() == kind);
+ MOZ_ASSERT(findChunkIndex(addr & ~gc::ChunkMask) == chunk->nurseryChunkIndex);
+
+ uint32_t offset = addr & gc::ChunkMask;
+ MOZ_ASSERT(offset >= sizeof(gc::ChunkBase));
+ return (chunk->nurseryChunkIndex << gc::ChunkShift) | offset;
+}
+
MOZ_ALWAYS_INLINE /* static */ bool js::Nursery::getForwardedPointer(
js::gc::Cell** ref) {
js::gc::Cell* cell = (*ref);
@@ -80,7 +130,7 @@ inline void js::Nursery::setForwardingPointer(void* oldData, void* newData,
inline void js::Nursery::setDirectForwardingPointer(void* oldData,
void* newData) {
MOZ_ASSERT(isInside(oldData));
- MOZ_ASSERT(!isInside(newData));
+ MOZ_ASSERT_IF(isInside(newData), !inCollectedRegion(newData));
new (oldData) BufferRelocationOverlay{newData};
}
@@ -123,8 +173,8 @@ inline void* js::Nursery::tryAllocateCell(gc::AllocSite* site, size_t size,
inline void* js::Nursery::tryAllocate(size_t size) {
MOZ_ASSERT(isEnabled());
- MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
- MOZ_ASSERT_IF(currentChunk_ == startChunk_, position() >= startPosition_);
+ MOZ_ASSERT_IF(JS::RuntimeHeapIsBusy(), JS::RuntimeHeapIsMinorCollecting());
+ MOZ_ASSERT_IF(currentChunk() == startChunk(), position() >= startPosition());
MOZ_ASSERT(size % gc::CellAlignBytes == 0);
MOZ_ASSERT(position() % gc::CellAlignBytes == 0);
@@ -138,7 +188,7 @@ inline void* js::Nursery::tryAllocate(size_t size) {
"Successful allocation cannot result in nullptr");
}
- position_ = position() + size;
+ toSpace.position_ = position() + size;
DebugOnlyPoison(ptr, JS_ALLOCATED_NURSERY_PATTERN, size,
MemCheckKind::MakeUndefined);
@@ -148,30 +198,33 @@ inline void* js::Nursery::tryAllocate(size_t size) {
inline bool js::Nursery::registerTrailer(PointerAndUint7 blockAndListID,
size_t nBytes) {
- MOZ_ASSERT(trailersAdded_.length() == trailersRemoved_.length());
+ MOZ_ASSERT(toSpace.trailersAdded_.length() ==
+ toSpace.trailersRemoved_.length());
MOZ_ASSERT(nBytes > 0);
- if (MOZ_UNLIKELY(!trailersAdded_.append(blockAndListID))) {
+ if (MOZ_UNLIKELY(!toSpace.trailersAdded_.append(blockAndListID))) {
return false;
}
- if (MOZ_UNLIKELY(!trailersRemoved_.append(nullptr))) {
- trailersAdded_.popBack();
+ if (MOZ_UNLIKELY(!toSpace.trailersRemoved_.append(nullptr))) {
+ toSpace.trailersAdded_.popBack();
return false;
}
// This is a clone of the logic in ::registerMallocedBuffer. It may be
// that some other heuristic is better, once we know more about the
// typical behaviour of wasm-GC applications.
- trailerBytes_ += nBytes;
- if (MOZ_UNLIKELY(trailerBytes_ > capacity() * 8)) {
+ toSpace.trailerBytes_ += nBytes;
+ if (MOZ_UNLIKELY(toSpace.trailerBytes_ > capacity() * 8)) {
requestMinorGC(JS::GCReason::NURSERY_TRAILERS);
}
return true;
}
inline void js::Nursery::unregisterTrailer(void* block) {
- MOZ_ASSERT(trailersRemovedUsed_ < trailersRemoved_.length());
- trailersRemoved_[trailersRemovedUsed_] = block;
- trailersRemovedUsed_++;
+ // Unlike removeMallocedBuffer this is only called during minor GC.
+ MOZ_ASSERT(fromSpace.trailersRemovedUsed_ <
+ fromSpace.trailersRemoved_.length());
+ fromSpace.trailersRemoved_[fromSpace.trailersRemovedUsed_] = block;
+ fromSpace.trailersRemovedUsed_++;
}
namespace js {
@@ -181,13 +234,20 @@ namespace js {
// instead.
template <typename T>
-static inline T* AllocateCellBuffer(JSContext* cx, gc::Cell* cell,
+static inline T* AllocateCellBuffer(Nursery& nursery, gc::Cell* cell,
uint32_t count) {
size_t nbytes = RoundUp(count * sizeof(T), sizeof(Value));
- auto* buffer = static_cast<T*>(cx->nursery().allocateBuffer(
- cell->zone(), cell, nbytes, js::MallocArena));
+ return static_cast<T*>(
+ nursery.allocateBuffer(cell->zone(), cell, nbytes, js::MallocArena));
+}
+
+template <typename T>
+static inline T* AllocateCellBuffer(JSContext* cx, gc::Cell* cell,
+ uint32_t count) {
+ T* buffer = AllocateCellBuffer<T>(cx->nursery(), cell, count);
if (!buffer) {
ReportOutOfMemory(cx);
+ return nullptr;
}
return buffer;
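The offsetFromAddress/offsetFromExclusiveAddress helpers earlier in this file linearize a nursery position into (chunk index << ChunkShift) | offset-within-chunk, so positions in different chunks compare in allocation order; tenureThreshold_ and shouldTenure work on this encoding. A simplified sketch, assuming the usual ChunkSize == 1 << ChunkShift layout and passing the chunk index explicitly (the real code reads it from the chunk header):

  #include <cstddef>
  #include <cstdint>

  constexpr size_t ChunkShift = 20;  // assumed: 1 MiB chunks
  constexpr size_t ChunkSize = size_t(1) << ChunkShift;
  constexpr size_t ChunkMask = ChunkSize - 1;

  size_t OffsetFromAddress(uintptr_t addr, uint32_t chunkIndex) {
    // Offset of |addr| within its chunk, tagged with the chunk's index.
    return (size_t(chunkIndex) << ChunkShift) | (addr & ChunkMask);
  }

  size_t OffsetFromExclusiveAddress(uintptr_t addr, uint32_t chunkIndex) {
    if ((addr & ChunkMask) == 0) {
      // |addr| points one past the end of the previous chunk; attribute it to
      // that chunk rather than to the start of the next one.
      return OffsetFromAddress(addr - 1, chunkIndex - 1) + 1;
    }
    return OffsetFromAddress(addr, chunkIndex);
  }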
diff --git a/js/src/gc/Nursery.cpp b/js/src/gc/Nursery.cpp
index 660daa8d4c..4753848c56 100644
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -38,6 +38,7 @@
#include "gc/Heap-inl.h"
#include "gc/Marking-inl.h"
#include "gc/StableCellHasher-inl.h"
+#include "gc/StoreBuffer-inl.h"
#include "vm/GeckoProfiler-inl.h"
using namespace js;
@@ -50,15 +51,22 @@ using mozilla::TimeStamp;
namespace js {
+static constexpr size_t NurseryChunkHeaderSize =
+ RoundUp(sizeof(ChunkBase), CellAlignBytes);
+
+// The amount of space in a nursery chunk available to allocations.
+static constexpr size_t NurseryChunkUsableSize =
+ ChunkSize - NurseryChunkHeaderSize;
+
struct NurseryChunk : public ChunkBase {
- char data[Nursery::NurseryChunkUsableSize];
+ alignas(CellAlignBytes) uint8_t data[NurseryChunkUsableSize];
- static NurseryChunk* fromChunk(gc::TenuredChunk* chunk);
+ static NurseryChunk* fromChunk(TenuredChunk* chunk, ChunkKind kind,
+ uint8_t index);
- explicit NurseryChunk(JSRuntime* runtime)
- : ChunkBase(runtime, &runtime->gc.storeBuffer()) {}
+ explicit NurseryChunk(JSRuntime* runtime, ChunkKind kind, uint8_t chunkIndex)
+ : ChunkBase(runtime, &runtime->gc.storeBuffer(), kind, chunkIndex) {}
- void poisonAndInit(JSRuntime* rt, size_t size = ChunkSize);
void poisonRange(size_t start, size_t end, uint8_t value,
MemCheckKind checkKind);
void poisonAfterEvict(size_t extent = ChunkSize);
@@ -75,22 +83,29 @@ struct NurseryChunk : public ChunkBase {
uintptr_t start() const { return uintptr_t(&data); }
uintptr_t end() const { return uintptr_t(this) + ChunkSize; }
};
-static_assert(sizeof(js::NurseryChunk) == gc::ChunkSize,
- "Nursery chunk size must match gc::Chunk size.");
+static_assert(sizeof(NurseryChunk) == ChunkSize,
+ "Nursery chunk size must match Chunk size.");
+static_assert(offsetof(NurseryChunk, data) == NurseryChunkHeaderSize);
class NurseryDecommitTask : public GCParallelTask {
public:
explicit NurseryDecommitTask(gc::GCRuntime* gc);
- bool reserveSpaceForBytes(size_t nbytes);
+ bool reserveSpaceForChunks(size_t nchunks);
bool isEmpty(const AutoLockHelperThreadState& lock) const;
void queueChunk(NurseryChunk* chunk, const AutoLockHelperThreadState& lock);
- void queueRange(size_t newCapacity, NurseryChunk& chunk,
+ void queueRange(size_t newCapacity, NurseryChunk* chunk,
const AutoLockHelperThreadState& lock);
private:
+ struct Region {
+ NurseryChunk* chunk;
+ size_t startOffset;
+ };
+
using NurseryChunkVector = Vector<NurseryChunk*, 0, SystemAllocPolicy>;
+ using RegionVector = Vector<Region, 2, SystemAllocPolicy>;
void run(AutoLockHelperThreadState& lock) override;
@@ -98,25 +113,21 @@ class NurseryDecommitTask : public GCParallelTask {
const NurseryChunkVector& chunksToDecommit() const {
return chunksToDecommit_.ref();
}
+ RegionVector& regionsToDecommit() { return regionsToDecommit_.ref(); }
+ const RegionVector& regionsToDecommit() const {
+ return regionsToDecommit_.ref();
+ }
MainThreadOrGCTaskData<NurseryChunkVector> chunksToDecommit_;
-
- MainThreadOrGCTaskData<NurseryChunk*> partialChunk;
- MainThreadOrGCTaskData<size_t> partialCapacity;
+ MainThreadOrGCTaskData<RegionVector> regionsToDecommit_;
};
} // namespace js
-inline void js::NurseryChunk::poisonAndInit(JSRuntime* rt, size_t size) {
- MOZ_ASSERT(size >= sizeof(ChunkBase));
- MOZ_ASSERT(size <= ChunkSize);
- poisonRange(0, size, JS_FRESH_NURSERY_PATTERN, MemCheckKind::MakeUndefined);
- new (this) NurseryChunk(rt);
-}
-
inline void js::NurseryChunk::poisonRange(size_t start, size_t end,
uint8_t value,
MemCheckKind checkKind) {
+ MOZ_ASSERT(start >= NurseryChunkHeaderSize);
MOZ_ASSERT((start % gc::CellAlignBytes) == 0);
MOZ_ASSERT((end % gc::CellAlignBytes) == 0);
MOZ_ASSERT(end >= start);
@@ -132,12 +143,12 @@ inline void js::NurseryChunk::poisonRange(size_t start, size_t end,
}
inline void js::NurseryChunk::poisonAfterEvict(size_t extent) {
- poisonRange(sizeof(ChunkBase), extent, JS_SWEPT_NURSERY_PATTERN,
+ poisonRange(NurseryChunkHeaderSize, extent, JS_SWEPT_NURSERY_PATTERN,
MemCheckKind::MakeNoAccess);
}
inline void js::NurseryChunk::markPagesUnusedHard(size_t startOffset) {
- MOZ_ASSERT(startOffset >= sizeof(ChunkBase)); // Don't touch the header.
+ MOZ_ASSERT(startOffset >= NurseryChunkHeaderSize); // Don't touch the header.
MOZ_ASSERT(startOffset >= SystemPageSize());
MOZ_ASSERT(startOffset <= ChunkSize);
uintptr_t start = uintptr_t(this) + startOffset;
@@ -146,7 +157,7 @@ inline void js::NurseryChunk::markPagesUnusedHard(size_t startOffset) {
}
inline bool js::NurseryChunk::markPagesInUseHard(size_t endOffset) {
- MOZ_ASSERT(endOffset >= sizeof(ChunkBase));
+ MOZ_ASSERT(endOffset >= NurseryChunkHeaderSize);
MOZ_ASSERT(endOffset >= SystemPageSize());
MOZ_ASSERT(endOffset <= ChunkSize);
uintptr_t start = uintptr_t(this) + SystemPageSize();
@@ -155,23 +166,25 @@ inline bool js::NurseryChunk::markPagesInUseHard(size_t endOffset) {
}
// static
-inline js::NurseryChunk* js::NurseryChunk::fromChunk(TenuredChunk* chunk) {
- return reinterpret_cast<NurseryChunk*>(chunk);
+inline js::NurseryChunk* js::NurseryChunk::fromChunk(TenuredChunk* chunk,
+ ChunkKind kind,
+ uint8_t index) {
+ return new (chunk) NurseryChunk(chunk->runtime, kind, index);
}
js::NurseryDecommitTask::NurseryDecommitTask(gc::GCRuntime* gc)
: GCParallelTask(gc, gcstats::PhaseKind::NONE) {
// This can occur outside GCs so doesn't have a stats phase.
+ MOZ_ALWAYS_TRUE(regionsToDecommit().reserve(2));
}
bool js::NurseryDecommitTask::isEmpty(
const AutoLockHelperThreadState& lock) const {
- return chunksToDecommit().empty() && !partialChunk;
+ return chunksToDecommit().empty() && regionsToDecommit().empty();
}
-bool js::NurseryDecommitTask::reserveSpaceForBytes(size_t nbytes) {
+bool js::NurseryDecommitTask::reserveSpaceForChunks(size_t nchunks) {
MOZ_ASSERT(isIdle());
- size_t nchunks = HowMany(nbytes, ChunkSize);
return chunksToDecommit().reserve(nchunks);
}
@@ -182,15 +195,14 @@ void js::NurseryDecommitTask::queueChunk(
}
void js::NurseryDecommitTask::queueRange(
- size_t newCapacity, NurseryChunk& newChunk,
+ size_t newCapacity, NurseryChunk* chunk,
const AutoLockHelperThreadState& lock) {
MOZ_ASSERT(isIdle(lock));
- MOZ_ASSERT(!partialChunk);
+ MOZ_ASSERT(regionsToDecommit_.ref().length() < 2);
MOZ_ASSERT(newCapacity < ChunkSize);
MOZ_ASSERT(newCapacity % SystemPageSize() == 0);
- partialChunk = &newChunk;
- partialCapacity = newCapacity;
+ regionsToDecommit().infallibleAppend(Region{chunk, newCapacity});
}
void js::NurseryDecommitTask::run(AutoLockHelperThreadState& lock) {
@@ -204,25 +216,20 @@ void js::NurseryDecommitTask::run(AutoLockHelperThreadState& lock) {
gc->recycleChunk(tenuredChunk, lock);
}
- if (partialChunk) {
- {
- AutoUnlockHelperThreadState unlock(lock);
- partialChunk->markPagesUnusedHard(partialCapacity);
- }
- partialChunk = nullptr;
- partialCapacity = 0;
+ while (!regionsToDecommit().empty()) {
+ Region region = regionsToDecommit().popCopy();
+ AutoUnlockHelperThreadState unlock(lock);
+ region.chunk->markPagesUnusedHard(region.startOffset);
}
}
js::Nursery::Nursery(GCRuntime* gc)
- : position_(0),
- currentEnd_(0),
+ : toSpace(ChunkKind::NurseryToSpace),
+ fromSpace(ChunkKind::NurseryFromSpace),
gc(gc),
- currentChunk_(0),
- startChunk_(0),
- startPosition_(0),
capacity_(0),
enableProfiling_(false),
+ semispaceEnabled_(gc::TuningDefaults::SemispaceNurseryEnabled),
canAllocateStrings_(true),
canAllocateBigInts_(true),
reportDeduplications_(false),
@@ -232,6 +239,11 @@ js::Nursery::Nursery(GCRuntime* gc)
prevPosition_(0),
hasRecentGrowthData(false),
smoothedTargetSize(0.0) {
+ // Try to keep fields used by allocation fast path together at the start of
+ // the nursery.
+ static_assert(offsetof(Nursery, toSpace.position_) < TypicalCacheLineSize);
+ static_assert(offsetof(Nursery, toSpace.currentEnd_) < TypicalCacheLineSize);
+
const char* env = getenv("MOZ_NURSERY_STRINGS");
if (env && *env) {
canAllocateStrings_ = (*env == '1');
@@ -316,12 +328,13 @@ bool js::Nursery::init(AutoLockGCBgAlloc& lock) {
js::Nursery::~Nursery() { disable(); }
void js::Nursery::enable() {
- MOZ_ASSERT(isEmpty());
- MOZ_ASSERT(!gc->isVerifyPreBarriersEnabled());
if (isEnabled()) {
return;
}
+ MOZ_ASSERT(isEmpty());
+ MOZ_ASSERT(!gc->isVerifyPreBarriersEnabled());
+
{
AutoLockGCBgAlloc lock(gc);
if (!initFirstChunk(lock)) {
@@ -344,25 +357,60 @@ void js::Nursery::enable() {
bool js::Nursery::initFirstChunk(AutoLockGCBgAlloc& lock) {
MOZ_ASSERT(!isEnabled());
+ MOZ_ASSERT(toSpace.chunks_.length() == 0);
+ MOZ_ASSERT(fromSpace.chunks_.length() == 0);
- capacity_ = tunables().gcMinNurseryBytes();
+ setCapacity(minSpaceSize());
- if (!decommitTask->reserveSpaceForBytes(capacity_) ||
- !allocateNextChunk(0, lock)) {
- capacity_ = 0;
+ size_t nchunks = toSpace.maxChunkCount_ + fromSpace.maxChunkCount_;
+ if (!decommitTask->reserveSpaceForChunks(nchunks) ||
+ !allocateNextChunk(lock)) {
+ setCapacity(0);
+ MOZ_ASSERT(toSpace.isEmpty());
+ MOZ_ASSERT(fromSpace.isEmpty());
return false;
}
- moveToStartOfChunk(0);
- setStartToCurrentPosition();
+ toSpace.moveToStartOfChunk(this, 0);
+ toSpace.setStartToCurrentPosition();
+
+ if (semispaceEnabled_) {
+ fromSpace.moveToStartOfChunk(this, 0);
+ fromSpace.setStartToCurrentPosition();
+ }
+
+ MOZ_ASSERT(toSpace.isEmpty());
+ MOZ_ASSERT(fromSpace.isEmpty());
+
poisonAndInitCurrentChunk();
// Clear any information about previous collections.
clearRecentGrowthData();
+ tenureThreshold_ = 0;
+
+#ifdef DEBUG
+ toSpace.checkKind(ChunkKind::NurseryToSpace);
+ fromSpace.checkKind(ChunkKind::NurseryFromSpace);
+#endif
+
return true;
}
+size_t RequiredChunkCount(size_t nbytes) {
+ return nbytes <= ChunkSize ? 1 : nbytes / ChunkSize;
+}
+
+void js::Nursery::setCapacity(size_t newCapacity) {
+ MOZ_ASSERT(newCapacity == roundSize(newCapacity));
+ capacity_ = newCapacity;
+ size_t count = RequiredChunkCount(newCapacity);
+ toSpace.maxChunkCount_ = count;
+ if (semispaceEnabled_) {
+ fromSpace.maxChunkCount_ = count;
+ }
+}
+
void js::Nursery::disable() {
MOZ_ASSERT(isEmpty());
if (!isEnabled()) {
@@ -371,15 +419,19 @@ void js::Nursery::disable() {
// Free all chunks.
decommitTask->join();
- freeChunksFrom(0);
+ freeChunksFrom(toSpace, 0);
+ freeChunksFrom(fromSpace, 0);
decommitTask->runFromMainThread();
- capacity_ = 0;
+ setCapacity(0);
// We must reset currentEnd_ so that there is no space for anything in the
// nursery. JIT'd code uses this even if the nursery is disabled.
- currentEnd_ = 0;
- position_ = 0;
+ toSpace = Space(ChunkKind::NurseryToSpace);
+ fromSpace = Space(ChunkKind::NurseryFromSpace);
+ MOZ_ASSERT(toSpace.isEmpty());
+ MOZ_ASSERT(fromSpace.isEmpty());
+
gc->storeBuffer().disable();
if (gc->wasInitialized()) {
@@ -464,16 +516,59 @@ void js::Nursery::discardCodeAndSetJitFlagsForZone(JS::Zone* zone) {
}
}
+void js::Nursery::setSemispaceEnabled(bool enabled) {
+ if (semispaceEnabled() == enabled) {
+ return;
+ }
+
+ bool wasEnabled = isEnabled();
+ if (wasEnabled) {
+ if (!isEmpty()) {
+ gc->minorGC(JS::GCReason::EVICT_NURSERY);
+ }
+ disable();
+ }
+
+ semispaceEnabled_ = enabled;
+
+ if (wasEnabled) {
+ enable();
+ }
+}
+
bool js::Nursery::isEmpty() const {
+ MOZ_ASSERT(fromSpace.isEmpty());
+
if (!isEnabled()) {
return true;
}
if (!gc->hasZealMode(ZealMode::GenerationalGC)) {
- MOZ_ASSERT(startChunk_ == 0);
- MOZ_ASSERT(startPosition_ == chunk(0).start());
+ MOZ_ASSERT(startChunk() == 0);
+ MOZ_ASSERT(startPosition() == chunk(0).start());
}
- return position() == startPosition_;
+
+ return toSpace.isEmpty();
+}
+
+bool js::Nursery::Space::isEmpty() const { return position_ == startPosition_; }
+
+static size_t AdjustSizeForSemispace(size_t size, bool semispaceEnabled) {
+ if (!semispaceEnabled) {
+ return size;
+ }
+
+ return Nursery::roundSize(size / 2);
+}
+
+size_t js::Nursery::maxSpaceSize() const {
+ return AdjustSizeForSemispace(tunables().gcMaxNurseryBytes(),
+ semispaceEnabled_);
+}
+
+size_t js::Nursery::minSpaceSize() const {
+ return AdjustSizeForSemispace(tunables().gcMinNurseryBytes(),
+ semispaceEnabled_);
}
#ifdef JS_GC_ZEAL
@@ -501,9 +596,10 @@ void js::Nursery::enterZealMode() {
MemCheckKind::MakeUndefined);
}
- capacity_ = RoundUp(tunables().gcMaxNurseryBytes(), ChunkSize);
+ setCapacity(maxSpaceSize());
- if (!decommitTask->reserveSpaceForBytes(capacity_)) {
+ size_t nchunks = toSpace.maxChunkCount_ + fromSpace.maxChunkCount_;
+ if (!decommitTask->reserveSpaceForChunks(nchunks)) {
oomUnsafe.crash("Nursery::enterZealMode");
}
@@ -517,8 +613,14 @@ void js::Nursery::leaveZealMode() {
MOZ_ASSERT(isEmpty());
- moveToStartOfChunk(0);
- setStartToCurrentPosition();
+ toSpace.moveToStartOfChunk(this, 0);
+ toSpace.setStartToCurrentPosition();
+
+ if (semispaceEnabled_) {
+ fromSpace.moveToStartOfChunk(this, 0);
+ fromSpace.setStartToCurrentPosition();
+ }
+
poisonAndInitCurrentChunk();
}
#endif // JS_GC_ZEAL
@@ -573,7 +675,7 @@ MOZ_NEVER_INLINE JS::GCReason Nursery::handleAllocationFailure() {
}
bool Nursery::moveToNextChunk() {
- unsigned chunkno = currentChunk_ + 1;
+ unsigned chunkno = currentChunk() + 1;
MOZ_ASSERT(chunkno <= maxChunkCount());
MOZ_ASSERT(chunkno <= allocatedChunkCount());
if (chunkno == maxChunkCount()) {
@@ -584,7 +686,7 @@ bool Nursery::moveToNextChunk() {
TimeStamp start = TimeStamp::Now();
{
AutoLockGCBgAlloc lock(gc);
- if (!allocateNextChunk(chunkno, lock)) {
+ if (!allocateNextChunk(lock)) {
return false;
}
}
@@ -688,16 +790,16 @@ void* js::Nursery::reallocateBuffer(Zone* zone, Cell* cell, void* oldBuffer,
}
if (!isInside(oldBuffer)) {
- MOZ_ASSERT(mallocedBufferBytes >= oldBytes);
+ MOZ_ASSERT(toSpace.mallocedBufferBytes >= oldBytes);
void* newBuffer =
zone->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
if (newBuffer) {
if (oldBuffer != newBuffer) {
MOZ_ALWAYS_TRUE(
- mallocedBuffers.rekeyAs(oldBuffer, newBuffer, newBuffer));
+ toSpace.mallocedBuffers.rekeyAs(oldBuffer, newBuffer, newBuffer));
}
- mallocedBufferBytes -= oldBytes;
- mallocedBufferBytes += newBytes;
+ toSpace.mallocedBufferBytes -= oldBytes;
+ toSpace.mallocedBufferBytes += newBytes;
}
return newBuffer;
}
@@ -723,27 +825,21 @@ void js::Nursery::freeBuffer(void* buffer, size_t nbytes) {
#ifdef DEBUG
/* static */
-inline bool Nursery::checkForwardingPointerLocation(void* ptr,
- bool expectedInside) {
- if (isInside(ptr) == expectedInside) {
- return true;
- }
-
+inline bool Nursery::checkForwardingPointerInsideNursery(void* ptr) {
// If a zero-capacity elements header lands right at the end of a chunk then
// elements data will appear to be in the next chunk. If we have a pointer to
// the very start of a chunk, check the previous chunk.
- if ((uintptr_t(ptr) & ChunkMask) == 0 &&
- isInside(reinterpret_cast<uint8_t*>(ptr) - 1) == expectedInside) {
- return true;
+ if ((uintptr_t(ptr) & ChunkMask) == 0) {
+ return isInside(reinterpret_cast<uint8_t*>(ptr) - 1);
}
- return false;
+ return isInside(ptr);
}
#endif
void Nursery::setIndirectForwardingPointer(void* oldData, void* newData) {
- MOZ_ASSERT(checkForwardingPointerLocation(oldData, true));
- MOZ_ASSERT(checkForwardingPointerLocation(newData, false));
+ MOZ_ASSERT(checkForwardingPointerInsideNursery(oldData));
+ // |newData| may be either in the nursery or in the malloc heap.
AutoEnterOOMUnsafeRegion oomUnsafe;
#ifdef DEBUG
@@ -791,7 +887,7 @@ void js::Nursery::forwardBufferPointer(uintptr_t* pSlotsElems) {
MOZ_ASSERT(IsWriteableAddress(buffer));
}
- MOZ_ASSERT(!isInside(buffer));
+ MOZ_ASSERT_IF(isInside(buffer), !inCollectedRegion(buffer));
*pSlotsElems = reinterpret_cast<uintptr_t>(buffer);
}
@@ -1063,7 +1159,7 @@ bool js::Nursery::wantEagerCollection() const {
return false;
}
- if (isEmpty() && capacity() == tunables().gcMinNurseryBytes()) {
+ if (isEmpty() && capacity() == minSpaceSize()) {
return false;
}
@@ -1108,7 +1204,7 @@ inline bool js::Nursery::isUnderused() const {
return false;
}
- if (capacity() == tunables().gcMinNurseryBytes()) {
+ if (capacity() == minSpaceSize()) {
return false;
}
@@ -1126,8 +1222,8 @@ void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) {
MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
if (minorGCRequested()) {
- MOZ_ASSERT(position_ == chunk(currentChunk_).end());
- position_ = prevPosition_;
+ MOZ_ASSERT(position() == chunk(currentChunk()).end());
+ toSpace.position_ = prevPosition_;
prevPosition_ = 0;
minorGCTriggerReason_ = JS::GCReason::NO_REASON;
rt->mainContextFromOwnThread()->clearPendingInterrupt(
@@ -1141,7 +1237,7 @@ void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) {
// freed after this point.
gc->storeBuffer().clear();
- MOZ_ASSERT(!pretenuringNursery.hasAllocatedSites());
+ MOZ_ASSERT_IF(!semispaceEnabled_, !pretenuringNursery.hasAllocatedSites());
}
if (!isEnabled()) {
@@ -1162,10 +1258,11 @@ void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) {
previousGC.reason = JS::GCReason::NO_REASON;
previousGC.nurseryUsedBytes = usedSpace();
previousGC.nurseryCapacity = capacity();
- previousGC.nurseryCommitted = committed();
- previousGC.nurseryUsedChunkCount = currentChunk_ + 1;
+ previousGC.nurseryCommitted = totalCommitted();
+ previousGC.nurseryUsedChunkCount = currentChunk() + 1;
previousGC.tenuredBytes = 0;
previousGC.tenuredCells = 0;
+ tenuredEverything = true;
// If it isn't empty, it will call doCollection, and possibly after that
// isEmpty() will become true, so use another variable to keep track of the
@@ -1177,29 +1274,19 @@ void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) {
// space does not represent data that can be tenured
MOZ_ASSERT(result.tenuredBytes <=
(previousGC.nurseryUsedBytes -
- (sizeof(ChunkBase) * previousGC.nurseryUsedChunkCount)));
+ (NurseryChunkHeaderSize * previousGC.nurseryUsedChunkCount)));
previousGC.reason = reason;
previousGC.tenuredBytes = result.tenuredBytes;
previousGC.tenuredCells = result.tenuredCells;
- previousGC.nurseryUsedChunkCount = currentChunk_ + 1;
+ previousGC.nurseryUsedChunkCount = currentChunk() + 1;
}
// Resize the nursery.
maybeResizeNursery(options, reason);
- // Poison/initialise the first chunk.
- if (previousGC.nurseryUsedBytes) {
- // In most cases Nursery::clear() has not poisoned this chunk or marked it
- // as NoAccess; so we only need to poison the region used during the last
- // cycle. Also, if the heap was recently expanded we don't want to
- // re-poison the new memory. In both cases we only need to poison until
- // previousGC.nurseryUsedBytes.
- //
- // In cases where this is not true, like generational zeal mode or subchunk
- // mode, poisonAndInitCurrentChunk() will ignore its parameter. It will
- // also clamp the parameter.
- poisonAndInitCurrentChunk(previousGC.nurseryUsedBytes);
+ if (!semispaceEnabled()) {
+ poisonAndInitCurrentChunk();
}
bool validPromotionRate;
@@ -1207,19 +1294,10 @@ void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) {
startProfile(ProfileKey::Pretenure);
size_t sitesPretenured = 0;
- if (!wasEmpty) {
- sitesPretenured =
- doPretenuring(rt, reason, validPromotionRate, promotionRate);
- }
+ sitesPretenured =
+ doPretenuring(rt, reason, validPromotionRate, promotionRate);
endProfile(ProfileKey::Pretenure);
- // We ignore gcMaxBytes when allocating for minor collection. However, if we
- // overflowed, we disable the nursery. The next time we allocate, we'll fail
- // because bytes >= gcMaxBytes.
- if (gc->heapSize.bytes() >= tunables().gcMaxBytes()) {
- disable();
- }
-
previousGC.endTime =
TimeStamp::Now(); // Must happen after maybeResizeNursery.
endProfile(ProfileKey::Total);
@@ -1268,7 +1346,7 @@ void js::Nursery::sendTelemetry(JS::GCReason reason, TimeDuration totalTime,
rt->metrics().GC_MINOR_REASON_LONG(uint32_t(reason));
}
rt->metrics().GC_MINOR_US(totalTime);
- rt->metrics().GC_NURSERY_BYTES_2(committed());
+ rt->metrics().GC_NURSERY_BYTES_2(totalCommitted());
if (!wasEmpty) {
rt->metrics().GC_PRETENURE_COUNT_2(sitesPretenured);
@@ -1289,14 +1367,30 @@ void js::Nursery::printDeduplicationData(js::StringStats& prev,
}
}
-void js::Nursery::freeTrailerBlocks(void) {
+void js::Nursery::freeTrailerBlocks(JS::GCOptions options,
+ JS::GCReason reason) {
+ fromSpace.freeTrailerBlocks(mallocedBlockCache_);
+
+ if (options == JS::GCOptions::Shrink || gc::IsOOMReason(reason)) {
+ mallocedBlockCache_.clear();
+ return;
+ }
+
+ // Discard blocks from the cache at 0.05% per megabyte of nursery capacity,
+ // that is, 0.8% of blocks for a 16-megabyte nursery. This allows the cache
+ // to gradually discard unneeded blocks in long running applications.
+ mallocedBlockCache_.preen(0.05 * double(capacity()) / (1024.0 * 1024.0));
+}
+
+void js::Nursery::Space::freeTrailerBlocks(
+ MallocedBlockCache& mallocedBlockCache) {
// This routine frees those blocks denoted by the set
//
// trailersAdded_ (all of it)
// - trailersRemoved_ (entries with index below trailersRemovedUsed_)
//
// For each block, places it back on the nursery's small-malloced-block pool
- // by calling mallocedBlockCache_.free.
+ // by calling mallocedBlockCache.free.
MOZ_ASSERT(trailersAdded_.length() == trailersRemoved_.length());
MOZ_ASSERT(trailersRemovedUsed_ <= trailersRemoved_.length());
@@ -1321,7 +1415,7 @@ void js::Nursery::freeTrailerBlocks(void) {
if (!std::binary_search(trailersRemoved_.begin(),
trailersRemoved_.begin() + trailersRemovedUsed_,
blockPointer)) {
- mallocedBlockCache_.free(block);
+ mallocedBlockCache.free(block);
}
}
} else {
@@ -1348,7 +1442,7 @@ void js::Nursery::freeTrailerBlocks(void) {
const PointerAndUint7 blockAdded = trailersAdded_[iAdded];
const void* blockRemoved = trailersRemoved_[iRemoved];
if (blockAdded.pointer() < blockRemoved) {
- mallocedBlockCache_.free(blockAdded);
+ mallocedBlockCache.free(blockAdded);
continue;
}
// If this doesn't hold
@@ -1362,7 +1456,7 @@ void js::Nursery::freeTrailerBlocks(void) {
// added set.
for (/*keep going*/; iAdded < nAdded; iAdded++) {
const PointerAndUint7 block = trailersAdded_[iAdded];
- mallocedBlockCache_.free(block);
+ mallocedBlockCache.free(block);
}
}
@@ -1371,17 +1465,14 @@ void js::Nursery::freeTrailerBlocks(void) {
trailersRemoved_.clear();
trailersRemovedUsed_ = 0;
trailerBytes_ = 0;
-
- // Discard blocks from the cache at 0.05% per megabyte of nursery capacity,
- // that is, 0.8% of blocks for a 16-megabyte nursery. This allows the cache
- // to gradually discard unneeded blocks in long running applications.
- mallocedBlockCache_.preen(0.05 * double(capacity()) / (1024.0 * 1024.0));
}
size_t Nursery::sizeOfTrailerBlockSets(
mozilla::MallocSizeOf mallocSizeOf) const {
- return trailersAdded_.sizeOfExcludingThis(mallocSizeOf) +
- trailersRemoved_.sizeOfExcludingThis(mallocSizeOf);
+ MOZ_ASSERT(fromSpace.trailersAdded_.empty());
+ MOZ_ASSERT(fromSpace.trailersRemoved_.empty());
+ return toSpace.trailersAdded_.sizeOfExcludingThis(mallocSizeOf) +
+ toSpace.trailersRemoved_.sizeOfExcludingThis(mallocSizeOf);
}
js::Nursery::CollectionResult js::Nursery::doCollection(AutoGCSession& session,
@@ -1393,8 +1484,19 @@ js::Nursery::CollectionResult js::Nursery::doCollection(AutoGCSession& session,
AutoDisableProxyCheck disableStrictProxyChecking;
mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;
+ // Swap nursery spaces.
+ swapSpaces();
+ MOZ_ASSERT(toSpace.isEmpty());
+ MOZ_ASSERT(toSpace.mallocedBuffers.empty());
+ if (semispaceEnabled_) {
+ poisonAndInitCurrentChunk();
+ }
+
+ clearMapAndSetNurseryRanges();
+
// Move objects pointed to by roots from the nursery to the major heap.
- TenuringTracer mover(rt, this);
+ tenuredEverything = shouldTenureEverything(reason);
+ TenuringTracer mover(rt, this, tenuredEverything);
// Trace everything considered as a root by a minor GC.
traceRoots(session, mover);
@@ -1433,17 +1535,14 @@ js::Nursery::CollectionResult js::Nursery::doCollection(AutoGCSession& session,
// Sweep.
startProfile(ProfileKey::FreeMallocedBuffers);
- gc->queueBuffersForFreeAfterMinorGC(mallocedBuffers);
- mallocedBufferBytes = 0;
+ gc->queueBuffersForFreeAfterMinorGC(fromSpace.mallocedBuffers);
+ fromSpace.mallocedBufferBytes = 0;
endProfile(ProfileKey::FreeMallocedBuffers);
// Give trailer blocks associated with non-tenured Wasm{Struct,Array}Objects
// back to our `mallocedBlockCache_`.
startProfile(ProfileKey::FreeTrailerBlocks);
- freeTrailerBlocks();
- if (options == JS::GCOptions::Shrink || gc::IsOOMReason(reason)) {
- mallocedBlockCache_.clear();
- }
+ freeTrailerBlocks(options, reason);
endProfile(ProfileKey::FreeTrailerBlocks);
startProfile(ProfileKey::ClearNursery);
@@ -1466,7 +1565,28 @@ js::Nursery::CollectionResult js::Nursery::doCollection(AutoGCSession& session,
#endif
endProfile(ProfileKey::CheckHashTables);
- return {mover.getTenuredSize(), mover.getTenuredCells()};
+ if (semispaceEnabled_) {
+ // On the next collection, tenure everything before |tenureThreshold_|.
+ tenureThreshold_ = toSpace.offsetFromExclusiveAddress(position());
+ } else {
+ // Swap nursery spaces back because we only use one.
+ swapSpaces();
+ MOZ_ASSERT(toSpace.isEmpty());
+ }
+
+ MOZ_ASSERT(fromSpace.isEmpty());
+
+ if (semispaceEnabled_) {
+ poisonAndInitCurrentChunk();
+ }
+
+ return {mover.getPromotedSize(), mover.getPromotedCells()};
+}
+
+void js::Nursery::swapSpaces() {
+ std::swap(toSpace, fromSpace);
+ toSpace.setKind(ChunkKind::NurseryToSpace);
+ fromSpace.setKind(ChunkKind::NurseryFromSpace);
}
void js::Nursery::traceRoots(AutoGCSession& session, TenuringTracer& mover) {
@@ -1490,14 +1610,12 @@ void js::Nursery::traceRoots(AutoGCSession& session, TenuringTracer& mover) {
MOZ_ASSERT(gc->storeBuffer().isEnabled());
MOZ_ASSERT(gc->storeBuffer().isEmpty());
- // Strings in the whole cell buffer must be traced first, in order to mark
- // tenured dependent strings' bases as non-deduplicatable. The rest of
- // nursery collection (whole non-string cells, edges, etc.) can happen
- // later.
startProfile(ProfileKey::TraceWholeCells);
sb.traceWholeCells(mover);
endProfile(ProfileKey::TraceWholeCells);
+ cellsToSweep = sb.releaseCellSweepSet();
+
startProfile(ProfileKey::TraceValues);
sb.traceValues(mover);
endProfile(ProfileKey::TraceValues);
@@ -1523,8 +1641,6 @@ void js::Nursery::traceRoots(AutoGCSession& session, TenuringTracer& mover) {
endProfile(ProfileKey::MarkRuntime);
}
- MOZ_ASSERT(gc->storeBuffer().isEmpty());
-
startProfile(ProfileKey::MarkDebugger);
{
gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
@@ -1533,6 +1649,15 @@ void js::Nursery::traceRoots(AutoGCSession& session, TenuringTracer& mover) {
endProfile(ProfileKey::MarkDebugger);
}
+bool js::Nursery::shouldTenureEverything(JS::GCReason reason) {
+ if (!semispaceEnabled()) {
+ return true;
+ }
+
+ return reason == JS::GCReason::EVICT_NURSERY ||
+ reason == JS::GCReason::DISABLE_GENERATIONAL_GC;
+}
+
size_t js::Nursery::doPretenuring(JSRuntime* rt, JS::GCReason reason,
bool validPromotionRate,
double promotionRate) {
@@ -1590,12 +1715,13 @@ bool js::Nursery::registerMallocedBuffer(void* buffer, size_t nbytes) {
MOZ_ASSERT(buffer);
MOZ_ASSERT(nbytes > 0);
MOZ_ASSERT(!isInside(buffer));
- if (!mallocedBuffers.putNew(buffer)) {
+
+ if (!toSpace.mallocedBuffers.putNew(buffer)) {
return false;
}
- mallocedBufferBytes += nbytes;
- if (MOZ_UNLIKELY(mallocedBufferBytes > capacity() * 8)) {
+ toSpace.mallocedBufferBytes += nbytes;
+ if (MOZ_UNLIKELY(toSpace.mallocedBufferBytes > capacity() * 8)) {
requestMinorGC(JS::GCReason::NURSERY_MALLOC_BUFFERS);
}
@@ -1613,21 +1739,19 @@ bool js::Nursery::registerMallocedBuffer(void* buffer, size_t nbytes) {
Nursery::WasBufferMoved js::Nursery::maybeMoveRawBufferOnPromotion(
void** bufferp, gc::Cell* owner, size_t nbytes, MemoryUse use,
arena_id_t arena) {
- MOZ_ASSERT(!IsInsideNursery(owner));
-
void* buffer = *bufferp;
if (!isInside(buffer)) {
- // This is a malloced buffer. Remove it from the nursery's list of buffers
- // so we don't free it and add it to the memory accounting for the zone
+ // This is a malloced buffer. Remove it from the nursery's previous list of
+ // buffers so we don't free it.
removeMallocedBufferDuringMinorGC(buffer);
- AddCellMemory(owner, nbytes, use);
+ trackMallocedBufferOnPromotion(buffer, owner, nbytes, use);
return BufferNotMoved;
}
// Copy the nursery-allocated buffer into a new malloc allocation.
AutoEnterOOMUnsafeRegion oomUnsafe;
- Zone* zone = owner->asTenured().zone();
+ Zone* zone = owner->zone();
void* movedBuffer = zone->pod_arena_malloc<uint8_t>(arena, nbytes);
if (!movedBuffer) {
oomUnsafe.crash("Nursery::updateBufferOnPromotion");
@@ -1635,38 +1759,111 @@ Nursery::WasBufferMoved js::Nursery::maybeMoveRawBufferOnPromotion(
memcpy(movedBuffer, buffer, nbytes);
- AddCellMemory(owner, nbytes, use);
+ trackMallocedBufferOnPromotion(movedBuffer, owner, nbytes, use);
*bufferp = movedBuffer;
return BufferMoved;
}
+void js::Nursery::trackMallocedBufferOnPromotion(void* buffer, gc::Cell* owner,
+ size_t nbytes, MemoryUse use) {
+ if (owner->isTenured()) {
+ // If we tenured the owner then account for the memory.
+ AddCellMemory(owner, nbytes, use);
+ return;
+ }
+
+ // Otherwise add it to the nursery's new buffer list.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!registerMallocedBuffer(buffer, nbytes)) {
+ oomUnsafe.crash("Nursery::trackMallocedBufferOnPromotion");
+ }
+}
+
+void js::Nursery::trackTrailerOnPromotion(void* buffer, gc::Cell* owner,
+ size_t nbytes, size_t overhead,
+ MemoryUse use) {
+ MOZ_ASSERT(!isInside(buffer));
+ unregisterTrailer(buffer);
+
+ if (owner->isTenured()) {
+ // If we tenured the owner then account for the memory.
+ AddCellMemory(owner, nbytes + overhead, use);
+ return;
+ }
+
+ // Otherwise add it to the nursery's new buffer list.
+ PointerAndUint7 blockAndListID(buffer,
+ MallocedBlockCache::listIDForSize(nbytes));
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!registerTrailer(blockAndListID, nbytes)) {
+ oomUnsafe.crash("Nursery::trackTrailerOnPromotion");
+ }
+}
+
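
Both helpers above apply the same rule: once a buffer's owner has been visited during promotion, the buffer is either charged to the owner's now-tenured cell, or, if the owner is still in the nursery (possible with semispace collection), re-registered so a later minor GC can free it. A minimal sketch of that decision, with hypothetical stand-ins for the zone accounting and the to-space buffer set:

    #include <cstddef>
    #include <unordered_set>

    struct Cell {
      bool tenured;
      bool isTenured() const { return tenured; }
    };

    // Hypothetical stand-ins: a per-zone byte counter and the nursery's
    // to-space malloced-buffer set.
    static size_t zoneMallocBytes = 0;
    static std::unordered_set<void*> toSpaceMallocedBuffers;

    void trackBufferOnPromotion(void* buffer, const Cell* owner, size_t nbytes) {
      if (owner->isTenured()) {
        zoneMallocBytes += nbytes;  // owner left the nursery: account the memory
        return;
      }
      // Owner survived in the nursery: keep the buffer on the new to-space list
      // so it is freed once the owner finally dies or is tenured.
      toSpaceMallocedBuffers.insert(buffer);
    }
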
void Nursery::requestMinorGC(JS::GCReason reason) {
+ JS::HeapState heapState = runtime()->heapState();
+#ifdef DEBUG
+ if (heapState == JS::HeapState::Idle ||
+ heapState == JS::HeapState::MinorCollecting) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
+ } else if (heapState == JS::HeapState::MajorCollecting) {
+ // The GC runs sweeping tasks that may access the storebuffer in parallel
+ // and these require taking the store buffer lock.
+ MOZ_ASSERT(CurrentThreadIsGCSweeping());
+ runtime()->gc.assertCurrentThreadHasLockedStoreBuffer();
+ } else {
+ MOZ_CRASH("Unexpected heap state");
+ }
+#endif
+
MOZ_ASSERT(reason != JS::GCReason::NO_REASON);
- MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
MOZ_ASSERT(isEnabled());
if (minorGCRequested()) {
return;
}
+ if (heapState == JS::HeapState::MinorCollecting) {
+ // This can happen when we promote a lot of data to the second generation in
+    // a semispace collection. This can trigger a GC due to the number of
+    // store buffer entries added.
+ return;
+ }
+
// Set position to end of chunk to block further allocation.
MOZ_ASSERT(prevPosition_ == 0);
- prevPosition_ = position_;
- position_ = chunk(currentChunk_).end();
+ prevPosition_ = position();
+ toSpace.position_ = chunk(currentChunk()).end();
minorGCTriggerReason_ = reason;
- runtime()->mainContextFromOwnThread()->requestInterrupt(
+ runtime()->mainContextFromAnyThread()->requestInterrupt(
InterruptReason::MinorGC);
}
+size_t SemispaceSizeFactor(bool semispaceEnabled) {
+ return semispaceEnabled ? 2 : 1;
+}
+
+size_t js::Nursery::totalCapacity() const {
+ return capacity() * SemispaceSizeFactor(semispaceEnabled_);
+}
+
+size_t js::Nursery::totalCommitted() const {
+ size_t size = std::min(capacity_, allocatedChunkCount() * gc::ChunkSize);
+ return size * SemispaceSizeFactor(semispaceEnabled_);
+}
+
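
totalCapacity() and totalCommitted() report the sum over both spaces, so enabling semispace collection doubles the reported figures for a given per-space capacity. A small illustration of the factor (the 16 MiB figure is made up):

    #include <cstddef>

    constexpr size_t SemispaceSizeFactor(bool semispaceEnabled) {
      return semispaceEnabled ? 2 : 1;
    }

    // A 16 MiB per-space nursery reports 32 MiB of total capacity with
    // semispace collection enabled and 16 MiB with it disabled.
    constexpr size_t MiB = 1024 * 1024;
    static_assert(16 * MiB * SemispaceSizeFactor(true) == 32 * MiB, "two spaces");
    static_assert(16 * MiB * SemispaceSizeFactor(false) == 16 * MiB, "one space");
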
size_t Nursery::sizeOfMallocedBuffers(
mozilla::MallocSizeOf mallocSizeOf) const {
+ MOZ_ASSERT(fromSpace.mallocedBuffers.empty());
+
size_t total = 0;
- for (BufferSet::Range r = mallocedBuffers.all(); !r.empty(); r.popFront()) {
+ for (BufferSet::Range r = toSpace.mallocedBuffers.all(); !r.empty();
+ r.popFront()) {
total += mallocSizeOf(r.front());
}
- total += mallocedBuffers.shallowSizeOfExcludingThis(mallocSizeOf);
+ total += toSpace.mallocedBuffers.shallowSizeOfExcludingThis(mallocSizeOf);
return total;
}
@@ -1680,121 +1877,171 @@ void js::Nursery::sweep() {
// Sweep unique IDs first before we sweep any tables that may be keyed based
// on them.
- for (Cell* cell : cellsWithUid_) {
+ cellsWithUid_.mutableEraseIf([](Cell*& cell) {
auto* obj = static_cast<JSObject*>(cell);
if (!IsForwarded(obj)) {
gc::RemoveUniqueId(obj);
- } else {
- JSObject* dst = Forwarded(obj);
- gc::TransferUniqueId(dst, obj);
+ return true;
}
- }
- cellsWithUid_.clear();
+
+ JSObject* dst = Forwarded(obj);
+ gc::TransferUniqueId(dst, obj);
+
+ if (!IsInsideNursery(dst)) {
+ return true;
+ }
+
+ cell = dst;
+ return false;
+ });
for (ZonesIter zone(runtime(), SkipAtoms); !zone.done(); zone.next()) {
zone->sweepAfterMinorGC(&trc);
}
sweepMapAndSetObjects();
+ cellsToSweep.sweep();
+ CellSweepSet empty;
+ std::swap(cellsToSweep, empty);
runtime()->caches().sweepAfterMinorGC(&trc);
}
+void gc::CellSweepSet::sweep() {
+ if (head_) {
+ head_->sweepDependentStrings();
+ head_ = nullptr;
+ }
+ if (storage_) {
+ storage_->freeAll();
+ }
+}
+
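
CellSweepSet::sweep() processes a linked list of ArenaCellSet nodes and then drops the LifoAlloc that owns all of them in one shot. The ownership pattern, sketched with simplified stand-ins (Node and Storage are not the real types, and the real list is processed through a single call on the head):

    #include <memory>
    #include <vector>

    struct Node {
      Node* next = nullptr;
      void sweepDependentStrings() { /* fix up swept cells here */ }
    };

    // Plays the role of the LifoAlloc backing store: it owns every node, so
    // the whole list can be released at once.
    struct Storage {
      std::vector<std::unique_ptr<Node>> nodes;
      Node* allocate() {
        nodes.push_back(std::make_unique<Node>());
        return nodes.back().get();
      }
      void freeAll() { nodes.clear(); }
    };

    struct SweepSet {
      std::unique_ptr<Storage> storage;
      Node* head = nullptr;

      void sweep() {
        for (Node* n = head; n; n = n->next) {
          n->sweepDependentStrings();
        }
        head = nullptr;
        if (storage) {
          storage->freeAll();  // all nodes die together with their backing store
        }
      }
    };
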
void js::Nursery::clear() {
+ fromSpace.clear(this);
+ MOZ_ASSERT(fromSpace.isEmpty());
+}
+
+void js::Nursery::Space::clear(Nursery* nursery) {
+ GCRuntime* gc = nursery->gc;
+
// Poison the nursery contents so touching a freed object will crash.
unsigned firstClearChunk;
- if (gc->hasZealMode(ZealMode::GenerationalGC)) {
- // Poison all the chunks used in this cycle. The new start chunk is
- // reposioned in Nursery::collect() but there's no point optimising that in
- // this case.
+ if (gc->hasZealMode(ZealMode::GenerationalGC) || nursery->semispaceEnabled_) {
+ // Poison all the chunks used in this cycle.
firstClearChunk = startChunk_;
} else {
- // In normal mode we start at the second chunk, the first one will be used
+ // Poison from the second chunk onwards as the first one will be used
     // in the next cycle and poisoned in Nursery::collect().
MOZ_ASSERT(startChunk_ == 0);
firstClearChunk = 1;
}
for (unsigned i = firstClearChunk; i < currentChunk_; ++i) {
- chunk(i).poisonAfterEvict();
+ chunks_[i]->poisonAfterEvict();
}
// Clear only the used part of the chunk because that's the part we touched,
// but only if it's not going to be re-used immediately (>= firstClearChunk).
if (currentChunk_ >= firstClearChunk) {
- chunk(currentChunk_)
- .poisonAfterEvict(position() - chunk(currentChunk_).start());
+ size_t usedBytes = position_ - chunks_[currentChunk_]->start();
+ chunks_[currentChunk_]->poisonAfterEvict(NurseryChunkHeaderSize +
+ usedBytes);
}
// Reset the start chunk & position if we're not in this zeal mode, or we're
// in it and close to the end of the nursery.
- MOZ_ASSERT(maxChunkCount() > 0);
+ MOZ_ASSERT(maxChunkCount_ > 0);
if (!gc->hasZealMode(ZealMode::GenerationalGC) ||
- (gc->hasZealMode(ZealMode::GenerationalGC) &&
- currentChunk_ + 1 == maxChunkCount())) {
- moveToStartOfChunk(0);
+ currentChunk_ + 1 == maxChunkCount_) {
+ moveToStartOfChunk(nursery, 0);
}
// Set current start position for isEmpty checks.
setStartToCurrentPosition();
}
-MOZ_ALWAYS_INLINE void js::Nursery::moveToStartOfChunk(unsigned chunkno) {
- MOZ_ASSERT(chunkno < allocatedChunkCount());
+void js::Nursery::moveToStartOfChunk(unsigned chunkno) {
+ toSpace.moveToStartOfChunk(this, chunkno);
+}
+
+void js::Nursery::Space::moveToStartOfChunk(Nursery* nursery,
+ unsigned chunkno) {
+ MOZ_ASSERT(chunkno < chunks_.length());
currentChunk_ = chunkno;
- position_ = chunk(chunkno).start();
- setCurrentEnd();
+ position_ = chunks_[chunkno]->start();
+ setCurrentEnd(nursery);
MOZ_ASSERT(position_ != 0);
MOZ_ASSERT(currentEnd_ > position_); // Check this cannot wrap.
}
-void js::Nursery::poisonAndInitCurrentChunk(size_t extent) {
- if (gc->hasZealMode(ZealMode::GenerationalGC) || !isSubChunkMode()) {
- chunk(currentChunk_).poisonAndInit(runtime());
- } else {
- extent = std::min(capacity_, extent);
- chunk(currentChunk_).poisonAndInit(runtime(), extent);
- }
+void js::Nursery::poisonAndInitCurrentChunk() {
+ NurseryChunk& chunk = this->chunk(currentChunk());
+ size_t start = position() - uintptr_t(&chunk);
+ size_t end = isSubChunkMode() ? capacity_ : ChunkSize;
+ chunk.poisonRange(start, end, JS_FRESH_NURSERY_PATTERN,
+ MemCheckKind::MakeUndefined);
+ new (&chunk)
+ NurseryChunk(runtime(), ChunkKind::NurseryToSpace, currentChunk());
}
-MOZ_ALWAYS_INLINE void js::Nursery::setCurrentEnd() {
- MOZ_ASSERT_IF(isSubChunkMode(),
- currentChunk_ == 0 && currentEnd_ <= chunk(0).end());
- currentEnd_ =
- uintptr_t(&chunk(currentChunk_)) + std::min(capacity_, ChunkSize);
+void js::Nursery::setCurrentEnd() { toSpace.setCurrentEnd(this); }
- MOZ_ASSERT_IF(!isSubChunkMode(), currentEnd_ == chunk(currentChunk_).end());
- MOZ_ASSERT(currentEnd_ != chunk(currentChunk_).start());
+void js::Nursery::Space::setCurrentEnd(Nursery* nursery) {
+ currentEnd_ = uintptr_t(chunks_[currentChunk_]) +
+ std::min(nursery->capacity(), ChunkSize);
}
-bool js::Nursery::allocateNextChunk(const unsigned chunkno,
- AutoLockGCBgAlloc& lock) {
- const unsigned priorCount = allocatedChunkCount();
+bool js::Nursery::allocateNextChunk(AutoLockGCBgAlloc& lock) {
+ // Allocate a new nursery chunk. If semispace collection is enabled, we have
+ // to allocate one for both spaces.
+
+ const unsigned priorCount = toSpace.chunks_.length();
const unsigned newCount = priorCount + 1;
- MOZ_ASSERT((chunkno == currentChunk_ + 1) ||
- (chunkno == 0 && allocatedChunkCount() == 0));
- MOZ_ASSERT(chunkno == allocatedChunkCount());
- MOZ_ASSERT(chunkno < HowMany(capacity(), ChunkSize));
+ MOZ_ASSERT(newCount <= maxChunkCount());
+ MOZ_ASSERT(fromSpace.chunks_.length() ==
+ (semispaceEnabled_ ? priorCount : 0));
+
+ if (!toSpace.chunks_.reserve(newCount) ||
+ (semispaceEnabled_ && !fromSpace.chunks_.reserve(newCount))) {
+ return false;
+ }
- if (!chunks_.resize(newCount)) {
+ TenuredChunk* toSpaceChunk = gc->getOrAllocChunk(lock);
+ if (!toSpaceChunk) {
return false;
}
- TenuredChunk* newChunk;
- newChunk = gc->getOrAllocChunk(lock);
- if (!newChunk) {
- chunks_.shrinkTo(priorCount);
+ TenuredChunk* fromSpaceChunk = nullptr;
+ if (semispaceEnabled_ && !(fromSpaceChunk = gc->getOrAllocChunk(lock))) {
+ gc->recycleChunk(toSpaceChunk, lock);
return false;
}
- chunks_[chunkno] = NurseryChunk::fromChunk(newChunk);
+ uint8_t index = toSpace.chunks_.length();
+ NurseryChunk* nurseryChunk =
+ NurseryChunk::fromChunk(toSpaceChunk, ChunkKind::NurseryToSpace, index);
+ toSpace.chunks_.infallibleAppend(nurseryChunk);
+
+ if (semispaceEnabled_) {
+ MOZ_ASSERT(index == fromSpace.chunks_.length());
+ nurseryChunk = NurseryChunk::fromChunk(fromSpaceChunk,
+ ChunkKind::NurseryFromSpace, index);
+ fromSpace.chunks_.infallibleAppend(nurseryChunk);
+ }
+
return true;
}
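
With semispace enabled, allocateNextChunk() must extend both spaces in lock step, recycling the to-space chunk if the from-space chunk cannot be obtained. The all-or-nothing shape of that logic, sketched with plain malloc/free standing in for getOrAllocChunk/recycleChunk:

    #include <cstddef>
    #include <cstdlib>

    // Allocate a chunk for each space or neither; chunkSize and the
    // malloc/free calls are stand-ins for the real chunk allocator.
    bool allocateChunkPair(void** toChunk, void** fromChunk,
                           bool semispaceEnabled, size_t chunkSize) {
      *toChunk = std::malloc(chunkSize);
      if (!*toChunk) {
        return false;
      }

      *fromChunk = nullptr;
      if (semispaceEnabled) {
        *fromChunk = std::malloc(chunkSize);
        if (!*fromChunk) {
          std::free(*toChunk);  // roll back so the spaces stay the same length
          *toChunk = nullptr;
          return false;
        }
      }
      return true;
    }
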
-MOZ_ALWAYS_INLINE void js::Nursery::setStartToCurrentPosition() {
+void js::Nursery::setStartToCurrentPosition() {
+ toSpace.setStartToCurrentPosition();
+}
+
+void js::Nursery::Space::setStartToCurrentPosition() {
startChunk_ = currentChunk_;
- startPosition_ = position();
+ startPosition_ = position_;
+ MOZ_ASSERT(isEmpty());
}
void js::Nursery::maybeResizeNursery(JS::GCOptions options,
@@ -1809,8 +2056,7 @@ void js::Nursery::maybeResizeNursery(JS::GCOptions options,
decommitTask->join();
size_t newCapacity = mozilla::Clamp(targetSize(options, reason),
- tunables().gcMinNurseryBytes(),
- tunables().gcMaxNurseryBytes());
+ minSpaceSize(), maxSpaceSize());
MOZ_ASSERT(roundSize(newCapacity) == newCapacity);
MOZ_ASSERT(newCapacity >= SystemPageSize());
@@ -1862,7 +2108,7 @@ size_t js::Nursery::targetSize(JS::GCOptions options, JS::GCReason reason) {
TimeStamp now = TimeStamp::Now();
if (reason == JS::GCReason::PREPARE_FOR_PAGELOAD) {
- return roundSize(tunables().gcMaxNurseryBytes());
+ return roundSize(maxSpaceSize());
}
// If the nursery is completely unused then minimise it.
@@ -1960,38 +2206,58 @@ size_t js::Nursery::roundSize(size_t size) {
}
void js::Nursery::growAllocableSpace(size_t newCapacity) {
- MOZ_ASSERT_IF(!isSubChunkMode(), newCapacity > currentChunk_ * ChunkSize);
- MOZ_ASSERT(newCapacity <= tunables().gcMaxNurseryBytes());
+ MOZ_ASSERT_IF(!isSubChunkMode(), newCapacity > currentChunk() * ChunkSize);
+ MOZ_ASSERT(newCapacity <= maxSpaceSize());
MOZ_ASSERT(newCapacity > capacity());
- if (!decommitTask->reserveSpaceForBytes(newCapacity)) {
+ size_t nchunks =
+ RequiredChunkCount(newCapacity) * SemispaceSizeFactor(semispaceEnabled_);
+ if (!decommitTask->reserveSpaceForChunks(nchunks)) {
return;
}
if (isSubChunkMode()) {
- MOZ_ASSERT(currentChunk_ == 0);
-
- // The remainder of the chunk may have been decommitted.
- if (!chunk(0).markPagesInUseHard(std::min(newCapacity, ChunkSize))) {
- // The OS won't give us the memory we need, we can't grow.
+ if (!toSpace.commitSubChunkRegion(capacity(), newCapacity) ||
+ (semispaceEnabled_ &&
+ !fromSpace.commitSubChunkRegion(capacity(), newCapacity))) {
return;
}
+ }
- // The capacity has changed and since we were in sub-chunk mode we need to
- // update the poison values / asan information for the now-valid region of
- // this chunk.
- size_t end = std::min(newCapacity, ChunkSize);
- chunk(0).poisonRange(capacity(), end, JS_FRESH_NURSERY_PATTERN,
- MemCheckKind::MakeUndefined);
+ setCapacity(newCapacity);
+
+ toSpace.setCurrentEnd(this);
+ if (semispaceEnabled_) {
+ fromSpace.setCurrentEnd(this);
}
+}
- capacity_ = newCapacity;
+bool js::Nursery::Space::commitSubChunkRegion(size_t oldCapacity,
+ size_t newCapacity) {
+ MOZ_ASSERT(currentChunk_ == 0);
+ MOZ_ASSERT(oldCapacity < ChunkSize);
+ MOZ_ASSERT(newCapacity > oldCapacity);
- setCurrentEnd();
+ size_t newChunkEnd = std::min(newCapacity, ChunkSize);
+
+ // The remainder of the chunk may have been decommitted.
+ if (!chunks_[0]->markPagesInUseHard(newChunkEnd)) {
+ // The OS won't give us the memory we need, we can't grow.
+ return false;
+ }
+
+ // The capacity has changed and since we were in sub-chunk mode we need to
+ // update the poison values / asan information for the now-valid region of
+ // this chunk.
+ chunks_[0]->poisonRange(oldCapacity, newChunkEnd, JS_FRESH_NURSERY_PATTERN,
+ MemCheckKind::MakeUndefined);
+ return true;
}
-void js::Nursery::freeChunksFrom(const unsigned firstFreeChunk) {
- MOZ_ASSERT(firstFreeChunk < chunks_.length());
+void js::Nursery::freeChunksFrom(Space& space, const unsigned firstFreeChunk) {
+ if (firstFreeChunk >= space.chunks_.length()) {
+ return;
+ }
// The loop below may need to skip the first chunk, so we may use this so we
// can modify it.
@@ -2000,61 +2266,112 @@ void js::Nursery::freeChunksFrom(const unsigned firstFreeChunk) {
if ((firstChunkToDecommit == 0) && isSubChunkMode()) {
// Part of the first chunk may be hard-decommitted, un-decommit it so that
// the GC's normal chunk-handling doesn't segfault.
- MOZ_ASSERT(currentChunk_ == 0);
- if (!chunk(0).markPagesInUseHard(ChunkSize)) {
+ MOZ_ASSERT(space.currentChunk_ == 0);
+ if (!space.chunks_[0]->markPagesInUseHard(ChunkSize)) {
// Free the chunk if we can't allocate its pages.
- UnmapPages(static_cast<void*>(&chunk(0)), ChunkSize);
+ UnmapPages(space.chunks_[0], ChunkSize);
firstChunkToDecommit = 1;
}
}
{
AutoLockHelperThreadState lock;
- for (size_t i = firstChunkToDecommit; i < chunks_.length(); i++) {
- decommitTask->queueChunk(chunks_[i], lock);
+ for (size_t i = firstChunkToDecommit; i < space.chunks_.length(); i++) {
+ decommitTask->queueChunk(space.chunks_[i], lock);
}
}
- chunks_.shrinkTo(firstFreeChunk);
+ space.chunks_.shrinkTo(firstFreeChunk);
}
void js::Nursery::shrinkAllocableSpace(size_t newCapacity) {
-#ifdef JS_GC_ZEAL
- if (gc->hasZealMode(ZealMode::GenerationalGC)) {
- return;
- }
-#endif
+ MOZ_ASSERT(!gc->hasZealMode(ZealMode::GenerationalGC));
+ MOZ_ASSERT(newCapacity < capacity_);
- // Don't shrink the nursery to zero (use Nursery::disable() instead)
- // This can't happen due to the rounding-down performed above because of the
- // clamping in maybeResizeNursery().
- MOZ_ASSERT(newCapacity != 0);
- // Don't attempt to shrink it to the same size.
- if (newCapacity == capacity_) {
+ if (semispaceEnabled() && usedSpace() >= newCapacity) {
+ // Can't shrink below what we've already used.
return;
}
- MOZ_ASSERT(newCapacity < capacity_);
unsigned newCount = HowMany(newCapacity, ChunkSize);
if (newCount < allocatedChunkCount()) {
- freeChunksFrom(newCount);
+ freeChunksFrom(toSpace, newCount);
+ freeChunksFrom(fromSpace, newCount);
}
size_t oldCapacity = capacity_;
- capacity_ = newCapacity;
+ setCapacity(newCapacity);
- setCurrentEnd();
+ toSpace.setCurrentEnd(this);
+ if (semispaceEnabled_) {
+ fromSpace.setCurrentEnd(this);
+ }
if (isSubChunkMode()) {
- MOZ_ASSERT(currentChunk_ == 0);
- size_t end = std::min(oldCapacity, ChunkSize);
- chunk(0).poisonRange(newCapacity, end, JS_SWEPT_NURSERY_PATTERN,
- MemCheckKind::MakeNoAccess);
+ toSpace.decommitSubChunkRegion(this, oldCapacity, newCapacity);
+ if (semispaceEnabled_) {
+ fromSpace.decommitSubChunkRegion(this, oldCapacity, newCapacity);
+ }
+ }
+}
- AutoLockHelperThreadState lock;
- decommitTask->queueRange(capacity_, chunk(0), lock);
+void js::Nursery::Space::decommitSubChunkRegion(Nursery* nursery,
+ size_t oldCapacity,
+ size_t newCapacity) {
+ MOZ_ASSERT(currentChunk_ == 0);
+ MOZ_ASSERT(newCapacity < ChunkSize);
+ MOZ_ASSERT(newCapacity < oldCapacity);
+
+ size_t oldChunkEnd = std::min(oldCapacity, ChunkSize);
+ chunks_[0]->poisonRange(newCapacity, oldChunkEnd, JS_SWEPT_NURSERY_PATTERN,
+ MemCheckKind::MakeNoAccess);
+
+ AutoLockHelperThreadState lock;
+ nursery->decommitTask->queueRange(newCapacity, chunks_[0], lock);
+}
+
+js::Nursery::Space::Space(gc::ChunkKind kind) : kind(kind) {
+ MOZ_ASSERT(kind == ChunkKind::NurseryFromSpace ||
+ kind == ChunkKind::NurseryToSpace);
+}
+
+void js::Nursery::Space::setKind(ChunkKind newKind) {
+#ifdef DEBUG
+ MOZ_ASSERT(newKind == ChunkKind::NurseryFromSpace ||
+ newKind == ChunkKind::NurseryToSpace);
+ checkKind(kind);
+#endif
+
+ kind = newKind;
+ for (NurseryChunk* chunk : chunks_) {
+ chunk->kind = newKind;
+ }
+
+#ifdef DEBUG
+ checkKind(newKind);
+#endif
+}
+
+#ifdef DEBUG
+void js::Nursery::Space::checkKind(ChunkKind expected) const {
+ MOZ_ASSERT(kind == expected);
+ for (NurseryChunk* chunk : chunks_) {
+ MOZ_ASSERT(chunk->getKind() == expected);
+ }
+}
+#endif
+
+#ifdef DEBUG
+size_t js::Nursery::Space::findChunkIndex(uintptr_t chunkAddr) const {
+ for (size_t i = 0; i < chunks_.length(); i++) {
+ if (uintptr_t(chunks_[i]) == chunkAddr) {
+ return i;
+ }
}
+
+ MOZ_CRASH("Nursery chunk not found");
}
+#endif
gcstats::Statistics& js::Nursery::stats() const { return gc->stats(); }
@@ -2067,18 +2384,55 @@ bool js::Nursery::isSubChunkMode() const {
return capacity() <= NurseryChunkUsableSize;
}
+void js::Nursery::clearMapAndSetNurseryRanges() {
+ // Clears the lists of nursery ranges used by map and set iterators. These
+ // lists are cleared at the start of minor GC and rebuilt when iterators are
+ // promoted during minor GC.
+ for (auto* map : mapsWithNurseryMemory_) {
+ map->clearNurseryRangesBeforeMinorGC();
+ }
+ for (auto* set : setsWithNurseryMemory_) {
+ set->clearNurseryRangesBeforeMinorGC();
+ }
+}
+
void js::Nursery::sweepMapAndSetObjects() {
+ // This processes all Map and Set objects that are known to have associated
+ // nursery memory (either they are nursery allocated themselves or they have
+ // iterator objects that are nursery allocated).
+ //
+  // These objects may die and be finalized; otherwise their internal state
+  // and memory tracking are updated.
+ //
+ // Finally the lists themselves are rebuilt so as to remove objects that are
+ // no longer associated with nursery memory (either because they died or
+ // because the nursery object was promoted to the tenured heap).
+
auto* gcx = runtime()->gcContext();
- for (auto* mapobj : mapsWithNurseryMemory_) {
- MapObject::sweepAfterMinorGC(gcx, mapobj);
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ MapObjectVector maps;
+ std::swap(mapsWithNurseryMemory_, maps);
+ for (auto* mapobj : maps) {
+ mapobj = MapObject::sweepAfterMinorGC(gcx, mapobj);
+ if (mapobj) {
+ if (!mapsWithNurseryMemory_.append(mapobj)) {
+ oomUnsafe.crash("sweepAfterMinorGC");
+ }
+ }
}
- mapsWithNurseryMemory_.clearAndFree();
- for (auto* setobj : setsWithNurseryMemory_) {
- SetObject::sweepAfterMinorGC(gcx, setobj);
+ SetObjectVector sets;
+ std::swap(setsWithNurseryMemory_, sets);
+ for (auto* setobj : sets) {
+ setobj = SetObject::sweepAfterMinorGC(gcx, setobj);
+ if (setobj) {
+ if (!setsWithNurseryMemory_.append(setobj)) {
+ oomUnsafe.crash("sweepAfterMinorGC");
+ }
+ }
}
- setsWithNurseryMemory_.clearAndFree();
}
void js::Nursery::joinDecommitTask() { decommitTask->join(); }
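
sweepMapAndSetObjects() now takes the whole list with std::swap, sweeps each object, and re-appends only those that still reference nursery memory (possible because sweepAfterMinorGC now returns the surviving object or nullptr). The same filter-and-rebuild pattern on a plain std::vector, with a hypothetical sweepOne standing in for MapObject::sweepAfterMinorGC:

    #include <utility>
    #include <vector>

    struct MapObj {
      bool stillUsesNurseryMemory;
    };

    // Returns nullptr when the object no longer needs post-minor-GC sweeping.
    MapObj* sweepOne(MapObj* m) {
      return m->stillUsesNurseryMemory ? m : nullptr;
    }

    void sweepMaps(std::vector<MapObj*>& mapsWithNurseryMemory) {
      std::vector<MapObj*> maps;
      std::swap(mapsWithNurseryMemory, maps);  // take the old list wholesale
      for (MapObj* m : maps) {
        if (MapObj* keep = sweepOne(m)) {
          mapsWithNurseryMemory.push_back(keep);  // rebuild with survivors only
        }
      }
    }
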
diff --git a/js/src/gc/Nursery.h b/js/src/gc/Nursery.h
index 0d7b607ff8..647dbbb24f 100644
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -13,6 +13,7 @@
#include <tuple>
+#include "ds/LifoAlloc.h"
#include "gc/GCEnum.h"
#include "gc/GCProbes.h"
#include "gc/Heap.h"
@@ -21,6 +22,8 @@
#include "js/AllocPolicy.h"
#include "js/Class.h"
#include "js/GCAPI.h"
+#include "js/GCVector.h"
+#include "js/HeapAPI.h"
#include "js/TypeDecls.h"
#include "js/UniquePtr.h"
#include "js/Utility.h"
@@ -69,7 +72,20 @@ namespace gc {
class AutoGCSession;
struct Cell;
class GCSchedulingTunables;
+class StoreBuffer;
class TenuringTracer;
+
+// A set of cells that need to be swept at the end of a minor GC,
+// represented as a linked list of ArenaCellSet structs extracted from a
+// WholeCellBuffer.
+struct CellSweepSet {
+ UniquePtr<LifoAlloc> storage_;
+ ArenaCellSet* head_ = nullptr;
+
+ // Fixup the tenured dependent strings stored in the ArenaCellSet list.
+ void sweep();
+};
+
} // namespace gc
class Nursery {
@@ -79,18 +95,6 @@ class Nursery {
[[nodiscard]] bool init(AutoLockGCBgAlloc& lock);
- // Number of allocated (ready to use) chunks.
- unsigned allocatedChunkCount() const { return chunks_.length(); }
-
- // Total number of chunks and the capacity of the nursery. Chunks will be
- // lazilly allocated and added to the chunks array up to this limit, after
- // that the nursery must be collected, this limit may be raised during
- // collection.
- unsigned maxChunkCount() const {
- MOZ_ASSERT(capacity());
- return HowMany(capacity(), gc::ChunkSize);
- }
-
void enable();
void disable();
bool isEnabled() const { return capacity() != 0; }
@@ -103,20 +107,16 @@ class Nursery {
void disableBigInts();
bool canAllocateBigInts() const { return canAllocateBigInts_; }
+ void setSemispaceEnabled(bool enabled);
+ bool semispaceEnabled() const { return semispaceEnabled_; }
+
// Return true if no allocations have been made since the last collection.
bool isEmpty() const;
// Check whether an arbitrary pointer is within the nursery. This is
// slower than IsInsideNursery(Cell*), but works on all types of pointers.
- MOZ_ALWAYS_INLINE bool isInside(gc::Cell* cellp) const = delete;
- MOZ_ALWAYS_INLINE bool isInside(const void* p) const {
- for (auto* chunk : chunks_) {
- if (uintptr_t(p) - uintptr_t(chunk) < gc::ChunkSize) {
- return true;
- }
- }
- return false;
- }
+ bool isInside(gc::Cell* cellp) const = delete;
+ inline bool isInside(const void* p) const;
template <typename T>
inline bool isInside(const SharedMem<T>& p) const;
@@ -223,11 +223,12 @@ class Nursery {
// Mark a malloced buffer as no longer needing to be freed.
void removeMallocedBuffer(void* buffer, size_t nbytes) {
- MOZ_ASSERT(mallocedBuffers.has(buffer));
+ MOZ_ASSERT(!JS::RuntimeHeapIsMinorCollecting());
+ MOZ_ASSERT(toSpace.mallocedBuffers.has(buffer));
MOZ_ASSERT(nbytes > 0);
- MOZ_ASSERT(mallocedBufferBytes >= nbytes);
- mallocedBuffers.remove(buffer);
- mallocedBufferBytes -= nbytes;
+ MOZ_ASSERT(toSpace.mallocedBufferBytes >= nbytes);
+ toSpace.mallocedBuffers.remove(buffer);
+ toSpace.mallocedBufferBytes -= nbytes;
}
// Mark a malloced buffer as no longer needing to be freed during minor
@@ -235,8 +236,8 @@ class Nursery {
// buffers will soon be freed.
void removeMallocedBufferDuringMinorGC(void* buffer) {
MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());
- MOZ_ASSERT(mallocedBuffers.has(buffer));
- mallocedBuffers.remove(buffer);
+ MOZ_ASSERT(fromSpace.mallocedBuffers.has(buffer));
+ fromSpace.mallocedBuffers.remove(buffer);
}
[[nodiscard]] bool addedUniqueIdToCell(gc::Cell* cell) {
@@ -277,26 +278,8 @@ class Nursery {
inline void unregisterTrailer(void* block);
size_t sizeOfTrailerBlockSets(mozilla::MallocSizeOf mallocSizeOf) const;
- size_t capacity() const { return capacity_; }
- size_t committed() const {
- return std::min(capacity_, allocatedChunkCount() * gc::ChunkSize);
- }
-
- // Used and free space both include chunk headers for that part of the
- // nursery.
- //
- // usedSpace() + freeSpace() == capacity()
- //
- MOZ_ALWAYS_INLINE size_t usedSpace() const {
- return capacity() - freeSpace();
- }
- MOZ_ALWAYS_INLINE size_t freeSpace() const {
- MOZ_ASSERT(isEnabled());
- MOZ_ASSERT(currentEnd_ - position_ <= NurseryChunkUsableSize);
- MOZ_ASSERT(currentChunk_ < maxChunkCount());
- return (currentEnd_ - position_) +
- (maxChunkCount() - currentChunk_ - 1) * gc::ChunkSize;
- }
+ size_t totalCapacity() const;
+ size_t totalCommitted() const;
#ifdef JS_GC_ZEAL
void enterZealMode();
@@ -312,9 +295,10 @@ class Nursery {
// Print total profile times on shutdown.
void printTotalProfileTimes();
- void* addressOfPosition() const { return (void**)&position_; }
+ void* addressOfPosition() const { return (void**)&toSpace.position_; }
static constexpr int32_t offsetOfCurrentEndFromPosition() {
- return offsetof(Nursery, currentEnd_) - offsetof(Nursery, position_);
+ return offsetof(Nursery, toSpace.currentEnd_) -
+ offsetof(Nursery, toSpace.position_);
}
void* addressOfNurseryAllocatedSites() {
@@ -343,10 +327,6 @@ class Nursery {
return setsWithNurseryMemory_.append(obj);
}
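
addressOfPosition() and offsetOfCurrentEndFromPosition() now point into toSpace so that jitted code can keep inlining the nursery's bump-allocation fast path. A standalone sketch of that fast path (the field names mirror position_/currentEnd_; the slow path is elided):

    #include <cstddef>
    #include <cstdint>

    struct BumpSpace {
      uintptr_t position;    // first unallocated byte in the current chunk
      uintptr_t currentEnd;  // last byte of usable space in the current chunk
    };

    void* tryAllocateCell(BumpSpace& space, size_t nbytes) {
      if (space.currentEnd - space.position >= nbytes) {
        void* cell = reinterpret_cast<void*>(space.position);
        space.position += nbytes;  // bump the allocation cursor
        return cell;
      }
      return nullptr;  // caller falls back to the out-of-line slow path
    }
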
- // The amount of space in the mapped nursery available to allocations.
- static const size_t NurseryChunkUsableSize =
- gc::ChunkSize - sizeof(gc::ChunkBase);
-
void joinDecommitTask();
mozilla::TimeStamp collectionStartTime() {
@@ -362,6 +342,16 @@ class Nursery {
void setAllocFlagsForZone(JS::Zone* zone);
+ bool shouldTenureEverything(JS::GCReason reason);
+
+ inline bool inCollectedRegion(gc::Cell* cell) const;
+ inline bool inCollectedRegion(void* ptr) const;
+
+ void trackMallocedBufferOnPromotion(void* buffer, gc::Cell* owner,
+ size_t nbytes, MemoryUse use);
+ void trackTrailerOnPromotion(void* buffer, gc::Cell* owner, size_t nbytes,
+ size_t overhead, MemoryUse use);
+
// Round a size in bytes to the nearest valid nursery size.
static size_t roundSize(size_t size);
@@ -374,6 +364,8 @@ class Nursery {
mozilla::TimeStamp lastCollectionEndTime() const;
private:
+ struct Space;
+
enum class ProfileKey {
#define DEFINE_TIME_KEY(name, text) name,
FOR_EACH_NURSERY_PROFILE_TIME(DEFINE_TIME_KEY)
@@ -387,6 +379,36 @@ class Nursery {
mozilla::EnumeratedArray<ProfileKey, mozilla::TimeDuration,
size_t(ProfileKey::KeyCount)>;
+ size_t capacity() const { return capacity_; }
+
+ // Total number of chunks and the capacity of the current nursery
+ // space. Chunks will be lazily allocated and added to the chunks array up to
+ // this limit. After that the nursery must be collected. This limit may be
+ // changed at the end of collection by maybeResizeNursery.
+ uint32_t maxChunkCount() const {
+ MOZ_ASSERT(toSpace.maxChunkCount_);
+ return toSpace.maxChunkCount_;
+ }
+
+ // Number of allocated (ready to use) chunks.
+ unsigned allocatedChunkCount() const { return toSpace.chunks_.length(); }
+
+ uint32_t currentChunk() const { return toSpace.currentChunk_; }
+ uint32_t startChunk() const { return toSpace.startChunk_; }
+ uintptr_t startPosition() const { return toSpace.startPosition_; }
+
+ // Used and free space both include chunk headers for that part of the
+ // nursery.
+ MOZ_ALWAYS_INLINE size_t usedSpace() const {
+ return capacity() - freeSpace();
+ }
+ MOZ_ALWAYS_INLINE size_t freeSpace() const {
+ MOZ_ASSERT(isEnabled());
+ MOZ_ASSERT(currentChunk() < maxChunkCount());
+ return (currentEnd() - position()) +
+ (maxChunkCount() - currentChunk() - 1) * gc::ChunkSize;
+ }
+
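
freeSpace() counts the remainder of the current chunk plus every chunk that has not been reached yet, and usedSpace() is whatever remains of capacity(). A worked example with made-up numbers (1 MiB chunks, a four-chunk capacity, allocating in chunk 1 with 256 KiB left in it):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t ChunkSize = 1 << 20;  // 1 MiB, made-up value
      const size_t maxChunkCount = 4;
      const size_t capacity = maxChunkCount * ChunkSize;
      const size_t currentChunk = 1;
      const size_t currentEndMinusPosition = 256 * 1024;

      size_t freeSpace = currentEndMinusPosition +
                         (maxChunkCount - currentChunk - 1) * ChunkSize;
      size_t usedSpace = capacity - freeSpace;

      assert(usedSpace + freeSpace == capacity);  // the used/free identity
      return 0;
    }
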
// Calculate the promotion rate of the most recent minor GC.
// The valid_for_tenuring parameter is used to return whether this
// promotion rate is accurate enough (the nursery was full enough) to be
@@ -395,31 +417,27 @@ class Nursery {
// Must only be called if the previousGC data is initialised.
double calcPromotionRate(bool* validForTenuring) const;
- void freeTrailerBlocks();
+ void freeTrailerBlocks(JS::GCOptions options, JS::GCReason reason);
- NurseryChunk& chunk(unsigned index) const { return *chunks_[index]; }
+ NurseryChunk& chunk(unsigned index) const { return *toSpace.chunks_[index]; }
// Set the allocation position to the start of a chunk. This sets
// currentChunk_, position_ and currentEnd_ values as appropriate.
void moveToStartOfChunk(unsigned chunkno);
bool initFirstChunk(AutoLockGCBgAlloc& lock);
+ void setCapacity(size_t newCapacity);
- // extent is advisory, it will be ignored in sub-chunk and generational zeal
- // modes. It will be clamped to Min(NurseryChunkUsableSize, capacity_).
- void poisonAndInitCurrentChunk(size_t extent = gc::ChunkSize);
+ void poisonAndInitCurrentChunk();
void setCurrentEnd();
void setStartToCurrentPosition();
- // Allocate the next chunk, or the first chunk for initialization.
- // Callers will probably want to call moveToStartOfChunk(0) next.
- [[nodiscard]] bool allocateNextChunk(unsigned chunkno,
- AutoLockGCBgAlloc& lock);
+ // Allocate another chunk.
+ [[nodiscard]] bool allocateNextChunk(AutoLockGCBgAlloc& lock);
- uintptr_t currentEnd() const { return currentEnd_; }
-
- uintptr_t position() const { return position_; }
+ uintptr_t position() const { return toSpace.position_; }
+ uintptr_t currentEnd() const { return toSpace.currentEnd_; }
MOZ_ALWAYS_INLINE bool isSubChunkMode() const;
@@ -451,6 +469,7 @@ class Nursery {
};
CollectionResult doCollection(gc::AutoGCSession& session,
JS::GCOptions options, JS::GCReason reason);
+ void swapSpaces();
void traceRoots(gc::AutoGCSession& session, gc::TenuringTracer& mover);
size_t doPretenuring(JSRuntime* rt, JS::GCReason reason,
@@ -469,22 +488,30 @@ class Nursery {
uint32_t capacity);
#ifdef DEBUG
- bool checkForwardingPointerLocation(void* ptr, bool expectedInside);
+ bool checkForwardingPointerInsideNursery(void* ptr);
#endif
// Updates pointers to nursery objects that have been tenured and discards
// pointers to objects that have been freed.
void sweep();
- // Reset the current chunk and position after a minor collection. Also poison
+ // In a minor GC, resets the start and end positions, the current chunk and
+ // current position.
+ void setNewExtentAndPosition();
+
   // Poison the nursery contents on debug & nightly builds.
void clear();
+ void clearMapAndSetNurseryRanges();
void sweepMapAndSetObjects();
// Allocate a buffer for a given zone, using the nursery if possible.
void* allocateBuffer(JS::Zone* zone, size_t nbytes);
+ // Get per-space size limits.
+ size_t maxSpaceSize() const;
+ size_t minSpaceSize() const;
+
// Change the allocable space provided by the nursery.
void maybeResizeNursery(JS::GCOptions options, JS::GCReason reason);
size_t targetSize(JS::GCOptions options, JS::GCReason reason);
@@ -495,7 +522,9 @@ class Nursery {
// Free the chunks starting at firstFreeChunk until the end of the chunks
// vector. Shrinks the vector but does not update maxChunkCount().
- void freeChunksFrom(unsigned firstFreeChunk);
+ void freeChunksFrom(Space& space, unsigned firstFreeChunk);
+
+ inline bool shouldTenure(gc::Cell* cell);
void sendTelemetry(JS::GCReason reason, mozilla::TimeDuration totalTime,
bool wasEmpty, double promotionRate,
@@ -514,35 +543,87 @@ class Nursery {
mozilla::TimeStamp collectionStartTime() const;
private:
- // Fields used during allocation fast path are grouped first:
+ using BufferRelocationOverlay = void*;
+ using BufferSet = HashSet<void*, PointerHasher<void*>, SystemAllocPolicy>;
- // Pointer to the first unallocated byte in the nursery.
- uintptr_t position_;
+ struct Space {
+ // Fields used during allocation fast path go first:
- // Pointer to the last byte of space in the current chunk.
- uintptr_t currentEnd_;
+ // Pointer to the first unallocated byte in the nursery.
+ uintptr_t position_ = 0;
- // Other fields not necessarily used during allocation follow:
+ // Pointer to the last byte of space in the current chunk.
+ uintptr_t currentEnd_ = 0;
- gc::GCRuntime* const gc;
+ // Vector of allocated chunks to allocate from.
+ Vector<NurseryChunk*, 0, SystemAllocPolicy> chunks_;
+
+ // The index of the chunk that is currently being allocated from.
+ uint32_t currentChunk_ = 0;
+
+ // The maximum number of chunks to allocate based on capacity_.
+ uint32_t maxChunkCount_ = 0;
+
+ // These fields refer to the beginning of the nursery. They're normally 0
+ // and chunk(0).start() respectively. Except when a generational GC zeal
+ // mode is active, then they may be arbitrary (see Nursery::clear()).
+ uint32_t startChunk_ = 0;
+ uintptr_t startPosition_ = 0;
+
+    // The set of malloc-allocated buffers owned by nursery objects. Any
+ // buffers that do not belong to a promoted thing at the end of a minor GC
+ // must be freed.
+ BufferSet mallocedBuffers;
+ size_t mallocedBufferBytes = 0;
+
+ // Wasm "trailer" (C++-heap-allocated) blocks. See comments above on
+ // ::registerTrailer and ::unregisterTrailer.
+ Vector<PointerAndUint7, 0, SystemAllocPolicy> trailersAdded_;
+ Vector<void*, 0, SystemAllocPolicy> trailersRemoved_;
+ size_t trailersRemovedUsed_ = 0;
+ size_t trailerBytes_ = 0;
+
+ gc::ChunkKind kind;
+
+ explicit Space(gc::ChunkKind kind);
+
+ inline bool isEmpty() const;
+ inline bool isInside(const void* p) const;
- // Vector of allocated chunks to allocate from.
- Vector<NurseryChunk*, 0, SystemAllocPolicy> chunks_;
+ // Return the logical offset within the nursery of an address in a nursery
+ // chunk (chunks are discontiguous in memory).
+ inline size_t offsetFromAddress(uintptr_t addr) const;
+ inline size_t offsetFromExclusiveAddress(uintptr_t addr) const;
- // The index of the chunk that is currently being allocated from.
- uint32_t currentChunk_;
+ void setKind(gc::ChunkKind newKind);
- // These fields refer to the beginning of the nursery. They're normally 0
- // and chunk(0).start() respectively. Except when a generational GC zeal
- // mode is active, then they may be arbitrary (see Nursery::clear()).
- uint32_t startChunk_;
- uintptr_t startPosition_;
+ void clear(Nursery* nursery);
+ void moveToStartOfChunk(Nursery* nursery, unsigned chunkno);
+ void setCurrentEnd(Nursery* nursery);
+ void setStartToCurrentPosition();
+ bool commitSubChunkRegion(size_t oldCapacity, size_t newCapacity);
+ void decommitSubChunkRegion(Nursery* nursery, size_t oldCapacity,
+ size_t newCapacity);
+ void freeTrailerBlocks(gc::MallocedBlockCache& mallocedBlockCache);
+
+#ifdef DEBUG
+ void checkKind(gc::ChunkKind expected) const;
+ size_t findChunkIndex(uintptr_t chunkAddr) const;
+#endif
+ };
+
+ Space toSpace;
+ Space fromSpace;
+
+ gc::GCRuntime* const gc;
// The current nursery capacity measured in bytes. It may grow up to this
// value without a collection, allocating chunks on demand. This limit may be
// changed by maybeResizeNursery() each collection. It includes chunk headers.
size_t capacity_;
+ uintptr_t tenureThreshold_ = 0;
+
gc::PretenuringNursery pretenuringNursery;
mozilla::TimeDuration timeInChunkAlloc_;
@@ -553,6 +634,9 @@ class Nursery {
mozilla::TimeDuration profileThreshold_;
+ // Whether to use semispace collection.
+ bool semispaceEnabled_;
+
// Whether we will nursery-allocate strings.
bool canAllocateStrings_;
@@ -595,21 +679,6 @@ class Nursery {
bool hasRecentGrowthData;
double smoothedTargetSize;
- // The set of externally malloced buffers potentially kept live by objects
- // stored in the nursery. Any external buffers that do not belong to a
- // tenured thing at the end of a minor GC must be freed.
- using BufferRelocationOverlay = void*;
- using BufferSet = HashSet<void*, PointerHasher<void*>, SystemAllocPolicy>;
- BufferSet mallocedBuffers;
- size_t mallocedBufferBytes = 0;
-
- // Wasm "trailer" (C++-heap-allocated) blocks. See comments above on
- // ::registerTrailer and ::unregisterTrailer.
- Vector<PointerAndUint7, 0, SystemAllocPolicy> trailersAdded_;
- Vector<void*, 0, SystemAllocPolicy> trailersRemoved_;
- size_t trailersRemovedUsed_ = 0;
- size_t trailerBytes_ = 0;
-
// During a collection most hoisted slot and element buffers indicate their
// new location with a forwarding pointer at the base. This does not work
// for buffers whose length is less than pointer width, or when different
@@ -619,6 +688,8 @@ class Nursery {
HashMap<void*, void*, PointerHasher<void*>, SystemAllocPolicy>;
ForwardedBufferMap forwardedBuffers;
+ gc::CellSweepSet cellsToSweep;
+
// When we assign a unique id to cell in the nursery, that almost always
// means that the cell will be in a hash table, and thus, held live,
// automatically moving the uid from the nursery to its new home in
@@ -629,13 +700,15 @@ class Nursery {
// Note: we store the pointers as Cell* here, resulting in an ugly cast in
// sweep. This is because this structure is used to help implement
// stable object hashing and we have to break the cycle somehow.
- using CellsWithUniqueIdVector = Vector<gc::Cell*, 8, SystemAllocPolicy>;
+ using CellsWithUniqueIdVector = JS::GCVector<gc::Cell*, 8, SystemAllocPolicy>;
CellsWithUniqueIdVector cellsWithUid_;
// Lists of map and set objects allocated in the nursery or with iterators
// allocated there. Such objects need to be swept after minor GC.
- Vector<MapObject*, 0, SystemAllocPolicy> mapsWithNurseryMemory_;
- Vector<SetObject*, 0, SystemAllocPolicy> setsWithNurseryMemory_;
+ using MapObjectVector = Vector<MapObject*, 0, SystemAllocPolicy>;
+ MapObjectVector mapsWithNurseryMemory_;
+ using SetObjectVector = Vector<SetObject*, 0, SystemAllocPolicy>;
+ SetObjectVector setsWithNurseryMemory_;
UniquePtr<NurseryDecommitTask> decommitTask;
@@ -648,11 +721,30 @@ class Nursery {
// no correctness impact, only a performance impact.
gc::MallocedBlockCache mallocedBlockCache_;
+ // Whether the previous collection tenured everything. This may be false if
+ // semispace is in use.
+ bool tenuredEverything;
+
friend class gc::GCRuntime;
friend class gc::TenuringTracer;
friend struct NurseryChunk;
};
+MOZ_ALWAYS_INLINE bool Nursery::isInside(const void* p) const {
+ // TODO: Split this into separate methods.
+ // TODO: Do we ever need to check both?
+ return toSpace.isInside(p) || fromSpace.isInside(p);
+}
+
+MOZ_ALWAYS_INLINE bool Nursery::Space::isInside(const void* p) const {
+ for (auto* chunk : chunks_) {
+ if (uintptr_t(p) - uintptr_t(chunk) < gc::ChunkSize) {
+ return true;
+ }
+ }
+ return false;
+}
+
} // namespace js
#endif // gc_Nursery_h
diff --git a/js/src/gc/NurseryAwareHashMap.h b/js/src/gc/NurseryAwareHashMap.h
index 35c2bebcea..4df50c1627 100644
--- a/js/src/gc/NurseryAwareHashMap.h
+++ b/js/src/gc/NurseryAwareHashMap.h
@@ -11,7 +11,8 @@
#include "gc/Tracer.h"
#include "js/GCHashTable.h"
#include "js/GCPolicyAPI.h"
-#include "js/HashTable.h"
+#include "js/GCVector.h"
+#include "js/Utility.h"
namespace js {
@@ -81,7 +82,8 @@ class NurseryAwareHashMap {
// Keep a list of all keys for which key->isTenured() is false. This lets us
// avoid a full traversal of the map on each minor GC, keeping the minor GC
// times proportional to the nursery heap size.
- Vector<Key, 0, AllocPolicy> nurseryEntries;
+ using KeyVector = GCVector<Key, 0, AllocPolicy>;
+ KeyVector nurseryEntries;
public:
using Lookup = typename MapType::Lookup;
@@ -127,16 +129,16 @@ class NurseryAwareHashMap {
}
void sweepAfterMinorGC(JSTracer* trc) {
- for (auto& key : nurseryEntries) {
+ nurseryEntries.mutableEraseIf([this, trc](Key& key) {
auto p = map.lookup(key);
if (!p) {
- continue;
+ return true;
}
// Drop the entry if the value is not marked.
if (!JS::GCPolicy<MapValue>::traceWeak(trc, &p->value())) {
map.remove(p);
- continue;
+ return true;
}
// Update and relocate the key, if the value is still needed.
@@ -147,33 +149,65 @@ class NurseryAwareHashMap {
// wrappee, as they are just copies. The wrapper map entry is merely used
// as a cache to avoid re-copying the string, and currently that entire
// cache is flushed on major GC.
- MapKey copy(key);
- if (!JS::GCPolicy<MapKey>::traceWeak(trc, &copy)) {
+ //
+ // Since |key| is a reference, this updates the content of the
+ // nurseryEntries vector.
+ Key prior = key;
+ if (!TraceManuallyBarrieredWeakEdge(trc, &key,
+ "NurseryAwareHashMap key")) {
map.remove(p);
- continue;
+ return true;
}
- if (AllowDuplicates) {
+ bool valueIsTenured = p->value().unbarrieredGet()->isTenured();
+
+ if constexpr (AllowDuplicates) {
// Drop duplicated keys.
//
// A key can be forwarded to another place. In this case, rekey the
// item. If two or more different keys are forwarded to the same new
// key, simply drop the later ones.
- if (key == copy) {
+ if (key == prior) {
// No rekey needed.
- } else if (map.has(copy)) {
+ } else if (map.has(key)) {
// Key was forwarded to the same place that another key was already
// forwarded to.
map.remove(p);
+ return true;
} else {
- map.rekeyAs(key, copy, copy);
+ map.rekeyAs(prior, key, key);
}
} else {
- MOZ_ASSERT(key == copy || !map.has(copy));
- map.rekeyIfMoved(key, copy);
+ MOZ_ASSERT(key == prior || !map.has(key));
+ map.rekeyIfMoved(prior, key);
+ }
+
+ return key->isTenured() && valueIsTenured;
+ });
+
+ checkNurseryEntries();
+ }
+
+ void checkNurseryEntries() const {
+#ifdef DEBUG
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ HashSet<Key, DefaultHasher<Key>, SystemAllocPolicy> set;
+ for (const auto& key : nurseryEntries) {
+ if (!set.put(key)) {
+ oomUnsafe.crash("NurseryAwareHashMap::checkNurseryEntries");
}
}
- nurseryEntries.clear();
+
+ for (auto i = map.iter(); !i.done(); i.next()) {
+ Key key = i.get().key().get();
+ MOZ_ASSERT(gc::IsCellPointerValid(key));
+ MOZ_ASSERT_IF(IsInsideNursery(key), set.has(key));
+
+ Value value = i.get().value().unbarrieredGet();
+ MOZ_ASSERT(gc::IsCellPointerValid(value));
+ MOZ_ASSERT_IF(IsInsideNursery(value), set.has(key));
+ }
+#endif
}
void traceWeak(JSTracer* trc) { map.traceWeak(trc); }
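
sweepAfterMinorGC() now uses mutableEraseIf so that a single pass can update each key in place and decide whether it should stay on nurseryEntries (kept while the key or value is still nursery-allocated). The same visit-update-and-filter pattern on a plain vector, with std::erase_if (C++20) standing in for GCVector::mutableEraseIf:

    #include <vector>

    struct Entry {
      bool keyTenured;
      bool valueTenured;
    };

    void sweepNurseryEntries(std::vector<Entry*>& nurseryEntries) {
      std::erase_if(nurseryEntries, [](Entry* e) {
        // ...rekey the map entry / fix forwarded pointers for *e here...
        // Drop the entry from the nursery list once nothing about it is
        // nursery-allocated any more.
        return e->keyTenured && e->valueTenured;
      });
    }
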
diff --git a/js/src/gc/Pretenuring.cpp b/js/src/gc/Pretenuring.cpp
index a191dbba90..b44bbc901f 100644
--- a/js/src/gc/Pretenuring.cpp
+++ b/js/src/gc/Pretenuring.cpp
@@ -49,12 +49,12 @@ static constexpr double LowYoungSurvivalThreshold = 0.05;
// that must occur before recovery is attempted.
static constexpr size_t LowYoungSurvivalCountBeforeRecovery = 2;
-// The proportion of the nursery that must be tenured above which a minor
+// The proportion of the nursery that must be promoted above which a minor
// collection may be determined to have a high nursery survival rate.
static constexpr double HighNurserySurvivalPromotionThreshold = 0.6;
// The number of nursery allocations made by optimized JIT code that must be
-// tenured above which a minor collection may be determined to have a high
+// promoted above which a minor collection may be determined to have a high
// nursery survival rate.
static constexpr size_t HighNurserySurvivalOptimizedAllocThreshold = 10000;
@@ -95,7 +95,7 @@ size_t PretenuringNursery::doPretenuring(GCRuntime* gc, JS::GCReason reason,
for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
bool highNurserySurvivalRate =
promotionRate > HighNurserySurvivalPromotionThreshold &&
- zone->optimizedAllocSite()->nurseryTenuredCount >=
+ zone->optimizedAllocSite()->nurseryPromotedCount >=
HighNurserySurvivalOptimizedAllocThreshold;
zone->pretenuring.noteHighNurserySurvivalRate(highNurserySurvivalRate);
if (highNurserySurvivalRate) {
@@ -136,6 +136,7 @@ size_t PretenuringNursery::doPretenuring(GCRuntime* gc, JS::GCReason reason,
// Catch-all sites don't end up on the list if they are only used from
// optimized JIT code, so process them here.
+
for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
for (auto& site : zone->pretenuring.unknownAllocSites) {
updateTotalAllocCounts(&site);
@@ -150,6 +151,11 @@ size_t PretenuringNursery::doPretenuring(GCRuntime* gc, JS::GCReason reason,
updateTotalAllocCounts(zone->optimizedAllocSite());
zone->optimizedAllocSite()->processCatchAllSite(reportInfo,
reportThreshold);
+
+  // The data from the promoted alloc sites is never used, so clear the
+  // sites here.
+ for (AllocSite& site : zone->pretenuring.promotedAllocSites) {
+ site.resetNurseryAllocations();
+ }
}
if (reportInfo) {
@@ -171,7 +177,7 @@ AllocSite::SiteResult AllocSite::processSite(GCRuntime* gc,
bool reportInfo,
size_t reportThreshold) {
MOZ_ASSERT(kind() != Kind::Optimized);
- MOZ_ASSERT(nurseryAllocCount >= nurseryTenuredCount);
+ MOZ_ASSERT(nurseryAllocCount >= nurseryPromotedCount);
SiteResult result = NoChange;
@@ -180,7 +186,7 @@ AllocSite::SiteResult AllocSite::processSite(GCRuntime* gc,
bool wasInvalidated = false;
if (nurseryAllocCount > attentionThreshold) {
- promotionRate = double(nurseryTenuredCount) / double(nurseryAllocCount);
+ promotionRate = double(nurseryPromotedCount) / double(nurseryAllocCount);
hasPromotionRate = true;
AllocSite::State prevState = state();
@@ -402,7 +408,7 @@ bool PretenuringZone::shouldResetPretenuredAllocSites() {
/* static */
void AllocSite::printInfoHeader(JS::GCReason reason, double promotionRate) {
fprintf(stderr, " %-16s %-16s %-20s %-8s %-8s %-6s %-10s\n", "site", "zone",
- "script/kind", "nallocs", "tenures", "prate", "state");
+ "script/kind", "nallocs", "promotes", "prate", "state");
}
/* static */
@@ -455,8 +461,8 @@ void AllocSite::printInfo(bool hasPromotionRate, double promotionRate,
}
fprintf(stderr, " %8s", buffer);
- // Nursery tenure count.
- fprintf(stderr, " %8" PRIu32, nurseryTenuredCount);
+ // Nursery promotion count.
+ fprintf(stderr, " %8" PRIu32, nurseryPromotedCount);
// Promotion rate, if there were enough allocations.
buffer[0] = '\0';
diff --git a/js/src/gc/Pretenuring.h b/js/src/gc/Pretenuring.h
index 5aeb5c50d7..93677532e4 100644
--- a/js/src/gc/Pretenuring.h
+++ b/js/src/gc/Pretenuring.h
@@ -83,7 +83,7 @@ class AllocSite {
uint32_t nurseryAllocCount = 0;
// Number of nursery allocations that survived. Used during collection.
- uint32_t nurseryTenuredCount : 24;
+ uint32_t nurseryPromotedCount : 24;
// Number of times the script has been invalidated.
uint32_t invalidationCount : 4;
@@ -103,12 +103,12 @@ class AllocSite {
uintptr_t rawScript() const { return scriptAndState & ~STATE_MASK; }
public:
- AllocSite() : nurseryTenuredCount(0), invalidationCount(0), traceKind_(0) {}
+ AllocSite() : nurseryPromotedCount(0), invalidationCount(0), traceKind_(0) {}
// Create a dummy site to use for unknown allocations.
explicit AllocSite(JS::Zone* zone, JS::TraceKind kind)
: zone_(zone),
- nurseryTenuredCount(0),
+ nurseryPromotedCount(0),
invalidationCount(0),
traceKind_(uint32_t(kind)) {
MOZ_ASSERT(traceKind_ < NurseryTraceKinds);
@@ -126,7 +126,7 @@ class AllocSite {
void initUnknownSite(JS::Zone* zone, JS::TraceKind kind) {
MOZ_ASSERT(!zone_ && scriptAndState == uintptr_t(State::Unknown));
zone_ = zone;
- nurseryTenuredCount = 0;
+ nurseryPromotedCount = 0;
invalidationCount = 0;
traceKind_ = uint32_t(kind);
MOZ_ASSERT(traceKind_ < NurseryTraceKinds);
@@ -137,7 +137,7 @@ class AllocSite {
MOZ_ASSERT(!zone_ && scriptAndState == uintptr_t(State::Unknown));
zone_ = zone;
setScript(WasmScript);
- nurseryTenuredCount = 0;
+ nurseryPromotedCount = 0;
invalidationCount = 0;
traceKind_ = uint32_t(JS::TraceKind::Object);
}
@@ -179,24 +179,24 @@ class AllocSite {
}
bool hasNurseryAllocations() const {
- return nurseryAllocCount != 0 || nurseryTenuredCount != 0;
+ return nurseryAllocCount != 0 || nurseryPromotedCount != 0;
}
void resetNurseryAllocations() {
nurseryAllocCount = 0;
- nurseryTenuredCount = 0;
+ nurseryPromotedCount = 0;
}
uint32_t incAllocCount() { return ++nurseryAllocCount; }
uint32_t* nurseryAllocCountAddress() { return &nurseryAllocCount; }
- void incTenuredCount() {
+ void incPromotedCount() {
// The nursery is not large enough for this to overflow.
- nurseryTenuredCount++;
- MOZ_ASSERT(nurseryTenuredCount != 0);
+ nurseryPromotedCount++;
+ MOZ_ASSERT(nurseryPromotedCount != 0);
}
size_t allocCount() const {
- return std::max(nurseryAllocCount, nurseryTenuredCount);
+ return std::max(nurseryAllocCount, nurseryPromotedCount);
}
// Called for every active alloc site after minor GC.
@@ -259,6 +259,10 @@ class PretenuringZone {
// not recorded by optimized JIT code.
AllocSite optimizedAllocSite;
+ // Allocation sites used for nursery cells promoted to the next nursery
+ // generation that didn't come from optimized alloc sites.
+ AllocSite promotedAllocSites[NurseryTraceKinds];
+
// Count of tenured cell allocations made between each major collection and
// how many survived.
uint32_t allocCountInNewlyCreatedArenas = 0;
@@ -282,6 +286,7 @@ class PretenuringZone {
: optimizedAllocSite(zone, JS::TraceKind::Object) {
for (uint32_t i = 0; i < NurseryTraceKinds; i++) {
unknownAllocSites[i].initUnknownSite(zone, JS::TraceKind(i));
+ promotedAllocSites[i].initUnknownSite(zone, JS::TraceKind(i));
}
}
@@ -291,6 +296,12 @@ class PretenuringZone {
return unknownAllocSites[i];
}
+ AllocSite& promotedAllocSite(JS::TraceKind kind) {
+ size_t i = size_t(kind);
+ MOZ_ASSERT(i < NurseryTraceKinds);
+ return promotedAllocSites[i];
+ }
+
void clearCellCountsInNewlyCreatedArenas() {
allocCountInNewlyCreatedArenas = 0;
survivorCountInNewlyCreatedArenas = 0;
diff --git a/js/src/gc/PublicIterators.h b/js/src/gc/PublicIterators.h
index d1072cfe98..7ef6271f63 100644
--- a/js/src/gc/PublicIterators.h
+++ b/js/src/gc/PublicIterators.h
@@ -35,7 +35,7 @@ class ZonesIter {
public:
ZonesIter(gc::GCRuntime* gc, ZoneSelector selector)
: iterMarker(gc), it(gc->zones().begin()), end(gc->zones().end()) {
- if (selector == SkipAtoms) {
+ if (selector == SkipAtoms && !done()) {
MOZ_ASSERT(get()->isAtomsZone());
next();
}
diff --git a/js/src/gc/Scheduling.h b/js/src/gc/Scheduling.h
index cbaeb1f353..c5ed56dd5f 100644
--- a/js/src/gc/Scheduling.h
+++ b/js/src/gc/Scheduling.h
@@ -536,6 +536,9 @@ static const bool ParallelMarkingEnabled = false;
/* JSGC_INCREMENTAL_WEAKMAP_ENABLED */
static const bool IncrementalWeakMapMarkingEnabled = true;
+/* JSGC_SEMISPACE_NURSERY_ENABLED */
+static const bool SemispaceNurseryEnabled = false;
+
/* JSGC_HELPER_THREAD_RATIO */
static const double HelperThreadRatio = 0.5;
diff --git a/js/src/gc/StableCellHasher-inl.h b/js/src/gc/StableCellHasher-inl.h
index af0caaad89..34a8827cfb 100644
--- a/js/src/gc/StableCellHasher-inl.h
+++ b/js/src/gc/StableCellHasher-inl.h
@@ -132,7 +132,6 @@ inline bool HasUniqueId(Cell* cell) {
inline void TransferUniqueId(Cell* tgt, Cell* src) {
MOZ_ASSERT(src != tgt);
- MOZ_ASSERT(!IsInsideNursery(tgt));
MOZ_ASSERT(CurrentThreadCanAccessRuntime(tgt->runtimeFromAnyThread()));
MOZ_ASSERT(src->zone() == tgt->zone());
diff --git a/js/src/gc/StoreBuffer-inl.h b/js/src/gc/StoreBuffer-inl.h
index 1c97654761..343b953e3a 100644
--- a/js/src/gc/StoreBuffer-inl.h
+++ b/js/src/gc/StoreBuffer-inl.h
@@ -49,9 +49,10 @@ inline void ArenaCellSet::check() const {
MOZ_ASSERT(isEmpty() == !arena);
if (!isEmpty()) {
MOZ_ASSERT(IsCellPointerValid(arena));
- MOZ_ASSERT(arena->bufferedCells() == this);
JSRuntime* runtime = arena->zone->runtimeFromMainThread();
- MOZ_ASSERT(runtime->gc.minorGCCount() == minorGCNumberAtCreation);
+ uint64_t minorGCCount = runtime->gc.minorGCCount();
+ MOZ_ASSERT(minorGCCount == minorGCNumberAtCreation ||
+ minorGCCount == minorGCNumberAtCreation + 1);
}
#endif
}
diff --git a/js/src/gc/StoreBuffer.cpp b/js/src/gc/StoreBuffer.cpp
index d637eb62b1..1f8023bc62 100644
--- a/js/src/gc/StoreBuffer.cpp
+++ b/js/src/gc/StoreBuffer.cpp
@@ -20,7 +20,8 @@ void StoreBuffer::checkAccess() const {
// The GC runs tasks that may access the storebuffer in parallel and so must
// take a lock. The mutator may only access the storebuffer from the main
// thread.
- if (runtime_->heapState() != JS::HeapState::Idle) {
+ if (runtime_->heapState() != JS::HeapState::Idle &&
+ runtime_->heapState() != JS::HeapState::MinorCollecting) {
MOZ_ASSERT(!CurrentThreadIsGCMarking());
runtime_->gc.assertCurrentThreadHasLockedStoreBuffer();
} else {
@@ -30,8 +31,7 @@ void StoreBuffer::checkAccess() const {
#endif
bool StoreBuffer::WholeCellBuffer::init() {
- MOZ_ASSERT(!stringHead_);
- MOZ_ASSERT(!nonStringHead_);
+ MOZ_ASSERT(!head_);
if (!storage_) {
storage_ = MakeUnique<LifoAlloc>(LifoAllocBlockSize);
// This prevents LifoAlloc::Enum from crashing with a release
@@ -75,8 +75,7 @@ StoreBuffer::StoreBuffer(JSRuntime* rt)
mayHavePointersToDeadCells_(false)
#ifdef DEBUG
,
- mEntered(false),
- markingNondeduplicatable(false)
+ mEntered(false)
#endif
{
}
@@ -97,13 +96,11 @@ StoreBuffer::StoreBuffer(StoreBuffer&& other)
mayHavePointersToDeadCells_(other.mayHavePointersToDeadCells_)
#ifdef DEBUG
,
- mEntered(other.mEntered),
- markingNondeduplicatable(other.markingNondeduplicatable)
+ mEntered(other.mEntered)
#endif
{
MOZ_ASSERT(enabled_);
MOZ_ASSERT(!mEntered);
- MOZ_ASSERT(!markingNondeduplicatable);
other.disable();
}
@@ -213,21 +210,14 @@ ArenaCellSet* StoreBuffer::WholeCellBuffer::allocateCellSet(Arena* arena) {
return nullptr;
}
- // Maintain separate lists for strings and non-strings, so that all buffered
- // string whole cells will be processed before anything else (to prevent them
- // from being deduplicated when their chars are used by a tenured string.)
- bool isString =
- MapAllocToTraceKind(arena->getAllocKind()) == JS::TraceKind::String;
-
AutoEnterOOMUnsafeRegion oomUnsafe;
- ArenaCellSet*& head = isString ? stringHead_ : nonStringHead_;
- auto* cells = storage_->new_<ArenaCellSet>(arena, head);
+ auto* cells = storage_->new_<ArenaCellSet>(arena, head_);
if (!cells) {
oomUnsafe.crash("Failed to allocate ArenaCellSet");
}
arena->bufferedCells() = cells;
- head = cells;
+ head_ = cells;
if (isAboutToOverflow()) {
rt->gc.storeBuffer().setAboutToOverflow(
@@ -243,12 +233,10 @@ void gc::CellHeaderPostWriteBarrier(JSObject** ptr, JSObject* prev,
}
void StoreBuffer::WholeCellBuffer::clear() {
- for (auto** headPtr : {&stringHead_, &nonStringHead_}) {
- for (auto* set = *headPtr; set; set = set->next) {
- set->arena->bufferedCells() = &ArenaCellSet::Empty;
- }
- *headPtr = nullptr;
+ for (auto* set = head_; set; set = set->next) {
+ set->arena->bufferedCells() = &ArenaCellSet::Empty;
}
+ head_ = nullptr;
if (storage_) {
storage_->used() ? storage_->releaseAll() : storage_->freeAll();
diff --git a/js/src/gc/StoreBuffer.h b/js/src/gc/StoreBuffer.h
index 2e117f02c8..0b1ca8af9b 100644
--- a/js/src/gc/StoreBuffer.h
+++ b/js/src/gc/StoreBuffer.h
@@ -169,8 +169,7 @@ class StoreBuffer {
struct WholeCellBuffer {
UniquePtr<LifoAlloc> storage_;
- ArenaCellSet* stringHead_ = nullptr;
- ArenaCellSet* nonStringHead_ = nullptr;
+ ArenaCellSet* head_ = nullptr;
const Cell* last_ = nullptr;
WholeCellBuffer() = default;
@@ -180,11 +179,9 @@ class StoreBuffer {
WholeCellBuffer(WholeCellBuffer&& other)
: storage_(std::move(other.storage_)),
- stringHead_(other.stringHead_),
- nonStringHead_(other.nonStringHead_),
+ head_(other.head_),
last_(other.last_) {
- other.stringHead_ = nullptr;
- other.nonStringHead_ = nullptr;
+ other.head_ = nullptr;
other.last_ = nullptr;
}
WholeCellBuffer& operator=(WholeCellBuffer&& other) {
@@ -214,13 +211,20 @@ class StoreBuffer {
}
bool isEmpty() const {
- MOZ_ASSERT_IF(!stringHead_ && !nonStringHead_,
- !storage_ || storage_->isEmpty());
- return !stringHead_ && !nonStringHead_;
+ MOZ_ASSERT_IF(!head_, !storage_ || storage_->isEmpty());
+ return !head_;
}
const Cell** lastBufferedPtr() { return &last_; }
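+    // Hand the remaining cell sets and the LifoAlloc backing them over to the
+    // caller, keeping them alive so that cells which still need sweeping
+    // (e.g. tenured dependent strings) can be processed at the end of the
+    // minor GC.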
+ CellSweepSet releaseCellSweepSet() {
+ CellSweepSet set;
+ std::swap(storage_, set.storage_);
+ std::swap(head_, set.head_);
+ last_ = nullptr;
+ return set;
+ }
+
private:
ArenaCellSet* allocateCellSet(Arena* arena);
};
@@ -339,9 +343,10 @@ class StoreBuffer {
bool operator==(const ValueEdge& other) const { return edge == other.edge; }
bool operator!=(const ValueEdge& other) const { return edge != other.edge; }
+ bool isGCThing() const { return edge->isGCThing(); }
+
Cell* deref() const {
- return edge->isGCThing() ? static_cast<Cell*>(edge->toGCThing())
- : nullptr;
+ return isGCThing() ? static_cast<Cell*>(edge->toGCThing()) : nullptr;
}
bool maybeInRememberedSet(const Nursery& nursery) const {
@@ -449,10 +454,10 @@ class StoreBuffer {
return edge != other.edge;
}
+ bool isGCThing() const { return edge->isGCThing(); }
+
Cell* deref() const {
- return edge->isGCThing() ? static_cast<Cell*>(edge->toGCThing())
- : nullptr;
- return nullptr;
+ return isGCThing() ? static_cast<Cell*>(edge->toGCThing()) : nullptr;
}
bool maybeInRememberedSet(const Nursery& nursery) const {
@@ -525,10 +530,6 @@ class StoreBuffer {
#endif
public:
-#ifdef DEBUG
- bool markingNondeduplicatable;
-#endif
-
explicit StoreBuffer(JSRuntime* rt);
StoreBuffer(const StoreBuffer& other) = delete;
@@ -629,6 +630,10 @@ class StoreBuffer {
}
void traceGenericEntries(JSTracer* trc) { bufferGeneric.trace(trc, this); }
+ gc::CellSweepSet releaseCellSweepSet() {
+ return bufferWholeCell.releaseCellSweepSet();
+ }
+
/* For use by our owned buffers and for testing. */
void setAboutToOverflow(JS::GCReason);
@@ -639,6 +644,7 @@ class StoreBuffer {
};
// A set of cells in an arena used to implement the whole cell store buffer.
+// Also used to store a set of cells that need to be swept.
class ArenaCellSet {
friend class StoreBuffer;
@@ -693,7 +699,17 @@ class ArenaCellSet {
WordT getWord(size_t wordIndex) const { return bits.getWord(wordIndex); }
- void trace(TenuringTracer& mover);
+ void setWord(size_t wordIndex, WordT value) {
+ bits.setWord(wordIndex, value);
+ }
+
+ // Returns the list of ArenaCellSets that need to be swept.
+ ArenaCellSet* trace(TenuringTracer& mover);
+
+  // At the end of a minor GC, sweep through all tenured dependent strings
+  // that may point to nursery-allocated chars and update their chars pointers
+  // in case the base string's chars moved.
+ void sweepDependentStrings();
// Sentinel object used for all empty sets.
//
diff --git a/js/src/gc/Sweeping.cpp b/js/src/gc/Sweeping.cpp
index 123b2c9650..2cd6a2c662 100644
--- a/js/src/gc/Sweeping.cpp
+++ b/js/src/gc/Sweeping.cpp
@@ -441,6 +441,12 @@ void GCRuntime::waitBackgroundSweepEnd() {
void GCRuntime::startBackgroundFree() {
AutoLockHelperThreadState lock;
+
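+  // If neither the LifoAlloc blocks nor the minor GC buffers have anything
+  // queued, there is nothing for the background free task to do.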
+ if (lifoBlocksToFree.ref().isEmpty() &&
+ buffersToFreeAfterMinorGC.ref().empty()) {
+ return;
+ }
+
freeTask.startOrRunIfIdle(lock);
}
@@ -1194,18 +1200,19 @@ class ImmediateSweepWeakCacheTask : public GCParallelTask {
};
void GCRuntime::updateAtomsBitmap() {
- DenseBitmap marked;
- if (atomMarking.computeBitmapFromChunkMarkBits(rt, marked)) {
- for (GCZonesIter zone(this); !zone.done(); zone.next()) {
- atomMarking.refineZoneBitmapForCollectedZone(zone, marked);
+ size_t collectedZones = 0;
+ size_t uncollectedZones = 0;
+ for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
+ if (zone->isCollecting()) {
+ collectedZones++;
+ } else {
+ uncollectedZones++;
}
- } else {
- // Ignore OOM in computeBitmapFromChunkMarkBits. The
- // refineZoneBitmapForCollectedZone call can only remove atoms from the
- // zone bitmap, so it is conservative to just not call it.
}
- atomMarking.markAtomsUsedByUncollectedZones(rt);
+ atomMarking.refineZoneBitmapsForCollectedZones(this, collectedZones);
+
+ atomMarking.markAtomsUsedByUncollectedZones(this, uncollectedZones);
// For convenience sweep these tables non-incrementally as part of bitmap
// sweeping; they are likely to be much smaller than the main atoms table.
diff --git a/js/src/gc/Tenuring.cpp b/js/src/gc/Tenuring.cpp
index a9506cfa14..d38a374599 100644
--- a/js/src/gc/Tenuring.cpp
+++ b/js/src/gc/Tenuring.cpp
@@ -18,6 +18,7 @@
#include "gc/Pretenuring.h"
#include "gc/Zone.h"
#include "jit/JitCode.h"
+#include "js/TypeDecls.h"
#include "proxy/Proxy.h"
#include "vm/BigIntType.h"
#include "vm/JSScript.h"
@@ -44,104 +45,120 @@ using mozilla::PodCopy;
constexpr size_t MAX_DEDUPLICATABLE_STRING_LENGTH = 500;
-TenuringTracer::TenuringTracer(JSRuntime* rt, Nursery* nursery)
+TenuringTracer::TenuringTracer(JSRuntime* rt, Nursery* nursery,
+ bool tenureEverything)
: JSTracer(rt, JS::TracerKind::Tenuring,
JS::WeakMapTraceAction::TraceKeysAndValues),
- nursery_(*nursery) {
+ nursery_(*nursery),
+ tenureEverything(tenureEverything) {
stringDeDupSet.emplace();
}
-size_t TenuringTracer::getTenuredSize() const {
- return tenuredSize + tenuredCells * sizeof(NurseryCellHeader);
+size_t TenuringTracer::getPromotedSize() const {
+ return promotedSize + promotedCells * sizeof(NurseryCellHeader);
}
-size_t TenuringTracer::getTenuredCells() const { return tenuredCells; }
-
-static inline void UpdateAllocSiteOnTenure(Cell* cell) {
- AllocSite* site = NurseryCellHeader::from(cell)->allocSite();
- site->incTenuredCount();
-}
+size_t TenuringTracer::getPromotedCells() const { return promotedCells; }
void TenuringTracer::onObjectEdge(JSObject** objp, const char* name) {
JSObject* obj = *objp;
- if (!IsInsideNursery(obj)) {
+ if (!nursery_.inCollectedRegion(obj)) {
+ MOZ_ASSERT(!obj->isForwarded());
return;
}
+ *objp = promoteOrForward(obj);
+ MOZ_ASSERT(!(*objp)->isForwarded());
+}
+
+JSObject* TenuringTracer::promoteOrForward(JSObject* obj) {
+ MOZ_ASSERT(nursery_.inCollectedRegion(obj));
+
if (obj->isForwarded()) {
const gc::RelocationOverlay* overlay = gc::RelocationOverlay::fromCell(obj);
- *objp = static_cast<JSObject*>(overlay->forwardingAddress());
- return;
+ obj = static_cast<JSObject*>(overlay->forwardingAddress());
+ if (IsInsideNursery(obj)) {
+ promotedToNursery = true;
+ }
+ return obj;
}
- onNonForwardedNurseryObjectEdge(objp);
+ return onNonForwardedNurseryObject(obj);
}
-void TenuringTracer::onNonForwardedNurseryObjectEdge(JSObject** objp) {
- JSObject* obj = *objp;
+JSObject* TenuringTracer::onNonForwardedNurseryObject(JSObject* obj) {
MOZ_ASSERT(IsInsideNursery(obj));
MOZ_ASSERT(!obj->isForwarded());
- UpdateAllocSiteOnTenure(obj);
-
- // Take a fast path for tenuring a plain object which is by far the most
+ // Take a fast path for promoting a plain object as this is by far the most
// common case.
if (obj->is<PlainObject>()) {
- *objp = movePlainObjectToTenured(&obj->as<PlainObject>());
- return;
+ return promotePlainObject(&obj->as<PlainObject>());
}
- *objp = moveToTenuredSlow(obj);
+ return promoteObjectSlow(obj);
}
void TenuringTracer::onStringEdge(JSString** strp, const char* name) {
JSString* str = *strp;
- if (!IsInsideNursery(str)) {
+ if (!nursery_.inCollectedRegion(str)) {
return;
}
+ *strp = promoteOrForward(str);
+}
+
+JSString* TenuringTracer::promoteOrForward(JSString* str) {
+ MOZ_ASSERT(nursery_.inCollectedRegion(str));
+
if (str->isForwarded()) {
const gc::RelocationOverlay* overlay = gc::RelocationOverlay::fromCell(str);
- *strp = static_cast<JSString*>(overlay->forwardingAddress());
- return;
+ str = static_cast<JSString*>(overlay->forwardingAddress());
+ if (IsInsideNursery(str)) {
+ promotedToNursery = true;
+ }
+ return str;
}
- onNonForwardedNurseryStringEdge(strp);
+ return onNonForwardedNurseryString(str);
}
-void TenuringTracer::onNonForwardedNurseryStringEdge(JSString** strp) {
- JSString* str = *strp;
+JSString* TenuringTracer::onNonForwardedNurseryString(JSString* str) {
MOZ_ASSERT(IsInsideNursery(str));
MOZ_ASSERT(!str->isForwarded());
- UpdateAllocSiteOnTenure(str);
-
- *strp = moveToTenured(str);
+ return promoteString(str);
}
void TenuringTracer::onBigIntEdge(JS::BigInt** bip, const char* name) {
JS::BigInt* bi = *bip;
- if (!IsInsideNursery(bi)) {
+ if (!nursery_.inCollectedRegion(bi)) {
return;
}
+ *bip = promoteOrForward(bi);
+}
+
+JS::BigInt* TenuringTracer::promoteOrForward(JS::BigInt* bi) {
+ MOZ_ASSERT(nursery_.inCollectedRegion(bi));
+
if (bi->isForwarded()) {
const gc::RelocationOverlay* overlay = gc::RelocationOverlay::fromCell(bi);
- *bip = static_cast<JS::BigInt*>(overlay->forwardingAddress());
- return;
+ bi = static_cast<JS::BigInt*>(overlay->forwardingAddress());
+ if (IsInsideNursery(bi)) {
+ promotedToNursery = true;
+ }
+ return bi;
}
- onNonForwardedNurseryBigIntEdge(bip);
+ return onNonForwardedNurseryBigInt(bi);
}
-void TenuringTracer::onNonForwardedNurseryBigIntEdge(JS::BigInt** bip) {
- JS::BigInt* bi = *bip;
+JS::BigInt* TenuringTracer::onNonForwardedNurseryBigInt(JS::BigInt* bi) {
MOZ_ASSERT(IsInsideNursery(bi));
MOZ_ASSERT(!bi->isForwarded());
- UpdateAllocSiteOnTenure(bi);
-
- *bip = moveToTenured(bi);
+ return promoteBigInt(bi);
}
void TenuringTracer::onSymbolEdge(JS::Symbol** symp, const char* name) {}
@@ -156,7 +173,7 @@ void TenuringTracer::onJitCodeEdge(jit::JitCode** codep, const char* name) {}
void TenuringTracer::onScopeEdge(Scope** scopep, const char* name) {}
void TenuringTracer::traverse(JS::Value* thingp) {
- MOZ_ASSERT(!nursery().isInside(thingp));
+ MOZ_ASSERT(!nursery().inCollectedRegion(thingp));
Value value = *thingp;
CheckTracedThing(this, value);
@@ -166,66 +183,71 @@ void TenuringTracer::traverse(JS::Value* thingp) {
}
Cell* cell = value.toGCThing();
- if (!IsInsideNursery(cell)) {
+ if (!nursery_.inCollectedRegion(cell)) {
return;
}
if (cell->isForwarded()) {
const gc::RelocationOverlay* overlay =
gc::RelocationOverlay::fromCell(cell);
- thingp->changeGCThingPayload(overlay->forwardingAddress());
+ Cell* target = overlay->forwardingAddress();
+ thingp->changeGCThingPayload(target);
+ if (IsInsideNursery(target)) {
+ promotedToNursery = true;
+ }
return;
}
// We only care about a few kinds of GC thing here and this generates much
// tighter code than using MapGCThingTyped.
if (value.isObject()) {
- JSObject* obj = &value.toObject();
- onNonForwardedNurseryObjectEdge(&obj);
+ JSObject* obj = onNonForwardedNurseryObject(&value.toObject());
MOZ_ASSERT(obj != &value.toObject());
*thingp = JS::ObjectValue(*obj);
return;
}
#ifdef ENABLE_RECORD_TUPLE
if (value.isExtendedPrimitive()) {
- JSObject* obj = &value.toExtendedPrimitive();
- onNonForwardedNurseryObjectEdge(&obj);
+ JSObject* obj = onNonForwardedNurseryObject(&value.toExtendedPrimitive());
MOZ_ASSERT(obj != &value.toExtendedPrimitive());
*thingp = JS::ExtendedPrimitiveValue(*obj);
return;
}
#endif
if (value.isString()) {
- JSString* str = value.toString();
- onNonForwardedNurseryStringEdge(&str);
+ JSString* str = onNonForwardedNurseryString(value.toString());
MOZ_ASSERT(str != value.toString());
*thingp = JS::StringValue(str);
return;
}
MOZ_ASSERT(value.isBigInt());
- JS::BigInt* bi = value.toBigInt();
- onNonForwardedNurseryBigIntEdge(&bi);
+ JS::BigInt* bi = onNonForwardedNurseryBigInt(value.toBigInt());
MOZ_ASSERT(bi != value.toBigInt());
*thingp = JS::BigIntValue(bi);
}
void TenuringTracer::traverse(wasm::AnyRef* thingp) {
- MOZ_ASSERT(!nursery().isInside(thingp));
+ MOZ_ASSERT(!nursery().inCollectedRegion(thingp));
wasm::AnyRef value = *thingp;
CheckTracedThing(this, value);
+ Cell* cell = value.toGCThing();
+ if (!nursery_.inCollectedRegion(cell)) {
+ return;
+ }
+
wasm::AnyRef post = wasm::AnyRef::invalid();
switch (value.kind()) {
case wasm::AnyRefKind::Object: {
- JSObject* obj = &value.toJSObject();
- onObjectEdge(&obj, "value");
+ JSObject* obj = promoteOrForward(&value.toJSObject());
+ MOZ_ASSERT(obj != &value.toJSObject());
post = wasm::AnyRef::fromJSObject(*obj);
break;
}
case wasm::AnyRefKind::String: {
- JSString* str = value.toJSString();
- onStringEdge(&str, "string");
+ JSString* str = promoteOrForward(value.toJSString());
+ MOZ_ASSERT(str != value.toJSString());
post = wasm::AnyRef::fromJSString(str);
break;
}
@@ -236,11 +258,20 @@ void TenuringTracer::traverse(wasm::AnyRef* thingp) {
}
}
- if (post != value) {
- *thingp = post;
- }
+ *thingp = post;
}
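+// RAII helper that clears the promotedToNursery flag on construction and then
+// reports, via operator bool, whether any edge traced while it was in scope
+// was promoted into the next nursery generation rather than tenured. Callers
+// use this to decide whether the holder needs to be re-added to the store
+// buffer.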
+class MOZ_RAII TenuringTracer::AutoPromotedAnyToNursery {
+ public:
+ explicit AutoPromotedAnyToNursery(TenuringTracer& trc) : trc_(trc) {
+ trc.promotedToNursery = false;
+ }
+ explicit operator bool() const { return trc_.promotedToNursery; }
+
+ private:
+ TenuringTracer& trc_;
+};
+
template <typename T>
void js::gc::StoreBuffer::MonoTypeBuffer<T>::trace(TenuringTracer& mover,
StoreBuffer* owner) {
@@ -279,6 +310,8 @@ void js::gc::StoreBuffer::SlotsEdge::trace(TenuringTracer& mover) const {
MOZ_ASSERT(!IsInsideNursery(obj), "obj shouldn't live in nursery.");
+ TenuringTracer::AutoPromotedAnyToNursery promotedToNursery(mover);
+
if (kind() == ElementKind) {
uint32_t initLen = obj->getDenseInitializedLength();
uint32_t numShifted = obj->getElementsHeader()->numShiftedElements();
@@ -289,99 +322,60 @@ void js::gc::StoreBuffer::SlotsEdge::trace(TenuringTracer& mover) const {
clampedEnd = numShifted < clampedEnd ? clampedEnd - numShifted : 0;
clampedEnd = std::min(clampedEnd, initLen);
MOZ_ASSERT(clampedStart <= clampedEnd);
- mover.traceSlots(
- static_cast<HeapSlot*>(obj->getDenseElements() + clampedStart)
- ->unbarrieredAddress(),
- clampedEnd - clampedStart);
+ auto* slotStart =
+ static_cast<HeapSlot*>(obj->getDenseElements() + clampedStart);
+ uint32_t nslots = clampedEnd - clampedStart;
+ mover.traceObjectElements(slotStart->unbarrieredAddress(), nslots);
} else {
uint32_t start = std::min(start_, obj->slotSpan());
uint32_t end = std::min(start_ + count_, obj->slotSpan());
MOZ_ASSERT(start <= end);
mover.traceObjectSlots(obj, start, end);
}
+
+ if (promotedToNursery) {
+ mover.runtime()->gc.storeBuffer().putSlot(obj, kind(), start_, count_);
+ }
}
static inline void TraceWholeCell(TenuringTracer& mover, JSObject* object) {
- MOZ_ASSERT_IF(object->storeBuffer(),
- !object->storeBuffer()->markingNondeduplicatable);
mover.traceObject(object);
}
-// Non-deduplicatable marking is necessary because of the following 2 reasons:
+// Return whether the string needs to be swept.
//
-// 1. Tenured string chars cannot be updated:
+// We can break down the relevant dependency chains as follows:
//
-// If any of the tenured string's bases were deduplicated during tenuring,
-// the tenured string's chars pointer would need to be adjusted. This would
-// then require updating any other tenured strings that are dependent on the
-// first tenured string, and we have no way to find them without scanning
-// the entire tenured heap.
+// T -> T2 : will not be swept, but safe because T2.chars is fixed.
+// T -> N1 -> ... -> T2 : safe because T2.chars is fixed
+// T -> N1 -> ... -> N2 : update T.chars += tenured(N2).chars - N2.chars
//
-// 2. Tenured string cannot store its nursery base or base's chars:
+// Collapse the base chain down to simply T -> T2 or T -> N2. The pointer update
+// will happen during sweeping.
//
-// Tenured strings have no place to stash a pointer to their nursery base or
-// its chars. You need to be able to traverse any dependent string's chain
-// of bases up to a nursery "root base" that points to the malloced chars
-// that the dependent strings started out pointing to, so that you can
-// calculate the offset of any dependent string and update the ptr+offset if
-// the root base gets deduplicated to a different allocation. Tenured
-// strings in this base chain will stop you from reaching the nursery
-// version of the root base; you can only get to the tenured version, and it
-// has no place to store the original chars pointer.
-static inline void PreventDeduplicationOfReachableStrings(JSString* str) {
- MOZ_ASSERT(str->isTenured());
- MOZ_ASSERT(!str->isForwarded());
-
- JSLinearString* baseOrRelocOverlay = str->nurseryBaseOrRelocOverlay();
-
- // Walk along the chain of dependent strings' base string pointers
- // to mark them all non-deduplicatable.
- while (true) {
- // baseOrRelocOverlay can be one of the three cases:
- // 1. forwarded nursery string:
- // The forwarded string still retains the flag that can tell whether
- // this string is a dependent string with a base. Its
- // StringRelocationOverlay holds a saved pointer to its base in the
- // nursery.
- // 2. not yet forwarded nursery string:
- // Retrieve the base field directly from the string.
- // 3. tenured string:
- // The nursery base chain ends here, so stop traversing.
- if (baseOrRelocOverlay->isForwarded()) {
- JSLinearString* tenuredBase = Forwarded(baseOrRelocOverlay);
- if (!tenuredBase->hasBase()) {
- break;
- }
- baseOrRelocOverlay = StringRelocationOverlay::fromCell(baseOrRelocOverlay)
- ->savedNurseryBaseOrRelocOverlay();
- } else {
- JSLinearString* base = baseOrRelocOverlay;
- if (base->isTenured()) {
- break;
- }
- if (base->isDeduplicatable()) {
- base->setNonDeduplicatable();
- }
- if (!base->hasBase()) {
- break;
- }
- baseOrRelocOverlay = base->nurseryBaseOrRelocOverlay();
- }
- }
-}
-
-static inline void TraceWholeCell(TenuringTracer& mover, JSString* str) {
- MOZ_ASSERT_IF(str->storeBuffer(),
- str->storeBuffer()->markingNondeduplicatable);
-
- // Mark all strings reachable from the tenured string `str` as
- // non-deduplicatable. These strings are the bases of the tenured dependent
- // string.
+// Note that in cases like T -> N1 -> T2 -> T3 -> N2, both T -> N1 and T3 -> N2
+// will be processed by the whole cell buffer (or rather, only T and T3 will
+// be in the store buffer). The order in which these strings are visited does
+// not matter because the nursery bases are left alone until sweeping.
+static inline bool TraceWholeCell(TenuringTracer& mover, JSString* str) {
if (str->hasBase()) {
- PreventDeduplicationOfReachableStrings(str);
+ // For tenured dependent strings -> nursery string edges, sweep the
+ // (tenured) strings at the end of nursery marking to update chars pointers
+ // that were in the nursery. Rather than updating the base pointer to point
+ // directly to the tenured version of itself, we will leave it pointing at
+ // the nursery Cell (which will become a StringRelocationOverlay during the
+ // minor GC.)
+ JSLinearString* base = str->nurseryBaseOrRelocOverlay();
+ if (IsInsideNursery(base)) {
+ str->traceBaseFromStoreBuffer(&mover);
+ return IsInsideNursery(str->nurseryBaseOrRelocOverlay());
+ }
}
str->traceChildren(&mover);
+
+ return false;
}
static inline void TraceWholeCell(TenuringTracer& mover, BaseScript* script) {
@@ -394,70 +388,173 @@ static inline void TraceWholeCell(TenuringTracer& mover,
}
template <typename T>
-static void TraceBufferedCells(TenuringTracer& mover, Arena* arena,
- ArenaCellSet* cells) {
+bool TenuringTracer::traceBufferedCells(Arena* arena, ArenaCellSet* cells) {
for (size_t i = 0; i < MaxArenaCellIndex; i += cells->BitsPerWord) {
ArenaCellSet::WordT bitset = cells->getWord(i / cells->BitsPerWord);
while (bitset) {
size_t bit = i + js::detail::CountTrailingZeroes(bitset);
+ bitset &= bitset - 1; // Clear the low bit.
+
auto cell =
reinterpret_cast<T*>(uintptr_t(arena) + ArenaCellIndexBytes * bit);
- TraceWholeCell(mover, cell);
- bitset &= bitset - 1; // Clear the low bit.
+
+ TenuringTracer::AutoPromotedAnyToNursery promotedToNursery(*this);
+
+ TraceWholeCell(*this, cell);
+
+ if (promotedToNursery) {
+ runtime()->gc.storeBuffer().putWholeCell(cell);
+ }
}
}
+
+ return false;
}
-void ArenaCellSet::trace(TenuringTracer& mover) {
- for (ArenaCellSet* cells = this; cells; cells = cells->next) {
+template <>
+bool TenuringTracer::traceBufferedCells<JSString>(Arena* arena,
+ ArenaCellSet* cells) {
+ bool needsSweep = false;
+ for (size_t i = 0; i < MaxArenaCellIndex; i += cells->BitsPerWord) {
+ ArenaCellSet::WordT bitset = cells->getWord(i / cells->BitsPerWord);
+ ArenaCellSet::WordT tosweep = bitset;
+ while (bitset) {
+ size_t bit = i + js::detail::CountTrailingZeroes(bitset);
+ auto* cell = reinterpret_cast<JSString*>(uintptr_t(arena) +
+ ArenaCellIndexBytes * bit);
+ TenuringTracer::AutoPromotedAnyToNursery promotedToNursery(*this);
+ bool needsSweep = TraceWholeCell(*this, cell);
+ if (promotedToNursery) {
+ runtime()->gc.storeBuffer().putWholeCell(cell);
+ }
+ ArenaCellSet::WordT mask = bitset - 1;
+ bitset &= mask;
+ if (!needsSweep) {
+ tosweep &= mask;
+ }
+ }
+
+ cells->setWord(i / cells->BitsPerWord, tosweep);
+ if (tosweep) {
+ needsSweep = true;
+ }
+ }
+
+ return needsSweep;
+}
+
+ArenaCellSet* ArenaCellSet::trace(TenuringTracer& mover) {
+ ArenaCellSet* head = nullptr;
+
+ ArenaCellSet* cells = this;
+ while (cells) {
cells->check();
Arena* arena = cells->arena;
arena->bufferedCells() = &ArenaCellSet::Empty;
JS::TraceKind kind = MapAllocToTraceKind(arena->getAllocKind());
+ bool needsSweep;
switch (kind) {
case JS::TraceKind::Object:
- TraceBufferedCells<JSObject>(mover, arena, cells);
+ needsSweep = mover.traceBufferedCells<JSObject>(arena, cells);
break;
case JS::TraceKind::String:
- TraceBufferedCells<JSString>(mover, arena, cells);
+ needsSweep = mover.traceBufferedCells<JSString>(arena, cells);
break;
case JS::TraceKind::Script:
- TraceBufferedCells<BaseScript>(mover, arena, cells);
+ needsSweep = mover.traceBufferedCells<BaseScript>(arena, cells);
break;
case JS::TraceKind::JitCode:
- TraceBufferedCells<jit::JitCode>(mover, arena, cells);
+ needsSweep = mover.traceBufferedCells<jit::JitCode>(arena, cells);
break;
default:
MOZ_CRASH("Unexpected trace kind");
}
+
+ ArenaCellSet* next = cells->next;
+ if (needsSweep) {
+ cells->next = head;
+ head = cells;
+ }
+
+ cells = next;
}
+
+ return head;
}
void js::gc::StoreBuffer::WholeCellBuffer::trace(TenuringTracer& mover,
StoreBuffer* owner) {
MOZ_ASSERT(owner->isEnabled());
-#ifdef DEBUG
- // Verify that all string whole cells are traced first before any other
- // strings are visited for any reason.
- MOZ_ASSERT(!owner->markingNondeduplicatable);
- owner->markingNondeduplicatable = true;
-#endif
- // Trace all of the strings to mark the non-deduplicatable bits, then trace
- // all other whole cells.
- if (stringHead_) {
- stringHead_->trace(mover);
+ if (head_) {
+ head_ = head_->trace(mover);
}
-#ifdef DEBUG
- owner->markingNondeduplicatable = false;
-#endif
- if (nonStringHead_) {
- nonStringHead_->trace(mover);
+}
+
+// Sweep a tenured dependent string with a nursery base. The base chain will
+// have been collapsed to a single link before this string was added to the
+// sweep set, so only the simple case of a tenured dependent string with a
+// nursery base needs to be considered.
+template <typename CharT>
+void JSDependentString::sweepTypedAfterMinorGC() {
+ MOZ_ASSERT(isTenured());
+ MOZ_ASSERT(IsInsideNursery(nurseryBaseOrRelocOverlay()));
+
+ JSLinearString* base = nurseryBaseOrRelocOverlay();
+ MOZ_ASSERT(IsInsideNursery(base));
+ MOZ_ASSERT(!Forwarded(base)->hasBase(), "base chain should be collapsed");
+ MOZ_ASSERT(base->isForwarded(), "root base should be kept alive");
+ auto* baseOverlay = js::gc::StringRelocationOverlay::fromCell(base);
+ const CharT* oldBaseChars = baseOverlay->savedNurseryChars<CharT>();
+
+ // We have the base's original chars pointer and its current chars pointer.
+ // Update our chars pointer, which is an offset from the original base
+ // chars, and make it point to the same offset within the root's chars.
+ // (Most of the time, the base chars didn't move and so this has no
+ // effect.)
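+  // For example (illustrative addresses): if the base's chars were originally
+  // at 0x1000 and this string's chars at 0x1010, the offset is 0x10; if the
+  // promoted base's chars now live at 0x2000, our chars pointer becomes
+  // 0x2010.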
+ const CharT* oldChars = JSString::nonInlineCharsRaw<CharT>();
+ size_t offset = oldChars - oldBaseChars;
+ JSLinearString* tenuredBase = Forwarded(base);
+ MOZ_ASSERT(offset < tenuredBase->length());
+
+ const CharT* newBaseChars = tenuredBase->JSString::nonInlineCharsRaw<CharT>();
+ relocateNonInlineChars(newBaseChars, offset);
+
+ d.s.u3.base = tenuredBase;
+}
+
+inline void JSDependentString::sweepAfterMinorGC() {
+ if (hasTwoByteChars()) {
+ sweepTypedAfterMinorGC<char16_t>();
+ } else {
+ sweepTypedAfterMinorGC<JS::Latin1Char>();
+ }
+}
+
+static void SweepDependentStrings(Arena* arena, ArenaCellSet* cells) {
+ for (size_t i = 0; i < MaxArenaCellIndex; i += cells->BitsPerWord) {
+ ArenaCellSet::WordT bitset = cells->getWord(i / cells->BitsPerWord);
+ while (bitset) {
+ size_t bit = i + js::detail::CountTrailingZeroes(bitset);
+ auto* str = reinterpret_cast<JSString*>(uintptr_t(arena) +
+ ArenaCellIndexBytes * bit);
+ MOZ_ASSERT(str->isTenured());
+ str->asDependent().sweepAfterMinorGC();
+ bitset &= bitset - 1; // Clear the low bit.
+ }
}
+}
- stringHead_ = nonStringHead_ = nullptr;
+void ArenaCellSet::sweepDependentStrings() {
+ for (ArenaCellSet* cells = this; cells; cells = cells->next) {
+ Arena* arena = cells->arena;
+ arena->bufferedCells() = &ArenaCellSet::Empty;
+ MOZ_ASSERT(MapAllocToTraceKind(arena->getAllocKind()) ==
+ JS::TraceKind::String);
+ SweepDependentStrings(arena, cells);
+ }
}
template <typename T>
@@ -481,18 +578,42 @@ void js::gc::StoreBuffer::CellPtrEdge<T>::trace(TenuringTracer& mover) const {
edge, JS::TraceKind::String));
}
- DispatchToOnEdge(&mover, edge, "CellPtrEdge");
+ if (!mover.nursery().inCollectedRegion(thing)) {
+ return;
+ }
+
+ *edge = mover.promoteOrForward(thing);
+
+ if (IsInsideNursery(*edge)) {
+ mover.runtime()->gc.storeBuffer().putCell(edge);
+ }
}
void js::gc::StoreBuffer::ValueEdge::trace(TenuringTracer& mover) const {
- if (deref()) {
- mover.traverse(edge);
+ if (!isGCThing()) {
+ return;
+ }
+
+ TenuringTracer::AutoPromotedAnyToNursery promotedToNursery(mover);
+
+ mover.traverse(edge);
+
+ if (promotedToNursery) {
+ mover.runtime()->gc.storeBuffer().putValue(edge);
}
}
void js::gc::StoreBuffer::WasmAnyRefEdge::trace(TenuringTracer& mover) const {
- if (deref()) {
- mover.traverse(edge);
+ if (!isGCThing()) {
+ return;
+ }
+
+ TenuringTracer::AutoPromotedAnyToNursery promotedToNursery(mover);
+
+ mover.traverse(edge);
+
+ if (promotedToNursery) {
+ mover.runtime()->gc.storeBuffer().putWasmAnyRef(edge);
}
}
@@ -513,7 +634,7 @@ void js::gc::TenuringTracer::traceObject(JSObject* obj) {
if (!nobj->hasEmptyElements()) {
HeapSlotArray elements = nobj->getDenseElements();
Value* elems = elements.begin()->unbarrieredAddress();
- traceSlots(elems, elems + nobj->getDenseInitializedLength());
+ traceObjectElements(elems, nobj->getDenseInitializedLength());
}
traceObjectSlots(nobj, 0, nobj->slotSpan());
@@ -527,16 +648,17 @@ void js::gc::TenuringTracer::traceObjectSlots(NativeObject* nobj,
nobj->forEachSlotRange(start, end, traceRange);
}
+void js::gc::TenuringTracer::traceObjectElements(JS::Value* vp,
+ uint32_t count) {
+ traceSlots(vp, vp + count);
+}
+
void js::gc::TenuringTracer::traceSlots(Value* vp, Value* end) {
for (; vp != end; ++vp) {
traverse(vp);
}
}
-inline void js::gc::TenuringTracer::traceSlots(JS::Value* vp, uint32_t nslots) {
- traceSlots(vp, vp + nslots);
-}
-
void js::gc::TenuringTracer::traceString(JSString* str) {
str->traceChildren(this);
}
@@ -564,25 +686,73 @@ inline void js::gc::TenuringTracer::insertIntoObjectFixupList(
}
template <typename T>
-inline T* js::gc::TenuringTracer::allocTenured(Zone* zone, AllocKind kind) {
- return static_cast<T*>(static_cast<Cell*>(AllocateCellInGC(zone, kind)));
+inline T* js::gc::TenuringTracer::alloc(Zone* zone, AllocKind kind, Cell* src) {
+ AllocSite* site = NurseryCellHeader::from(src)->allocSite();
+ site->incPromotedCount();
+
+ void* ptr = allocCell<T::TraceKind>(zone, kind, site, src);
+ auto* cell = reinterpret_cast<T*>(ptr);
+ if (IsInsideNursery(cell)) {
+ MOZ_ASSERT(!nursery().inCollectedRegion(cell));
+ promotedToNursery = true;
+ }
+
+ return cell;
}
-JSString* js::gc::TenuringTracer::allocTenuredString(JSString* src, Zone* zone,
- AllocKind dstKind) {
- JSString* dst = allocTenured<JSString>(zone, dstKind);
- tenuredSize += moveStringToTenured(dst, src, dstKind);
- tenuredCells++;
+template <JS::TraceKind traceKind>
+void* js::gc::TenuringTracer::allocCell(Zone* zone, AllocKind allocKind,
+ AllocSite* site, Cell* src) {
+ MOZ_ASSERT(zone == src->zone());
+
+ if (!shouldTenure(zone, traceKind, src)) {
+ // Allocations from the optimized alloc site continue to use that site,
+ // otherwise a special promoted alloc site it used.
+ if (site->kind() != AllocSite::Kind::Optimized) {
+ site = &zone->pretenuring.promotedAllocSite(traceKind);
+ }
+
+ size_t thingSize = Arena::thingSize(allocKind);
+ void* ptr = nursery_.tryAllocateCell(site, thingSize, traceKind);
+ if (MOZ_LIKELY(ptr)) {
+ return ptr;
+ }
+
+ JSContext* cx = runtime()->mainContextFromOwnThread();
+ ptr = CellAllocator::RetryNurseryAlloc<NoGC>(cx, traceKind, allocKind,
+ thingSize, site);
+ if (MOZ_LIKELY(ptr)) {
+ return ptr;
+ }
+
+ // The nursery is full. This is unlikely but can happen. Fall through to
+ // the tenured allocation path.
+ }
+
+ return AllocateTenuredCellInGC(zone, allocKind);
+}
+
+JSString* js::gc::TenuringTracer::allocString(JSString* src, Zone* zone,
+ AllocKind dstKind) {
+ JSString* dst = alloc<JSString>(zone, dstKind, src);
+ promotedSize += moveString(dst, src, dstKind);
+ promotedCells++;
return dst;
}
-JSObject* js::gc::TenuringTracer::moveToTenuredSlow(JSObject* src) {
+bool js::gc::TenuringTracer::shouldTenure(Zone* zone, JS::TraceKind traceKind,
+ Cell* cell) {
+ return tenureEverything || !zone->allocKindInNursery(traceKind) ||
+ nursery_.shouldTenure(cell);
+}
+
+JSObject* js::gc::TenuringTracer::promoteObjectSlow(JSObject* src) {
MOZ_ASSERT(IsInsideNursery(src));
MOZ_ASSERT(!src->is<PlainObject>());
AllocKind dstKind = src->allocKindForTenure(nursery());
- auto* dst = allocTenured<JSObject>(src->nurseryZone(), dstKind);
+ auto* dst = alloc<JSObject>(src->nurseryZone(), dstKind, src);
size_t srcSize = Arena::thingSize(dstKind);
@@ -590,8 +760,8 @@ JSObject* js::gc::TenuringTracer::moveToTenuredSlow(JSObject* src) {
// and dst. We deal with this by copying elements manually, possibly
// re-inlining them if there is adequate room inline in dst.
//
- // For Arrays and Tuples we're reducing tenuredSize to the smaller srcSize
- // because moveElementsToTenured() accounts for all Array or Tuple elements,
+ // For Arrays and Tuples we're reducing promotedSize to the smaller srcSize
+ // because moveElements() accounts for all Array or Tuple elements,
// even if they are inlined.
if (src->is<FixedLengthTypedArrayObject>()) {
auto* tarray = &src->as<FixedLengthTypedArrayObject>();
@@ -613,8 +783,8 @@ JSObject* js::gc::TenuringTracer::moveToTenuredSlow(JSObject* src) {
srcSize = sizeof(NativeObject);
}
- tenuredSize += srcSize;
- tenuredCells++;
+ promotedSize += srcSize;
+ promotedCells++;
// Copy the Cell contents.
MOZ_ASSERT(OffsetFromChunkStart(src) >= sizeof(ChunkBase));
@@ -625,8 +795,8 @@ JSObject* js::gc::TenuringTracer::moveToTenuredSlow(JSObject* src) {
if (src->is<NativeObject>()) {
NativeObject* ndst = &dst->as<NativeObject>();
NativeObject* nsrc = &src->as<NativeObject>();
- tenuredSize += moveSlotsToTenured(ndst, nsrc);
- tenuredSize += moveElementsToTenured(ndst, nsrc, dstKind);
+ promotedSize += moveSlots(ndst, nsrc);
+ promotedSize += moveElements(ndst, nsrc, dstKind);
}
JSObjectMovedOp op = dst->getClass()->extObjectMovedOp();
@@ -634,7 +804,7 @@ JSObject* js::gc::TenuringTracer::moveToTenuredSlow(JSObject* src) {
if (op) {
// Tell the hazard analysis that the object moved hook can't GC.
JS::AutoSuppressGCAnalysis nogc;
- tenuredSize += op(dst, src);
+ promotedSize += op(dst, src);
} else {
MOZ_ASSERT_IF(src->getClass()->hasFinalize(),
CanNurseryAllocateFinalizedClass(src->getClass()));
@@ -647,18 +817,17 @@ JSObject* js::gc::TenuringTracer::moveToTenuredSlow(JSObject* src) {
return dst;
}
-inline JSObject* js::gc::TenuringTracer::movePlainObjectToTenured(
- PlainObject* src) {
- // Fast path version of moveToTenuredSlow() for specialized for PlainObject.
+inline JSObject* js::gc::TenuringTracer::promotePlainObject(PlainObject* src) {
+  // Fast path version of promoteObjectSlow() specialized for PlainObject.
MOZ_ASSERT(IsInsideNursery(src));
AllocKind dstKind = src->allocKindForTenure();
- auto* dst = allocTenured<PlainObject>(src->nurseryZone(), dstKind);
+ auto* dst = alloc<PlainObject>(src->nurseryZone(), dstKind, src);
size_t srcSize = Arena::thingSize(dstKind);
- tenuredSize += srcSize;
- tenuredCells++;
+ promotedSize += srcSize;
+ promotedCells++;
// Copy the Cell contents.
MOZ_ASSERT(OffsetFromChunkStart(src) >= sizeof(ChunkBase));
@@ -666,8 +835,8 @@ inline JSObject* js::gc::TenuringTracer::movePlainObjectToTenured(
js_memcpy(dst, src, srcSize);
// Move the slots and elements.
- tenuredSize += moveSlotsToTenured(dst, src);
- tenuredSize += moveElementsToTenured(dst, src, dstKind);
+ promotedSize += moveSlots(dst, src);
+ promotedSize += moveElements(dst, src, dstKind);
MOZ_ASSERT(!dst->getClass()->extObjectMovedOp());
@@ -678,8 +847,7 @@ inline JSObject* js::gc::TenuringTracer::movePlainObjectToTenured(
return dst;
}
-size_t js::gc::TenuringTracer::moveSlotsToTenured(NativeObject* dst,
- NativeObject* src) {
+size_t js::gc::TenuringTracer::moveSlots(NativeObject* dst, NativeObject* src) {
/* Fixed slots have already been copied over. */
if (!src->hasDynamicSlots()) {
return 0;
@@ -702,9 +870,9 @@ size_t js::gc::TenuringTracer::moveSlotsToTenured(NativeObject* dst,
return allocSize;
}
-size_t js::gc::TenuringTracer::moveElementsToTenured(NativeObject* dst,
- NativeObject* src,
- AllocKind dstKind) {
+size_t js::gc::TenuringTracer::moveElements(NativeObject* dst,
+ NativeObject* src,
+ AllocKind dstKind) {
if (src->hasEmptyElements()) {
return 0;
}
@@ -751,7 +919,7 @@ inline void js::gc::TenuringTracer::insertIntoStringFixupList(
stringHead = entry;
}
-JSString* js::gc::TenuringTracer::moveToTenured(JSString* src) {
+JSString* js::gc::TenuringTracer::promoteString(JSString* src) {
MOZ_ASSERT(IsInsideNursery(src));
MOZ_ASSERT(!src->isExternal());
@@ -762,29 +930,32 @@ JSString* js::gc::TenuringTracer::moveToTenured(JSString* src) {
// the atom. Don't do this for dependent strings because they're more
// complicated. See StringRelocationOverlay and DeduplicationStringHasher
// comments.
+ MOZ_ASSERT(!src->isAtom());
if (src->isLinear() && src->inStringToAtomCache() &&
src->isDeduplicatable() && !src->hasBase()) {
JSLinearString* linear = &src->asLinear();
JSAtom* atom = runtime()->caches().stringToAtomCache.lookupInMap(linear);
- MOZ_ASSERT(atom, "Why was the cache purged before minor GC?");
-
- // Only deduplicate if both strings have the same encoding, to not confuse
- // dependent strings.
- if (src->hasTwoByteChars() == atom->hasTwoByteChars()) {
- // The StringToAtomCache isn't used for inline strings (due to the minimum
- // length) so canOwnDependentChars must be true for both src and atom.
- // This means if there are dependent strings floating around using str's
- // chars, they will be able to use the chars from the atom.
- static_assert(StringToAtomCache::MinStringLength >
- JSFatInlineString::MAX_LENGTH_LATIN1);
- static_assert(StringToAtomCache::MinStringLength >
- JSFatInlineString::MAX_LENGTH_TWO_BYTE);
- MOZ_ASSERT(src->canOwnDependentChars());
- MOZ_ASSERT(atom->canOwnDependentChars());
-
- StringRelocationOverlay::forwardCell(src, atom);
- gcprobes::PromoteToTenured(src, atom);
- return atom;
+ // The string will not be present in the cache if it was previously promoted
+ // to the second nursery generation.
+ if (atom) {
+ // Only deduplicate if both strings have the same encoding, to not confuse
+ // dependent strings.
+ if (src->hasTwoByteChars() == atom->hasTwoByteChars()) {
+ // The StringToAtomCache isn't used for inline strings (due to the
+ // minimum length) so canOwnDependentChars must be true for both src and
+ // atom. This means if there are dependent strings floating around using
+ // str's chars, they will be able to use the chars from the atom.
+ static_assert(StringToAtomCache::MinStringLength >
+ JSFatInlineString::MAX_LENGTH_LATIN1);
+ static_assert(StringToAtomCache::MinStringLength >
+ JSFatInlineString::MAX_LENGTH_TWO_BYTE);
+ MOZ_ASSERT(src->canOwnDependentChars());
+ MOZ_ASSERT(atom->canOwnDependentChars());
+
+ StringRelocationOverlay::forwardCell(src, atom);
+ gcprobes::PromoteToTenured(src, atom);
+ return atom;
+ }
}
}
@@ -798,9 +969,12 @@ JSString* js::gc::TenuringTracer::moveToTenured(JSString* src) {
// 3. It is deduplicatable:
// The JSString NON_DEDUP_BIT flag is unset.
// 4. It matches an entry in stringDeDupSet.
+ // 5. It is moved to the tenured heap.
- if (src->length() < MAX_DEDUPLICATABLE_STRING_LENGTH && src->isLinear() &&
+ if (shouldTenure(zone, JS::TraceKind::String, src) &&
+ src->length() < MAX_DEDUPLICATABLE_STRING_LENGTH && src->isLinear() &&
src->isDeduplicatable() && stringDeDupSet.isSome()) {
+ src->clearBitsOnTenure();
auto p = stringDeDupSet->lookupForAdd(src);
if (p) {
// Deduplicate to the looked-up string!
@@ -811,7 +985,7 @@ JSString* js::gc::TenuringTracer::moveToTenured(JSString* src) {
return dst;
}
- dst = allocTenuredString(src, zone, dstKind);
+ dst = allocString(src, zone, dstKind);
using DedupHasher [[maybe_unused]] = DeduplicationStringHasher<JSString*>;
MOZ_ASSERT(DedupHasher::hash(src) == DedupHasher::hash(dst),
@@ -823,14 +997,17 @@ JSString* js::gc::TenuringTracer::moveToTenured(JSString* src) {
stringDeDupSet.reset();
}
} else {
- dst = allocTenuredString(src, zone, dstKind);
- dst->clearNonDeduplicatable();
+ dst = allocString(src, zone, dstKind);
+ if (dst->isTenured()) {
+ src->clearBitsOnTenure();
+ dst->clearBitsOnTenure();
+ }
}
zone->stringStats.ref().noteTenured(src->allocSize());
auto* overlay = StringRelocationOverlay::forwardCell(src, dst);
- MOZ_ASSERT(dst->isDeduplicatable());
+ MOZ_ASSERT_IF(dst->isTenured() && dst->isLinear(), dst->isDeduplicatable());
if (dst->hasBase() || dst->isRope()) {
// dst or one of its leaves might have a base that will be deduplicated.
@@ -874,6 +1051,9 @@ void js::gc::TenuringTracer::relocateDependentStringChars(
tenuredDependentStr->relocateNonInlineChars<const CharT*>(
tenuredRootBase->nonInlineChars<CharT>(nogc), *offset);
tenuredDependentStr->setBase(tenuredRootBase);
+ if (tenuredDependentStr->isTenured() && !tenuredRootBase->isTenured()) {
+ runtime()->gc.storeBuffer().putWholeCell(tenuredDependentStr);
+ }
return;
}
@@ -889,7 +1069,7 @@ void js::gc::TenuringTracer::relocateDependentStringChars(
// The root base can be in either the nursery or the tenured heap.
// dependentStr chars needs to be relocated after traceString if the
// root base is in the nursery.
- if (!(*rootBase)->isTenured()) {
+ if (nursery().inCollectedRegion(*rootBase)) {
*rootBaseNotYetForwarded = true;
const CharT* rootBaseChars = (*rootBase)->nonInlineChars<CharT>(nogc);
*offset = dependentStrChars - rootBaseChars;
@@ -906,15 +1086,15 @@ void js::gc::TenuringTracer::relocateDependentStringChars(
}
}
-JS::BigInt* js::gc::TenuringTracer::moveToTenured(JS::BigInt* src) {
+JS::BigInt* js::gc::TenuringTracer::promoteBigInt(JS::BigInt* src) {
MOZ_ASSERT(IsInsideNursery(src));
AllocKind dstKind = src->getAllocKind();
Zone* zone = src->nurseryZone();
- JS::BigInt* dst = allocTenured<JS::BigInt>(zone, dstKind);
- tenuredSize += moveBigIntToTenured(dst, src, dstKind);
- tenuredCells++;
+ JS::BigInt* dst = alloc<JS::BigInt>(zone, dstKind, src);
+ promotedSize += moveBigInt(dst, src, dstKind);
+ promotedCells++;
RelocationOverlay::forwardCell(src, dst);
@@ -924,41 +1104,57 @@ JS::BigInt* js::gc::TenuringTracer::moveToTenured(JS::BigInt* src) {
void js::gc::TenuringTracer::collectToObjectFixedPoint() {
while (RelocationOverlay* p = objHead) {
+ MOZ_ASSERT(nursery().inCollectedRegion(p));
objHead = objHead->next();
auto* obj = static_cast<JSObject*>(p->forwardingAddress());
+
+ MOZ_ASSERT_IF(IsInsideNursery(obj), !nursery().inCollectedRegion(obj));
+
+ AutoPromotedAnyToNursery promotedAnyToNursery(*this);
traceObject(obj);
+ if (obj->isTenured() && promotedAnyToNursery) {
+ runtime()->gc.storeBuffer().putWholeCell(obj);
+ }
}
}
void js::gc::TenuringTracer::collectToStringFixedPoint() {
while (StringRelocationOverlay* p = stringHead) {
+ MOZ_ASSERT(nursery().inCollectedRegion(p));
stringHead = stringHead->next();
- auto* tenuredStr = static_cast<JSString*>(p->forwardingAddress());
+ auto* str = static_cast<JSString*>(p->forwardingAddress());
+ MOZ_ASSERT_IF(IsInsideNursery(str), !nursery().inCollectedRegion(str));
+
// To ensure the NON_DEDUP_BIT was reset properly.
- MOZ_ASSERT(tenuredStr->isDeduplicatable());
+ MOZ_ASSERT(!str->isAtom());
+ MOZ_ASSERT_IF(str->isTenured() && str->isLinear(), str->isDeduplicatable());
// The nursery root base might not be forwarded before
- // traceString(tenuredStr). traceString(tenuredStr) will forward the root
+ // traceString(str). traceString(str) will forward the root
// base if that's the case. Dependent string chars needs to be relocated
// after traceString if root base was not forwarded.
size_t offset = 0;
bool rootBaseNotYetForwarded = false;
JSLinearString* rootBase = nullptr;
- if (tenuredStr->isDependent()) {
- if (tenuredStr->hasTwoByteChars()) {
+ if (str->isDependent()) {
+ if (str->hasTwoByteChars()) {
relocateDependentStringChars<char16_t>(
- &tenuredStr->asDependent(), p->savedNurseryBaseOrRelocOverlay(),
- &offset, &rootBaseNotYetForwarded, &rootBase);
+ &str->asDependent(), p->savedNurseryBaseOrRelocOverlay(), &offset,
+ &rootBaseNotYetForwarded, &rootBase);
} else {
relocateDependentStringChars<JS::Latin1Char>(
- &tenuredStr->asDependent(), p->savedNurseryBaseOrRelocOverlay(),
- &offset, &rootBaseNotYetForwarded, &rootBase);
+ &str->asDependent(), p->savedNurseryBaseOrRelocOverlay(), &offset,
+ &rootBaseNotYetForwarded, &rootBase);
}
}
- traceString(tenuredStr);
+ AutoPromotedAnyToNursery promotedAnyToNursery(*this);
+ traceString(str);
+ if (str->isTenured() && promotedAnyToNursery) {
+ runtime()->gc.storeBuffer().putWholeCell(str);
+ }
if (rootBaseNotYetForwarded) {
MOZ_ASSERT(rootBase->isForwarded(),
@@ -968,25 +1164,34 @@ void js::gc::TenuringTracer::collectToStringFixedPoint() {
JSLinearString* tenuredRootBase = Forwarded(rootBase);
MOZ_ASSERT(offset < tenuredRootBase->length());
- if (tenuredStr->hasTwoByteChars()) {
- tenuredStr->asDependent().relocateNonInlineChars<const char16_t*>(
+ if (str->hasTwoByteChars()) {
+ str->asDependent().relocateNonInlineChars<const char16_t*>(
tenuredRootBase->twoByteChars(nogc), offset);
} else {
- tenuredStr->asDependent().relocateNonInlineChars<const JS::Latin1Char*>(
+ str->asDependent().relocateNonInlineChars<const JS::Latin1Char*>(
tenuredRootBase->latin1Chars(nogc), offset);
}
- tenuredStr->setBase(tenuredRootBase);
+
+ str->setBase(tenuredRootBase);
+ if (str->isTenured() && !tenuredRootBase->isTenured()) {
+ runtime()->gc.storeBuffer().putWholeCell(str);
+ }
+ }
+
+ if (str->hasBase()) {
+ MOZ_ASSERT(!str->base()->isForwarded());
+ MOZ_ASSERT_IF(!str->base()->isTenured(),
+ !nursery().inCollectedRegion(str->base()));
}
}
}
-size_t js::gc::TenuringTracer::moveStringToTenured(JSString* dst, JSString* src,
- AllocKind dstKind) {
+size_t js::gc::TenuringTracer::moveString(JSString* dst, JSString* src,
+ AllocKind dstKind) {
size_t size = Arena::thingSize(dstKind);
- // At the moment, strings always have the same AllocKind between src and
- // dst. This may change in the future.
- MOZ_ASSERT(dst->asTenured().getAllocKind() == src->getAllocKind());
+ MOZ_ASSERT_IF(dst->isTenured(),
+ dst->asTenured().getAllocKind() == src->getAllocKind());
// Copy the Cell contents.
MOZ_ASSERT(OffsetToChunkEnd(src) >= size);
@@ -999,7 +1204,8 @@ size_t js::gc::TenuringTracer::moveStringToTenured(JSString* dst, JSString* src,
if (src->ownsMallocedChars()) {
void* chars = src->asLinear().nonInlineCharsRaw();
nursery().removeMallocedBufferDuringMinorGC(chars);
- AddCellMemory(dst, dst->asLinear().allocSize(), MemoryUse::StringContents);
+ nursery().trackMallocedBufferOnPromotion(
+ chars, dst, dst->asLinear().allocSize(), MemoryUse::StringContents);
return size;
}
@@ -1016,14 +1222,12 @@ size_t js::gc::TenuringTracer::moveStringToTenured(JSString* dst, JSString* src,
return size;
}
-size_t js::gc::TenuringTracer::moveBigIntToTenured(JS::BigInt* dst,
- JS::BigInt* src,
- AllocKind dstKind) {
+size_t js::gc::TenuringTracer::moveBigInt(JS::BigInt* dst, JS::BigInt* src,
+ AllocKind dstKind) {
size_t size = Arena::thingSize(dstKind);
- // At the moment, BigInts always have the same AllocKind between src and
- // dst. This may change in the future.
- MOZ_ASSERT(dst->asTenured().getAllocKind() == src->getAllocKind());
+ MOZ_ASSERT_IF(dst->isTenured(),
+ dst->asTenured().getAllocKind() == src->getAllocKind());
// Copy the Cell contents.
MOZ_ASSERT(OffsetToChunkEnd(src) >= size);
diff --git a/js/src/gc/Tenuring.h b/js/src/gc/Tenuring.h
index 3eca5f4bc3..8bb9efd4f8 100644
--- a/js/src/gc/Tenuring.h
+++ b/js/src/gc/Tenuring.h
@@ -26,6 +26,8 @@ class AnyRef;
namespace gc {
+class AllocSite;
+class ArenaCellSet;
class RelocationOverlay;
class StringRelocationOverlay;
@@ -39,10 +41,10 @@ struct DeduplicationStringHasher {
class TenuringTracer final : public JSTracer {
Nursery& nursery_;
- // Amount of data moved to the tenured generation during collection.
- size_t tenuredSize = 0;
- // Number of cells moved to the tenured generation.
- size_t tenuredCells = 0;
+ // Size of data promoted during collection.
+ size_t promotedSize = 0;
+ // Number of cells promoted during collection.
+ size_t promotedCells = 0;
// These lists are threaded through the Nursery using the space from
// already moved things. The lists are used to fix up the moved things and
@@ -59,27 +61,34 @@ class TenuringTracer final : public JSTracer {
// collection when out of memory to insert new entries.
mozilla::Maybe<StringDeDupSet> stringDeDupSet;
+ bool tenureEverything;
+
+ // A flag set when a GC thing is promoted to the next nursery generation (as
+ // opposed to the tenured heap). This is used to check when we need to add an
+ // edge to the remembered set during nursery collection.
+ bool promotedToNursery = false;
+
#define DEFINE_ON_EDGE_METHOD(name, type, _1, _2) \
void on##name##Edge(type** thingp, const char* name) override;
JS_FOR_EACH_TRACEKIND(DEFINE_ON_EDGE_METHOD)
#undef DEFINE_ON_EDGE_METHOD
public:
- TenuringTracer(JSRuntime* rt, Nursery* nursery);
+ TenuringTracer(JSRuntime* rt, Nursery* nursery, bool tenureEverything);
Nursery& nursery() { return nursery_; }
- // Move all objects and everything they can reach to the tenured heap. Called
- // after all roots have been traced.
+ // Promote all live objects and everything they can reach. Called after all
+ // roots have been traced.
void collectToObjectFixedPoint();
- // Move all strings and all strings they can reach to the tenured heap, and
- // additionally do any fixups for when strings are pointing into memory that
- // was deduplicated. Called after collectToObjectFixedPoint().
+  // Promote all live strings and all strings they can reach, and additionally
+  // fix up any strings that point into memory that was deduplicated. Called
+  // after collectToObjectFixedPoint().
void collectToStringFixedPoint();
- size_t getTenuredSize() const;
- size_t getTenuredCells() const;
+ size_t getPromotedSize() const;
+ size_t getPromotedCells() const;
void traverse(JS::Value* thingp);
void traverse(wasm::AnyRef* thingp);
@@ -87,14 +96,26 @@ class TenuringTracer final : public JSTracer {
// The store buffers need to be able to call these directly.
void traceObject(JSObject* obj);
void traceObjectSlots(NativeObject* nobj, uint32_t start, uint32_t end);
- void traceSlots(JS::Value* vp, uint32_t nslots);
+ void traceObjectElements(JS::Value* vp, uint32_t count);
void traceString(JSString* str);
void traceBigInt(JS::BigInt* bi);
+ // Methods to promote a live cell or get the pointer to its new location if
+ // that has already happened. The store buffers call these.
+ JSObject* promoteOrForward(JSObject* obj);
+ JSString* promoteOrForward(JSString* str);
+ JS::BigInt* promoteOrForward(JS::BigInt* bip);
+
+ // Returns whether any cells in the arena require sweeping.
+ template <typename T>
+ bool traceBufferedCells(Arena* arena, ArenaCellSet* cells);
+
+ class AutoPromotedAnyToNursery;
+
private:
- MOZ_ALWAYS_INLINE void onNonForwardedNurseryObjectEdge(JSObject** objp);
- MOZ_ALWAYS_INLINE void onNonForwardedNurseryStringEdge(JSString** strp);
- MOZ_ALWAYS_INLINE void onNonForwardedNurseryBigIntEdge(JS::BigInt** bip);
+ MOZ_ALWAYS_INLINE JSObject* onNonForwardedNurseryObject(JSObject* obj);
+ MOZ_ALWAYS_INLINE JSString* onNonForwardedNurseryString(JSString* str);
+ MOZ_ALWAYS_INLINE JS::BigInt* onNonForwardedNurseryBigInt(JS::BigInt* bi);
// The dependent string chars needs to be relocated if the base which it's
// using chars from has been deduplicated.
@@ -109,22 +130,24 @@ class TenuringTracer final : public JSTracer {
inline void insertIntoStringFixupList(gc::StringRelocationOverlay* entry);
template <typename T>
- inline T* allocTenured(JS::Zone* zone, gc::AllocKind kind);
- JSString* allocTenuredString(JSString* src, JS::Zone* zone,
- gc::AllocKind dstKind);
-
- inline JSObject* movePlainObjectToTenured(PlainObject* src);
- JSObject* moveToTenuredSlow(JSObject* src);
- JSString* moveToTenured(JSString* src);
- JS::BigInt* moveToTenured(JS::BigInt* src);
-
- size_t moveElementsToTenured(NativeObject* dst, NativeObject* src,
- gc::AllocKind dstKind);
- size_t moveSlotsToTenured(NativeObject* dst, NativeObject* src);
- size_t moveStringToTenured(JSString* dst, JSString* src,
- gc::AllocKind dstKind);
- size_t moveBigIntToTenured(JS::BigInt* dst, JS::BigInt* src,
- gc::AllocKind dstKind);
+ T* alloc(JS::Zone* zone, gc::AllocKind kind, gc::Cell* src);
+ template <JS::TraceKind traceKind>
+ void* allocCell(JS::Zone* zone, gc::AllocKind allocKind, gc::AllocSite* site,
+ gc::Cell* src);
+ JSString* allocString(JSString* src, JS::Zone* zone, gc::AllocKind dstKind);
+
+ bool shouldTenure(Zone* zone, JS::TraceKind traceKind, Cell* cell);
+
+ inline JSObject* promotePlainObject(PlainObject* src);
+ JSObject* promoteObjectSlow(JSObject* src);
+ JSString* promoteString(JSString* src);
+ JS::BigInt* promoteBigInt(JS::BigInt* src);
+
+ size_t moveElements(NativeObject* dst, NativeObject* src,
+ gc::AllocKind dstKind);
+ size_t moveSlots(NativeObject* dst, NativeObject* src);
+ size_t moveString(JSString* dst, JSString* src, gc::AllocKind dstKind);
+ size_t moveBigInt(JS::BigInt* dst, JS::BigInt* src, gc::AllocKind dstKind);
void traceSlots(JS::Value* vp, JS::Value* end);
};
diff --git a/js/src/gc/TraceMethods-inl.h b/js/src/gc/TraceMethods-inl.h
index 37da24fe6e..e7066c5ebc 100644
--- a/js/src/gc/TraceMethods-inl.h
+++ b/js/src/gc/TraceMethods-inl.h
@@ -30,6 +30,9 @@
#include "vm/SymbolType.h"
#include "wasm/WasmJS.h"
+#include "gc/Marking-inl.h"
+#include "vm/StringType-inl.h"
+
inline void js::BaseScript::traceChildren(JSTracer* trc) {
TraceNullableEdge(trc, &function_, "function");
TraceEdge(trc, &sourceObject_, "sourceObject");
@@ -76,6 +79,20 @@ inline void JSString::traceChildren(JSTracer* trc) {
asRope().traceChildren(trc);
}
}
+inline void JSString::traceBaseFromStoreBuffer(JSTracer* trc) {
+ MOZ_ASSERT(!d.s.u3.base->isTenured());
+
+ // Contract the base chain so that this tenured dependent string points
+ // directly at the root base that owns its chars.
+ JSLinearString* root = asDependent().rootBaseDuringMinorGC();
+ d.s.u3.base = root;
+ if (!root->isTenured()) {
+ js::TraceManuallyBarrieredEdge(trc, &root, "base");
+ // Do not update the actual base to the tenured string yet. This string will
+ // need to be swept in order to update its chars ptr to be relative to the
+ // root, and that update requires information from the overlay.
+ }
+}
template <uint32_t opts>
void js::GCMarker::eagerlyMarkChildren(JSString* str) {
if (str->isLinear()) {
diff --git a/js/src/gc/WeakMap-inl.h b/js/src/gc/WeakMap-inl.h
index 015e5071ed..d7b5feb5a6 100644
--- a/js/src/gc/WeakMap-inl.h
+++ b/js/src/gc/WeakMap-inl.h
@@ -18,6 +18,7 @@
#include "gc/GCLock.h"
#include "gc/Marking.h"
#include "gc/Zone.h"
+#include "js/Prefs.h"
#include "js/TraceKind.h"
#include "vm/JSContext.h"
@@ -71,6 +72,16 @@ static inline JSObject* GetDelegate(const T& key) {
template <>
inline JSObject* GetDelegate(gc::Cell* const&) = delete;
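+// Detect JS::Symbol WeakMap keys. Only Value keys can hold a symbol; for all
+// other key types this is statically false. Used below to add sweep group
+// edges for symbol keys when symbols-as-WeakMap-keys is enabled.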
+template <typename T>
+static inline bool IsSymbol(const T& key) {
+ return false;
+}
+
+template <>
+inline bool IsSymbol(const HeapPtr<JS::Value>& key) {
+ return key.isSymbol();
+}
+
} // namespace gc::detail
// Weakmap entry -> value edges are only visible if the map is traced, which
@@ -303,25 +314,42 @@ bool WeakMap<K, V>::findSweepGroupEdges() {
for (Range r = all(); !r.empty(); r.popFront()) {
const K& key = r.front().key();
- // If the key type doesn't have delegates, then this will always return
- // nullptr and the optimizer can remove the entire body of this function.
JSObject* delegate = gc::detail::GetDelegate(key);
- if (!delegate) {
+ if (delegate) {
+ // Marking a WeakMap key's delegate will mark the key, so process the
+ // delegate zone no later than the key zone.
+ Zone* delegateZone = delegate->zone();
+ gc::Cell* keyCell = gc::ToMarkable(key);
+ MOZ_ASSERT(keyCell);
+ Zone* keyZone = keyCell->zone();
+ if (delegateZone != keyZone && delegateZone->isGCMarking() &&
+ keyZone->isGCMarking()) {
+ if (!delegateZone->addSweepGroupEdgeTo(keyZone)) {
+ return false;
+ }
+ }
+ }
+
+#ifdef NIGHTLY_BUILD
+ bool symbolsAsWeakMapKeysEnabled =
+ JS::Prefs::experimental_symbols_as_weakmap_keys();
+ if (!symbolsAsWeakMapKeysEnabled) {
continue;
}
- // Marking a WeakMap key's delegate will mark the key, so process the
- // delegate zone no later than the key zone.
- Zone* delegateZone = delegate->zone();
- gc::Cell* keyCell = gc::ToMarkable(key);
- MOZ_ASSERT(keyCell);
- Zone* keyZone = keyCell->zone();
- if (delegateZone != keyZone && delegateZone->isGCMarking() &&
- keyZone->isGCMarking()) {
- if (!delegateZone->addSweepGroupEdgeTo(keyZone)) {
- return false;
+ bool isSym = gc::detail::IsSymbol(key);
+ if (isSym) {
+ gc::Cell* keyCell = gc::ToMarkable(key);
+ Zone* keyZone = keyCell->zone();
+ MOZ_ASSERT(keyZone->isAtomsZone());
+
+ if (zone()->isGCMarking() && keyZone->isGCMarking()) {
+ if (!keyZone->addSweepGroupEdgeTo(zone())) {
+ return false;
+ }
}
}
+#endif
}
return true;
}
diff --git a/js/src/gc/Zone.cpp b/js/src/gc/Zone.cpp
index d0586d5d56..6437f6f4c3 100644
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -622,7 +622,7 @@ void Zone::fixupAfterMovingGC() {
}
void Zone::purgeAtomCache() {
- atomCache().clearAndCompact();
+ atomCache_.ref().reset();
// Also purge the dtoa caches so that subsequent lookups populate atom
// cache too.
@@ -981,20 +981,3 @@ bool Zone::registerObjectWithWeakPointers(JSObject* obj) {
MOZ_ASSERT(!IsInsideNursery(obj));
return objectsWithWeakPointers.ref().append(obj);
}
-
-js::DependentScriptSet* Zone::getOrCreateDependentScriptSet(
- JSContext* cx, js::InvalidatingFuse* fuse) {
- for (auto& dss : fuseDependencies) {
- if (dss.associatedFuse == fuse) {
- return &dss;
- }
- }
-
- if (!fuseDependencies.emplaceBack(cx, fuse)) {
- return nullptr;
- }
-
- auto& dss = fuseDependencies.back();
- MOZ_ASSERT(dss.associatedFuse == fuse);
- return &dss;
-}
diff --git a/js/src/gc/Zone.h b/js/src/gc/Zone.h
index fd91de8626..a5ce161cc4 100644
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -15,6 +15,8 @@
#include "mozilla/PodOperations.h"
#include "mozilla/TimeStamp.h"
+#include <array>
+
#include "jstypes.h"
#include "ds/Bitmap.h"
@@ -139,6 +141,148 @@ class MOZ_NON_TEMPORARY_CLASS FunctionToStringCache {
MOZ_ALWAYS_INLINE void put(BaseScript* script, JSString* string);
};
+// HashAndLength is a simple class encapsulating the combination of a HashNumber
+// and a (string) length into a single 64-bit value. Having them bundled
+// together like this enables us to compare pairs of hashes and lengths with a
+// single 64-bit comparison.
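+//
+// For example, a hash of 0x12345678 paired with a length of 7 packs to the
+// single 64-bit value 0x0000000712345678.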
+class HashAndLength {
+ public:
+ MOZ_ALWAYS_INLINE explicit HashAndLength(uint64_t initialValue = unsetValue())
+ : mHashAndLength(initialValue) {}
+ MOZ_ALWAYS_INLINE HashAndLength(HashNumber hash, uint32_t length)
+ : mHashAndLength(uint64FromHashAndLength(hash, length)) {}
+
+ void MOZ_ALWAYS_INLINE set(HashNumber hash, uint32_t length) {
+ mHashAndLength = uint64FromHashAndLength(hash, length);
+ }
+
+ constexpr MOZ_ALWAYS_INLINE HashNumber hash() const {
+ return hashFromUint64(mHashAndLength);
+ }
+ constexpr MOZ_ALWAYS_INLINE uint32_t length() const {
+ return lengthFromUint64(mHashAndLength);
+ }
+
+ constexpr MOZ_ALWAYS_INLINE bool isEqual(HashNumber hash,
+ uint32_t length) const {
+ return mHashAndLength == uint64FromHashAndLength(hash, length);
+ }
+
+ // This function is used at compile-time to verify that we pack and unpack
+ // hash and length values consistently.
+ static constexpr bool staticChecks() {
+ std::array<HashNumber, 5> hashes{0x00000000, 0xffffffff, 0xf0f0f0f0,
+ 0x0f0f0f0f, 0x73737373};
+ std::array<uint32_t, 6> lengths{0, 1, 2, 3, 11, 56};
+
+ for (const HashNumber hash : hashes) {
+ for (const uint32_t length : lengths) {
+ const uint64_t lengthAndHash = uint64FromHashAndLength(hash, length);
+ if (hashFromUint64(lengthAndHash) != hash) {
+ return false;
+ }
+ if (lengthFromUint64(lengthAndHash) != length) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+
+ static constexpr MOZ_ALWAYS_INLINE uint64_t unsetValue() {
+ // This needs to be a combination of hash and length that would never occur
+ // together. There is only one string of length zero, and its hash is zero,
+ // so the hash here can be anything except zero.
+ return uint64FromHashAndLength(0xffffffff, 0);
+ }
+
+ private:
+ uint64_t mHashAndLength;
+
+ static constexpr MOZ_ALWAYS_INLINE uint64_t
+ uint64FromHashAndLength(HashNumber hash, uint32_t length) {
+ return (static_cast<uint64_t>(length) << 32) | hash;
+ }
+
+ static constexpr MOZ_ALWAYS_INLINE uint32_t
+ lengthFromUint64(uint64_t hashAndLength) {
+ return static_cast<uint32_t>(hashAndLength >> 32);
+ }
+
+ static constexpr MOZ_ALWAYS_INLINE HashNumber
+ hashFromUint64(uint64_t hashAndLength) {
+ return hashAndLength & 0xffffffff;
+ }
+};
+
+static_assert(HashAndLength::staticChecks());
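As a quick illustration of the packing scheme (length in the high 32 bits, hash in the low 32 bits, so one 64-bit comparison covers both fields), the standalone snippet below uses helpers with made-up names; only HashAndLength above is the real class.

// Standalone illustration of the 64-bit hash/length packing used above.
// pack/unpackHash/unpackLength are illustrative helpers, not SpiderMonkey API.
#include <cassert>
#include <cstdint>

using HashNumber = uint32_t;

constexpr uint64_t pack(HashNumber hash, uint32_t length) {
  return (static_cast<uint64_t>(length) << 32) | hash;
}
constexpr HashNumber unpackHash(uint64_t v) {
  return static_cast<HashNumber>(v & 0xffffffff);
}
constexpr uint32_t unpackLength(uint64_t v) {
  return static_cast<uint32_t>(v >> 32);
}

int main() {
  constexpr uint64_t packed = pack(0x73737373, 11);
  static_assert(unpackHash(packed) == 0x73737373);
  static_assert(unpackLength(packed) == 11);
  // A mismatch in either field makes the single 64-bit comparison fail.
  assert(packed != pack(0x73737373, 12));
  assert(packed != pack(0x73737374, 11));
  return 0;
}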
+
+class AtomCacheHashTable {
+ public:
+ MOZ_ALWAYS_INLINE AtomCacheHashTable() { clear(); }
+
+ MOZ_ALWAYS_INLINE void clear() {
+ mEntries.fill({HashAndLength{HashAndLength::unsetValue()}, nullptr});
+ }
+
+ static MOZ_ALWAYS_INLINE constexpr uint32_t computeIndexFromHash(
+ const HashNumber hash) {
+ // Simply use the low bits of the hash value as the cache index.
+ return hash & (sSize - 1);
+ }
+
+ MOZ_ALWAYS_INLINE JSAtom* lookupForAdd(
+ const AtomHasher::Lookup& lookup) const {
+ MOZ_ASSERT(lookup.atom == nullptr, "Lookup by atom is not supported");
+
+ const uint32_t index = computeIndexFromHash(lookup.hash);
+
+ JSAtom* const atom = mEntries[index].mAtom;
+
+ if (!mEntries[index].mHashAndLength.isEqual(lookup.hash, lookup.length)) {
+ return nullptr;
+ }
+
+ // This is annotated with MOZ_UNLIKELY because it virtually never happens
+ // that, after matching the hash and the length, the string isn't a match.
+ if (MOZ_UNLIKELY(!lookup.StringsMatch(*atom))) {
+ return nullptr;
+ }
+
+ return atom;
+ }
+
+ MOZ_ALWAYS_INLINE void add(const HashNumber hash, JSAtom* atom) {
+ const uint32_t index = computeIndexFromHash(hash);
+
+ mEntries[index].set(hash, atom->length(), atom);
+ }
+
+ private:
+ struct Entry {
+ MOZ_ALWAYS_INLINE void set(const HashNumber hash, const uint32_t length,
+ JSAtom* const atom) {
+ mHashAndLength.set(hash, length);
+ mAtom = atom;
+ }
+
+ // Hash and length are also available, from JSAtom and JSString
+ // respectively, but are cached here to avoid likely cache misses in the
+ // frequent case of a missed lookup.
+ HashAndLength mHashAndLength;
+ // No read barrier is required here because the table is cleared at the
+ // start of GC.
+ JSAtom* mAtom;
+ };
+
+ // This value was picked empirically based on performance testing using SP2
+ // and SP3. 4k was better than 2k but 8k was not much better than 4k.
+ static constexpr uint32_t sSize = 4 * 1024;
+ static_assert(mozilla::IsPowerOfTwo(sSize));
+ std::array<Entry, sSize> mEntries;
+};
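To show the shape of the cache in isolation, the toy direct-mapped table below mirrors the structure (index taken from the low hash bits, one entry per slot, collisions simply overwrite). The types are deliberately simplified, with const char* standing in for JSAtom*, and none of the names belong to SpiderMonkey.

// Toy direct-mapped cache mirroring the AtomCacheHashTable layout above.
// Purely illustrative; not the real implementation.
#include <array>
#include <cstdint>
#include <cstdio>
#include <cstring>

struct ToyAtomCache {
  static constexpr uint32_t kSize = 1024;  // power of two, like sSize
  struct Entry {
    uint64_t hashAndLength = ~0ull;  // "unset" sentinel
    const char* atom = nullptr;
  };
  std::array<Entry, kSize> entries;

  static uint32_t indexFor(uint32_t hash) { return hash & (kSize - 1); }

  const char* lookup(uint32_t hash, uint32_t length, const char* chars) const {
    const Entry& e = entries[indexFor(hash)];
    if (e.hashAndLength != ((uint64_t(length) << 32) | hash)) {
      return nullptr;  // cheap single 64-bit miss test
    }
    return std::strcmp(e.atom, chars) == 0 ? e.atom : nullptr;
  }

  void add(uint32_t hash, const char* atom) {
    Entry& e = entries[indexFor(hash)];
    e.hashAndLength = (uint64_t(std::strlen(atom)) << 32) | hash;
    e.atom = atom;  // a collision simply overwrites the previous entry
  }
};

int main() {
  ToyAtomCache cache;
  cache.add(0x1234, "foo");
  std::printf("%s\n", cache.lookup(0x1234, 3, "foo") ? "hit" : "miss");
  return 0;
}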
+
} // namespace js
namespace JS {
@@ -281,7 +425,7 @@ class Zone : public js::ZoneAllocator, public js::gc::GraphNodeBase<JS::Zone> {
js::MainThreadOrGCTaskData<js::SparseBitmap> markedAtoms_;
// Set of atoms recently used by this Zone. Purged on GC.
- js::MainThreadOrGCTaskData<js::AtomSet> atomCache_;
+ js::MainThreadOrGCTaskData<js::UniquePtr<js::AtomCacheHashTable>> atomCache_;
// Cache storing allocated external strings. Purged on GC.
js::MainThreadOrGCTaskData<js::ExternalStringCache> externalStringCache_;
@@ -613,7 +757,16 @@ class Zone : public js::ZoneAllocator, public js::gc::GraphNodeBase<JS::Zone> {
js::SparseBitmap& markedAtoms() { return markedAtoms_.ref(); }
- js::AtomSet& atomCache() { return atomCache_.ref(); }
+ // The atom cache is "allocate-on-demand". This function can return nullptr if
+ // the allocation fails.
+ js::AtomCacheHashTable* atomCache() {
+ if (atomCache_.ref()) {
+ return atomCache_.ref().get();
+ }
+
+ atomCache_ = js::MakeUnique<js::AtomCacheHashTable>();
+ return atomCache_.ref().get();
+ }
void purgeAtomCache();
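A caller-side sketch of this allocate-on-demand pattern follows, under the assumption stated in the comment above that the accessor may return nullptr; ZoneLike, CacheLike, and lookupAtom are invented for illustration only.

// Sketch of lazy allocation plus a caller that tolerates allocation failure.
// ZoneLike/CacheLike are illustrative stand-ins, not SpiderMonkey types.
#include <memory>
#include <new>

struct CacheLike {
  const char* lookup(unsigned /*hash*/) const { return nullptr; }
};

struct ZoneLike {
  std::unique_ptr<CacheLike> cache_;

  // Allocate on first use; returns nullptr if the allocation fails.
  CacheLike* cache() {
    if (!cache_) {
      cache_.reset(new (std::nothrow) CacheLike());
    }
    return cache_.get();
  }
};

const char* lookupAtom(ZoneLike& zone, unsigned hash) {
  if (CacheLike* cache = zone.cache()) {
    return cache->lookup(hash);  // fast path: consult the cache
  }
  return nullptr;  // cache could not be allocated: take the slow path instead
}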
@@ -677,18 +830,7 @@ class Zone : public js::ZoneAllocator, public js::gc::GraphNodeBase<JS::Zone> {
#endif
// Support for invalidating fuses
-
- // A dependent script set pairs a fuse with a set of scripts which depend
- // on said fuse; this is a vector of script sets because the expectation for
- // now is that the number of runtime wide invalidating fuses will be small.
- // This will need to be revisited (convert to HashMap?) should that no
- // longer be the case
- //
- // Note: This isn't traced through the zone, but rather through the use
- // of JS::WeakCache.
- js::Vector<js::DependentScriptSet, 1, js::SystemAllocPolicy> fuseDependencies;
- js::DependentScriptSet* getOrCreateDependentScriptSet(
- JSContext* cx, js::InvalidatingFuse* fuse);
+ js::DependentScriptGroup fuseDependencies;
private:
js::jit::JitZone* createJitZone(JSContext* cx);