author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-15 03:34:50 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-15 03:34:50 +0000 |
commit | def92d1b8e9d373e2f6f27c366d578d97d8960c6 (patch) | |
tree | 2ef34b9ad8bb9a9220e05d60352558b15f513894 /js/src/gc/Nursery.cpp | |
parent | Adding debian version 125.0.3-1. (diff) | |
download | firefox-def92d1b8e9d373e2f6f27c366d578d97d8960c6.tar.xz, firefox-def92d1b8e9d373e2f6f27c366d578d97d8960c6.zip |
Merging upstream version 126.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js/src/gc/Nursery.cpp')
-rw-r--r-- | js/src/gc/Nursery.cpp | 876 |
1 file changed, 615 insertions, 261 deletions
diff --git a/js/src/gc/Nursery.cpp b/js/src/gc/Nursery.cpp
index 660daa8d4c..4753848c56 100644
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -38,6 +38,7 @@
 #include "gc/Heap-inl.h"
 #include "gc/Marking-inl.h"
 #include "gc/StableCellHasher-inl.h"
+#include "gc/StoreBuffer-inl.h"
 #include "vm/GeckoProfiler-inl.h"

 using namespace js;
@@ -50,15 +51,22 @@ using mozilla::TimeStamp;

 namespace js {

+static constexpr size_t NurseryChunkHeaderSize =
+    RoundUp(sizeof(ChunkBase), CellAlignBytes);
+
+// The amount of space in a nursery chunk available to allocations.
+static constexpr size_t NurseryChunkUsableSize =
+    ChunkSize - NurseryChunkHeaderSize;
+
 struct NurseryChunk : public ChunkBase {
-  char data[Nursery::NurseryChunkUsableSize];
+  alignas(CellAlignBytes) uint8_t data[NurseryChunkUsableSize];

-  static NurseryChunk* fromChunk(gc::TenuredChunk* chunk);
+  static NurseryChunk* fromChunk(TenuredChunk* chunk, ChunkKind kind,
+                                 uint8_t index);

-  explicit NurseryChunk(JSRuntime* runtime)
-      : ChunkBase(runtime, &runtime->gc.storeBuffer()) {}
+  explicit NurseryChunk(JSRuntime* runtime, ChunkKind kind, uint8_t chunkIndex)
+      : ChunkBase(runtime, &runtime->gc.storeBuffer(), kind, chunkIndex) {}

-  void poisonAndInit(JSRuntime* rt, size_t size = ChunkSize);
   void poisonRange(size_t start, size_t end, uint8_t value,
                    MemCheckKind checkKind);
   void poisonAfterEvict(size_t extent = ChunkSize);
@@ -75,22 +83,29 @@ struct NurseryChunk : public ChunkBase {
   uintptr_t start() const { return uintptr_t(&data); }
   uintptr_t end() const { return uintptr_t(this) + ChunkSize; }
 };
-static_assert(sizeof(js::NurseryChunk) == gc::ChunkSize,
-              "Nursery chunk size must match gc::Chunk size.");
+static_assert(sizeof(NurseryChunk) == ChunkSize,
+              "Nursery chunk size must match Chunk size.");
+static_assert(offsetof(NurseryChunk, data) == NurseryChunkHeaderSize);

 class NurseryDecommitTask : public GCParallelTask {
  public:
   explicit NurseryDecommitTask(gc::GCRuntime* gc);
-  bool reserveSpaceForBytes(size_t nbytes);
+  bool reserveSpaceForChunks(size_t nchunks);

   bool isEmpty(const AutoLockHelperThreadState& lock) const;

   void queueChunk(NurseryChunk* chunk, const AutoLockHelperThreadState& lock);
-  void queueRange(size_t newCapacity, NurseryChunk& chunk,
+  void queueRange(size_t newCapacity, NurseryChunk* chunk,
                   const AutoLockHelperThreadState& lock);

  private:
+  struct Region {
+    NurseryChunk* chunk;
+    size_t startOffset;
+  };
+
   using NurseryChunkVector = Vector<NurseryChunk*, 0, SystemAllocPolicy>;
+  using RegionVector = Vector<Region, 2, SystemAllocPolicy>;

   void run(AutoLockHelperThreadState& lock) override;

@@ -98,25 +113,21 @@ class NurseryDecommitTask : public GCParallelTask {
   const NurseryChunkVector& chunksToDecommit() const {
     return chunksToDecommit_.ref();
   }
+  RegionVector& regionsToDecommit() { return regionsToDecommit_.ref(); }
+  const RegionVector& regionsToDecommit() const {
+    return regionsToDecommit_.ref();
+  }

   MainThreadOrGCTaskData<NurseryChunkVector> chunksToDecommit_;
-
-  MainThreadOrGCTaskData<NurseryChunk*> partialChunk;
-  MainThreadOrGCTaskData<size_t> partialCapacity;
+  MainThreadOrGCTaskData<RegionVector> regionsToDecommit_;
 };

 }  // namespace js

-inline void js::NurseryChunk::poisonAndInit(JSRuntime* rt, size_t size) {
-  MOZ_ASSERT(size >= sizeof(ChunkBase));
-  MOZ_ASSERT(size <= ChunkSize);
-  poisonRange(0, size, JS_FRESH_NURSERY_PATTERN, MemCheckKind::MakeUndefined);
-  new (this) NurseryChunk(rt);
-}
-
 inline void js::NurseryChunk::poisonRange(size_t start, size_t end,
                                           uint8_t value,
                                           MemCheckKind checkKind) {
+  MOZ_ASSERT(start >= NurseryChunkHeaderSize);
   MOZ_ASSERT((start % gc::CellAlignBytes) == 0);
   MOZ_ASSERT((end % gc::CellAlignBytes) == 0);
   MOZ_ASSERT(end >= start);
@@ -132,12 +143,12 @@ inline void js::NurseryChunk::poisonRange(size_t start, size_t end,
 }

 inline void js::NurseryChunk::poisonAfterEvict(size_t extent) {
-  poisonRange(sizeof(ChunkBase), extent, JS_SWEPT_NURSERY_PATTERN,
+  poisonRange(NurseryChunkHeaderSize, extent, JS_SWEPT_NURSERY_PATTERN,
               MemCheckKind::MakeNoAccess);
 }

 inline void js::NurseryChunk::markPagesUnusedHard(size_t startOffset) {
-  MOZ_ASSERT(startOffset >= sizeof(ChunkBase));  // Don't touch the header.
+  MOZ_ASSERT(startOffset >= NurseryChunkHeaderSize);  // Don't touch the header.
   MOZ_ASSERT(startOffset >= SystemPageSize());
   MOZ_ASSERT(startOffset <= ChunkSize);
   uintptr_t start = uintptr_t(this) + startOffset;
@@ -146,7 +157,7 @@ inline void js::NurseryChunk::markPagesUnusedHard(size_t startOffset) {
 }

 inline bool js::NurseryChunk::markPagesInUseHard(size_t endOffset) {
-  MOZ_ASSERT(endOffset >= sizeof(ChunkBase));
+  MOZ_ASSERT(endOffset >= NurseryChunkHeaderSize);
   MOZ_ASSERT(endOffset >= SystemPageSize());
   MOZ_ASSERT(endOffset <= ChunkSize);
   uintptr_t start = uintptr_t(this) + SystemPageSize();
@@ -155,23 +166,25 @@ inline bool js::NurseryChunk::markPagesInUseHard(size_t endOffset) {
 }

 // static
-inline js::NurseryChunk* js::NurseryChunk::fromChunk(TenuredChunk* chunk) {
-  return reinterpret_cast<NurseryChunk*>(chunk);
+inline js::NurseryChunk* js::NurseryChunk::fromChunk(TenuredChunk* chunk,
+                                                     ChunkKind kind,
+                                                     uint8_t index) {
+  return new (chunk) NurseryChunk(chunk->runtime, kind, index);
 }

 js::NurseryDecommitTask::NurseryDecommitTask(gc::GCRuntime* gc)
     : GCParallelTask(gc, gcstats::PhaseKind::NONE) {
   // This can occur outside GCs so doesn't have a stats phase.
+  MOZ_ALWAYS_TRUE(regionsToDecommit().reserve(2));
 }

 bool js::NurseryDecommitTask::isEmpty(
     const AutoLockHelperThreadState& lock) const {
-  return chunksToDecommit().empty() && !partialChunk;
+  return chunksToDecommit().empty() && regionsToDecommit().empty();
 }

-bool js::NurseryDecommitTask::reserveSpaceForBytes(size_t nbytes) {
+bool js::NurseryDecommitTask::reserveSpaceForChunks(size_t nchunks) {
   MOZ_ASSERT(isIdle());
-  size_t nchunks = HowMany(nbytes, ChunkSize);
   return chunksToDecommit().reserve(nchunks);
 }

@@ -182,15 +195,14 @@ void js::NurseryDecommitTask::queueChunk(
 }

 void js::NurseryDecommitTask::queueRange(
-    size_t newCapacity, NurseryChunk& newChunk,
+    size_t newCapacity, NurseryChunk* chunk,
     const AutoLockHelperThreadState& lock) {
   MOZ_ASSERT(isIdle(lock));
-  MOZ_ASSERT(!partialChunk);
+  MOZ_ASSERT(regionsToDecommit_.ref().length() < 2);
   MOZ_ASSERT(newCapacity < ChunkSize);
   MOZ_ASSERT(newCapacity % SystemPageSize() == 0);

-  partialChunk = &newChunk;
-  partialCapacity = newCapacity;
+  regionsToDecommit().infallibleAppend(Region{chunk, newCapacity});
 }

 void js::NurseryDecommitTask::run(AutoLockHelperThreadState& lock) {
@@ -204,25 +216,20 @@ void js::NurseryDecommitTask::run(AutoLockHelperThreadState& lock) {
     gc->recycleChunk(tenuredChunk, lock);
   }

-  if (partialChunk) {
-    {
-      AutoUnlockHelperThreadState unlock(lock);
-      partialChunk->markPagesUnusedHard(partialCapacity);
-    }
-    partialChunk = nullptr;
-    partialCapacity = 0;
+  while (!regionsToDecommit().empty()) {
+    Region region = regionsToDecommit().popCopy();
+    AutoUnlockHelperThreadState unlock(lock);
+    region.chunk->markPagesUnusedHard(region.startOffset);
   }
 }

 js::Nursery::Nursery(GCRuntime* gc)
-    : position_(0),
-      currentEnd_(0),
+    : toSpace(ChunkKind::NurseryToSpace),
+      fromSpace(ChunkKind::NurseryFromSpace),
       gc(gc),
-      currentChunk_(0),
-      startChunk_(0),
-      startPosition_(0),
       capacity_(0),
       enableProfiling_(false),
+      semispaceEnabled_(gc::TuningDefaults::SemispaceNurseryEnabled),
       canAllocateStrings_(true),
       canAllocateBigInts_(true),
       reportDeduplications_(false),
@@ -232,6 +239,11 @@ js::Nursery::Nursery(GCRuntime* gc)
       prevPosition_(0),
       hasRecentGrowthData(false),
       smoothedTargetSize(0.0) {
+  // Try to keep fields used by allocation fast path together at the start of
+  // the nursery.
+  static_assert(offsetof(Nursery, toSpace.position_) < TypicalCacheLineSize);
+  static_assert(offsetof(Nursery, toSpace.currentEnd_) < TypicalCacheLineSize);
+
   const char* env = getenv("MOZ_NURSERY_STRINGS");
   if (env && *env) {
     canAllocateStrings_ = (*env == '1');
@@ -316,12 +328,13 @@ bool js::Nursery::init(AutoLockGCBgAlloc& lock) {

 js::Nursery::~Nursery() { disable(); }

 void js::Nursery::enable() {
-  MOZ_ASSERT(isEmpty());
-  MOZ_ASSERT(!gc->isVerifyPreBarriersEnabled());
   if (isEnabled()) {
     return;
   }

+  MOZ_ASSERT(isEmpty());
+  MOZ_ASSERT(!gc->isVerifyPreBarriersEnabled());
+
   {
     AutoLockGCBgAlloc lock(gc);
     if (!initFirstChunk(lock)) {
@@ -344,25 +357,60 @@ void js::Nursery::enable() {

 bool js::Nursery::initFirstChunk(AutoLockGCBgAlloc& lock) {
   MOZ_ASSERT(!isEnabled());
+  MOZ_ASSERT(toSpace.chunks_.length() == 0);
+  MOZ_ASSERT(fromSpace.chunks_.length() == 0);

-  capacity_ = tunables().gcMinNurseryBytes();
+  setCapacity(minSpaceSize());

-  if (!decommitTask->reserveSpaceForBytes(capacity_) ||
-      !allocateNextChunk(0, lock)) {
-    capacity_ = 0;
+  size_t nchunks = toSpace.maxChunkCount_ + fromSpace.maxChunkCount_;
+  if (!decommitTask->reserveSpaceForChunks(nchunks) ||
+      !allocateNextChunk(lock)) {
+    setCapacity(0);
+    MOZ_ASSERT(toSpace.isEmpty());
+    MOZ_ASSERT(fromSpace.isEmpty());
     return false;
   }

-  moveToStartOfChunk(0);
-  setStartToCurrentPosition();
+  toSpace.moveToStartOfChunk(this, 0);
+  toSpace.setStartToCurrentPosition();
+
+  if (semispaceEnabled_) {
+    fromSpace.moveToStartOfChunk(this, 0);
+    fromSpace.setStartToCurrentPosition();
+  }
+
+  MOZ_ASSERT(toSpace.isEmpty());
+  MOZ_ASSERT(fromSpace.isEmpty());
+
   poisonAndInitCurrentChunk();

   // Clear any information about previous collections.
   clearRecentGrowthData();
+
+  tenureThreshold_ = 0;
+
+#ifdef DEBUG
+  toSpace.checkKind(ChunkKind::NurseryToSpace);
+  fromSpace.checkKind(ChunkKind::NurseryFromSpace);
+#endif
+
   return true;
 }

+size_t RequiredChunkCount(size_t nbytes) {
+  return nbytes <= ChunkSize ? 1 : nbytes / ChunkSize;
+}
+
+void js::Nursery::setCapacity(size_t newCapacity) {
+  MOZ_ASSERT(newCapacity == roundSize(newCapacity));
+  capacity_ = newCapacity;
+  size_t count = RequiredChunkCount(newCapacity);
+  toSpace.maxChunkCount_ = count;
+  if (semispaceEnabled_) {
+    fromSpace.maxChunkCount_ = count;
+  }
+}
+
 void js::Nursery::disable() {
   MOZ_ASSERT(isEmpty());
   if (!isEnabled()) {
@@ -371,15 +419,19 @@ void js::Nursery::disable() {

   // Free all chunks.
   decommitTask->join();
-  freeChunksFrom(0);
+  freeChunksFrom(toSpace, 0);
+  freeChunksFrom(fromSpace, 0);
   decommitTask->runFromMainThread();

-  capacity_ = 0;
+  setCapacity(0);

   // We must reset currentEnd_ so that there is no space for anything in the
   // nursery. JIT'd code uses this even if the nursery is disabled.
-  currentEnd_ = 0;
-  position_ = 0;
+  toSpace = Space(ChunkKind::NurseryToSpace);
+  fromSpace = Space(ChunkKind::NurseryFromSpace);
+  MOZ_ASSERT(toSpace.isEmpty());
+  MOZ_ASSERT(fromSpace.isEmpty());
+
   gc->storeBuffer().disable();

   if (gc->wasInitialized()) {
@@ -464,16 +516,59 @@ void js::Nursery::discardCodeAndSetJitFlagsForZone(JS::Zone* zone) {
   }
 }

+void js::Nursery::setSemispaceEnabled(bool enabled) {
+  if (semispaceEnabled() == enabled) {
+    return;
+  }
+
+  bool wasEnabled = isEnabled();
+  if (wasEnabled) {
+    if (!isEmpty()) {
+      gc->minorGC(JS::GCReason::EVICT_NURSERY);
+    }
+    disable();
+  }
+
+  semispaceEnabled_ = enabled;
+
+  if (wasEnabled) {
+    enable();
+  }
+}
+
 bool js::Nursery::isEmpty() const {
+  MOZ_ASSERT(fromSpace.isEmpty());
+
   if (!isEnabled()) {
     return true;
   }

   if (!gc->hasZealMode(ZealMode::GenerationalGC)) {
-    MOZ_ASSERT(startChunk_ == 0);
-    MOZ_ASSERT(startPosition_ == chunk(0).start());
+    MOZ_ASSERT(startChunk() == 0);
+    MOZ_ASSERT(startPosition() == chunk(0).start());
   }
-  return position() == startPosition_;
+
+  return toSpace.isEmpty();
+}
+
+bool js::Nursery::Space::isEmpty() const {
+  return position_ == startPosition_;
+}
+
+static size_t AdjustSizeForSemispace(size_t size, bool semispaceEnabled) {
+  if (!semispaceEnabled) {
+    return size;
+  }
+
+  return Nursery::roundSize(size / 2);
+}
+
+size_t js::Nursery::maxSpaceSize() const {
+  return AdjustSizeForSemispace(tunables().gcMaxNurseryBytes(),
+                                semispaceEnabled_);
+}
+
+size_t js::Nursery::minSpaceSize() const {
+  return AdjustSizeForSemispace(tunables().gcMinNurseryBytes(),
+                                semispaceEnabled_);
 }

 #ifdef JS_GC_ZEAL
@@ -501,9 +596,10 @@ void js::Nursery::enterZealMode() {
                 MemCheckKind::MakeUndefined);
   }

-  capacity_ = RoundUp(tunables().gcMaxNurseryBytes(), ChunkSize);
+  setCapacity(maxSpaceSize());

-  if (!decommitTask->reserveSpaceForBytes(capacity_)) {
+  size_t nchunks = toSpace.maxChunkCount_ + fromSpace.maxChunkCount_;
+  if (!decommitTask->reserveSpaceForChunks(nchunks)) {
     oomUnsafe.crash("Nursery::enterZealMode");
   }

@@ -517,8 +613,14 @@ void js::Nursery::leaveZealMode() {
   MOZ_ASSERT(isEmpty());

-  moveToStartOfChunk(0);
-  setStartToCurrentPosition();
+  toSpace.moveToStartOfChunk(this, 0);
+  toSpace.setStartToCurrentPosition();
+
+  if (semispaceEnabled_) {
+    fromSpace.moveToStartOfChunk(this, 0);
+    fromSpace.setStartToCurrentPosition();
+  }
+
   poisonAndInitCurrentChunk();
 }
 #endif  // JS_GC_ZEAL

@@ -573,7 +675,7 @@ MOZ_NEVER_INLINE JS::GCReason Nursery::handleAllocationFailure() {
 }

 bool Nursery::moveToNextChunk() {
-  unsigned chunkno = currentChunk_ + 1;
+  unsigned chunkno = currentChunk() + 1;
   MOZ_ASSERT(chunkno <= maxChunkCount());
   MOZ_ASSERT(chunkno <= allocatedChunkCount());
   if (chunkno == maxChunkCount()) {
@@ -584,7 +686,7 @@ bool Nursery::moveToNextChunk() {
   TimeStamp start = TimeStamp::Now();
   {
     AutoLockGCBgAlloc lock(gc);
-    if (!allocateNextChunk(chunkno, lock)) {
+    if (!allocateNextChunk(lock)) {
       return false;
     }
   }
@@ -688,16 +790,16 @@ void* js::Nursery::reallocateBuffer(Zone* zone, Cell* cell, void* oldBuffer,
   }

   if (!isInside(oldBuffer)) {
-    MOZ_ASSERT(mallocedBufferBytes >= oldBytes);
+    MOZ_ASSERT(toSpace.mallocedBufferBytes >= oldBytes);

     void* newBuffer =
         zone->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
     if (newBuffer) {
       if (oldBuffer != newBuffer) {
         MOZ_ALWAYS_TRUE(
-            mallocedBuffers.rekeyAs(oldBuffer, newBuffer, newBuffer));
+            toSpace.mallocedBuffers.rekeyAs(oldBuffer, newBuffer, newBuffer));
       }
-      mallocedBufferBytes -= oldBytes;
-      mallocedBufferBytes += newBytes;
+      toSpace.mallocedBufferBytes -= oldBytes;
+      toSpace.mallocedBufferBytes += newBytes;
     }
     return newBuffer;
   }
@@ -723,27 +825,21 @@ void js::Nursery::freeBuffer(void* buffer, size_t nbytes) {

 #ifdef DEBUG
 /* static */
-inline bool Nursery::checkForwardingPointerLocation(void* ptr,
-                                                    bool expectedInside) {
-  if (isInside(ptr) == expectedInside) {
-    return true;
-  }
-
+inline bool Nursery::checkForwardingPointerInsideNursery(void* ptr) {
   // If a zero-capacity elements header lands right at the end of a chunk then
   // elements data will appear to be in the next chunk. If we have a pointer to
   // the very start of a chunk, check the previous chunk.
-  if ((uintptr_t(ptr) & ChunkMask) == 0 &&
-      isInside(reinterpret_cast<uint8_t*>(ptr) - 1) == expectedInside) {
-    return true;
+  if ((uintptr_t(ptr) & ChunkMask) == 0) {
+    return isInside(reinterpret_cast<uint8_t*>(ptr) - 1);
   }

-  return false;
+  return isInside(ptr);
 }
 #endif

 void Nursery::setIndirectForwardingPointer(void* oldData, void* newData) {
-  MOZ_ASSERT(checkForwardingPointerLocation(oldData, true));
-  MOZ_ASSERT(checkForwardingPointerLocation(newData, false));
+  MOZ_ASSERT(checkForwardingPointerInsideNursery(oldData));
+  // |newData| may be either in the nursery or in the malloc heap.

   AutoEnterOOMUnsafeRegion oomUnsafe;
 #ifdef DEBUG
@@ -791,7 +887,7 @@ void js::Nursery::forwardBufferPointer(uintptr_t* pSlotsElems) {
     MOZ_ASSERT(IsWriteableAddress(buffer));
   }

-  MOZ_ASSERT(!isInside(buffer));
+  MOZ_ASSERT_IF(isInside(buffer), !inCollectedRegion(buffer));
   *pSlotsElems = reinterpret_cast<uintptr_t>(buffer);
 }

@@ -1063,7 +1159,7 @@ bool js::Nursery::wantEagerCollection() const {
     return false;
   }

-  if (isEmpty() && capacity() == tunables().gcMinNurseryBytes()) {
+  if (isEmpty() && capacity() == minSpaceSize()) {
     return false;
   }

@@ -1108,7 +1204,7 @@ inline bool js::Nursery::isUnderused() const {
     return false;
   }

-  if (capacity() == tunables().gcMinNurseryBytes()) {
+  if (capacity() == minSpaceSize()) {
     return false;
   }

@@ -1126,8 +1222,8 @@ void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) {
   MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);

   if (minorGCRequested()) {
-    MOZ_ASSERT(position_ == chunk(currentChunk_).end());
-    position_ = prevPosition_;
+    MOZ_ASSERT(position() == chunk(currentChunk()).end());
+    toSpace.position_ = prevPosition_;
     prevPosition_ = 0;
     minorGCTriggerReason_ = JS::GCReason::NO_REASON;
     rt->mainContextFromOwnThread()->clearPendingInterrupt(
@@ -1141,7 +1237,7 @@ void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) {
   // freed after this point.
     gc->storeBuffer().clear();

-    MOZ_ASSERT(!pretenuringNursery.hasAllocatedSites());
+    MOZ_ASSERT_IF(!semispaceEnabled_, !pretenuringNursery.hasAllocatedSites());
   }

   if (!isEnabled()) {
@@ -1162,10 +1258,11 @@
   previousGC.reason = JS::GCReason::NO_REASON;
   previousGC.nurseryUsedBytes = usedSpace();
   previousGC.nurseryCapacity = capacity();
-  previousGC.nurseryCommitted = committed();
-  previousGC.nurseryUsedChunkCount = currentChunk_ + 1;
+  previousGC.nurseryCommitted = totalCommitted();
+  previousGC.nurseryUsedChunkCount = currentChunk() + 1;
   previousGC.tenuredBytes = 0;
   previousGC.tenuredCells = 0;
+  tenuredEverything = true;

   // If it isn't empty, it will call doCollection, and possibly after that
   // isEmpty() will become true, so use another variable to keep track of the
@@ -1177,29 +1274,19 @@ void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) {
     // space does not represent data that can be tenured
     MOZ_ASSERT(result.tenuredBytes <=
                (previousGC.nurseryUsedBytes -
-                (sizeof(ChunkBase) * previousGC.nurseryUsedChunkCount)));
+                (NurseryChunkHeaderSize * previousGC.nurseryUsedChunkCount)));

     previousGC.reason = reason;
     previousGC.tenuredBytes = result.tenuredBytes;
     previousGC.tenuredCells = result.tenuredCells;
-    previousGC.nurseryUsedChunkCount = currentChunk_ + 1;
+    previousGC.nurseryUsedChunkCount = currentChunk() + 1;
   }

   // Resize the nursery.
   maybeResizeNursery(options, reason);

-  // Poison/initialise the first chunk.
-  if (previousGC.nurseryUsedBytes) {
-    // In most cases Nursery::clear() has not poisoned this chunk or marked it
-    // as NoAccess; so we only need to poison the region used during the last
-    // cycle. Also, if the heap was recently expanded we don't want to
-    // re-poison the new memory. In both cases we only need to poison until
-    // previousGC.nurseryUsedBytes.
-    //
-    // In cases where this is not true, like generational zeal mode or subchunk
-    // mode, poisonAndInitCurrentChunk() will ignore its parameter. It will
-    // also clamp the parameter.
-    poisonAndInitCurrentChunk(previousGC.nurseryUsedBytes);
+  if (!semispaceEnabled()) {
+    poisonAndInitCurrentChunk();
   }

   bool validPromotionRate;
@@ -1207,19 +1294,10 @@ void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) {
   startProfile(ProfileKey::Pretenure);
   size_t sitesPretenured = 0;
-  if (!wasEmpty) {
-    sitesPretenured =
-        doPretenuring(rt, reason, validPromotionRate, promotionRate);
-  }
+  sitesPretenured =
+      doPretenuring(rt, reason, validPromotionRate, promotionRate);
   endProfile(ProfileKey::Pretenure);

-  // We ignore gcMaxBytes when allocating for minor collection. However, if we
-  // overflowed, we disable the nursery. The next time we allocate, we'll fail
-  // because bytes >= gcMaxBytes.
-  if (gc->heapSize.bytes() >= tunables().gcMaxBytes()) {
-    disable();
-  }
-
   previousGC.endTime =
       TimeStamp::Now();  // Must happen after maybeResizeNursery.
   endProfile(ProfileKey::Total);

@@ -1268,7 +1346,7 @@ void js::Nursery::sendTelemetry(JS::GCReason reason, TimeDuration totalTime,
     rt->metrics().GC_MINOR_REASON_LONG(uint32_t(reason));
   }
   rt->metrics().GC_MINOR_US(totalTime);
-  rt->metrics().GC_NURSERY_BYTES_2(committed());
+  rt->metrics().GC_NURSERY_BYTES_2(totalCommitted());

   if (!wasEmpty) {
     rt->metrics().GC_PRETENURE_COUNT_2(sitesPretenured);
@@ -1289,14 +1367,30 @@ void js::Nursery::printDeduplicationData(js::StringStats& prev,
   }
 }

-void js::Nursery::freeTrailerBlocks(void) {
+void js::Nursery::freeTrailerBlocks(JS::GCOptions options,
+                                    JS::GCReason reason) {
+  fromSpace.freeTrailerBlocks(mallocedBlockCache_);
+
+  if (options == JS::GCOptions::Shrink || gc::IsOOMReason(reason)) {
+    mallocedBlockCache_.clear();
+    return;
+  }
+
+  // Discard blocks from the cache at 0.05% per megabyte of nursery capacity,
+  // that is, 0.8% of blocks for a 16-megabyte nursery. This allows the cache
+  // to gradually discard unneeded blocks in long running applications.
+  mallocedBlockCache_.preen(0.05 * double(capacity()) / (1024.0 * 1024.0));
+}
+
+void js::Nursery::Space::freeTrailerBlocks(
+    MallocedBlockCache& mallocedBlockCache) {
   // This routine frees those blocks denoted by the set
   //
   //  trailersAdded_ (all of it)
   //    - trailersRemoved_ (entries with index below trailersRemovedUsed_)
   //
   // For each block, places it back on the nursery's small-malloced-block pool
-  // by calling mallocedBlockCache_.free.
+  // by calling mallocedBlockCache.free.

   MOZ_ASSERT(trailersAdded_.length() == trailersRemoved_.length());
   MOZ_ASSERT(trailersRemovedUsed_ <= trailersRemoved_.length());
@@ -1321,7 +1415,7 @@ void js::Nursery::freeTrailerBlocks(void) {
       if (!std::binary_search(trailersRemoved_.begin(),
                               trailersRemoved_.begin() + trailersRemovedUsed_,
                               blockPointer)) {
-        mallocedBlockCache_.free(block);
+        mallocedBlockCache.free(block);
       }
     }
   } else {
@@ -1348,7 +1442,7 @@ void js::Nursery::freeTrailerBlocks(void) {
       const PointerAndUint7 blockAdded = trailersAdded_[iAdded];
       const void* blockRemoved = trailersRemoved_[iRemoved];
       if (blockAdded.pointer() < blockRemoved) {
-        mallocedBlockCache_.free(blockAdded);
+        mallocedBlockCache.free(blockAdded);
         continue;
       }
       // If this doesn't hold
@@ -1362,7 +1456,7 @@ void js::Nursery::freeTrailerBlocks(void) {
     // added set.
     for (/*keep going*/; iAdded < nAdded; iAdded++) {
       const PointerAndUint7 block = trailersAdded_[iAdded];
-      mallocedBlockCache_.free(block);
+      mallocedBlockCache.free(block);
     }
   }

@@ -1371,17 +1465,14 @@ void js::Nursery::freeTrailerBlocks(void) {
   trailersRemoved_.clear();
   trailersRemovedUsed_ = 0;
   trailerBytes_ = 0;
-
-  // Discard blocks from the cache at 0.05% per megabyte of nursery capacity,
-  // that is, 0.8% of blocks for a 16-megabyte nursery. This allows the cache
-  // to gradually discard unneeded blocks in long running applications.
-  mallocedBlockCache_.preen(0.05 * double(capacity()) / (1024.0 * 1024.0));
 }

 size_t Nursery::sizeOfTrailerBlockSets(
     mozilla::MallocSizeOf mallocSizeOf) const {
-  return trailersAdded_.sizeOfExcludingThis(mallocSizeOf) +
-         trailersRemoved_.sizeOfExcludingThis(mallocSizeOf);
+  MOZ_ASSERT(fromSpace.trailersAdded_.empty());
+  MOZ_ASSERT(fromSpace.trailersRemoved_.empty());
+  return toSpace.trailersAdded_.sizeOfExcludingThis(mallocSizeOf) +
+         toSpace.trailersRemoved_.sizeOfExcludingThis(mallocSizeOf);
 }

 js::Nursery::CollectionResult js::Nursery::doCollection(AutoGCSession& session,
@@ -1393,8 +1484,19 @@ js::Nursery::CollectionResult js::Nursery::doCollection(AutoGCSession& session,
   AutoDisableProxyCheck disableStrictProxyChecking;
   mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;

+  // Swap nursery spaces.
+  swapSpaces();
+  MOZ_ASSERT(toSpace.isEmpty());
+  MOZ_ASSERT(toSpace.mallocedBuffers.empty());
+
+  if (semispaceEnabled_) {
+    poisonAndInitCurrentChunk();
+  }
+
+  clearMapAndSetNurseryRanges();
+
   // Move objects pointed to by roots from the nursery to the major heap.
-  TenuringTracer mover(rt, this);
+  tenuredEverything = shouldTenureEverything(reason);
+  TenuringTracer mover(rt, this, tenuredEverything);

   // Trace everything considered as a root by a minor GC.
   traceRoots(session, mover);

@@ -1433,17 +1535,14 @@ js::Nursery::CollectionResult js::Nursery::doCollection(AutoGCSession& session,
   // Sweep.
   startProfile(ProfileKey::FreeMallocedBuffers);
-  gc->queueBuffersForFreeAfterMinorGC(mallocedBuffers);
-  mallocedBufferBytes = 0;
+  gc->queueBuffersForFreeAfterMinorGC(fromSpace.mallocedBuffers);
+  fromSpace.mallocedBufferBytes = 0;
   endProfile(ProfileKey::FreeMallocedBuffers);

   // Give trailer blocks associated with non-tenured Wasm{Struct,Array}Objects
   // back to our `mallocedBlockCache_`.
   startProfile(ProfileKey::FreeTrailerBlocks);
-  freeTrailerBlocks();
-  if (options == JS::GCOptions::Shrink || gc::IsOOMReason(reason)) {
-    mallocedBlockCache_.clear();
-  }
+  freeTrailerBlocks(options, reason);
   endProfile(ProfileKey::FreeTrailerBlocks);

   startProfile(ProfileKey::ClearNursery);
@@ -1466,7 +1565,28 @@ js::Nursery::CollectionResult js::Nursery::doCollection(AutoGCSession& session,
 #endif
   endProfile(ProfileKey::CheckHashTables);

-  return {mover.getTenuredSize(), mover.getTenuredCells()};
+  if (semispaceEnabled_) {
+    // On the next collection, tenure everything before |tenureThreshold_|.
+    tenureThreshold_ = toSpace.offsetFromExclusiveAddress(position());
+  } else {
+    // Swap nursery spaces back because we only use one.
+    swapSpaces();
+    MOZ_ASSERT(toSpace.isEmpty());
+  }
+
+  MOZ_ASSERT(fromSpace.isEmpty());
+
+  if (semispaceEnabled_) {
+    poisonAndInitCurrentChunk();
+  }
+
+  return {mover.getPromotedSize(), mover.getPromotedCells()};
+}
+
+void js::Nursery::swapSpaces() {
+  std::swap(toSpace, fromSpace);
+  toSpace.setKind(ChunkKind::NurseryToSpace);
+  fromSpace.setKind(ChunkKind::NurseryFromSpace);
 }

 void js::Nursery::traceRoots(AutoGCSession& session, TenuringTracer& mover) {
@@ -1490,14 +1610,12 @@ void js::Nursery::traceRoots(AutoGCSession& session, TenuringTracer& mover) {
     MOZ_ASSERT(gc->storeBuffer().isEnabled());
     MOZ_ASSERT(gc->storeBuffer().isEmpty());

-    // Strings in the whole cell buffer must be traced first, in order to mark
-    // tenured dependent strings' bases as non-deduplicatable. The rest of
-    // nursery collection (whole non-string cells, edges, etc.) can happen
-    // later.
     startProfile(ProfileKey::TraceWholeCells);
     sb.traceWholeCells(mover);
     endProfile(ProfileKey::TraceWholeCells);

+    cellsToSweep = sb.releaseCellSweepSet();
+
     startProfile(ProfileKey::TraceValues);
     sb.traceValues(mover);
     endProfile(ProfileKey::TraceValues);
@@ -1523,8 +1641,6 @@ void js::Nursery::traceRoots(AutoGCSession& session, TenuringTracer& mover) {
     endProfile(ProfileKey::MarkRuntime);
   }

-  MOZ_ASSERT(gc->storeBuffer().isEmpty());
-
   startProfile(ProfileKey::MarkDebugger);
   {
     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
@@ -1533,6 +1649,15 @@ void js::Nursery::traceRoots(AutoGCSession& session, TenuringTracer& mover) {
   endProfile(ProfileKey::MarkDebugger);
 }

+bool js::Nursery::shouldTenureEverything(JS::GCReason reason) {
+  if (!semispaceEnabled()) {
+    return true;
+  }
+
+  return reason == JS::GCReason::EVICT_NURSERY ||
+         reason == JS::GCReason::DISABLE_GENERATIONAL_GC;
+}
+
 size_t js::Nursery::doPretenuring(JSRuntime* rt, JS::GCReason reason,
                                   bool validPromotionRate,
                                   double promotionRate) {
@@ -1590,12 +1715,13 @@ bool js::Nursery::registerMallocedBuffer(void* buffer, size_t nbytes) {
   MOZ_ASSERT(buffer);
   MOZ_ASSERT(nbytes > 0);
   MOZ_ASSERT(!isInside(buffer));
-  if (!mallocedBuffers.putNew(buffer)) {
+
+  if (!toSpace.mallocedBuffers.putNew(buffer)) {
     return false;
   }

-  mallocedBufferBytes += nbytes;
-  if (MOZ_UNLIKELY(mallocedBufferBytes > capacity() * 8)) {
+  toSpace.mallocedBufferBytes += nbytes;
+  if (MOZ_UNLIKELY(toSpace.mallocedBufferBytes > capacity() * 8)) {
     requestMinorGC(JS::GCReason::NURSERY_MALLOC_BUFFERS);
   }

@@ -1613,21 +1739,19 @@ bool js::Nursery::registerMallocedBuffer(void* buffer, size_t nbytes) {
 Nursery::WasBufferMoved js::Nursery::maybeMoveRawBufferOnPromotion(
     void** bufferp, gc::Cell* owner, size_t nbytes, MemoryUse use,
     arena_id_t arena) {
-  MOZ_ASSERT(!IsInsideNursery(owner));
-
   void* buffer = *bufferp;
   if (!isInside(buffer)) {
-    // This is a malloced buffer. Remove it from the nursery's list of buffers
-    // so we don't free it and add it to the memory accounting for the zone
+    // This is a malloced buffer. Remove it from the nursery's previous list of
+    // buffers so we don't free it.
     removeMallocedBufferDuringMinorGC(buffer);
-    AddCellMemory(owner, nbytes, use);
+    trackMallocedBufferOnPromotion(buffer, owner, nbytes, use);
     return BufferNotMoved;
   }

   // Copy the nursery-allocated buffer into a new malloc allocation.
   AutoEnterOOMUnsafeRegion oomUnsafe;

-  Zone* zone = owner->asTenured().zone();
+  Zone* zone = owner->zone();
   void* movedBuffer = zone->pod_arena_malloc<uint8_t>(arena, nbytes);
   if (!movedBuffer) {
     oomUnsafe.crash("Nursery::updateBufferOnPromotion");
@@ -1635,38 +1759,111 @@ Nursery::WasBufferMoved js::Nursery::maybeMoveRawBufferOnPromotion(

   memcpy(movedBuffer, buffer, nbytes);

-  AddCellMemory(owner, nbytes, use);
+  trackMallocedBufferOnPromotion(movedBuffer, owner, nbytes, use);

   *bufferp = movedBuffer;
   return BufferMoved;
 }

+void js::Nursery::trackMallocedBufferOnPromotion(void* buffer, gc::Cell* owner,
+                                                 size_t nbytes, MemoryUse use) {
+  if (owner->isTenured()) {
+    // If we tenured the owner then account for the memory.
+    AddCellMemory(owner, nbytes, use);
+    return;
+  }
+
+  // Otherwise add it to the nursery's new buffer list.
+  AutoEnterOOMUnsafeRegion oomUnsafe;
+  if (!registerMallocedBuffer(buffer, nbytes)) {
+    oomUnsafe.crash("Nursery::trackMallocedBufferOnPromotion");
+  }
+}
+
+void js::Nursery::trackTrailerOnPromotion(void* buffer, gc::Cell* owner,
+                                          size_t nbytes, size_t overhead,
+                                          MemoryUse use) {
+  MOZ_ASSERT(!isInside(buffer));
+  unregisterTrailer(buffer);
+
+  if (owner->isTenured()) {
+    // If we tenured the owner then account for the memory.
+    AddCellMemory(owner, nbytes + overhead, use);
+    return;
+  }
+
+  // Otherwise add it to the nursery's new buffer list.
+  PointerAndUint7 blockAndListID(buffer,
+                                 MallocedBlockCache::listIDForSize(nbytes));
+  AutoEnterOOMUnsafeRegion oomUnsafe;
+  if (!registerTrailer(blockAndListID, nbytes)) {
+    oomUnsafe.crash("Nursery::trackTrailerOnPromotion");
+  }
+}
+
 void Nursery::requestMinorGC(JS::GCReason reason) {
+  JS::HeapState heapState = runtime()->heapState();
+#ifdef DEBUG
+  if (heapState == JS::HeapState::Idle ||
+      heapState == JS::HeapState::MinorCollecting) {
+    MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
+  } else if (heapState == JS::HeapState::MajorCollecting) {
+    // The GC runs sweeping tasks that may access the storebuffer in parallel
+    // and these require taking the store buffer lock.
+    MOZ_ASSERT(CurrentThreadIsGCSweeping());
+    runtime()->gc.assertCurrentThreadHasLockedStoreBuffer();
+  } else {
+    MOZ_CRASH("Unexpected heap state");
+  }
+#endif
+
   MOZ_ASSERT(reason != JS::GCReason::NO_REASON);
-  MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
   MOZ_ASSERT(isEnabled());

   if (minorGCRequested()) {
     return;
   }

+  if (heapState == JS::HeapState::MinorCollecting) {
+    // This can happen when we promote a lot of data to the second generation
+    // in a semispace collection. This can trigger a GC due to the amount of
+    // store buffer entries added.
+    return;
+  }
+
   // Set position to end of chunk to block further allocation.
   MOZ_ASSERT(prevPosition_ == 0);
-  prevPosition_ = position_;
-  position_ = chunk(currentChunk_).end();
+  prevPosition_ = position();
+  toSpace.position_ = chunk(currentChunk()).end();

   minorGCTriggerReason_ = reason;
-  runtime()->mainContextFromOwnThread()->requestInterrupt(
+  runtime()->mainContextFromAnyThread()->requestInterrupt(
       InterruptReason::MinorGC);
 }

+size_t SemispaceSizeFactor(bool semispaceEnabled) {
+  return semispaceEnabled ? 2 : 1;
+}
+
+size_t js::Nursery::totalCapacity() const {
+  return capacity() * SemispaceSizeFactor(semispaceEnabled_);
+}
+
+size_t js::Nursery::totalCommitted() const {
+  size_t size = std::min(capacity_, allocatedChunkCount() * gc::ChunkSize);
+  return size * SemispaceSizeFactor(semispaceEnabled_);
+}
+
 size_t Nursery::sizeOfMallocedBuffers(
     mozilla::MallocSizeOf mallocSizeOf) const {
+  MOZ_ASSERT(fromSpace.mallocedBuffers.empty());
+
   size_t total = 0;
-  for (BufferSet::Range r = mallocedBuffers.all(); !r.empty(); r.popFront()) {
+  for (BufferSet::Range r = toSpace.mallocedBuffers.all(); !r.empty();
+       r.popFront()) {
     total += mallocSizeOf(r.front());
   }
-  total += mallocedBuffers.shallowSizeOfExcludingThis(mallocSizeOf);
+  total += toSpace.mallocedBuffers.shallowSizeOfExcludingThis(mallocSizeOf);

   return total;
 }

@@ -1680,121 +1877,171 @@ void js::Nursery::sweep() {
   // Sweep unique IDs first before we sweep any tables that may be keyed based
   // on them.
-  for (Cell* cell : cellsWithUid_) {
+  cellsWithUid_.mutableEraseIf([](Cell*& cell) {
     auto* obj = static_cast<JSObject*>(cell);
     if (!IsForwarded(obj)) {
       gc::RemoveUniqueId(obj);
-    } else {
-      JSObject* dst = Forwarded(obj);
-      gc::TransferUniqueId(dst, obj);
+      return true;
     }
-  }
-  cellsWithUid_.clear();
+
+    JSObject* dst = Forwarded(obj);
+    gc::TransferUniqueId(dst, obj);
+
+    if (!IsInsideNursery(dst)) {
+      return true;
+    }
+
+    cell = dst;
+    return false;
+  });

   for (ZonesIter zone(runtime(), SkipAtoms); !zone.done(); zone.next()) {
     zone->sweepAfterMinorGC(&trc);
   }

   sweepMapAndSetObjects();
+  cellsToSweep.sweep();
+  CellSweepSet empty;
+  std::swap(cellsToSweep, empty);

   runtime()->caches().sweepAfterMinorGC(&trc);
 }

+void gc::CellSweepSet::sweep() {
+  if (head_) {
+    head_->sweepDependentStrings();
+    head_ = nullptr;
+  }
+  if (storage_) {
+    storage_->freeAll();
+  }
+}
+
 void js::Nursery::clear() {
+  fromSpace.clear(this);
+  MOZ_ASSERT(fromSpace.isEmpty());
+}
+
+void js::Nursery::Space::clear(Nursery* nursery) {
+  GCRuntime* gc = nursery->gc;
+
   // Poison the nursery contents so touching a freed object will crash.
   unsigned firstClearChunk;
-  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
-    // Poison all the chunks used in this cycle. The new start chunk is
-    // reposioned in Nursery::collect() but there's no point optimising that in
-    // this case.
+  if (gc->hasZealMode(ZealMode::GenerationalGC) ||
+      nursery->semispaceEnabled_) {
+    // Poison all the chunks used in this cycle.
     firstClearChunk = startChunk_;
   } else {
-    // In normal mode we start at the second chunk, the first one will be used
+    // Poison from the second chunk onwards as the first one will be used
     // in the next cycle and poisoned in Nusery::collect();
     MOZ_ASSERT(startChunk_ == 0);
     firstClearChunk = 1;
   }
   for (unsigned i = firstClearChunk; i < currentChunk_; ++i) {
-    chunk(i).poisonAfterEvict();
+    chunks_[i]->poisonAfterEvict();
   }
   // Clear only the used part of the chunk because that's the part we touched,
   // but only if it's not going to be re-used immediately (>= firstClearChunk).
   if (currentChunk_ >= firstClearChunk) {
-    chunk(currentChunk_)
-        .poisonAfterEvict(position() - chunk(currentChunk_).start());
+    size_t usedBytes = position_ - chunks_[currentChunk_]->start();
+    chunks_[currentChunk_]->poisonAfterEvict(NurseryChunkHeaderSize +
+                                             usedBytes);
   }

   // Reset the start chunk & position if we're not in this zeal mode, or we're
   // in it and close to the end of the nursery.
-  MOZ_ASSERT(maxChunkCount() > 0);
+  MOZ_ASSERT(maxChunkCount_ > 0);
   if (!gc->hasZealMode(ZealMode::GenerationalGC) ||
-      (gc->hasZealMode(ZealMode::GenerationalGC) &&
-       currentChunk_ + 1 == maxChunkCount())) {
-    moveToStartOfChunk(0);
+      currentChunk_ + 1 == maxChunkCount_) {
+    moveToStartOfChunk(nursery, 0);
   }

   // Set current start position for isEmpty checks.
   setStartToCurrentPosition();
 }

-MOZ_ALWAYS_INLINE void js::Nursery::moveToStartOfChunk(unsigned chunkno) {
-  MOZ_ASSERT(chunkno < allocatedChunkCount());
+void js::Nursery::moveToStartOfChunk(unsigned chunkno) {
+  toSpace.moveToStartOfChunk(this, chunkno);
+}
+
+void js::Nursery::Space::moveToStartOfChunk(Nursery* nursery,
+                                            unsigned chunkno) {
+  MOZ_ASSERT(chunkno < chunks_.length());

   currentChunk_ = chunkno;
-  position_ = chunk(chunkno).start();
-  setCurrentEnd();
+  position_ = chunks_[chunkno]->start();
+  setCurrentEnd(nursery);

   MOZ_ASSERT(position_ != 0);
   MOZ_ASSERT(currentEnd_ > position_);  // Check this cannot wrap.
 }

-void js::Nursery::poisonAndInitCurrentChunk(size_t extent) {
-  if (gc->hasZealMode(ZealMode::GenerationalGC) || !isSubChunkMode()) {
-    chunk(currentChunk_).poisonAndInit(runtime());
-  } else {
-    extent = std::min(capacity_, extent);
-    chunk(currentChunk_).poisonAndInit(runtime(), extent);
-  }
+void js::Nursery::poisonAndInitCurrentChunk() {
+  NurseryChunk& chunk = this->chunk(currentChunk());
+  size_t start = position() - uintptr_t(&chunk);
+  size_t end = isSubChunkMode() ? capacity_ : ChunkSize;
+  chunk.poisonRange(start, end, JS_FRESH_NURSERY_PATTERN,
+                    MemCheckKind::MakeUndefined);
+  new (&chunk)
+      NurseryChunk(runtime(), ChunkKind::NurseryToSpace, currentChunk());
 }

-MOZ_ALWAYS_INLINE void js::Nursery::setCurrentEnd() {
-  MOZ_ASSERT_IF(isSubChunkMode(),
-                currentChunk_ == 0 && currentEnd_ <= chunk(0).end());
-  currentEnd_ =
-      uintptr_t(&chunk(currentChunk_)) + std::min(capacity_, ChunkSize);
+void js::Nursery::setCurrentEnd() { toSpace.setCurrentEnd(this); }

-  MOZ_ASSERT_IF(!isSubChunkMode(), currentEnd_ == chunk(currentChunk_).end());
-  MOZ_ASSERT(currentEnd_ != chunk(currentChunk_).start());
+void js::Nursery::Space::setCurrentEnd(Nursery* nursery) {
+  currentEnd_ = uintptr_t(chunks_[currentChunk_]) +
+                std::min(nursery->capacity(), ChunkSize);
 }

-bool js::Nursery::allocateNextChunk(const unsigned chunkno,
-                                    AutoLockGCBgAlloc& lock) {
-  const unsigned priorCount = allocatedChunkCount();
+bool js::Nursery::allocateNextChunk(AutoLockGCBgAlloc& lock) {
+  // Allocate a new nursery chunk. If semispace collection is enabled, we have
+  // to allocate one for both spaces.
+
+  const unsigned priorCount = toSpace.chunks_.length();
   const unsigned newCount = priorCount + 1;

-  MOZ_ASSERT((chunkno == currentChunk_ + 1) ||
-             (chunkno == 0 && allocatedChunkCount() == 0));
-  MOZ_ASSERT(chunkno == allocatedChunkCount());
-  MOZ_ASSERT(chunkno < HowMany(capacity(), ChunkSize));
+  MOZ_ASSERT(newCount <= maxChunkCount());
+  MOZ_ASSERT(fromSpace.chunks_.length() ==
+             (semispaceEnabled_ ? priorCount : 0));
+
+  if (!toSpace.chunks_.reserve(newCount) ||
+      (semispaceEnabled_ && !fromSpace.chunks_.reserve(newCount))) {
+    return false;
+  }

-  if (!chunks_.resize(newCount)) {
+  TenuredChunk* toSpaceChunk = gc->getOrAllocChunk(lock);
+  if (!toSpaceChunk) {
     return false;
   }

-  TenuredChunk* newChunk;
-  newChunk = gc->getOrAllocChunk(lock);
-  if (!newChunk) {
-    chunks_.shrinkTo(priorCount);
+  TenuredChunk* fromSpaceChunk = nullptr;
+  if (semispaceEnabled_ && !(fromSpaceChunk = gc->getOrAllocChunk(lock))) {
+    gc->recycleChunk(toSpaceChunk, lock);
     return false;
   }

-  chunks_[chunkno] = NurseryChunk::fromChunk(newChunk);
+  uint8_t index = toSpace.chunks_.length();
+  NurseryChunk* nurseryChunk =
+      NurseryChunk::fromChunk(toSpaceChunk, ChunkKind::NurseryToSpace, index);
+  toSpace.chunks_.infallibleAppend(nurseryChunk);
+
+  if (semispaceEnabled_) {
+    MOZ_ASSERT(index == fromSpace.chunks_.length());
+    nurseryChunk = NurseryChunk::fromChunk(fromSpaceChunk,
+                                           ChunkKind::NurseryFromSpace, index);
+    fromSpace.chunks_.infallibleAppend(nurseryChunk);
+  }
+
   return true;
 }

-MOZ_ALWAYS_INLINE void js::Nursery::setStartToCurrentPosition() {
+void js::Nursery::setStartToCurrentPosition() {
+  toSpace.setStartToCurrentPosition();
+}
+
+void js::Nursery::Space::setStartToCurrentPosition() {
   startChunk_ = currentChunk_;
-  startPosition_ = position();
+  startPosition_ = position_;
+  MOZ_ASSERT(isEmpty());
 }

 void js::Nursery::maybeResizeNursery(JS::GCOptions options,
@@ -1809,8 +2056,7 @@ void js::Nursery::maybeResizeNursery(JS::GCOptions options,
   decommitTask->join();

   size_t newCapacity = mozilla::Clamp(targetSize(options, reason),
-                                      tunables().gcMinNurseryBytes(),
-                                      tunables().gcMaxNurseryBytes());
+                                      minSpaceSize(), maxSpaceSize());

   MOZ_ASSERT(roundSize(newCapacity) == newCapacity);
   MOZ_ASSERT(newCapacity >= SystemPageSize());

@@ -1862,7 +2108,7 @@ size_t js::Nursery::targetSize(JS::GCOptions options, JS::GCReason reason) {
   TimeStamp now = TimeStamp::Now();

   if (reason == JS::GCReason::PREPARE_FOR_PAGELOAD) {
-    return roundSize(tunables().gcMaxNurseryBytes());
+    return roundSize(maxSpaceSize());
   }

   // If the nursery is completely unused then minimise it.
@@ -1960,38 +2206,58 @@ size_t js::Nursery::roundSize(size_t size) {
 }

 void js::Nursery::growAllocableSpace(size_t newCapacity) {
-  MOZ_ASSERT_IF(!isSubChunkMode(), newCapacity > currentChunk_ * ChunkSize);
-  MOZ_ASSERT(newCapacity <= tunables().gcMaxNurseryBytes());
+  MOZ_ASSERT_IF(!isSubChunkMode(), newCapacity > currentChunk() * ChunkSize);
+  MOZ_ASSERT(newCapacity <= maxSpaceSize());
   MOZ_ASSERT(newCapacity > capacity());

-  if (!decommitTask->reserveSpaceForBytes(newCapacity)) {
+  size_t nchunks =
+      RequiredChunkCount(newCapacity) * SemispaceSizeFactor(semispaceEnabled_);
+  if (!decommitTask->reserveSpaceForChunks(nchunks)) {
     return;
   }

   if (isSubChunkMode()) {
-    MOZ_ASSERT(currentChunk_ == 0);
-
-    // The remainder of the chunk may have been decommitted.
-    if (!chunk(0).markPagesInUseHard(std::min(newCapacity, ChunkSize))) {
-      // The OS won't give us the memory we need, we can't grow.
+    if (!toSpace.commitSubChunkRegion(capacity(), newCapacity) ||
+        (semispaceEnabled_ &&
+         !fromSpace.commitSubChunkRegion(capacity(), newCapacity))) {
      return;
    }
+  }

-    // The capacity has changed and since we were in sub-chunk mode we need to
-    // update the poison values / asan information for the now-valid region of
-    // this chunk.
-    size_t end = std::min(newCapacity, ChunkSize);
-    chunk(0).poisonRange(capacity(), end, JS_FRESH_NURSERY_PATTERN,
-                         MemCheckKind::MakeUndefined);
+  setCapacity(newCapacity);
+
+  toSpace.setCurrentEnd(this);
+  if (semispaceEnabled_) {
+    fromSpace.setCurrentEnd(this);
   }
+}

-  capacity_ = newCapacity;
+bool js::Nursery::Space::commitSubChunkRegion(size_t oldCapacity,
+                                              size_t newCapacity) {
+  MOZ_ASSERT(currentChunk_ == 0);
+  MOZ_ASSERT(oldCapacity < ChunkSize);
+  MOZ_ASSERT(newCapacity > oldCapacity);

-  setCurrentEnd();
+  size_t newChunkEnd = std::min(newCapacity, ChunkSize);
+
+  // The remainder of the chunk may have been decommitted.
+  if (!chunks_[0]->markPagesInUseHard(newChunkEnd)) {
+    // The OS won't give us the memory we need, we can't grow.
+    return false;
+  }
+
+  // The capacity has changed and since we were in sub-chunk mode we need to
+  // update the poison values / asan information for the now-valid region of
+  // this chunk.
+  chunks_[0]->poisonRange(oldCapacity, newChunkEnd, JS_FRESH_NURSERY_PATTERN,
+                          MemCheckKind::MakeUndefined);
+  return true;
 }

-void js::Nursery::freeChunksFrom(const unsigned firstFreeChunk) {
-  MOZ_ASSERT(firstFreeChunk < chunks_.length());
+void js::Nursery::freeChunksFrom(Space& space, const unsigned firstFreeChunk) {
+  if (firstFreeChunk >= space.chunks_.length()) {
+    return;
+  }

   // The loop below may need to skip the first chunk, so we may use this so we
   // can modify it.
@@ -2000,61 +2266,112 @@ void js::Nursery::freeChunksFrom(const unsigned firstFreeChunk) {
   if ((firstChunkToDecommit == 0) && isSubChunkMode()) {
     // Part of the first chunk may be hard-decommitted, un-decommit it so that
     // the GC's normal chunk-handling doesn't segfault.
-    MOZ_ASSERT(currentChunk_ == 0);
-    if (!chunk(0).markPagesInUseHard(ChunkSize)) {
+    MOZ_ASSERT(space.currentChunk_ == 0);
+    if (!space.chunks_[0]->markPagesInUseHard(ChunkSize)) {
       // Free the chunk if we can't allocate its pages.
-      UnmapPages(static_cast<void*>(&chunk(0)), ChunkSize);
+      UnmapPages(space.chunks_[0], ChunkSize);
       firstChunkToDecommit = 1;
     }
   }

   {
     AutoLockHelperThreadState lock;
-    for (size_t i = firstChunkToDecommit; i < chunks_.length(); i++) {
-      decommitTask->queueChunk(chunks_[i], lock);
+    for (size_t i = firstChunkToDecommit; i < space.chunks_.length(); i++) {
+      decommitTask->queueChunk(space.chunks_[i], lock);
     }
   }

-  chunks_.shrinkTo(firstFreeChunk);
+  space.chunks_.shrinkTo(firstFreeChunk);
 }

 void js::Nursery::shrinkAllocableSpace(size_t newCapacity) {
-#ifdef JS_GC_ZEAL
-  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
-    return;
-  }
-#endif
+  MOZ_ASSERT(!gc->hasZealMode(ZealMode::GenerationalGC));
+  MOZ_ASSERT(newCapacity < capacity_);

-  // Don't shrink the nursery to zero (use Nursery::disable() instead)
-  // This can't happen due to the rounding-down performed above because of the
-  // clamping in maybeResizeNursery().
-  MOZ_ASSERT(newCapacity != 0);
-  // Don't attempt to shrink it to the same size.
-  if (newCapacity == capacity_) {
+  if (semispaceEnabled() && usedSpace() >= newCapacity) {
+    // Can't shrink below what we've already used.
     return;
   }
-  MOZ_ASSERT(newCapacity < capacity_);

   unsigned newCount = HowMany(newCapacity, ChunkSize);
   if (newCount < allocatedChunkCount()) {
-    freeChunksFrom(newCount);
+    freeChunksFrom(toSpace, newCount);
+    freeChunksFrom(fromSpace, newCount);
   }

   size_t oldCapacity = capacity_;
-  capacity_ = newCapacity;
+  setCapacity(newCapacity);

-  setCurrentEnd();
+  toSpace.setCurrentEnd(this);
+  if (semispaceEnabled_) {
+    fromSpace.setCurrentEnd(this);
+  }

   if (isSubChunkMode()) {
-    MOZ_ASSERT(currentChunk_ == 0);
-    size_t end = std::min(oldCapacity, ChunkSize);
-    chunk(0).poisonRange(newCapacity, end, JS_SWEPT_NURSERY_PATTERN,
-                         MemCheckKind::MakeNoAccess);
+    toSpace.decommitSubChunkRegion(this, oldCapacity, newCapacity);
+    if (semispaceEnabled_) {
+      fromSpace.decommitSubChunkRegion(this, oldCapacity, newCapacity);
+    }
+  }
+}

-    AutoLockHelperThreadState lock;
-    decommitTask->queueRange(capacity_, chunk(0), lock);
+void js::Nursery::Space::decommitSubChunkRegion(Nursery* nursery,
+                                                size_t oldCapacity,
+                                                size_t newCapacity) {
+  MOZ_ASSERT(currentChunk_ == 0);
+  MOZ_ASSERT(newCapacity < ChunkSize);
+  MOZ_ASSERT(newCapacity < oldCapacity);
+
+  size_t oldChunkEnd = std::min(oldCapacity, ChunkSize);
+  chunks_[0]->poisonRange(newCapacity, oldChunkEnd, JS_SWEPT_NURSERY_PATTERN,
+                          MemCheckKind::MakeNoAccess);
+
+  AutoLockHelperThreadState lock;
+  nursery->decommitTask->queueRange(newCapacity, chunks_[0], lock);
+}
+
+js::Nursery::Space::Space(gc::ChunkKind kind) : kind(kind) {
+  MOZ_ASSERT(kind == ChunkKind::NurseryFromSpace ||
+             kind == ChunkKind::NurseryToSpace);
+}
+
+void js::Nursery::Space::setKind(ChunkKind newKind) {
+#ifdef DEBUG
+  MOZ_ASSERT(newKind == ChunkKind::NurseryFromSpace ||
+             newKind == ChunkKind::NurseryToSpace);
+  checkKind(kind);
+#endif
+
+  kind = newKind;
+  for (NurseryChunk* chunk : chunks_) {
+    chunk->kind = newKind;
+  }
+
+#ifdef DEBUG
+  checkKind(newKind);
+#endif
+}
+
+#ifdef DEBUG
+void js::Nursery::Space::checkKind(ChunkKind expected) const {
+  MOZ_ASSERT(kind == expected);
+  for (NurseryChunk* chunk : chunks_) {
+    MOZ_ASSERT(chunk->getKind() == expected);
+  }
+}
+#endif
+
+#ifdef DEBUG
+size_t js::Nursery::Space::findChunkIndex(uintptr_t chunkAddr) const {
+  for (size_t i = 0; i < chunks_.length(); i++) {
+    if (uintptr_t(chunks_[i]) == chunkAddr) {
+      return i;
+    }
   }
+
+  MOZ_CRASH("Nursery chunk not found");
 }
+#endif

 gcstats::Statistics& js::Nursery::stats() const { return gc->stats(); }

@@ -2067,18 +2384,55 @@ bool js::Nursery::isSubChunkMode() const {
   return capacity() <= NurseryChunkUsableSize;
 }

+void js::Nursery::clearMapAndSetNurseryRanges() {
+  // Clears the lists of nursery ranges used by map and set iterators. These
+  // lists are cleared at the start of minor GC and rebuilt when iterators are
+  // promoted during minor GC.
+  for (auto* map : mapsWithNurseryMemory_) {
+    map->clearNurseryRangesBeforeMinorGC();
+  }
+  for (auto* set : setsWithNurseryMemory_) {
+    set->clearNurseryRangesBeforeMinorGC();
+  }
+}
+
 void js::Nursery::sweepMapAndSetObjects() {
+  // This processes all Map and Set objects that are known to have associated
+  // nursery memory (either they are nursery allocated themselves or they have
+  // iterator objects that are nursery allocated).
+  //
+  // These objects may die and be finalized or if not their internal state and
+  // memory tracking are updated.
+  //
+  // Finally the lists themselves are rebuilt so as to remove objects that are
+  // no longer associated with nursery memory (either because they died or
+  // because the nursery object was promoted to the tenured heap).
+
   auto* gcx = runtime()->gcContext();

-  for (auto* mapobj : mapsWithNurseryMemory_) {
-    MapObject::sweepAfterMinorGC(gcx, mapobj);
+  AutoEnterOOMUnsafeRegion oomUnsafe;
+
+  MapObjectVector maps;
+  std::swap(mapsWithNurseryMemory_, maps);
+  for (auto* mapobj : maps) {
+    mapobj = MapObject::sweepAfterMinorGC(gcx, mapobj);
+    if (mapobj) {
+      if (!mapsWithNurseryMemory_.append(mapobj)) {
+        oomUnsafe.crash("sweepAfterMinorGC");
+      }
+    }
   }
-  mapsWithNurseryMemory_.clearAndFree();

-  for (auto* setobj : setsWithNurseryMemory_) {
-    SetObject::sweepAfterMinorGC(gcx, setobj);
+  SetObjectVector sets;
+  std::swap(setsWithNurseryMemory_, sets);
+  for (auto* setobj : sets) {
+    setobj = SetObject::sweepAfterMinorGC(gcx, setobj);
+    if (setobj) {
+      if (!setsWithNurseryMemory_.append(setobj)) {
+        oomUnsafe.crash("sweepAfterMinorGC");
+      }
+    }
   }
-  setsWithNurseryMemory_.clearAndFree();
 }

 void js::Nursery::joinDecommitTask() { decommitTask->join(); }
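The sketches below restate a few of the mechanisms this diff introduces as small, self-contained C++ programs. None of them use the real SpiderMonkey types or APIs; every name in them is a simplified stand-in. The first makes the new chunk-header layout concrete: the header size is rounded up to the cell alignment and the allocatable area is asserted to start exactly at that offset, mirroring the static_asserts the patch adds. (offsetof on a derived type is only conditionally supported by the standard; the real code relies on it in the same way.)

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-ins for gc::ChunkBase, gc::ChunkSize and gc::CellAlignBytes; the real
// values and fields differ, only the layout arithmetic is the point here.
constexpr size_t ChunkSize = 256 * 1024;
constexpr size_t CellAlignBytes = 8;

struct ChunkBase {
  void* runtime;
  void* storeBuffer;
  uint8_t kind;
  uint8_t chunkIndex;
};

constexpr size_t RoundUp(size_t value, size_t align) {
  return (value + align - 1) / align * align;
}

// Header size rounded up to the cell alignment, as in the patch.
constexpr size_t NurseryChunkHeaderSize =
    RoundUp(sizeof(ChunkBase), CellAlignBytes);
constexpr size_t NurseryChunkUsableSize = ChunkSize - NurseryChunkHeaderSize;

struct NurseryChunk : ChunkBase {
  alignas(CellAlignBytes) uint8_t data[NurseryChunkUsableSize];
};

// The invariants the patch asserts: the allocatable area starts exactly at
// the rounded header size and the whole struct still fills one chunk.
static_assert(sizeof(NurseryChunk) == ChunkSize, "must fill one chunk");
static_assert(offsetof(NurseryChunk, data) == NurseryChunkHeaderSize,
              "data must start right after the rounded header");

int main() {
  printf("header %zu bytes, usable %zu bytes\n", NurseryChunkHeaderSize,
         NurseryChunkUsableSize);
}
```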
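The fromChunk() hunk replaces a reinterpret_cast with placement new, so adopting a recycled chunk always runs a constructor and can never leave the new kind and index fields stale. A minimal sketch of that idiom, with a hypothetical Header type standing in for NurseryChunk:

```cpp
#include <cstdint>
#include <new>

// ChunkKind and Header are illustrative; the point is the switch from
// reinterpret_cast to placement new, which constructs a fresh header over
// recycled chunk memory instead of just reinterpreting it.
enum class ChunkKind : uint8_t { NurseryToSpace, NurseryFromSpace };

struct Header {
  ChunkKind kind;
  uint8_t index;
  Header(ChunkKind kind, uint8_t index) : kind(kind), index(index) {}
};

Header* adoptChunk(void* rawChunk, ChunkKind kind, uint8_t index) {
  return new (rawChunk) Header(kind, index);  // reuse storage, run ctor
}

int main() {
  alignas(Header) unsigned char storage[sizeof(Header)];
  Header* h = adoptChunk(storage, ChunkKind::NurseryFromSpace, 3);
  return h->index == 3 ? 0 : 1;
}
```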
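The reworked NurseryDecommitTask::run() drains a queue of regions, dropping the helper-thread lock around each slow page-decommit call. A sketch of the same pattern with standard library types; Region, helperLock and decommitPages are hypothetical stand-ins for the GC's helper-thread machinery:

```cpp
#include <cstddef>
#include <mutex>
#include <vector>

struct Region {
  void* chunk;
  std::size_t startOffset;
};

std::mutex helperLock;
std::vector<Region> regionsToDecommit;

void decommitPages(const Region&) { /* slow madvise/VirtualFree-style call */ }

void run() {
  std::unique_lock<std::mutex> lock(helperLock);
  while (!regionsToDecommit.empty()) {
    Region region = regionsToDecommit.back();  // like Vector::popCopy()
    regionsToDecommit.pop_back();
    lock.unlock();  // never hold the helper lock across the OS call
    decommitPages(region);
    lock.lock();  // retake before checking the queue again
  }
}

int main() {
  regionsToDecommit.push_back(Region{nullptr, 4096});
  run();
}
```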
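The sizing helpers (AdjustSizeForSemispace, RequiredChunkCount, SemispaceSizeFactor) encode one idea: with semispace collection enabled, each space gets half the configured nursery size, and reported totals scale back up by a factor of two. The arithmetic in isolation, with a made-up ChunkSize:

```cpp
#include <cstddef>
#include <cstdio>

constexpr std::size_t ChunkSize = 256 * 1024;

std::size_t adjustSizeForSemispace(std::size_t size, bool semispace) {
  return semispace ? size / 2 : size;
}

// Sub-chunk capacities still occupy one whole chunk.
std::size_t requiredChunkCount(std::size_t nbytes) {
  return nbytes <= ChunkSize ? 1 : nbytes / ChunkSize;
}

std::size_t semispaceSizeFactor(bool semispace) { return semispace ? 2 : 1; }

int main() {
  std::size_t maxNurseryBytes = 16 * 1024 * 1024;
  for (bool semispace : {false, true}) {
    std::size_t space = adjustSizeForSemispace(maxNurseryBytes, semispace);
    printf("semispace=%d: per-space %zu bytes (%zu chunks), total %zu\n",
           semispace, space, requiredChunkCount(space),
           space * semispaceSizeFactor(semispace));
  }
}
```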
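The preen call that moved into Nursery::freeTrailerBlocks() passes a percentage computed from the nursery capacity. Assuming, as the in-diff comment suggests, that the argument is a percentage of cached blocks to discard, the arithmetic works out like this:

```cpp
#include <cstdio>

// 0.05 (percent per megabyte) times capacity in megabytes, so a 16 MB
// nursery discards 0.8% of cached blocks per minor GC.
double percentOfBlocksToDiscard(double capacityBytes) {
  return 0.05 * capacityBytes / (1024.0 * 1024.0);
}

int main() {
  printf("16 MB nursery -> %.2f%% of cached blocks discarded\n",
         percentOfBlocksToDiscard(16.0 * 1024.0 * 1024.0));
}
```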
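swapSpaces() is the heart of the semispace scheme: the two spaces trade roles wholesale, then every chunk is retagged so code that classifies a pointer by its chunk header still sees the correct kind. A sketch with simplified stand-in types:

```cpp
#include <utility>
#include <vector>

enum class ChunkKind { NurseryToSpace, NurseryFromSpace };

struct Chunk {
  ChunkKind kind;
};

struct Space {
  ChunkKind kind;
  std::vector<Chunk*> chunks;

  void setKind(ChunkKind newKind) {
    kind = newKind;
    for (Chunk* chunk : chunks) {
      chunk->kind = newKind;  // keep per-chunk tags in sync with the space
    }
  }
};

void swapSpaces(Space& toSpace, Space& fromSpace) {
  std::swap(toSpace, fromSpace);
  toSpace.setKind(ChunkKind::NurseryToSpace);
  fromSpace.setKind(ChunkKind::NurseryFromSpace);
}

int main() {
  Chunk c{ChunkKind::NurseryToSpace};
  Space to{ChunkKind::NurseryToSpace, {&c}};
  Space from{ChunkKind::NurseryFromSpace, {}};
  swapSpaces(to, from);
  return from.chunks[0]->kind == ChunkKind::NurseryFromSpace ? 0 : 1;
}
```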
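The registerMallocedBuffer() hunk moves the external-buffer accounting into the to-space and keeps the trigger that requests a minor GC once those bytes pass eight times the nursery capacity. A sketch of that accounting; the 8x factor is from the patch, while the set type and requestMinorGC are stand-ins (the real code uses a fallible hash set, so a failed insert means OOM rather than a duplicate):

```cpp
#include <cstddef>
#include <unordered_set>

struct Space {
  std::unordered_set<void*> mallocedBuffers;
  std::size_t mallocedBufferBytes = 0;
};

void requestMinorGC() { /* would record the reason and raise an interrupt */ }

bool registerMallocedBuffer(Space& toSpace, std::size_t capacity, void* buffer,
                            std::size_t nbytes) {
  if (!toSpace.mallocedBuffers.insert(buffer).second) {
    return false;
  }
  toSpace.mallocedBufferBytes += nbytes;
  if (toSpace.mallocedBufferBytes > capacity * 8) {
    requestMinorGC();  // too much external memory held alive by the nursery
  }
  return true;
}

int main() {
  Space space;
  int dummy;
  return registerMallocedBuffer(space, 1024, &dummy, 16) ? 0 : 1;
}
```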
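The cellsWithUid_ sweep switches to mutableEraseIf because an entry can now survive a minor GC: if the cell was promoted to another nursery location the entry must be updated in place rather than dropped. The same filter-and-retarget compaction with plain standard containers; Cell and its fields are hypothetical stand-ins for the forwarding state:

```cpp
#include <cstddef>
#include <vector>

struct Cell {
  Cell* forwarded = nullptr;  // null: the cell died in this minor GC
  bool inNursery = true;
};

void sweepCellsWithUid(std::vector<Cell*>& cells) {
  std::size_t out = 0;
  for (Cell* cell : cells) {
    if (!cell->forwarded) {
      continue;  // died: drop the entry (the real code also frees the ID)
    }
    Cell* dst = cell->forwarded;
    if (!dst->inNursery) {
      continue;  // promoted to the tenured heap: drop the entry
    }
    cells[out++] = dst;  // still in the nursery: keep, retargeted
  }
  cells.resize(out);
}

int main() {
  Cell dead, moved, target;
  moved.forwarded = &target;
  std::vector<Cell*> cells{&dead, &moved};
  sweepCellsWithUid(cells);
  return cells.size() == 1 && cells[0] == &target ? 0 : 1;
}
```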
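allocateNextChunk() now has to grow both spaces in lockstep: reserve list capacity up front, allocate a chunk per space, and recycle the first chunk if the second allocation fails. A sketch of that all-or-nothing shape; Chunk and the alloc/recycle helpers are stand-ins, and std::vector::reserve throws on failure where mozilla::Vector returns false:

```cpp
#include <cstddef>
#include <new>
#include <vector>

struct Chunk {};

Chunk* allocateRawChunk() { return new (std::nothrow) Chunk; }
void recycleRawChunk(Chunk* chunk) { delete chunk; }

bool allocateNextChunk(std::vector<Chunk*>& toSpace,
                       std::vector<Chunk*>& fromSpace, bool semispace) {
  std::size_t newCount = toSpace.size() + 1;
  toSpace.reserve(newCount);
  if (semispace) {
    fromSpace.reserve(newCount);
  }

  Chunk* toChunk = allocateRawChunk();
  if (!toChunk) {
    return false;
  }

  Chunk* fromChunk = nullptr;
  if (semispace && !(fromChunk = allocateRawChunk())) {
    recycleRawChunk(toChunk);  // roll back so the spaces stay in step
    return false;
  }

  // With capacity reserved, these appends cannot fail (infallibleAppend).
  toSpace.push_back(toChunk);
  if (semispace) {
    fromSpace.push_back(fromChunk);
  }
  return true;
}

int main() {
  std::vector<Chunk*> to, from;
  return allocateNextChunk(to, from, true) ? 0 : 1;
}
```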
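Finally, the rewritten sweepMapAndSetObjects() uses a swap-and-rebuild pattern: move the old list aside, sweep each entry, and append only survivors, so the list ends up holding exactly the objects still tied to nursery memory. A sketch with stand-in types; the real sweep returns the possibly relocated object, or null if it died:

```cpp
#include <utility>
#include <vector>

struct MapObject {
  bool dead = false;
};

MapObject* sweepAfterMinorGC(MapObject* obj) {
  return obj->dead ? nullptr : obj;
}

void sweepMaps(std::vector<MapObject*>& mapsWithNurseryMemory) {
  std::vector<MapObject*> maps;
  std::swap(mapsWithNurseryMemory, maps);  // old list moved aside
  for (MapObject* obj : maps) {
    if (MapObject* live = sweepAfterMinorGC(obj)) {
      mapsWithNurseryMemory.push_back(live);  // rebuild with survivors only
    }
  }
}

int main() {
  MapObject alive, dead{true};
  std::vector<MapObject*> list{&alive, &dead};
  sweepMaps(list);
  return list.size() == 1 ? 0 : 1;
}
```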