From 40a355a42d4a9444dc753c04c6608dade2f06a23 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Fri, 19 Apr 2024 03:13:27 +0200 Subject: Adding upstream version 125.0.1. Signed-off-by: Daniel Baumann --- js/src/gc/AllocKind.h | 5 ++- js/src/gc/GC.cpp | 80 +++++++++++++++++++++++++++++++++++----- js/src/gc/GC.h | 12 +++--- js/src/gc/GCAPI.cpp | 10 ++++- js/src/gc/GCEnum.h | 1 + js/src/gc/GCMarker.h | 7 ++-- js/src/gc/GCRuntime.h | 13 ++++++- js/src/gc/Marking.cpp | 32 +++++----------- js/src/gc/MaybeRooted.h | 4 +- js/src/gc/Nursery.cpp | 52 ++++++++++++-------------- js/src/gc/Nursery.h | 16 ++++---- js/src/gc/Scheduling.h | 31 +++++++++------- js/src/gc/StableCellHasher-inl.h | 2 +- js/src/gc/Statistics.cpp | 6 +-- js/src/gc/Statistics.h | 18 ++++----- js/src/gc/Sweeping.cpp | 8 ++++ js/src/gc/Tenuring.cpp | 77 ++++++++++++++++++++++++++++---------- js/src/gc/Tenuring.h | 4 ++ js/src/gc/Zone.cpp | 12 +++--- js/src/gc/Zone.h | 16 ++++---- 20 files changed, 260 insertions(+), 146 deletions(-) (limited to 'js/src/gc') diff --git a/js/src/gc/AllocKind.h b/js/src/gc/AllocKind.h index cb3d063f89..f73352e557 100644 --- a/js/src/gc/AllocKind.h +++ b/js/src/gc/AllocKind.h @@ -197,13 +197,14 @@ constexpr auto SomeAllocKinds(AllocKind first = AllocKind::FIRST, // with each index corresponding to a particular alloc kind. template using AllAllocKindArray = - mozilla::EnumeratedArray; + mozilla::EnumeratedArray; // ObjectAllocKindArray gives an enumerated array of ValueTypes, // with each index corresponding to a particular object alloc kind. template using ObjectAllocKindArray = - mozilla::EnumeratedArray; + mozilla::EnumeratedArray; /* * Map from C++ type to alloc kind for non-object types. JSObject does not have diff --git a/js/src/gc/GC.cpp b/js/src/gc/GC.cpp index 7ec63a571d..c01dfe3660 100644 --- a/js/src/gc/GC.cpp +++ b/js/src/gc/GC.cpp @@ -930,6 +930,8 @@ void GCRuntime::finish() { } #endif + releaseMarkingThreads(); + #ifdef JS_GC_ZEAL // Free memory associated with GC verification. finishVerifier(); @@ -1064,9 +1066,8 @@ bool GCRuntime::setParameter(JSGCParamKey key, uint32_t value, compactingEnabled = value != 0; break; case JSGC_PARALLEL_MARKING_ENABLED: - // Not supported on workers. - parallelMarkingEnabled = rt->isMainRuntime() && value != 0; - return initOrDisableParallelMarking(); + setParallelMarkingEnabled(value != 0); + break; case JSGC_INCREMENTAL_WEAKMAP_ENABLED: for (auto& marker : markers) { marker->incrementalWeakMapMarkingEnabled = value != 0; @@ -1151,8 +1152,7 @@ void GCRuntime::resetParameter(JSGCParamKey key, AutoLockGC& lock) { compactingEnabled = TuningDefaults::CompactingEnabled; break; case JSGC_PARALLEL_MARKING_ENABLED: - parallelMarkingEnabled = TuningDefaults::ParallelMarkingEnabled; - initOrDisableParallelMarking(); + setParallelMarkingEnabled(TuningDefaults::ParallelMarkingEnabled); break; case JSGC_INCREMENTAL_WEAKMAP_ENABLED: for (auto& marker : markers) { @@ -1350,16 +1350,56 @@ void GCRuntime::assertNoMarkingWork() const { } #endif +bool GCRuntime::setParallelMarkingEnabled(bool enabled) { + if (enabled == parallelMarkingEnabled) { + return true; + } + + parallelMarkingEnabled = enabled; + return initOrDisableParallelMarking(); +} + bool GCRuntime::initOrDisableParallelMarking() { - // Attempt to initialize parallel marking state or disable it on failure. + // Attempt to initialize parallel marking state or disable it on failure. This + // is called when parallel marking is enabled or disabled. 
MOZ_ASSERT(markers.length() != 0); - if (!updateMarkersVector()) { - parallelMarkingEnabled = false; + if (updateMarkersVector()) { + return true; + } + + // Failed to initialize parallel marking so disable it instead. + MOZ_ASSERT(parallelMarkingEnabled); + parallelMarkingEnabled = false; + MOZ_ALWAYS_TRUE(updateMarkersVector()); + return false; +} + +void GCRuntime::releaseMarkingThreads() { + MOZ_ALWAYS_TRUE(reserveMarkingThreads(0)); +} + +bool GCRuntime::reserveMarkingThreads(size_t newCount) { + if (reservedMarkingThreads == newCount) { + return true; + } + + // Update the helper thread system's global count by subtracting this + // runtime's current contribution |reservedMarkingThreads| and adding the new + // contribution |newCount|. + + AutoLockHelperThreadState lock; + auto& globalCount = HelperThreadState().gcParallelMarkingThreads; + MOZ_ASSERT(globalCount >= reservedMarkingThreads); + size_t newGlobalCount = globalCount - reservedMarkingThreads + newCount; + if (newGlobalCount > HelperThreadState().threadCount) { + // Not enough total threads. return false; } + globalCount = newGlobalCount; + reservedMarkingThreads = newCount; return true; } @@ -1378,6 +1418,16 @@ bool GCRuntime::updateMarkersVector() { // concurrently, otherwise one thread can deadlock waiting on another. size_t targetCount = std::min(markingWorkerCount(), getMaxParallelThreads()); + if (rt->isMainRuntime()) { + // For the main runtime, reserve helper threads as long as parallel marking + // is enabled. Worker runtimes may not mark in parallel if there are + // insufficient threads available at the time. + size_t threadsToReserve = targetCount > 1 ? targetCount : 0; + if (!reserveMarkingThreads(threadsToReserve)) { + return false; + } + } + if (markers.length() > targetCount) { return markers.resize(targetCount); } @@ -2870,7 +2920,7 @@ void GCRuntime::beginMarkPhase(AutoGCSession& session) { stats().measureInitialHeapSize(); useParallelMarking = SingleThreadedMarking; - if (canMarkInParallel() && initParallelMarkers()) { + if (canMarkInParallel() && initParallelMarking()) { useParallelMarking = AllowParallelMarking; } @@ -2989,9 +3039,19 @@ inline bool GCRuntime::canMarkInParallel() const { tunables.parallelMarkingThresholdBytes(); } -bool GCRuntime::initParallelMarkers() { +bool GCRuntime::initParallelMarking() { + // This is called at the start of collection. + MOZ_ASSERT(canMarkInParallel()); + // Reserve/release helper threads for worker runtimes. These are released at + // the end of sweeping. If there are not enough helper threads because + // other runtimes are marking in parallel then parallel marking will not be + // used. + if (!rt->isMainRuntime() && !reserveMarkingThreads(markers.length())) { + return false; + } + // Allocate stack for parallel markers. The first marker always has stack // allocated. Other markers have their stack freed in // GCRuntime::finishCollection. 
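Note on the reservation scheme above: reserveMarkingThreads() swaps this runtime's old contribution out of the helper thread system's global count and swaps the new one in, refusing the change if the total would exceed the number of helper threads. Below is a minimal standalone sketch of that accounting; ThreadPool and RuntimeModel are hypothetical stand-ins for HelperThreadState() and GCRuntime (the real code also holds AutoLockHelperThreadState while updating the count), and only the arithmetic mirrors the patch.

    // Toy model of GCRuntime::reserveMarkingThreads' accounting; the types
    // here are illustrative, not SpiderMonkey's.
    #include <cassert>
    #include <cstddef>

    struct ThreadPool {
      size_t threadCount = 8;              // total helper threads available
      size_t gcParallelMarkingThreads = 0; // sum of all runtimes' reservations
    };

    struct RuntimeModel {
      size_t reservedMarkingThreads = 0;   // this runtime's contribution

      bool reserveMarkingThreads(ThreadPool& pool, size_t newCount) {
        if (reservedMarkingThreads == newCount) {
          return true;  // no change needed
        }
        // Subtract the old contribution and add the new one.
        assert(pool.gcParallelMarkingThreads >= reservedMarkingThreads);
        size_t newGlobal =
            pool.gcParallelMarkingThreads - reservedMarkingThreads + newCount;
        if (newGlobal > pool.threadCount) {
          return false;  // not enough total threads; leave state unchanged
        }
        pool.gcParallelMarkingThreads = newGlobal;
        reservedMarkingThreads = newCount;
        return true;
      }
    };

    int main() {
      ThreadPool pool;
      RuntimeModel mainRt, workerRt;
      assert(mainRt.reserveMarkingThreads(pool, 4));    // main runtime keeps 4
      assert(workerRt.reserveMarkingThreads(pool, 4));  // worker takes the rest
      assert(!workerRt.reserveMarkingThreads(pool, 5)); // 4 + 5 > 8: refused
      assert(workerRt.reserveMarkingThreads(pool, 0));  // released after sweeping
      return 0;
    }

This also shows why a worker runtime can silently fall back to single-threaded marking: if another runtime already holds the threads, initParallelMarking() simply fails the reservation.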
diff --git a/js/src/gc/GC.h b/js/src/gc/GC.h index 3b7dec3201..4e4634d804 100644 --- a/js/src/gc/GC.h +++ b/js/src/gc/GC.h @@ -69,12 +69,12 @@ class TenuredChunk; _("parallelMarkingEnabled", JSGC_PARALLEL_MARKING_ENABLED, true) \ _("parallelMarkingThresholdMB", JSGC_PARALLEL_MARKING_THRESHOLD_MB, true) \ _("minLastDitchGCPeriod", JSGC_MIN_LAST_DITCH_GC_PERIOD, true) \ - _("nurseryFreeThresholdForIdleCollection", \ - JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION, true) \ - _("nurseryFreeThresholdForIdleCollectionPercent", \ - JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT, true) \ - _("nurseryTimeoutForIdleCollectionMS", \ - JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS, true) \ + _("nurseryEagerCollectionThresholdKB", \ + JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_KB, true) \ + _("nurseryEagerCollectionThresholdPercent", \ + JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_PERCENT, true) \ + _("nurseryEagerCollectionTimeoutMS", \ + JSGC_NURSERY_EAGER_COLLECTION_TIMEOUT_MS, true) \ _("zoneAllocDelayKB", JSGC_ZONE_ALLOC_DELAY_KB, true) \ _("mallocThresholdBase", JSGC_MALLOC_THRESHOLD_BASE, true) \ _("urgentThreshold", JSGC_URGENT_THRESHOLD_MB, true) \ diff --git a/js/src/gc/GCAPI.cpp b/js/src/gc/GCAPI.cpp index ab6c3c297a..293bfce80d 100644 --- a/js/src/gc/GCAPI.cpp +++ b/js/src/gc/GCAPI.cpp @@ -817,11 +817,17 @@ JS_PUBLIC_API void js::gc::SetPerformanceHint(JSContext* cx, AutoSelectGCHeap::AutoSelectGCHeap(JSContext* cx, size_t allowedNurseryCollections) : cx_(cx), allowedNurseryCollections_(allowedNurseryCollections) { - JS::AddGCNurseryCollectionCallback(cx, &NurseryCollectionCallback, this); + if (!JS::AddGCNurseryCollectionCallback(cx, &NurseryCollectionCallback, + this)) { + cx_ = nullptr; + } } AutoSelectGCHeap::~AutoSelectGCHeap() { - JS::RemoveGCNurseryCollectionCallback(cx_, &NurseryCollectionCallback, this); + if (cx_) { + JS::RemoveGCNurseryCollectionCallback(cx_, &NurseryCollectionCallback, + this); + } } /* static */ diff --git a/js/src/gc/GCEnum.h b/js/src/gc/GCEnum.h index 6b1a00f4db..d60cfaea76 100644 --- a/js/src/gc/GCEnum.h +++ b/js/src/gc/GCEnum.h @@ -120,6 +120,7 @@ enum class GCAbortReason { _(PropMapTable) \ _(ModuleBindingMap) \ _(ModuleCyclicFields) \ + _(ModuleSyntheticFields) \ _(ModuleExports) \ _(BaselineScript) \ _(IonScript) \ diff --git a/js/src/gc/GCMarker.h b/js/src/gc/GCMarker.h index 2d47349794..9d34d0a0dc 100644 --- a/js/src/gc/GCMarker.h +++ b/js/src/gc/GCMarker.h @@ -156,11 +156,10 @@ class MarkStack { MarkStack(); ~MarkStack(); - explicit MarkStack(const MarkStack& other); - MarkStack& operator=(const MarkStack& other); + MarkStack(const MarkStack& other) = delete; + MarkStack& operator=(const MarkStack& other) = delete; - MarkStack(MarkStack&& other) noexcept; - MarkStack& operator=(MarkStack&& other) noexcept; + void swap(MarkStack& other); // The unit for MarkStack::capacity() is mark stack words. size_t capacity() { return stack().length(); } diff --git a/js/src/gc/GCRuntime.h b/js/src/gc/GCRuntime.h index a7198f5bbc..c9f660b4d7 100644 --- a/js/src/gc/GCRuntime.h +++ b/js/src/gc/GCRuntime.h @@ -640,6 +640,7 @@ class GCRuntime { const AutoLockHelperThreadState& lock); // Parallel marking. 
+ bool setParallelMarkingEnabled(bool enabled); bool initOrDisableParallelMarking(); [[nodiscard]] bool updateMarkersVector(); size_t markingWorkerCount() const; @@ -799,9 +800,12 @@ class GCRuntime { ParallelMarking allowParallelMarking = SingleThreadedMarking, ShouldReportMarkTime reportTime = ReportMarkTime); bool canMarkInParallel() const; - bool initParallelMarkers(); + bool initParallelMarking(); void finishParallelMarkers(); + bool reserveMarkingThreads(size_t count); + void releaseMarkingThreads(); + bool hasMarkingWork(MarkColor color) const; void drainMarkStack(); @@ -1120,6 +1124,13 @@ class GCRuntime { /* Incremented on every GC slice. */ MainThreadData sliceNumber; + /* + * This runtime's current contribution to the global number of helper threads + * 'reserved' for parallel marking. Does not affect other uses of helper + * threads. + */ + MainThreadData reservedMarkingThreads; + /* Whether the currently running GC can finish in multiple slices. */ MainThreadOrGCTaskData isIncremental; diff --git a/js/src/gc/Marking.cpp b/js/src/gc/Marking.cpp index 78fcc3dedc..6b8742c980 100644 --- a/js/src/gc/Marking.cpp +++ b/js/src/gc/Marking.cpp @@ -1006,7 +1006,6 @@ void js::gc::PerformIncrementalPreWriteBarrier(TenuredCell* cell) { // runtime for cells in atoms zone. Zone* zone = cell->zoneFromAnyThread(); - MOZ_ASSERT(zone->needsIncrementalBarrier()); MOZ_ASSERT(cell); if (cell->isMarkedBlack()) { @@ -1023,6 +1022,7 @@ void js::gc::PerformIncrementalPreWriteBarrier(TenuredCell* cell) { return; } + MOZ_ASSERT(zone->needsIncrementalBarrier()); MOZ_ASSERT(CurrentThreadIsMainThread()); MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting()); @@ -1809,29 +1809,15 @@ MarkStack::MarkStack() { MOZ_ASSERT(isEmpty()); } MarkStack::~MarkStack() { MOZ_ASSERT(isEmpty()); } -MarkStack::MarkStack(const MarkStack& other) { - MOZ_CRASH("Compiler requires this but doesn't call it"); -} - -MarkStack& MarkStack::operator=(const MarkStack& other) { - new (this) MarkStack(other); - return *this; -} - -MarkStack::MarkStack(MarkStack&& other) noexcept - : stack_(std::move(other.stack_.ref())), - topIndex_(other.topIndex_.ref()) +void MarkStack::swap(MarkStack& other) { + std::swap(stack_, other.stack_); + std::swap(topIndex_, other.topIndex_); #ifdef JS_GC_ZEAL - , - maxCapacity_(other.maxCapacity_) + std::swap(maxCapacity_, other.maxCapacity_); +#endif +#ifdef DEBUG + std::swap(elementsRangesAreValid, other.elementsRangesAreValid); #endif -{ - other.topIndex_ = 0; -} - -MarkStack& MarkStack::operator=(MarkStack&& other) noexcept { - new (this) MarkStack(std::move(other)); - return *this; } bool MarkStack::init() { return resetStackCapacity(); } @@ -2186,7 +2172,7 @@ void GCMarker::setMarkColor(gc::MarkColor newColor) { // Switch stacks. We only need to do this if there are any stack entries (as // empty stacks are interchangeable) or to switch back to the original stack. 
if (!isDrained() || haveSwappedStacks) { - std::swap(stack, otherStack); + stack.swap(otherStack); haveSwappedStacks = !haveSwappedStacks; } } diff --git a/js/src/gc/MaybeRooted.h b/js/src/gc/MaybeRooted.h index fbeb0c553c..6b38172472 100644 --- a/js/src/gc/MaybeRooted.h +++ b/js/src/gc/MaybeRooted.h @@ -35,7 +35,7 @@ class MOZ_RAII FakeRooted : public RootedOperations> { explicit FakeRooted(JSContext* cx) : ptr(JS::SafelyInitialized::create()) {} - FakeRooted(JSContext* cx, T initial) : ptr(initial) {} + FakeRooted(JSContext* cx, const T& initial) : ptr(initial) {} FakeRooted(const FakeRooted&) = delete; @@ -44,6 +44,8 @@ class MOZ_RAII FakeRooted : public RootedOperations> { DECLARE_NONPOINTER_ACCESSOR_METHODS(ptr); DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(ptr); + operator JS::Handle() { return JS::Handle::fromMarkedLocation(&ptr); } + private: T ptr; diff --git a/js/src/gc/Nursery.cpp b/js/src/gc/Nursery.cpp index a78db5cc9a..660daa8d4c 100644 --- a/js/src/gc/Nursery.cpp +++ b/js/src/gc/Nursery.cpp @@ -1058,7 +1058,7 @@ TimeStamp js::Nursery::lastCollectionEndTime() const { return previousGC.endTime; } -bool js::Nursery::shouldCollect() const { +bool js::Nursery::wantEagerCollection() const { if (!isEnabled()) { return false; } @@ -1071,8 +1071,7 @@ return true; } - // Eagerly collect the nursery in idle time if it's nearly full. - if (isNearlyFull()) { + if (freeSpaceIsBelowEagerThreshold()) { return true; } @@ -1081,32 +1080,27 @@ return isUnderused(); } -inline bool js::Nursery::isNearlyFull() const { - bool belowBytesThreshold = - freeSpace() < tunables().nurseryFreeThresholdForIdleCollection(); - bool belowFractionThreshold = - double(freeSpace()) / double(capacity()) < - tunables().nurseryFreeThresholdForIdleCollectionFraction(); - - // We want to use belowBytesThreshold when the nursery is sufficiently large, - // and belowFractionThreshold when it's small. - // - // When the nursery is small then belowBytesThreshold is a lower threshold - // (triggered earlier) than belowFractionThreshold. So if the fraction - // threshold is true, the bytes one will be true also. The opposite is true - // when the nursery is large. - // - // Therefore, by the time we cross the threshold we care about, we've already - // crossed the other one, and we can boolean AND to use either condition - // without encoding any "is the nursery big/small" test/threshold. The point - // at which they cross is when the nursery is: BytesThreshold / - // FractionThreshold large. - // - // With defaults that's: +inline bool js::Nursery::freeSpaceIsBelowEagerThreshold() const { + // The threshold is specified in terms of free space so that it doesn't depend + // on the size of the nursery. // - // 1MB = 256KB / 0.25 + // There are two thresholds, an absolute free bytes threshold and a free space + // fraction threshold. Two thresholds are used so that we don't collect too + // eagerly for small nurseries (or even all the time if nursery size is less + // than the free bytes threshold) or too eagerly for large nurseries (where a + // fractional threshold may leave a significant amount of nursery unused). // + // Since the aim is to make this less eager, we require both thresholds to be + // met. 
+ + size_t freeBytes = freeSpace(); + double freeFraction = double(freeBytes) / double(capacity()); + + size_t bytesThreshold = tunables().nurseryEagerCollectionThresholdBytes(); + double fractionThreshold = + tunables().nurseryEagerCollectionThresholdPercent(); + + return freeBytes < bytesThreshold && freeFraction < fractionThreshold; } inline bool js::Nursery::isUnderused() const { @@ -1124,7 +1118,7 @@ inline bool js::Nursery::isUnderused() const { // simplest. TimeDuration timeSinceLastCollection = TimeStamp::NowLoRes() - previousGC.endTime; - return timeSinceLastCollection > tunables().nurseryTimeoutForIdleCollection(); + return timeSinceLastCollection > tunables().nurseryEagerCollectionTimeout(); } void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) { @@ -1874,7 +1868,7 @@ size_t js::Nursery::targetSize(JS::GCOptions options, JS::GCReason reason) { // If the nursery is completely unused then minimise it. if (hasRecentGrowthData && previousGC.nurseryUsedBytes == 0 && now - lastCollectionEndTime() > - tunables().nurseryTimeoutForIdleCollection() && + tunables().nurseryEagerCollectionTimeout() && !js::SupportDifferentialTesting()) { clearRecentGrowthData(); return 0; diff --git a/js/src/gc/Nursery.h b/js/src/gc/Nursery.h index 2bab1623b0..0d7b607ff8 100644 --- a/js/src/gc/Nursery.h +++ b/js/src/gc/Nursery.h @@ -328,9 +328,7 @@ class Nursery { } JS::GCReason minorGCTriggerReason() const { return minorGCTriggerReason_; } - bool shouldCollect() const; - bool isNearlyFull() const; - bool isUnderused() const; + bool wantEagerCollection() const; bool enableProfiling() const { return enableProfiling_; } @@ -383,12 +381,11 @@ class Nursery { KeyCount }; - using ProfileTimes = - mozilla::EnumeratedArray; + using ProfileTimes = mozilla::EnumeratedArray; using ProfileDurations = - mozilla::EnumeratedArray; + mozilla::EnumeratedArray; // Calculate the promotion rate of the most recent minor GC. // The valid_for_tenuring parameter is used to return whether this @@ -445,6 +442,9 @@ class Nursery { [[nodiscard]] bool moveToNextChunk(); + bool freeSpaceIsBelowEagerThreshold() const; + bool isUnderused() const; + struct CollectionResult { size_t tenuredBytes; size_t tenuredCells; diff --git a/js/src/gc/Scheduling.h b/js/src/gc/Scheduling.h index 09a9f834eb..cbaeb1f353 100644 --- a/js/src/gc/Scheduling.h +++ b/js/src/gc/Scheduling.h @@ -447,23 +447,26 @@ NoCheck, 16 * 1024 * 1024) \ \ /* \ - * JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION \ - * JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_FRACTION \ - * JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS \ + * JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_KB \ + * JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_PERCENT \ + * JSGC_NURSERY_EAGER_COLLECTION_TIMEOUT_MS \ * \ - * Attempt to run a minor GC in the idle time if the free space falls below \ - * this threshold or if it hasn't been collected for too long. The absolute \ - * threshold is used when the nursery is large and the percentage when it is \ - * small. See Nursery::shouldCollect(). \ + * JS::MaybeRunNurseryCollection will run a minor GC if the free space falls \ + * below a threshold or if it hasn't been collected for too long. \ + * \ + * To avoid making this too eager, two thresholds must be met. The free \ + * space must fall below a size threshold and the fraction of free space \ + * remaining must also fall below a threshold. \ + * \ + * See Nursery::wantEagerCollection() for more details. 
\ */ \ - _(JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION, size_t, \ - nurseryFreeThresholdForIdleCollection, ConvertSize, NoCheck, \ - ChunkSize / 4) \ - _(JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT, double, \ - nurseryFreeThresholdForIdleCollectionFraction, ConvertTimes100, \ + _(JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_KB, size_t, \ + nurseryEagerCollectionThresholdBytes, ConvertKB, NoCheck, ChunkSize / 4) \ + _(JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_PERCENT, double, \ + nurseryEagerCollectionThresholdPercent, ConvertTimes100, \ CheckNonZeroUnitRange, 0.25) \ - _(JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS, mozilla::TimeDuration, \ - nurseryTimeoutForIdleCollection, ConvertMillis, NoCheck, \ + _(JSGC_NURSERY_EAGER_COLLECTION_TIMEOUT_MS, mozilla::TimeDuration, \ + nurseryEagerCollectionTimeout, ConvertMillis, NoCheck, \ mozilla::TimeDuration::FromSeconds(5)) \ \ /* \ diff --git a/js/src/gc/StableCellHasher-inl.h b/js/src/gc/StableCellHasher-inl.h index b4054342c0..af0caaad89 100644 --- a/js/src/gc/StableCellHasher-inl.h +++ b/js/src/gc/StableCellHasher-inl.h @@ -137,7 +137,7 @@ inline void TransferUniqueId(Cell* tgt, Cell* src) { MOZ_ASSERT(src->zone() == tgt->zone()); Zone* zone = tgt->zone(); - MOZ_ASSERT(!zone->uniqueIds().has(tgt)); + MOZ_ASSERT_IF(zone->uniqueIds().has(src), !zone->uniqueIds().has(tgt)); zone->uniqueIds().rekeyIfMoved(src, tgt); } diff --git a/js/src/gc/Statistics.cpp b/js/src/gc/Statistics.cpp index e50e7500cf..c12d44db97 100644 --- a/js/src/gc/Statistics.cpp +++ b/js/src/gc/Statistics.cpp @@ -154,11 +154,11 @@ struct PhaseInfo { }; // A table of PhaseInfo indexed by Phase. -using PhaseTable = EnumeratedArray; +using PhaseTable = EnumeratedArray; // A table of PhaseKindInfo indexed by PhaseKind. using PhaseKindTable = - EnumeratedArray; + EnumeratedArray; #include "gc/StatsPhasesGenerated.inc" @@ -595,7 +595,7 @@ UniqueChars Statistics::formatDetailedTotals() const { void Statistics::formatJsonSlice(size_t sliceNum, JSONPrinter& json) const { /* * We number each of the slice properties to keep the code in - * GCTelemetry.jsm in sync. See MAX_SLICE_KEYS. + * GCTelemetry.sys.mjs in sync. See MAX_SLICE_KEYS. */ json.beginObject(); formatJsonSliceDescription(sliceNum, slices_[sliceNum], json); // # 1-11 diff --git a/js/src/gc/Statistics.h b/js/src/gc/Statistics.h index f03bc2ea38..bc6d7bf5dd 100644 --- a/js/src/gc/Statistics.h +++ b/js/src/gc/Statistics.h @@ -136,19 +136,19 @@ struct Statistics { template using Array = mozilla::Array; - template + template using EnumeratedArray = - mozilla::EnumeratedArray; + mozilla::EnumeratedArray; using TimeDuration = mozilla::TimeDuration; using TimeStamp = mozilla::TimeStamp; // Create types for tables of times, by phase and phase kind. - using PhaseTimes = EnumeratedArray; + using PhaseTimes = EnumeratedArray; using PhaseKindTimes = - EnumeratedArray; + EnumeratedArray; - using PhaseTimeStamps = EnumeratedArray; + using PhaseTimeStamps = EnumeratedArray; [[nodiscard]] static bool initialize(); @@ -370,12 +370,12 @@ struct Statistics { TimeDuration totalGCTime_; /* Number of events of this type for this GC. */ - EnumeratedArray> + EnumeratedArray, + COUNT_LIMIT> counts; /* Other GC statistics. 
*/ - EnumeratedArray stats; + EnumeratedArray stats; /* * These events cannot be kept in the above array, we need to take their @@ -440,7 +440,7 @@ struct Statistics { }; using ProfileDurations = - EnumeratedArray; + EnumeratedArray; bool enableProfiling_ = false; bool profileWorkers_ = false; diff --git a/js/src/gc/Sweeping.cpp b/js/src/gc/Sweeping.cpp index 3686695978..123b2c9650 100644 --- a/js/src/gc/Sweeping.cpp +++ b/js/src/gc/Sweeping.cpp @@ -2394,6 +2394,14 @@ void GCRuntime::endSweepPhase(bool destroyingRuntime) { MOZ_ASSERT_IF(destroyingRuntime, !useBackgroundThreads); + // Release parallel marking threads for worker runtimes now we've finished + // marking. The main thread keeps the reservation as long as parallel marking + // is enabled. + if (!rt->isMainRuntime()) { + MOZ_ASSERT_IF(useParallelMarking, reservedMarkingThreads != 0); + releaseMarkingThreads(); + } + { gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::DESTROY); diff --git a/js/src/gc/Tenuring.cpp b/js/src/gc/Tenuring.cpp index 84526e2109..a9506cfa14 100644 --- a/js/src/gc/Tenuring.cpp +++ b/js/src/gc/Tenuring.cpp @@ -74,6 +74,14 @@ void TenuringTracer::onObjectEdge(JSObject** objp, const char* name) { return; } + onNonForwardedNurseryObjectEdge(objp); +} + +void TenuringTracer::onNonForwardedNurseryObjectEdge(JSObject** objp) { + JSObject* obj = *objp; + MOZ_ASSERT(IsInsideNursery(obj)); + MOZ_ASSERT(!obj->isForwarded()); + UpdateAllocSiteOnTenure(obj); // Take a fast path for tenuring a plain object which is by far the most @@ -98,6 +106,14 @@ void TenuringTracer::onStringEdge(JSString** strp, const char* name) { return; } + onNonForwardedNurseryStringEdge(strp); +} + +void TenuringTracer::onNonForwardedNurseryStringEdge(JSString** strp) { + JSString* str = *strp; + MOZ_ASSERT(IsInsideNursery(str)); + MOZ_ASSERT(!str->isForwarded()); + UpdateAllocSiteOnTenure(str); *strp = moveToTenured(str); @@ -115,6 +131,14 @@ void TenuringTracer::onBigIntEdge(JS::BigInt** bip, const char* name) { return; } + onNonForwardedNurseryBigIntEdge(bip); +} + +void TenuringTracer::onNonForwardedNurseryBigIntEdge(JS::BigInt** bip) { + JS::BigInt* bi = *bip; + MOZ_ASSERT(IsInsideNursery(bi)); + MOZ_ASSERT(!bi->isForwarded()); + UpdateAllocSiteOnTenure(bi); *bip = moveToTenured(bi); @@ -137,37 +161,52 @@ void TenuringTracer::traverse(JS::Value* thingp) { Value value = *thingp; CheckTracedThing(this, value); + if (!value.isGCThing()) { + return; + } + + Cell* cell = value.toGCThing(); + if (!IsInsideNursery(cell)) { + return; + } + + if (cell->isForwarded()) { + const gc::RelocationOverlay* overlay = + gc::RelocationOverlay::fromCell(cell); + thingp->changeGCThingPayload(overlay->forwardingAddress()); + return; + } + // We only care about a few kinds of GC thing here and this generates much // tighter code than using MapGCThingTyped. 
- Value post; if (value.isObject()) { JSObject* obj = &value.toObject(); - onObjectEdge(&obj, "value"); - post = JS::ObjectValue(*obj); + onNonForwardedNurseryObjectEdge(&obj); + MOZ_ASSERT(obj != &value.toObject()); + *thingp = JS::ObjectValue(*obj); + return; } #ifdef ENABLE_RECORD_TUPLE - else if (value.isExtendedPrimitive()) { + if (value.isExtendedPrimitive()) { JSObject* obj = &value.toExtendedPrimitive(); - onObjectEdge(&obj, "value"); - post = JS::ExtendedPrimitiveValue(*obj); + onNonForwardedNurseryObjectEdge(&obj); + MOZ_ASSERT(obj != &value.toExtendedPrimitive()); + *thingp = JS::ExtendedPrimitiveValue(*obj); + return; } #endif - else if (value.isString()) { + if (value.isString()) { JSString* str = value.toString(); - onStringEdge(&str, "value"); - post = JS::StringValue(str); - } else if (value.isBigInt()) { - JS::BigInt* bi = value.toBigInt(); - onBigIntEdge(&bi, "value"); - post = JS::BigIntValue(bi); - } else { - MOZ_ASSERT_IF(value.isGCThing(), !IsInsideNursery(value.toGCThing())); + onNonForwardedNurseryStringEdge(&str); + MOZ_ASSERT(str != value.toString()); + *thingp = JS::StringValue(str); return; } - - if (post != value) { - *thingp = post; - } + MOZ_ASSERT(value.isBigInt()); + JS::BigInt* bi = value.toBigInt(); + onNonForwardedNurseryBigIntEdge(&bi); + MOZ_ASSERT(bi != value.toBigInt()); + *thingp = JS::BigIntValue(bi); } void TenuringTracer::traverse(wasm::AnyRef* thingp) { diff --git a/js/src/gc/Tenuring.h b/js/src/gc/Tenuring.h index 560d98d178..3eca5f4bc3 100644 --- a/js/src/gc/Tenuring.h +++ b/js/src/gc/Tenuring.h @@ -92,6 +92,10 @@ class TenuringTracer final : public JSTracer { void traceBigInt(JS::BigInt* bi); private: + MOZ_ALWAYS_INLINE void onNonForwardedNurseryObjectEdge(JSObject** objp); + MOZ_ALWAYS_INLINE void onNonForwardedNurseryStringEdge(JSString** strp); + MOZ_ALWAYS_INLINE void onNonForwardedNurseryBigIntEdge(JS::BigInt** bip); + // The dependent string chars need to be relocated if the base which it's // using chars from has been deduplicated. 
template diff --git a/js/src/gc/Zone.cpp b/js/src/gc/Zone.cpp index e2c67aee7b..d0586d5d56 100644 --- a/js/src/gc/Zone.cpp +++ b/js/src/gc/Zone.cpp @@ -632,11 +632,13 @@ void Zone::purgeAtomCache() { } void Zone::addSizeOfIncludingThis( - mozilla::MallocSizeOf mallocSizeOf, JS::CodeSizes* code, size_t* regexpZone, - size_t* jitZone, size_t* cacheIRStubs, size_t* uniqueIdMap, - size_t* initialPropMapTable, size_t* shapeTables, size_t* atomsMarkBitmaps, - size_t* compartmentObjects, size_t* crossCompartmentWrappersTables, - size_t* compartmentsPrivateData, size_t* scriptCountsMapArg) { + mozilla::MallocSizeOf mallocSizeOf, size_t* zoneObject, JS::CodeSizes* code, + size_t* regexpZone, size_t* jitZone, size_t* cacheIRStubs, + size_t* uniqueIdMap, size_t* initialPropMapTable, size_t* shapeTables, + size_t* atomsMarkBitmaps, size_t* compartmentObjects, + size_t* crossCompartmentWrappersTables, size_t* compartmentsPrivateData, + size_t* scriptCountsMapArg) { + *zoneObject += mallocSizeOf(this); *regexpZone += regExps().sizeOfIncludingThis(mallocSizeOf); if (jitZone_) { jitZone_->addSizeOfIncludingThis(mallocSizeOf, code, jitZone, cacheIRStubs); diff --git a/js/src/gc/Zone.h b/js/src/gc/Zone.h index 457e586cea..fd91de8626 100644 --- a/js/src/gc/Zone.h +++ b/js/src/gc/Zone.h @@ -375,15 +375,13 @@ class Zone : public js::ZoneAllocator, public js::gc::GraphNodeBase { bool registerObjectWithWeakPointers(JSObject* obj); void sweepObjectsWithWeakPointers(JSTracer* trc); - void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, - JS::CodeSizes* code, size_t* regexpZone, - size_t* jitZone, size_t* cacheIRStubs, - size_t* uniqueIdMap, size_t* initialPropMapTable, - size_t* shapeTables, size_t* atomsMarkBitmaps, - size_t* compartmentObjects, - size_t* crossCompartmentWrappersTables, - size_t* compartmentsPrivateData, - size_t* scriptCountsMapArg); + void addSizeOfIncludingThis( + mozilla::MallocSizeOf mallocSizeOf, size_t* zoneObject, + JS::CodeSizes* code, size_t* regexpZone, size_t* jitZone, + size_t* cacheIRStubs, size_t* uniqueIdMap, size_t* initialPropMapTable, + size_t* shapeTables, size_t* atomsMarkBitmaps, size_t* compartmentObjects, + size_t* crossCompartmentWrappersTables, size_t* compartmentsPrivateData, + size_t* scriptCountsMapArg); // Iterate over all cells in the zone. See the definition of ZoneCellIter // in gc/GC-inl.h for the possible arguments and documentation. -- cgit v1.2.3
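A worked example of the nursery eager-collection test added in Nursery.cpp: both the absolute and the fractional free-space thresholds must be crossed before an eager minor GC is wanted. The sketch below uses the Scheduling.h defaults (ChunkSize / 4 = 256 KiB and 25%); the free-standing function and constants are illustrative, not the real Nursery interface.

    // Standalone model of Nursery::freeSpaceIsBelowEagerThreshold().
    #include <cstddef>
    #include <cstdio>

    constexpr size_t BytesThreshold = 256 * 1024; // default: ChunkSize / 4
    constexpr double FractionThreshold = 0.25;    // default: 25% free

    bool freeSpaceIsBelowEagerThreshold(size_t freeBytes, size_t capacity) {
      double freeFraction = double(freeBytes) / double(capacity);
      // Both must hold: the fraction guard stops tiny nurseries (capacity
      // below the byte threshold) from collecting constantly, and the byte
      // guard stops large nurseries from collecting while a lot of absolute
      // space is still free.
      return freeBytes < BytesThreshold && freeFraction < FractionThreshold;
    }

    int main() {
      // 512 KiB nursery with 200 KiB free: 39% free, so not eager.
      printf("%d\n", freeSpaceIsBelowEagerThreshold(200 * 1024, 512 * 1024));
      // 4 MiB nursery with 200 KiB free: ~4.9% free and < 256 KiB, so eager.
      printf("%d\n", freeSpaceIsBelowEagerThreshold(200 * 1024, 4 * 1024 * 1024));
      // 4 MiB nursery with 512 KiB free: 12.5% < 25%, but 512 KiB >= 256 KiB.
      printf("%d\n", freeSpaceIsBelowEagerThreshold(512 * 1024, 4 * 1024 * 1024));
      return 0;
    }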
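The Tenuring.cpp rewrite of TenuringTracer::traverse(Value*) makes the control flow explicit: non-GC-thing and tenured values are filtered out first, already-forwarded nursery cells just get their payload rewritten from the RelocationOverlay, and only genuinely unmoved cells reach the per-type onNonForwardedNursery*Edge helpers (hence the new assertions that the cell is not forwarded in each one). A toy model of that flow follows, with deliberately simplified stand-ins for Cell and Value rather than the real types.

    // Sketch of the "check forwarding once, then dispatch" structure.
    #include <cassert>
    #include <cstdio>

    struct Cell {
      bool inNursery = false;
      bool forwarded = false;
      Cell* forwardingAddress = nullptr;
    };

    struct Value {
      Cell* cell = nullptr;  // nullptr stands in for non-GC-thing values
    };

    Cell* moveToTenured(Cell* cell) {
      // Pretend-tenure: make the copy (leaked here, it's a sketch) and leave
      // a forwarding pointer behind in the nursery cell.
      Cell* dst = new Cell{};
      cell->forwarded = true;
      cell->forwardingAddress = dst;
      return dst;
    }

    void traverse(Value* thingp) {
      Cell* cell = thingp->cell;
      if (!cell || !cell->inNursery) {
        return;  // nothing to do for primitives or tenured cells
      }
      if (cell->forwarded) {
        // Seen via an earlier edge: just redirect this reference.
        thingp->cell = cell->forwardingAddress;
        return;
      }
      // First visit: move the cell, then redirect. The real code dispatches
      // to type-specific helpers (object/string/BigInt) at this point.
      thingp->cell = moveToTenured(cell);
    }

    int main() {
      Cell nurseryCell{true, false, nullptr};
      Value a{&nurseryCell}, b{&nurseryCell};
      traverse(&a);  // tenures the cell
      traverse(&b);  // follows the forwarding pointer
      assert(a.cell == b.cell && !a.cell->inNursery);
      puts("both edges point at the tenured copy");
      return 0;
    }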