path: root/js/src/gc
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
commit     2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
tree       b80bf8bf13c3766139fbacc530efd0dd9d54394c /js/src/gc
parent     Initial commit. (diff)
Adding upstream version 86.0.1. (upstream/86.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--  js/src/gc/AllocKind.h  239
-rw-r--r--  js/src/gc/Allocator.cpp  886
-rw-r--r--  js/src/gc/Allocator.h  89
-rw-r--r--  js/src/gc/ArenaList-inl.h  333
-rw-r--r--  js/src/gc/ArenaList.h  405
-rw-r--r--  js/src/gc/AtomMarking-inl.h  99
-rw-r--r--  js/src/gc/AtomMarking.cpp  305
-rw-r--r--  js/src/gc/AtomMarking.h  88
-rw-r--r--  js/src/gc/Barrier.cpp  357
-rw-r--r--  js/src/gc/Barrier.h  1168
-rw-r--r--  js/src/gc/Cell.h  780
-rw-r--r--  js/src/gc/ClearEdgesTracer.h  38
-rw-r--r--  js/src/gc/FinalizationRegistry.cpp  140
-rw-r--r--  js/src/gc/FindSCCs.h  204
-rw-r--r--  js/src/gc/FreeOp-inl.h  35
-rw-r--r--  js/src/gc/FreeOp.h  153
-rw-r--r--  js/src/gc/GC-inl.h  341
-rw-r--r--  js/src/gc/GC.cpp  9119
-rw-r--r--  js/src/gc/GC.h  215
-rw-r--r--  js/src/gc/GCEnum.h  159
-rw-r--r--  js/src/gc/GCInternals.h  295
-rw-r--r--  js/src/gc/GCLock.h  110
-rw-r--r--  js/src/gc/GCMarker.h  584
-rw-r--r--  js/src/gc/GCParallelTask.cpp  173
-rw-r--r--  js/src/gc/GCParallelTask.h  176
-rw-r--r--  js/src/gc/GCProbes.h  44
-rw-r--r--  js/src/gc/GCRuntime.h  1287
-rw-r--r--  js/src/gc/GenerateStatsPhases.py  416
-rw-r--r--  js/src/gc/HashUtil.h  85
-rw-r--r--  js/src/gc/Heap-inl.h  65
-rw-r--r--  js/src/gc/Heap.h  774
-rw-r--r--  js/src/gc/IteratorUtils.h  121
-rw-r--r--  js/src/gc/Marking-inl.h  162
-rw-r--r--  js/src/gc/Marking.cpp  4116
-rw-r--r--  js/src/gc/Marking.h  178
-rw-r--r--  js/src/gc/MaybeRooted.h  152
-rw-r--r--  js/src/gc/Memory.cpp  1003
-rw-r--r--  js/src/gc/Memory.h  82
-rw-r--r--  js/src/gc/Nursery-inl.h  188
-rw-r--r--  js/src/gc/Nursery.cpp  1841
-rw-r--r--  js/src/gc/Nursery.h  775
-rw-r--r--  js/src/gc/NurseryAwareHashMap.h  218
-rw-r--r--  js/src/gc/ObjectKind-inl.h  176
-rw-r--r--  js/src/gc/ParallelWork.h  139
-rw-r--r--  js/src/gc/Policy.h  99
-rw-r--r--  js/src/gc/PrivateIterators-inl.h  167
-rw-r--r--  js/src/gc/PublicIterators.cpp  249
-rw-r--r--  js/src/gc/PublicIterators.h  197
-rw-r--r--  js/src/gc/RelocationOverlay.h  66
-rw-r--r--  js/src/gc/RootMarking.cpp  649
-rw-r--r--  js/src/gc/Rooting.h  102
-rw-r--r--  js/src/gc/Scheduling.cpp  826
-rw-r--r--  js/src/gc/Scheduling.h  974
-rw-r--r--  js/src/gc/Statistics.cpp  1633
-rw-r--r--  js/src/gc/Statistics.h  578
-rw-r--r--  js/src/gc/StoreBuffer-inl.h  83
-rw-r--r--  js/src/gc/StoreBuffer.cpp  238
-rw-r--r--  js/src/gc/StoreBuffer.h  679
-rw-r--r--  js/src/gc/Tracer.cpp  377
-rw-r--r--  js/src/gc/Tracer.h  347
-rw-r--r--  js/src/gc/Verifier.cpp  1110
-rw-r--r--  js/src/gc/WeakMap-inl.h  439
-rw-r--r--  js/src/gc/WeakMap.cpp  186
-rw-r--r--  js/src/gc/WeakMap.h  442
-rw-r--r--  js/src/gc/WeakMapPtr.cpp  114
-rw-r--r--  js/src/gc/Zone-inl.h  117
-rw-r--r--  js/src/gc/Zone.cpp  979
-rw-r--r--  js/src/gc/Zone.h  711
-rw-r--r--  js/src/gc/ZoneAllocator.h  334
-rw-r--r--  js/src/gc/moz.build  54
70 files changed, 40063 insertions, 0 deletions
diff --git a/js/src/gc/AllocKind.h b/js/src/gc/AllocKind.h
new file mode 100644
index 0000000000..89ae7cc4fe
--- /dev/null
+++ b/js/src/gc/AllocKind.h
@@ -0,0 +1,239 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal definition of GC cell kinds.
+ */
+
+#ifndef gc_AllocKind_h
+#define gc_AllocKind_h
+
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/EnumeratedRange.h"
+
+#include <iterator>
+#include <stdint.h>
+
+#include "js/TraceKind.h"
+#include "js/Utility.h"
+
+namespace js {
+namespace gc {
+
+// The GC allocation kinds.
+//
+// These are defined by macros which enumerate the different allocation kinds
+// and supply the following information:
+//
+// - the corresponding AllocKind
+// - their JS::TraceKind
+// - their C++ base type
+// - a C++ type of the correct size
+// - whether they can be finalized on the background thread
+// - whether they can be allocated in the nursery
+// - whether they can be compacted
+
+// clang-format off
+#define FOR_EACH_OBJECT_ALLOCKIND(D) \
+ /* AllocKind TraceKind TypeName SizedType BGFinal Nursery Compact */ \
+ D(FUNCTION, Object, JSObject, JSFunction, true, true, true) \
+ D(FUNCTION_EXTENDED, Object, JSObject, FunctionExtended, true, true, true) \
+ D(OBJECT0, Object, JSObject, JSObject_Slots0, false, false, true) \
+ D(OBJECT0_BACKGROUND, Object, JSObject, JSObject_Slots0, true, true, true) \
+ D(OBJECT2, Object, JSObject, JSObject_Slots2, false, false, true) \
+ D(OBJECT2_BACKGROUND, Object, JSObject, JSObject_Slots2, true, true, true) \
+ D(ARRAYBUFFER4, Object, JSObject, JSObject_Slots4, true, true, true) \
+ D(OBJECT4, Object, JSObject, JSObject_Slots4, false, false, true) \
+ D(OBJECT4_BACKGROUND, Object, JSObject, JSObject_Slots4, true, true, true) \
+ D(ARRAYBUFFER8, Object, JSObject, JSObject_Slots8, true, true, true) \
+ D(OBJECT8, Object, JSObject, JSObject_Slots8, false, false, true) \
+ D(OBJECT8_BACKGROUND, Object, JSObject, JSObject_Slots8, true, true, true) \
+ D(ARRAYBUFFER12, Object, JSObject, JSObject_Slots12, true, true, true) \
+ D(OBJECT12, Object, JSObject, JSObject_Slots12, false, false, true) \
+ D(OBJECT12_BACKGROUND, Object, JSObject, JSObject_Slots12, true, true, true) \
+ D(ARRAYBUFFER16, Object, JSObject, JSObject_Slots16, true, true, true) \
+ D(OBJECT16, Object, JSObject, JSObject_Slots16, false, false, true) \
+ D(OBJECT16_BACKGROUND, Object, JSObject, JSObject_Slots16, true, true, true)
+
+#define FOR_EACH_NONOBJECT_NONNURSERY_ALLOCKIND(D) \
+ /* AllocKind TraceKind TypeName SizedType BGFinal Nursery Compact */ \
+ D(SCRIPT, Script, js::BaseScript, js::BaseScript, false, false, true) \
+ D(SHAPE, Shape, js::Shape, js::Shape, true, false, true) \
+ D(ACCESSOR_SHAPE, Shape, js::AccessorShape, js::AccessorShape, true, false, true) \
+ D(BASE_SHAPE, BaseShape, js::BaseShape, js::BaseShape, true, false, true) \
+ D(OBJECT_GROUP, ObjectGroup, js::ObjectGroup, js::ObjectGroup, true, false, true) \
+ D(EXTERNAL_STRING, String, JSExternalString, JSExternalString, true, false, true) \
+ D(FAT_INLINE_ATOM, String, js::FatInlineAtom, js::FatInlineAtom, true, false, false) \
+ D(ATOM, String, js::NormalAtom, js::NormalAtom, true, false, false) \
+ D(SYMBOL, Symbol, JS::Symbol, JS::Symbol, true, false, false) \
+ D(JITCODE, JitCode, js::jit::JitCode, js::jit::JitCode, false, false, false) \
+ D(SCOPE, Scope, js::Scope, js::Scope, true, false, true) \
+ D(REGEXP_SHARED, RegExpShared, js::RegExpShared, js::RegExpShared, true, false, true)
+
+#define FOR_EACH_NONOBJECT_NURSERY_ALLOCKIND(D) \
+ /* AllocKind TraceKind TypeName SizedType BGFinal Nursery Compact */ \
+ D(BIGINT, BigInt, JS::BigInt, JS::BigInt, true, true, true)
+
+#define FOR_EACH_NURSERY_STRING_ALLOCKIND(D) \
+ D(FAT_INLINE_STRING, String, JSFatInlineString, JSFatInlineString, true, true, true) \
+ D(STRING, String, JSString, JSString, true, true, true)
+// clang-format on
+
+#define FOR_EACH_NONOBJECT_ALLOCKIND(D) \
+ FOR_EACH_NONOBJECT_NONNURSERY_ALLOCKIND(D) \
+ FOR_EACH_NONOBJECT_NURSERY_ALLOCKIND(D) \
+ FOR_EACH_NURSERY_STRING_ALLOCKIND(D)
+
+#define FOR_EACH_ALLOCKIND(D) \
+ FOR_EACH_OBJECT_ALLOCKIND(D) \
+ FOR_EACH_NONOBJECT_ALLOCKIND(D)
+
+#define DEFINE_ALLOC_KIND(allocKind, _1, _2, _3, _4, _5, _6) allocKind,
+enum class AllocKind : uint8_t {
+ // clang-format off
+ FOR_EACH_OBJECT_ALLOCKIND(DEFINE_ALLOC_KIND)
+
+ OBJECT_LIMIT,
+ OBJECT_LAST = OBJECT_LIMIT - 1,
+
+ FOR_EACH_NONOBJECT_ALLOCKIND(DEFINE_ALLOC_KIND)
+
+ LIMIT,
+ LAST = LIMIT - 1,
+
+ FIRST = 0,
+ OBJECT_FIRST = FUNCTION // Hardcoded to first object kind.
+ // clang-format on
+};
+#undef DEFINE_ALLOC_KIND
+
+static_assert(int(AllocKind::FIRST) == 0,
+ "Various places depend on AllocKind starting at 0");
+static_assert(int(AllocKind::OBJECT_FIRST) == 0,
+ "OBJECT_FIRST must be defined as the first object kind");
+
+constexpr size_t AllocKindCount = size_t(AllocKind::LIMIT);
+
+inline bool IsAllocKind(AllocKind kind) {
+ return kind >= AllocKind::FIRST && kind <= AllocKind::LIMIT;
+}
+
+inline bool IsValidAllocKind(AllocKind kind) {
+ return kind >= AllocKind::FIRST && kind <= AllocKind::LAST;
+}
+
+const char* AllocKindName(AllocKind kind);
+
+inline bool IsObjectAllocKind(AllocKind kind) {
+ return kind >= AllocKind::OBJECT_FIRST && kind <= AllocKind::OBJECT_LAST;
+}
+
+inline bool IsShapeAllocKind(AllocKind kind) {
+ return kind == AllocKind::SHAPE || kind == AllocKind::ACCESSOR_SHAPE;
+}
+
+// Returns a sequence for use in a range-based for loop,
+// to iterate over all alloc kinds.
+inline auto AllAllocKinds() {
+ return mozilla::MakeEnumeratedRange(AllocKind::FIRST, AllocKind::LIMIT);
+}
+
+// Returns a sequence for use in a range-based for loop,
+// to iterate over all object alloc kinds.
+inline auto ObjectAllocKinds() {
+ return mozilla::MakeEnumeratedRange(AllocKind::OBJECT_FIRST,
+ AllocKind::OBJECT_LIMIT);
+}
+
+// Returns a sequence for use in a range-based for loop,
+// to iterate over alloc kinds from |first| to |limit|, exclusive.
+inline auto SomeAllocKinds(AllocKind first = AllocKind::FIRST,
+ AllocKind limit = AllocKind::LIMIT) {
+ MOZ_ASSERT(IsAllocKind(first), "|first| is not a valid AllocKind!");
+ MOZ_ASSERT(IsAllocKind(limit), "|limit| is not a valid AllocKind!");
+ return mozilla::MakeEnumeratedRange(first, limit);
+}
+
+// AllAllocKindArray<ValueType> gives an enumerated array of ValueTypes,
+// with each index corresponding to a particular alloc kind.
+template <typename ValueType>
+using AllAllocKindArray =
+ mozilla::EnumeratedArray<AllocKind, AllocKind::LIMIT, ValueType>;
+
+// ObjectAllocKindArray<ValueType> gives an enumerated array of ValueTypes,
+// with each index corresponding to a particular object alloc kind.
+template <typename ValueType>
+using ObjectAllocKindArray =
+ mozilla::EnumeratedArray<AllocKind, AllocKind::OBJECT_LIMIT, ValueType>;
+
+static inline JS::TraceKind MapAllocToTraceKind(AllocKind kind) {
+ static const JS::TraceKind map[] = {
+#define EXPAND_ELEMENT(allocKind, traceKind, type, sizedType, bgFinal, \
+ nursery, compact) \
+ JS::TraceKind::traceKind,
+ FOR_EACH_ALLOCKIND(EXPAND_ELEMENT)
+#undef EXPAND_ELEMENT
+ };
+
+ static_assert(std::size(map) == AllocKindCount,
+ "AllocKind-to-TraceKind mapping must be in sync");
+ return map[size_t(kind)];
+}
+
+static inline bool IsNurseryAllocable(AllocKind kind) {
+ MOZ_ASSERT(IsValidAllocKind(kind));
+
+ static const bool map[] = {
+#define DEFINE_NURSERY_ALLOCABLE(_1, _2, _3, _4, _5, nursery, _6) nursery,
+ FOR_EACH_ALLOCKIND(DEFINE_NURSERY_ALLOCABLE)
+#undef DEFINE_NURSERY_ALLOCABLE
+ };
+
+ static_assert(std::size(map) == AllocKindCount,
+ "IsNurseryAllocable sanity check");
+ return map[size_t(kind)];
+}
+
+static inline bool IsBackgroundFinalized(AllocKind kind) {
+ MOZ_ASSERT(IsValidAllocKind(kind));
+
+ static const bool map[] = {
+#define DEFINE_BACKGROUND_FINALIZED(_1, _2, _3, _4, bgFinal, _5, _6) bgFinal,
+ FOR_EACH_ALLOCKIND(DEFINE_BACKGROUND_FINALIZED)
+#undef DEFINE_BACKGROUND_FINALIZED
+ };
+
+ static_assert(std::size(map) == AllocKindCount,
+ "IsBackgroundFinalized sanity check");
+ return map[size_t(kind)];
+}
+
+static inline bool IsForegroundFinalized(AllocKind kind) {
+ return !IsBackgroundFinalized(kind);
+}
+
+static inline bool IsCompactingKind(AllocKind kind) {
+ MOZ_ASSERT(IsValidAllocKind(kind));
+
+ static const bool map[] = {
+#define DEFINE_COMPACTING_KIND(_1, _2, _3, _4, _5, _6, compact) compact,
+ FOR_EACH_ALLOCKIND(DEFINE_COMPACTING_KIND)
+#undef DEFINE_COMPACTING_KIND
+ };
+
+ static_assert(std::size(map) == AllocKindCount,
+ "IsCompactingKind sanity check");
+ return map[size_t(kind)];
+}
+
+static inline bool IsMovableKind(AllocKind kind) {
+ return IsNurseryAllocable(kind) || IsCompactingKind(kind);
+}
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_AllocKind_h */
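AllocKind.h above is built around an X-macro: FOR_EACH_ALLOCKIND supplies one data row per allocation kind, and each consumer expands it with a different D() to generate the AllocKind enum and the parallel per-kind tables (MapAllocToTraceKind, IsNurseryAllocable, and so on), with static_asserts keeping the tables and the enum in sync. The following minimal, standalone C++ sketch of that pattern is illustrative only and not part of the patch; DemoKind, FOR_EACH_DEMO_KIND, and DemoIsNurseryAllocable are made-up names.

// Sketch of the X-macro pattern used by AllocKind.h (hypothetical names).
// Each row: D(kind, isNurseryAllocable)
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>

#define FOR_EACH_DEMO_KIND(D) \
  D(SMALL_OBJECT, true)       \
  D(LARGE_OBJECT, false)      \
  D(STRING, true)

// Expand once to define the enum...
#define DEFINE_DEMO_KIND(kind, nursery) kind,
enum class DemoKind : uint8_t { FOR_EACH_DEMO_KIND(DEFINE_DEMO_KIND) LIMIT };
#undef DEFINE_DEMO_KIND

// ...and again to define a parallel lookup table. The static_assert keeps the
// table and the enum in sync by construction, as AllocKind.h does for its maps.
static bool DemoIsNurseryAllocable(DemoKind kind) {
  static const bool map[] = {
#define DEMO_NURSERY_ENTRY(kind, nursery) nursery,
      FOR_EACH_DEMO_KIND(DEMO_NURSERY_ENTRY)
#undef DEMO_NURSERY_ENTRY
  };
  static_assert(std::size(map) == size_t(DemoKind::LIMIT),
                "kind-to-flag table must match the enum");
  return map[size_t(kind)];
}

int main() {
  assert(DemoIsNurseryAllocable(DemoKind::STRING));
  assert(!DemoIsNurseryAllocable(DemoKind::LARGE_OBJECT));
}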
diff --git a/js/src/gc/Allocator.cpp b/js/src/gc/Allocator.cpp
new file mode 100644
index 0000000000..a5fd34b3f0
--- /dev/null
+++ b/js/src/gc/Allocator.cpp
@@ -0,0 +1,886 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Allocator.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/TimeStamp.h"
+
+#include <type_traits>
+
+#include "gc/GCInternals.h"
+#include "gc/GCLock.h"
+#include "gc/GCProbes.h"
+#include "gc/Nursery.h"
+#include "threading/CpuCount.h"
+#include "util/Poison.h"
+#include "vm/JSContext.h"
+#include "vm/Runtime.h"
+#include "vm/StringType.h"
+#include "vm/TraceLogging.h"
+
+#include "gc/ArenaList-inl.h"
+#include "gc/Heap-inl.h"
+#include "gc/PrivateIterators-inl.h"
+#include "vm/JSObject-inl.h"
+
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
+using namespace js;
+using namespace gc;
+
+template <AllowGC allowGC /* = CanGC */>
+JSObject* js::AllocateObject(JSContext* cx, AllocKind kind,
+ size_t nDynamicSlots, InitialHeap heap,
+ const JSClass* clasp) {
+ MOZ_ASSERT(IsObjectAllocKind(kind));
+ size_t thingSize = Arena::thingSize(kind);
+
+ MOZ_ASSERT(thingSize == Arena::thingSize(kind));
+ MOZ_ASSERT(thingSize >= sizeof(JSObject_Slots0));
+ static_assert(
+ sizeof(JSObject_Slots0) >= MinCellSize,
+ "All allocations must be at least the allocator-imposed minimum size.");
+
+ MOZ_ASSERT_IF(nDynamicSlots != 0, clasp->isNative());
+
+ // We cannot trigger GC or make runtime assertions when nursery allocation
+ // is suppressed, either explicitly or because we are off-thread.
+ if (cx->isNurseryAllocSuppressed()) {
+ JSObject* obj = GCRuntime::tryNewTenuredObject<NoGC>(cx, kind, thingSize,
+ nDynamicSlots);
+ if (MOZ_UNLIKELY(allowGC && !obj)) {
+ ReportOutOfMemory(cx);
+ }
+ return obj;
+ }
+
+ JSRuntime* rt = cx->runtime();
+ if (!rt->gc.checkAllocatorState<allowGC>(cx, kind)) {
+ return nullptr;
+ }
+
+ if (cx->nursery().isEnabled() && heap != TenuredHeap) {
+ JSObject* obj = rt->gc.tryNewNurseryObject<allowGC>(cx, thingSize,
+ nDynamicSlots, clasp);
+ if (obj) {
+ return obj;
+ }
+
+ // Our most common non-jit allocation path is NoGC; thus, if we fail the
+ // alloc and cannot GC, we *must* return nullptr here so that the caller
+ // will do a CanGC allocation to clear the nursery. Failing to do so will
+ // cause all allocations on this path to land in Tenured, and we will not
+ // get the benefit of the nursery.
+ if (!allowGC) {
+ return nullptr;
+ }
+ }
+
+ return GCRuntime::tryNewTenuredObject<allowGC>(cx, kind, thingSize,
+ nDynamicSlots);
+}
+template JSObject* js::AllocateObject<NoGC>(JSContext* cx, gc::AllocKind kind,
+ size_t nDynamicSlots,
+ gc::InitialHeap heap,
+ const JSClass* clasp);
+template JSObject* js::AllocateObject<CanGC>(JSContext* cx, gc::AllocKind kind,
+ size_t nDynamicSlots,
+ gc::InitialHeap heap,
+ const JSClass* clasp);
+
+// Attempt to allocate a new JSObject out of the nursery. If there is not
+// enough room in the nursery or there is an OOM, this method will return
+// nullptr.
+template <AllowGC allowGC>
+JSObject* GCRuntime::tryNewNurseryObject(JSContext* cx, size_t thingSize,
+ size_t nDynamicSlots,
+ const JSClass* clasp) {
+ MOZ_RELEASE_ASSERT(!cx->isHelperThreadContext());
+
+ MOZ_ASSERT(cx->isNurseryAllocAllowed());
+ MOZ_ASSERT(!cx->isNurseryAllocSuppressed());
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+
+ JSObject* obj =
+ cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
+ if (obj) {
+ return obj;
+ }
+
+ if (allowGC && !cx->suppressGC) {
+ cx->runtime()->gc.minorGC(JS::GCReason::OUT_OF_NURSERY);
+
+ // Exceeding gcMaxBytes while tenuring can disable the Nursery.
+ if (cx->nursery().isEnabled()) {
+ return cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
+ }
+ }
+ return nullptr;
+}
+
+template <AllowGC allowGC>
+JSObject* GCRuntime::tryNewTenuredObject(JSContext* cx, AllocKind kind,
+ size_t thingSize,
+ size_t nDynamicSlots) {
+ ObjectSlots* slotsHeader = nullptr;
+ if (nDynamicSlots) {
+ HeapSlot* allocation =
+ cx->maybe_pod_malloc<HeapSlot>(ObjectSlots::allocCount(nDynamicSlots));
+ if (MOZ_UNLIKELY(!allocation)) {
+ if (allowGC) {
+ ReportOutOfMemory(cx);
+ }
+ return nullptr;
+ }
+
+ slotsHeader = new (allocation) ObjectSlots(nDynamicSlots, 0);
+ Debug_SetSlotRangeToCrashOnTouch(slotsHeader->slots(), nDynamicSlots);
+ }
+
+ JSObject* obj = tryNewTenuredThing<JSObject, allowGC>(cx, kind, thingSize);
+
+ if (obj) {
+ if (nDynamicSlots) {
+ static_cast<NativeObject*>(obj)->initSlots(slotsHeader->slots());
+ AddCellMemory(obj, ObjectSlots::allocSize(nDynamicSlots),
+ MemoryUse::ObjectSlots);
+ }
+ } else {
+ js_free(slotsHeader);
+ }
+
+ return obj;
+}
+
+// Attempt to allocate a new string out of the nursery. If there is not enough
+// room in the nursery or there is an OOM, this method will return nullptr.
+template <AllowGC allowGC>
+JSString* GCRuntime::tryNewNurseryString(JSContext* cx, size_t thingSize,
+ AllocKind kind) {
+ MOZ_ASSERT(IsNurseryAllocable(kind));
+ MOZ_ASSERT(cx->isNurseryAllocAllowed());
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ MOZ_ASSERT(!cx->isNurseryAllocSuppressed());
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+
+ Cell* cell = cx->nursery().allocateString(cx->zone(), thingSize);
+ if (cell) {
+ return static_cast<JSString*>(cell);
+ }
+
+ if (allowGC && !cx->suppressGC) {
+ cx->runtime()->gc.minorGC(JS::GCReason::OUT_OF_NURSERY);
+
+ // Exceeding gcMaxBytes while tenuring can disable the Nursery, and
+ // other heuristics can disable nursery strings for this zone.
+ if (cx->nursery().isEnabled() && cx->zone()->allocNurseryStrings) {
+ return static_cast<JSString*>(
+ cx->nursery().allocateString(cx->zone(), thingSize));
+ }
+ }
+ return nullptr;
+}
+
+template <typename StringAllocT, AllowGC allowGC /* = CanGC */>
+StringAllocT* js::AllocateStringImpl(JSContext* cx, InitialHeap heap) {
+ static_assert(std::is_convertible_v<StringAllocT*, JSString*>,
+ "must be JSString derived");
+
+ AllocKind kind = MapTypeToFinalizeKind<StringAllocT>::kind;
+ size_t size = sizeof(StringAllocT);
+ MOZ_ASSERT(size == Arena::thingSize(kind));
+ MOZ_ASSERT(size == sizeof(JSString) || size == sizeof(JSFatInlineString));
+
+ // Off-thread alloc cannot trigger GC or make runtime assertions.
+ if (cx->isNurseryAllocSuppressed()) {
+ StringAllocT* str =
+ GCRuntime::tryNewTenuredThing<StringAllocT, NoGC>(cx, kind, size);
+ if (MOZ_UNLIKELY(allowGC && !str)) {
+ ReportOutOfMemory(cx);
+ }
+ return str;
+ }
+
+ JSRuntime* rt = cx->runtime();
+ if (!rt->gc.checkAllocatorState<allowGC>(cx, kind)) {
+ return nullptr;
+ }
+
+ if (cx->nursery().isEnabled() && heap != TenuredHeap &&
+ cx->nursery().canAllocateStrings() && cx->zone()->allocNurseryStrings) {
+ auto* str = static_cast<StringAllocT*>(
+ rt->gc.tryNewNurseryString<allowGC>(cx, size, kind));
+ if (str) {
+ return str;
+ }
+
+ // Our most common non-jit allocation path is NoGC; thus, if we fail the
+ // alloc and cannot GC, we *must* return nullptr here so that the caller
+ // will do a CanGC allocation to clear the nursery. Failing to do so will
+ // cause all allocations on this path to land in Tenured, and we will not
+ // get the benefit of the nursery.
+ if (!allowGC) {
+ return nullptr;
+ }
+ }
+
+ return GCRuntime::tryNewTenuredThing<StringAllocT, allowGC>(cx, kind, size);
+}
+
+// Attempt to allocate a new BigInt out of the nursery. If there is not enough
+// room in the nursery or there is an OOM, this method will return nullptr.
+template <AllowGC allowGC>
+JS::BigInt* GCRuntime::tryNewNurseryBigInt(JSContext* cx, size_t thingSize,
+ AllocKind kind) {
+ MOZ_ASSERT(IsNurseryAllocable(kind));
+ MOZ_ASSERT(cx->isNurseryAllocAllowed());
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ MOZ_ASSERT(!cx->isNurseryAllocSuppressed());
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+
+ Cell* cell = cx->nursery().allocateBigInt(cx->zone(), thingSize);
+ if (cell) {
+ return static_cast<JS::BigInt*>(cell);
+ }
+
+ if (allowGC && !cx->suppressGC) {
+ cx->runtime()->gc.minorGC(JS::GCReason::OUT_OF_NURSERY);
+
+ // Exceeding gcMaxBytes while tenuring can disable the Nursery, and
+ // other heuristics can disable nursery BigInts for this zone.
+ if (cx->nursery().isEnabled() && cx->zone()->allocNurseryBigInts) {
+ return static_cast<JS::BigInt*>(
+ cx->nursery().allocateBigInt(cx->zone(), thingSize));
+ }
+ }
+ return nullptr;
+}
+
+template <AllowGC allowGC /* = CanGC */>
+JS::BigInt* js::AllocateBigInt(JSContext* cx, InitialHeap heap) {
+ AllocKind kind = MapTypeToFinalizeKind<JS::BigInt>::kind;
+ size_t size = sizeof(JS::BigInt);
+ MOZ_ASSERT(size == Arena::thingSize(kind));
+
+ // Off-thread alloc cannot trigger GC or make runtime assertions.
+ if (cx->isNurseryAllocSuppressed()) {
+ JS::BigInt* bi =
+ GCRuntime::tryNewTenuredThing<JS::BigInt, NoGC>(cx, kind, size);
+ if (MOZ_UNLIKELY(allowGC && !bi)) {
+ ReportOutOfMemory(cx);
+ }
+ return bi;
+ }
+
+ JSRuntime* rt = cx->runtime();
+ if (!rt->gc.checkAllocatorState<allowGC>(cx, kind)) {
+ return nullptr;
+ }
+
+ if (cx->nursery().isEnabled() && heap != TenuredHeap &&
+ cx->nursery().canAllocateBigInts() && cx->zone()->allocNurseryBigInts) {
+ auto* bi = static_cast<JS::BigInt*>(
+ rt->gc.tryNewNurseryBigInt<allowGC>(cx, size, kind));
+ if (bi) {
+ return bi;
+ }
+
+ // Our most common non-jit allocation path is NoGC; thus, if we fail the
+ // alloc and cannot GC, we *must* return nullptr here so that the caller
+ // will do a CanGC allocation to clear the nursery. Failing to do so will
+ // cause all allocations on this path to land in Tenured, and we will not
+ // get the benefit of the nursery.
+ if (!allowGC) {
+ return nullptr;
+ }
+ }
+
+ return GCRuntime::tryNewTenuredThing<JS::BigInt, allowGC>(cx, kind, size);
+}
+template JS::BigInt* js::AllocateBigInt<NoGC>(JSContext* cx,
+ gc::InitialHeap heap);
+template JS::BigInt* js::AllocateBigInt<CanGC>(JSContext* cx,
+ gc::InitialHeap heap);
+
+#define DECL_ALLOCATOR_INSTANCES(allocKind, traceKind, type, sizedType, \
+ bgfinal, nursery, compact) \
+ template type* js::AllocateStringImpl<type, NoGC>(JSContext * cx, \
+ InitialHeap heap); \
+ template type* js::AllocateStringImpl<type, CanGC>(JSContext * cx, \
+ InitialHeap heap);
+FOR_EACH_NURSERY_STRING_ALLOCKIND(DECL_ALLOCATOR_INSTANCES)
+#undef DECL_ALLOCATOR_INSTANCES
+
+template <typename T, AllowGC allowGC /* = CanGC */>
+T* js::Allocate(JSContext* cx) {
+ static_assert(!std::is_convertible_v<T*, JSObject*>,
+ "must not be JSObject derived");
+ static_assert(
+ sizeof(T) >= MinCellSize,
+ "All allocations must be at least the allocator-imposed minimum size.");
+
+ AllocKind kind = MapTypeToFinalizeKind<T>::kind;
+ size_t thingSize = sizeof(T);
+ MOZ_ASSERT(thingSize == Arena::thingSize(kind));
+
+ if (!cx->isHelperThreadContext()) {
+ if (!cx->runtime()->gc.checkAllocatorState<allowGC>(cx, kind)) {
+ return nullptr;
+ }
+ }
+
+ return GCRuntime::tryNewTenuredThing<T, allowGC>(cx, kind, thingSize);
+}
+
+#define DECL_ALLOCATOR_INSTANCES(allocKind, traceKind, type, sizedType, \
+ bgFinal, nursery, compact) \
+ template type* js::Allocate<type, NoGC>(JSContext * cx); \
+ template type* js::Allocate<type, CanGC>(JSContext * cx);
+FOR_EACH_NONOBJECT_NONNURSERY_ALLOCKIND(DECL_ALLOCATOR_INSTANCES)
+#undef DECL_ALLOCATOR_INSTANCES
+
+template <typename T, AllowGC allowGC>
+/* static */
+T* GCRuntime::tryNewTenuredThing(JSContext* cx, AllocKind kind,
+ size_t thingSize) {
+ // Bump allocate in the arena's current free-list span.
+ auto* t = reinterpret_cast<T*>(cx->freeLists().allocate(kind));
+ if (MOZ_UNLIKELY(!t)) {
+ // Get the next available free list and allocate out of it. This may
+ // acquire a new arena, which will lock the chunk list. If there are no
+ // chunks available it may also allocate new memory directly.
+ t = reinterpret_cast<T*>(refillFreeListFromAnyThread(cx, kind));
+
+ if (MOZ_UNLIKELY(!t)) {
+ if (allowGC) {
+ cx->runtime()->gc.attemptLastDitchGC(cx);
+ t = tryNewTenuredThing<T, NoGC>(cx, kind, thingSize);
+ }
+ if (!t) {
+ if (allowGC) {
+ ReportOutOfMemory(cx);
+ }
+ return nullptr;
+ }
+ }
+ }
+
+ checkIncrementalZoneState(cx, t);
+ gcprobes::TenuredAlloc(t, kind);
+ // We count this regardless of the profiler's state, assuming that it costs
+ // just as much to count it, as to check the profiler's state and decide not
+ // to count it.
+ cx->noteTenuredAlloc();
+ return t;
+}
+
+void GCRuntime::attemptLastDitchGC(JSContext* cx) {
+ // Either there was no memory available for a new chunk or the heap hit its
+ // size limit. Try to perform an all-compartments, non-incremental, shrinking
+ // GC and wait for it to finish.
+
+ if (cx->isHelperThreadContext()) {
+ return;
+ }
+
+ if (!lastLastDitchTime.IsNull() &&
+ TimeStamp::Now() - lastLastDitchTime <= tunables.minLastDitchGCPeriod()) {
+ return;
+ }
+
+ JS::PrepareForFullGC(cx);
+ gc(GC_SHRINK, JS::GCReason::LAST_DITCH);
+ waitBackgroundAllocEnd();
+ waitBackgroundFreeEnd();
+
+ lastLastDitchTime = mozilla::TimeStamp::Now();
+}
+
+template <AllowGC allowGC>
+bool GCRuntime::checkAllocatorState(JSContext* cx, AllocKind kind) {
+ if (allowGC) {
+ if (!gcIfNeededAtAllocation(cx)) {
+ return false;
+ }
+ }
+
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+ MOZ_ASSERT_IF(cx->zone()->isAtomsZone(),
+ kind == AllocKind::ATOM || kind == AllocKind::FAT_INLINE_ATOM ||
+ kind == AllocKind::SYMBOL || kind == AllocKind::JITCODE ||
+ kind == AllocKind::SCOPE);
+ MOZ_ASSERT_IF(!cx->zone()->isAtomsZone(),
+ kind != AllocKind::ATOM && kind != AllocKind::FAT_INLINE_ATOM);
+ MOZ_ASSERT_IF(cx->zone()->isSelfHostingZone(),
+ !rt->parentRuntime && !selfHostingZoneFrozen);
+ MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
+#endif
+
+ // Crash if we perform a GC action when it is not safe.
+ if (allowGC && !cx->suppressGC) {
+ cx->verifyIsSafeToGC();
+ }
+
+ // For testing out of memory conditions
+ if (js::oom::ShouldFailWithOOM()) {
+ // If we are doing a fallible allocation, percolate up the OOM
+ // instead of reporting it.
+ if (allowGC) {
+ ReportOutOfMemory(cx);
+ }
+ return false;
+ }
+
+ return true;
+}
+
+inline bool GCRuntime::gcIfNeededAtAllocation(JSContext* cx) {
+#ifdef JS_GC_ZEAL
+ if (needZealousGC()) {
+ runDebugGC();
+ }
+#endif
+
+ // Invoking the interrupt callback can fail and we can't usefully
+ // handle that here. Just check in case we need to collect instead.
+ if (cx->hasAnyPendingInterrupt()) {
+ gcIfRequested();
+ }
+
+ return true;
+}
+
+template <typename T>
+/* static */
+void GCRuntime::checkIncrementalZoneState(JSContext* cx, T* t) {
+#ifdef DEBUG
+ if (cx->isHelperThreadContext() || !t) {
+ return;
+ }
+
+ TenuredCell* cell = &t->asTenured();
+ Zone* zone = cell->zone();
+ if (zone->isGCMarkingOrSweeping()) {
+ MOZ_ASSERT(cell->isMarkedBlack());
+ } else {
+ MOZ_ASSERT(!cell->isMarkedAny());
+ }
+#endif
+}
+
+TenuredCell* js::gc::AllocateCellInGC(Zone* zone, AllocKind thingKind) {
+ TenuredCell* cell = zone->arenas.allocateFromFreeList(thingKind);
+ if (!cell) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ cell = GCRuntime::refillFreeListInGC(zone, thingKind);
+ if (!cell) {
+ oomUnsafe.crash(ChunkSize, "Failed to allocate new chunk during GC");
+ }
+ }
+ return cell;
+}
+
+// /////////// Arena -> Thing Allocator //////////////////////////////////////
+
+void GCRuntime::startBackgroundAllocTaskIfIdle() {
+ AutoLockHelperThreadState lock;
+ if (!allocTask.wasStarted(lock)) {
+ // Join the previous invocation of the task. This will return immediately
+ // if the thread has never been started.
+ allocTask.joinWithLockHeld(lock);
+ allocTask.startWithLockHeld(lock);
+ }
+}
+
+/* static */
+TenuredCell* GCRuntime::refillFreeListFromAnyThread(JSContext* cx,
+ AllocKind thingKind) {
+ MOZ_ASSERT(cx->freeLists().isEmpty(thingKind));
+
+ if (!cx->isHelperThreadContext()) {
+ return refillFreeListFromMainThread(cx, thingKind);
+ }
+
+ return refillFreeListFromHelperThread(cx, thingKind);
+}
+
+/* static */
+TenuredCell* GCRuntime::refillFreeListFromMainThread(JSContext* cx,
+ AllocKind thingKind) {
+ // It should not be possible to allocate on the main thread while we are
+ // inside a GC.
+ MOZ_ASSERT(!JS::RuntimeHeapIsBusy(), "allocating while under GC");
+
+ return cx->zone()->arenas.refillFreeListAndAllocate(
+ cx->freeLists(), thingKind, ShouldCheckThresholds::CheckThresholds);
+}
+
+/* static */
+TenuredCell* GCRuntime::refillFreeListFromHelperThread(JSContext* cx,
+ AllocKind thingKind) {
+ // A GC may be happening on the main thread, but zones used by off thread
+ // tasks are never collected.
+ Zone* zone = cx->zone();
+ MOZ_ASSERT(!zone->wasGCStarted());
+
+ return zone->arenas.refillFreeListAndAllocate(
+ cx->freeLists(), thingKind, ShouldCheckThresholds::CheckThresholds);
+}
+
+/* static */
+TenuredCell* GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind) {
+ // Called by compacting GC to refill a free list while we are in a GC.
+ MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
+ MOZ_ASSERT_IF(!JS::RuntimeHeapIsMinorCollecting(),
+ !zone->runtimeFromMainThread()->gc.isBackgroundSweeping());
+
+ return zone->arenas.refillFreeListAndAllocate(
+ zone->arenas.freeLists(), thingKind,
+ ShouldCheckThresholds::DontCheckThresholds);
+}
+
+TenuredCell* ArenaLists::refillFreeListAndAllocate(
+ FreeLists& freeLists, AllocKind thingKind,
+ ShouldCheckThresholds checkThresholds) {
+ MOZ_ASSERT(freeLists.isEmpty(thingKind));
+
+ JSRuntime* rt = runtimeFromAnyThread();
+
+ mozilla::Maybe<AutoLockGCBgAlloc> maybeLock;
+
+ // See if we can proceed without taking the GC lock.
+ if (concurrentUse(thingKind) != ConcurrentUse::None) {
+ maybeLock.emplace(rt);
+ }
+
+ Arena* arena = arenaList(thingKind).takeNextArena();
+ if (arena) {
+ // Empty arenas should be immediately freed.
+ MOZ_ASSERT(!arena->isEmpty());
+
+ return freeLists.setArenaAndAllocate(arena, thingKind);
+ }
+
+ // Parallel threads have their own ArenaLists, but chunks are shared;
+ // if we haven't already, take the GC lock now to avoid racing.
+ if (maybeLock.isNothing()) {
+ maybeLock.emplace(rt);
+ }
+
+ TenuredChunk* chunk = rt->gc.pickChunk(maybeLock.ref());
+ if (!chunk) {
+ return nullptr;
+ }
+
+ // Although our chunk should definitely have enough space for another arena,
+ // there are other valid reasons why TenuredChunk::allocateArena() may fail.
+ arena = rt->gc.allocateArena(chunk, zone_, thingKind, checkThresholds,
+ maybeLock.ref());
+ if (!arena) {
+ return nullptr;
+ }
+
+ addNewArena(arena, thingKind);
+
+ return freeLists.setArenaAndAllocate(arena, thingKind);
+}
+
+inline void ArenaLists::addNewArena(Arena* arena, AllocKind thingKind) {
+ ArenaList& al = zone_->isGCMarking() ? newArenasInMarkPhase(thingKind)
+ : arenaList(thingKind);
+
+ MOZ_ASSERT(al.isCursorAtEnd());
+ al.insertBeforeCursor(arena);
+}
+
+inline TenuredCell* FreeLists::setArenaAndAllocate(Arena* arena,
+ AllocKind kind) {
+#ifdef DEBUG
+ auto old = freeLists_[kind];
+ if (!old->isEmpty()) {
+ old->getArena()->checkNoMarkedFreeCells();
+ }
+#endif
+
+ FreeSpan* span = arena->getFirstFreeSpan();
+ freeLists_[kind] = span;
+
+ Zone* zone = arena->zone;
+ if (MOZ_UNLIKELY(zone->isGCMarkingOrSweeping())) {
+ arena->arenaAllocatedDuringGC();
+ }
+
+ TenuredCell* thing = span->allocate(Arena::thingSize(kind));
+ MOZ_ASSERT(thing); // This allocation is infallible.
+
+ return thing;
+}
+
+void Arena::arenaAllocatedDuringGC() {
+ // Ensure that anything allocated during the mark or sweep phases of an
+ // incremental GC will be marked black by pre-marking all free cells in the
+ // arena we are about to allocate from.
+
+ MOZ_ASSERT(zone->isGCMarkingOrSweeping());
+ for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
+ MOZ_ASSERT(!cell->isMarkedAny());
+ cell->markBlack();
+ }
+}
+
+void GCRuntime::setParallelAtomsAllocEnabled(bool enabled) {
+ // This can only be changed on the main thread otherwise we could race.
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ MOZ_ASSERT(enabled == rt->hasHelperThreadZones());
+
+ atomsZone->arenas.setParallelAllocEnabled(enabled);
+}
+
+void ArenaLists::setParallelAllocEnabled(bool enabled) {
+ MOZ_ASSERT(zone_->isAtomsZone());
+
+ static const ConcurrentUse states[2] = {ConcurrentUse::None,
+ ConcurrentUse::ParallelAlloc};
+
+ for (auto kind : AllAllocKinds()) {
+ MOZ_ASSERT(concurrentUse(kind) == states[!enabled]);
+ concurrentUse(kind) = states[enabled];
+ }
+}
+
+void GCRuntime::setParallelUnmarkEnabled(bool enabled) {
+ // This can only be changed on the main thread otherwise we could race.
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ MOZ_ASSERT(JS::RuntimeHeapIsMajorCollecting());
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->arenas.setParallelUnmarkEnabled(enabled);
+ }
+}
+
+void ArenaLists::setParallelUnmarkEnabled(bool enabled) {
+ static const ConcurrentUse states[2] = {ConcurrentUse::None,
+ ConcurrentUse::ParallelUnmark};
+
+ for (auto kind : AllAllocKinds()) {
+ MOZ_ASSERT(concurrentUse(kind) == states[!enabled]);
+ concurrentUse(kind) = states[enabled];
+ }
+}
+
+// /////////// TenuredChunk -> Arena Allocator ///////////////////////////////
+
+bool GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const {
+ // To minimize memory waste, we do not want to run the background chunk
+ // allocation if we already have some empty chunks or when the runtime has
+ // a small heap size (and therefore likely has a small growth rate).
+ return allocTask.enabled() &&
+ emptyChunks(lock).count() < tunables.minEmptyChunkCount(lock) &&
+ (fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
+}
+
+Arena* GCRuntime::allocateArena(TenuredChunk* chunk, Zone* zone,
+ AllocKind thingKind,
+ ShouldCheckThresholds checkThresholds,
+ const AutoLockGC& lock) {
+ MOZ_ASSERT(chunk->hasAvailableArenas());
+
+ // Fail the allocation if we are over our heap size limits.
+ if ((checkThresholds != ShouldCheckThresholds::DontCheckThresholds) &&
+ (heapSize.bytes() >= tunables.gcMaxBytes()))
+ return nullptr;
+
+ Arena* arena = chunk->allocateArena(this, zone, thingKind, lock);
+ zone->gcHeapSize.addGCArena();
+
+ // Trigger an incremental slice if needed.
+ if (checkThresholds != ShouldCheckThresholds::DontCheckThresholds) {
+ maybeTriggerGCAfterAlloc(zone);
+ }
+
+ return arena;
+}
+
+Arena* TenuredChunk::allocateArena(GCRuntime* gc, Zone* zone,
+ AllocKind thingKind,
+ const AutoLockGC& lock) {
+ Arena* arena = info.numArenasFreeCommitted > 0 ? fetchNextFreeArena(gc)
+ : fetchNextDecommittedArena();
+ arena->init(zone, thingKind, lock);
+ updateChunkListAfterAlloc(gc, lock);
+ return arena;
+}
+
+inline void GCRuntime::updateOnFreeArenaAlloc(const TenuredChunkInfo& info) {
+ MOZ_ASSERT(info.numArenasFreeCommitted <= numArenasFreeCommitted);
+ --numArenasFreeCommitted;
+}
+
+Arena* TenuredChunk::fetchNextFreeArena(GCRuntime* gc) {
+ MOZ_ASSERT(info.numArenasFreeCommitted > 0);
+ MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
+
+ Arena* arena = info.freeArenasHead;
+ info.freeArenasHead = arena->next;
+ --info.numArenasFreeCommitted;
+ --info.numArenasFree;
+ gc->updateOnFreeArenaAlloc(info);
+
+ return arena;
+}
+
+Arena* TenuredChunk::fetchNextDecommittedArena() {
+ MOZ_ASSERT(info.numArenasFreeCommitted == 0);
+ MOZ_ASSERT(info.numArenasFree > 0);
+
+ unsigned offset = findDecommittedArenaOffset();
+ info.lastDecommittedArenaOffset = offset + 1;
+ --info.numArenasFree;
+ decommittedArenas[offset] = false;
+
+ Arena* arena = &arenas[offset];
+ MarkPagesInUseSoft(arena, ArenaSize);
+ arena->setAsNotAllocated();
+
+ return arena;
+}
+
+/*
+ * Search for and return the next decommitted Arena. Our goal is to keep
+ * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
+ * it to the most recently freed arena when we free, and forcing it to
+ * the last alloc + 1 when we allocate.
+ */
+uint32_t TenuredChunk::findDecommittedArenaOffset() {
+ /* Note: lastDecommittedArenaOffset can be past the end of the list. */
+ for (unsigned i = info.lastDecommittedArenaOffset; i < ArenasPerChunk; i++) {
+ if (decommittedArenas[i]) {
+ return i;
+ }
+ }
+ for (unsigned i = 0; i < info.lastDecommittedArenaOffset; i++) {
+ if (decommittedArenas[i]) {
+ return i;
+ }
+ }
+ MOZ_CRASH("No decommitted arenas found.");
+}
+
+// /////////// System -> TenuredChunk Allocator //////////////////////////////
+
+TenuredChunk* GCRuntime::getOrAllocChunk(AutoLockGCBgAlloc& lock) {
+ TenuredChunk* chunk = emptyChunks(lock).pop();
+ if (!chunk) {
+ chunk = TenuredChunk::allocate(this);
+ if (!chunk) {
+ return nullptr;
+ }
+ MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
+ }
+
+ if (wantBackgroundAllocation(lock)) {
+ lock.tryToStartBackgroundAllocation();
+ }
+
+ return chunk;
+}
+
+void GCRuntime::recycleChunk(TenuredChunk* chunk, const AutoLockGC& lock) {
+ AlwaysPoison(chunk, JS_FREED_CHUNK_PATTERN, sizeof(ChunkBase),
+ MemCheckKind::MakeNoAccess);
+ emptyChunks(lock).push(chunk);
+}
+
+TenuredChunk* GCRuntime::pickChunk(AutoLockGCBgAlloc& lock) {
+ if (availableChunks(lock).count()) {
+ return availableChunks(lock).head();
+ }
+
+ TenuredChunk* chunk = getOrAllocChunk(lock);
+ if (!chunk) {
+ return nullptr;
+ }
+
+ chunk->init(this);
+ MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
+ MOZ_ASSERT(chunk->unused());
+ MOZ_ASSERT(!fullChunks(lock).contains(chunk));
+ MOZ_ASSERT(!availableChunks(lock).contains(chunk));
+
+ availableChunks(lock).push(chunk);
+
+ return chunk;
+}
+
+BackgroundAllocTask::BackgroundAllocTask(GCRuntime* gc, ChunkPool& pool)
+ : GCParallelTask(gc),
+ chunkPool_(pool),
+ enabled_(CanUseExtraThreads() && GetCPUCount() >= 2) {}
+
+void BackgroundAllocTask::run(AutoLockHelperThreadState& lock) {
+ AutoUnlockHelperThreadState unlock(lock);
+
+ TraceLoggerThread* logger = TraceLoggerForCurrentThread();
+ AutoTraceLog logAllocation(logger, TraceLogger_GCAllocation);
+
+ AutoLockGC gcLock(gc);
+ while (!isCancelled() && gc->wantBackgroundAllocation(gcLock)) {
+ TenuredChunk* chunk;
+ {
+ AutoUnlockGC unlock(gcLock);
+ chunk = TenuredChunk::allocate(gc);
+ if (!chunk) {
+ break;
+ }
+ chunk->init(gc);
+ }
+ chunkPool_.ref().push(chunk);
+ }
+}
+
+/* static */
+TenuredChunk* TenuredChunk::allocate(GCRuntime* gc) {
+ void* chunk = MapAlignedPages(ChunkSize, ChunkSize);
+ if (!chunk) {
+ return nullptr;
+ }
+
+ gc->stats().count(gcstats::COUNT_NEW_CHUNK);
+ return static_cast<TenuredChunk*>(chunk);
+}
+
+void TenuredChunk::init(GCRuntime* gc) {
+ /* The chunk may still have some regions marked as no-access. */
+ MOZ_MAKE_MEM_UNDEFINED(this, ChunkSize);
+
+ /*
+ * Poison the chunk. Note that decommitAllArenas() below will mark the
+ * arenas as inaccessible (for memory sanitizers).
+ */
+ Poison(this, JS_FRESH_TENURED_PATTERN, ChunkSize,
+ MemCheckKind::MakeUndefined);
+
+ new (this) TenuredChunk(gc->rt);
+
+ /*
+ * Decommit the arenas. We do this after poisoning so that if the OS does
+ * not have to recycle the pages, we still get the benefit of poisoning.
+ */
+ decommitAllArenas();
+
+ /* The rest of info fields are initialized in pickChunk. */
+}
+
+void TenuredChunk::decommitAllArenas() {
+ decommittedArenas.SetAll();
+ MarkPagesUnusedSoft(&arenas[0], ArenasPerChunk * ArenaSize);
+
+ info.freeArenasHead = nullptr;
+ info.lastDecommittedArenaOffset = 0;
+ info.numArenasFree = ArenasPerChunk;
+ info.numArenasFreeCommitted = 0;
+}
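Allocator.cpp above follows a tiered shape for tenured allocation: tryNewTenuredThing() first bump-allocates from the current free span, then refills the free list from an arena or chunk via refillFreeListFromAnyThread(), and only then attempts a last-ditch GC before reporting OOM. The sketch below shows the bump-pointer-with-refill core of that shape in plain C++; Span and Pool are hypothetical stand-ins, not the real FreeSpan/ArenaLists types, and the GC fallback is only noted in comments.

// Illustrative bump allocator with a refill fallback (hypothetical types).
#include <cstddef>
#include <cstdio>
#include <memory>
#include <vector>

struct Span {
  char* cursor = nullptr;
  char* end = nullptr;

  // Fast path: bump-allocate |size| bytes, or return nullptr when the span is
  // exhausted (mirrors FreeLists::allocate handing control to the refill path).
  void* allocate(size_t size) {
    if (!cursor || size > size_t(end - cursor)) {
      return nullptr;
    }
    void* cell = cursor;
    cursor += size;
    return cell;
  }
};

class Pool {
  static constexpr size_t ArenaBytes = 4096;
  std::vector<std::unique_ptr<char[]>> arenas_;
  Span span_;

 public:
  void* allocate(size_t size) {
    // 1. Bump-allocate in the current span.
    if (void* cell = span_.allocate(size)) {
      return cell;
    }
    // 2. Slow path: "refill the free list" by acquiring a fresh arena.
    //    (The real code would try a last-ditch GC before giving up.)
    arenas_.push_back(std::make_unique<char[]>(ArenaBytes));
    char* base = arenas_.back().get();
    span_ = Span{base, base + ArenaBytes};
    return span_.allocate(size);
  }
};

int main() {
  Pool pool;
  for (int i = 0; i < 1000; i++) {
    if (!pool.allocate(64)) {
      return 1;  // the real code would ReportOutOfMemory(cx) here
    }
  }
  std::puts("allocated 1000 cells");
}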
diff --git a/js/src/gc/Allocator.h b/js/src/gc/Allocator.h
new file mode 100644
index 0000000000..6becd816b9
--- /dev/null
+++ b/js/src/gc/Allocator.h
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Allocator_h
+#define gc_Allocator_h
+
+#include <stdint.h>
+
+#include "gc/AllocKind.h"
+#include "js/RootingAPI.h"
+
+class JSFatInlineString;
+
+namespace js {
+
+enum AllowGC { NoGC = 0, CanGC = 1 };
+
+namespace gc {
+
+/*
+ * This flag allows an allocation site to request a specific heap based upon the
+ * estimated lifetime or lifetime requirements of objects allocated from that
+ * site.
+ */
+enum InitialHeap : uint8_t { DefaultHeap, TenuredHeap };
+
+} // namespace gc
+
+// Allocate a new GC thing that's not a JSObject or a string.
+//
+// After a successful allocation the caller must fully initialize the thing
+// before calling any function that can potentially trigger GC. This will ensure
+// that GC tracing never sees junk values stored in the partially initialized
+// thing.
+template <typename T, AllowGC allowGC = CanGC>
+T* Allocate(JSContext* cx);
+
+// Allocate a JSObject.
+//
+// A longer signature that includes additional information in support of various
+// optimizations. If dynamic slots are requested they will be allocated and the
+// pointer stored directly in |NativeObject::slots_|.
+template <AllowGC allowGC = CanGC>
+JSObject* AllocateObject(JSContext* cx, gc::AllocKind kind,
+ size_t nDynamicSlots, gc::InitialHeap heap,
+ const JSClass* clasp);
+
+// Internal function used for nursery-allocatable strings.
+template <typename StringAllocT, AllowGC allowGC = CanGC>
+StringAllocT* AllocateStringImpl(JSContext* cx, gc::InitialHeap heap);
+
+// Allocate a string.
+//
+// Use for nursery-allocatable strings. Returns a value cast to the correct
+// type.
+template <typename StringT, AllowGC allowGC = CanGC>
+StringT* AllocateString(JSContext* cx, gc::InitialHeap heap) {
+ return static_cast<StringT*>(AllocateStringImpl<JSString, allowGC>(cx, heap));
+}
+
+// Specialization for JSFatInlineString that must use a different allocation
+// type. Note that we have to explicitly specialize for both values of AllowGC
+// because partial function specialization is not allowed.
+template <>
+inline JSFatInlineString* AllocateString<JSFatInlineString, CanGC>(
+ JSContext* cx, gc::InitialHeap heap) {
+ return static_cast<JSFatInlineString*>(
+ js::AllocateStringImpl<JSFatInlineString, CanGC>(cx, heap));
+}
+
+template <>
+inline JSFatInlineString* AllocateString<JSFatInlineString, NoGC>(
+ JSContext* cx, gc::InitialHeap heap) {
+ return static_cast<JSFatInlineString*>(
+ js::AllocateStringImpl<JSFatInlineString, NoGC>(cx, heap));
+}
+
+// Allocate a BigInt.
+//
+// Use for nursery-allocatable BigInt.
+template <AllowGC allowGC = CanGC>
+JS::BigInt* AllocateBigInt(JSContext* cx, gc::InitialHeap heap);
+
+} // namespace js
+
+#endif // gc_Allocator_h
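The comment above AllocateString's JSFatInlineString overloads notes that both AllowGC values must be specialized explicitly because function templates cannot be partially specialized. The standalone sketch below illustrates that C++ rule with hypothetical names (DemoAlloc, Str, Fat); it is not SpiderMonkey code.

// Why both AllowGC values get a full specialization (illustrative only).
#include <cstdio>

enum AllowGC { NoGC = 0, CanGC = 1 };
struct Str {};
struct Fat : Str {};

template <typename T, AllowGC allowGC = CanGC>
T* DemoAlloc() {
  return new T();  // generic path
}

// A partial specialization that fixes only T would be ill-formed:
//   template <AllowGC allowGC>
//   Fat* DemoAlloc<Fat, allowGC>();  // error: function templates cannot be
//                                    // partially specialized
// so each AllowGC value is specialized in full, as Allocator.h does for
// JSFatInlineString.
template <>
Fat* DemoAlloc<Fat, CanGC>() {
  return new Fat();
}
template <>
Fat* DemoAlloc<Fat, NoGC>() {
  return new Fat();
}

int main() {
  Str* s = DemoAlloc<Str>();        // primary template, allowGC defaults to CanGC
  Fat* f = DemoAlloc<Fat, NoGC>();  // full specialization
  delete s;
  delete f;
  std::puts("ok");
}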
diff --git a/js/src/gc/ArenaList-inl.h b/js/src/gc/ArenaList-inl.h
new file mode 100644
index 0000000000..f591ef4766
--- /dev/null
+++ b/js/src/gc/ArenaList-inl.h
@@ -0,0 +1,333 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_ArenaList_inl_h
+#define gc_ArenaList_inl_h
+
+#include "gc/ArenaList.h"
+
+#include "gc/Heap.h"
+#include "gc/Zone.h"
+
+void js::gc::SortedArenaListSegment::append(Arena* arena) {
+ MOZ_ASSERT(arena);
+ MOZ_ASSERT_IF(head, head->getAllocKind() == arena->getAllocKind());
+ *tailp = arena;
+ tailp = &arena->next;
+}
+
+inline js::gc::ArenaList::ArenaList() { clear(); }
+
+inline js::gc::ArenaList::ArenaList(ArenaList&& other) { moveFrom(other); }
+
+inline js::gc::ArenaList::~ArenaList() { MOZ_ASSERT(isEmpty()); }
+
+void js::gc::ArenaList::moveFrom(ArenaList& other) {
+ other.check();
+
+ head_ = other.head_;
+ cursorp_ = other.isCursorAtHead() ? &head_ : other.cursorp_;
+ other.clear();
+
+ check();
+}
+
+js::gc::ArenaList& js::gc::ArenaList::operator=(ArenaList&& other) {
+ MOZ_ASSERT(isEmpty());
+ moveFrom(other);
+ return *this;
+}
+
+inline js::gc::ArenaList::ArenaList(const SortedArenaListSegment& segment) {
+ head_ = segment.head;
+ cursorp_ = segment.isEmpty() ? &head_ : segment.tailp;
+ check();
+}
+
+// This does checking just of |head_| and |cursorp_|.
+void js::gc::ArenaList::check() const {
+#ifdef DEBUG
+ // If the list is empty, it must have this form.
+ MOZ_ASSERT_IF(!head_, cursorp_ == &head_);
+
+ // If there's an arena following the cursor, it must not be full.
+ Arena* cursor = *cursorp_;
+ MOZ_ASSERT_IF(cursor, cursor->hasFreeThings());
+#endif
+}
+
+void js::gc::ArenaList::clear() {
+ head_ = nullptr;
+ cursorp_ = &head_;
+ check();
+}
+
+bool js::gc::ArenaList::isEmpty() const {
+ check();
+ return !head_;
+}
+
+js::gc::Arena* js::gc::ArenaList::head() const {
+ check();
+ return head_;
+}
+
+bool js::gc::ArenaList::isCursorAtHead() const {
+ check();
+ return cursorp_ == &head_;
+}
+
+bool js::gc::ArenaList::isCursorAtEnd() const {
+ check();
+ return !*cursorp_;
+}
+
+js::gc::Arena* js::gc::ArenaList::arenaAfterCursor() const {
+ check();
+ return *cursorp_;
+}
+
+js::gc::Arena* js::gc::ArenaList::takeNextArena() {
+ check();
+ Arena* arena = *cursorp_;
+ if (!arena) {
+ return nullptr;
+ }
+ cursorp_ = &arena->next;
+ check();
+ return arena;
+}
+
+void js::gc::ArenaList::insertAtCursor(Arena* a) {
+ check();
+ a->next = *cursorp_;
+ *cursorp_ = a;
+ // At this point, the cursor is sitting before |a|. Move it after |a|
+ // if necessary.
+ if (!a->hasFreeThings()) {
+ cursorp_ = &a->next;
+ }
+ check();
+}
+
+void js::gc::ArenaList::insertBeforeCursor(Arena* a) {
+ check();
+ a->next = *cursorp_;
+ *cursorp_ = a;
+ cursorp_ = &a->next;
+ check();
+}
+
+js::gc::ArenaList& js::gc::ArenaList::insertListWithCursorAtEnd(
+ ArenaList& other) {
+ check();
+ other.check();
+ MOZ_ASSERT(other.isCursorAtEnd());
+
+ if (other.isEmpty()) {
+ return *this;
+ }
+
+ // Insert the full arenas of |other| after those of |this|.
+ *other.cursorp_ = *cursorp_;
+ *cursorp_ = other.head_;
+ cursorp_ = other.cursorp_;
+ check();
+
+ other.clear();
+ return *this;
+}
+
+js::gc::SortedArenaList::SortedArenaList(size_t thingsPerArena) {
+ reset(thingsPerArena);
+}
+
+void js::gc::SortedArenaList::setThingsPerArena(size_t thingsPerArena) {
+ MOZ_ASSERT(thingsPerArena && thingsPerArena <= MaxThingsPerArena);
+ thingsPerArena_ = thingsPerArena;
+}
+
+void js::gc::SortedArenaList::reset(size_t thingsPerArena) {
+ setThingsPerArena(thingsPerArena);
+ // Initialize the segments.
+ for (size_t i = 0; i <= thingsPerArena; ++i) {
+ segments[i].clear();
+ }
+}
+
+void js::gc::SortedArenaList::insertAt(Arena* arena, size_t nfree) {
+ MOZ_ASSERT(nfree <= thingsPerArena_);
+ segments[nfree].append(arena);
+}
+
+void js::gc::SortedArenaList::extractEmpty(Arena** empty) {
+ SortedArenaListSegment& segment = segments[thingsPerArena_];
+ if (segment.head) {
+ *segment.tailp = *empty;
+ *empty = segment.head;
+ segment.clear();
+ }
+}
+
+js::gc::ArenaList js::gc::SortedArenaList::toArenaList() {
+ // Link the non-empty segment tails up to the non-empty segment heads.
+ size_t tailIndex = 0;
+ for (size_t headIndex = 1; headIndex <= thingsPerArena_; ++headIndex) {
+ if (headAt(headIndex)) {
+ segments[tailIndex].linkTo(headAt(headIndex));
+ tailIndex = headIndex;
+ }
+ }
+ // Point the tail of the final non-empty segment at null. Note that if
+ // the list is empty, this will just set segments[0].head to null.
+ segments[tailIndex].linkTo(nullptr);
+ // Create an ArenaList with head and cursor set to the head and tail of
+ // the first segment (if that segment is empty, only the head is used).
+ return ArenaList(segments[0]);
+}
+
+#ifdef DEBUG
+
+bool js::gc::FreeLists::allEmpty() const {
+ for (auto i : AllAllocKinds()) {
+ if (!isEmpty(i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool js::gc::FreeLists::isEmpty(AllocKind kind) const {
+ return freeLists_[kind]->isEmpty();
+}
+
+#endif
+
+void js::gc::FreeLists::clear() {
+ for (auto i : AllAllocKinds()) {
+#ifdef DEBUG
+ auto old = freeLists_[i];
+ if (!old->isEmpty()) {
+ old->getArena()->checkNoMarkedFreeCells();
+ }
+#endif
+ freeLists_[i] = &emptySentinel;
+ }
+}
+
+js::gc::TenuredCell* js::gc::FreeLists::allocate(AllocKind kind) {
+ return freeLists_[kind]->allocate(Arena::thingSize(kind));
+}
+
+void js::gc::FreeLists::unmarkPreMarkedFreeCells(AllocKind kind) {
+ FreeSpan* freeSpan = freeLists_[kind];
+ if (!freeSpan->isEmpty()) {
+ freeSpan->getArena()->unmarkPreMarkedFreeCells();
+ }
+}
+
+JSRuntime* js::gc::ArenaLists::runtime() {
+ return zone_->runtimeFromMainThread();
+}
+
+JSRuntime* js::gc::ArenaLists::runtimeFromAnyThread() {
+ return zone_->runtimeFromAnyThread();
+}
+
+js::gc::Arena* js::gc::ArenaLists::getFirstArena(AllocKind thingKind) const {
+ return arenaList(thingKind).head();
+}
+
+js::gc::Arena* js::gc::ArenaLists::getFirstArenaToSweep(
+ AllocKind thingKind) const {
+ return arenasToSweep(thingKind);
+}
+
+js::gc::Arena* js::gc::ArenaLists::getFirstSweptArena(
+ AllocKind thingKind) const {
+ if (thingKind != incrementalSweptArenaKind.ref()) {
+ return nullptr;
+ }
+ return incrementalSweptArenas.ref().head();
+}
+
+js::gc::Arena* js::gc::ArenaLists::getFirstNewArenaInMarkPhase(
+ AllocKind thingKind) const {
+ return newArenasInMarkPhase(thingKind).head();
+}
+
+js::gc::Arena* js::gc::ArenaLists::getArenaAfterCursor(
+ AllocKind thingKind) const {
+ return arenaList(thingKind).arenaAfterCursor();
+}
+
+bool js::gc::ArenaLists::arenaListsAreEmpty() const {
+ for (auto i : AllAllocKinds()) {
+ /*
+ * The arena cannot be empty if the background finalization is not yet
+ * done.
+ */
+ if (concurrentUse(i) == ConcurrentUse::BackgroundFinalize) {
+ return false;
+ }
+ if (!arenaList(i).isEmpty()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void js::gc::ArenaLists::unmarkAll() {
+ for (auto i : AllAllocKinds()) {
+ /* The background finalization must have stopped at this point. */
+ MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
+ for (Arena* arena = arenaList(i).head(); arena; arena = arena->next) {
+ arena->unmarkAll();
+ }
+ }
+}
+
+bool js::gc::ArenaLists::doneBackgroundFinalize(AllocKind kind) const {
+ return concurrentUse(kind) != ConcurrentUse::BackgroundFinalize;
+}
+
+bool js::gc::ArenaLists::needBackgroundFinalizeWait(AllocKind kind) const {
+ return concurrentUse(kind) == ConcurrentUse::BackgroundFinalize;
+}
+
+void js::gc::ArenaLists::clearFreeLists() { freeLists().clear(); }
+
+MOZ_ALWAYS_INLINE js::gc::TenuredCell* js::gc::ArenaLists::allocateFromFreeList(
+ AllocKind thingKind) {
+ return freeLists().allocate(thingKind);
+}
+
+void js::gc::ArenaLists::unmarkPreMarkedFreeCells() {
+ for (auto i : AllAllocKinds()) {
+ freeLists().unmarkPreMarkedFreeCells(i);
+ }
+}
+
+void js::gc::ArenaLists::mergeNewArenasInMarkPhase() {
+ for (auto i : AllAllocKinds()) {
+ arenaList(i).insertListWithCursorAtEnd(newArenasInMarkPhase(i));
+ newArenasInMarkPhase(i).clear();
+ }
+}
+
+void js::gc::ArenaLists::checkEmptyFreeLists() {
+ MOZ_ASSERT(freeLists().allEmpty());
+}
+
+void js::gc::ArenaLists::checkEmptyArenaLists() {
+#ifdef DEBUG
+ for (auto i : AllAllocKinds()) {
+ checkEmptyArenaList(i);
+ }
+#endif
+}
+
+#endif // gc_ArenaList_inl_h
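ArenaList-inl.h above implements the list cursor as an indirect pointer: cursorp_ points at either head_ or some arena's |next| field, which makes insertAtCursor(), insertBeforeCursor(), and takeNextArena() constant-time with no "previous node" bookkeeping. A minimal standalone sketch of that technique follows, with hypothetical Node/CursorList names rather than the real Arena/ArenaList types.

// Sketch of the indirect-cursor singly linked list (illustrative only).
#include <cassert>

struct Node {
  int value = 0;
  Node* next = nullptr;
};

class CursorList {
  Node* head_ = nullptr;
  Node** cursorp_ = &head_;  // never null; |*cursorp_| is the node after the cursor

 public:
  // Mirrors ArenaList::insertBeforeCursor(): link |n| in, then step past it.
  void insertBeforeCursor(Node* n) {
    n->next = *cursorp_;
    *cursorp_ = n;
    cursorp_ = &n->next;
  }

  // Mirrors ArenaList::takeNextArena(): return the node after the cursor and
  // advance past it, or return nullptr if the cursor is at the end.
  Node* takeNext() {
    Node* n = *cursorp_;
    if (!n) {
      return nullptr;
    }
    cursorp_ = &n->next;
    return n;
  }

  Node* head() const { return head_; }
};

int main() {
  Node a{1}, b{2};
  CursorList list;
  list.insertBeforeCursor(&a);
  list.insertBeforeCursor(&b);
  assert(list.head() == &a && a.next == &b);  // insertion order preserved
  assert(list.takeNext() == nullptr);         // cursor already sits at the end
}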
diff --git a/js/src/gc/ArenaList.h b/js/src/gc/ArenaList.h
new file mode 100644
index 0000000000..50291e3c57
--- /dev/null
+++ b/js/src/gc/ArenaList.h
@@ -0,0 +1,405 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal definitions of ArenaList and associated heap data structures.
+ */
+
+#ifndef gc_ArenaList_h
+#define gc_ArenaList_h
+
+#include "gc/AllocKind.h"
+#include "js/GCAPI.h"
+#include "js/HeapAPI.h"
+#include "js/SliceBudget.h"
+#include "js/TypeDecls.h"
+#include "threading/ProtectedData.h"
+
+namespace js {
+
+class Nursery;
+class TenuringTracer;
+
+namespace gcstats {
+struct Statistics;
+}
+
+namespace gc {
+
+class Arena;
+struct FinalizePhase;
+class FreeSpan;
+class TenuredCell;
+
+/*
+ * A single segment of a SortedArenaList. Each segment has a head and a tail,
+ * which track the start and end of a segment for O(1) append and concatenation.
+ */
+struct SortedArenaListSegment {
+ Arena* head;
+ Arena** tailp;
+
+ void clear() {
+ head = nullptr;
+ tailp = &head;
+ }
+
+ bool isEmpty() const { return tailp == &head; }
+
+ // Appends |arena| to this segment.
+ inline void append(Arena* arena);
+
+ // Points the tail of this segment at |arena|, which may be null. Note
+ // that this does not change the tail itself, but merely which arena
+ // follows it. This essentially turns the tail into a cursor (see also the
+ // description of ArenaList), but from the perspective of a SortedArenaList
+ // this makes no difference.
+ void linkTo(Arena* arena) { *tailp = arena; }
+};
+
+/*
+ * Arena lists contain a singly linked lists of arenas starting from a head
+ * pointer.
+ *
+ * They also have a cursor, which conceptually lies on arena boundaries,
+ * i.e. before the first arena, between two arenas, or after the last arena.
+ *
+ * Arenas are usually sorted in order of increasing free space, with the cursor
+ * following the Arena currently being allocated from. This ordering should not
+ * be treated as an invariant, however, as the free lists may be cleared,
+ * leaving arenas previously used for allocation partially full. Sorting order
+ * is restored during sweeping.
+ *
+ * Arenas following the cursor should not be full.
+ */
+class ArenaList {
+ // The cursor is implemented via an indirect pointer, |cursorp_|, to allow
+ // for efficient list insertion at the cursor point and other list
+ // manipulations.
+ //
+ // - If the list is empty: |head| is null, |cursorp_| points to |head|, and
+ // therefore |*cursorp_| is null.
+ //
+ // - If the list is not empty: |head| is non-null, and...
+ //
+ // - If the cursor is at the start of the list: |cursorp_| points to
+ // |head|, and therefore |*cursorp_| points to the first arena.
+ //
+ // - If cursor is at the end of the list: |cursorp_| points to the |next|
+ // field of the last arena, and therefore |*cursorp_| is null.
+ //
+ // - If the cursor is at neither the start nor the end of the list:
+ // |cursorp_| points to the |next| field of the arena preceding the
+ // cursor, and therefore |*cursorp_| points to the arena following the
+ // cursor.
+ //
+ // |cursorp_| is never null.
+ //
+ Arena* head_;
+ Arena** cursorp_;
+
+ // Transfers the contents of |other| to this list and clears |other|.
+ inline void moveFrom(ArenaList& other);
+
+ public:
+ inline ArenaList();
+ inline ArenaList(ArenaList&& other);
+ inline ~ArenaList();
+
+ inline ArenaList& operator=(ArenaList&& other);
+
+ // It doesn't make sense for arenas to be present in more than one list, so
+ // list copy operations are not provided.
+ ArenaList(const ArenaList& other) = delete;
+ ArenaList& operator=(const ArenaList& other) = delete;
+
+ inline explicit ArenaList(const SortedArenaListSegment& segment);
+
+ inline void check() const;
+
+ inline void clear();
+ inline bool isEmpty() const;
+
+ // This returns nullptr if the list is empty.
+ inline Arena* head() const;
+
+ inline bool isCursorAtHead() const;
+ inline bool isCursorAtEnd() const;
+
+ // This can return nullptr.
+ inline Arena* arenaAfterCursor() const;
+
+ // This returns the arena after the cursor and moves the cursor past it.
+ inline Arena* takeNextArena();
+
+ // This does two things.
+ // - Inserts |a| at the cursor.
+ // - Leaves the cursor sitting just before |a|, if |a| is not full, or just
+ // after |a|, if |a| is full.
+ inline void insertAtCursor(Arena* a);
+
+ // Inserts |a| at the cursor, then moves the cursor past it.
+ inline void insertBeforeCursor(Arena* a);
+
+ // This inserts the contents of |other|, which must be full, at the cursor of
+ // |this| and clears |other|.
+ inline ArenaList& insertListWithCursorAtEnd(ArenaList& other);
+
+ Arena* removeRemainingArenas(Arena** arenap);
+ Arena** pickArenasToRelocate(size_t& arenaTotalOut, size_t& relocTotalOut);
+ Arena* relocateArenas(Arena* toRelocate, Arena* relocated,
+ js::SliceBudget& sliceBudget,
+ gcstats::Statistics& stats);
+
+#ifdef DEBUG
+ void dump();
+#endif
+};
+
+/*
+ * A class that holds arenas in sorted order by appending arenas to specific
+ * segments. Each segment has a head and a tail, which can be linked up to
+ * other segments to create a contiguous ArenaList.
+ */
+class SortedArenaList {
+ public:
+ // The minimum size, in bytes, of a GC thing.
+ static const size_t MinThingSize = 16;
+
+ static_assert(ArenaSize <= 4096,
+ "When increasing the Arena size, please consider how"
+ " this will affect the size of a SortedArenaList.");
+
+ static_assert(MinThingSize >= 16,
+ "When decreasing the minimum thing size, please consider"
+ " how this will affect the size of a SortedArenaList.");
+
+ private:
+ // The maximum number of GC things that an arena can hold.
+ static const size_t MaxThingsPerArena =
+ (ArenaSize - ArenaHeaderSize) / MinThingSize;
+
+ size_t thingsPerArena_;
+ SortedArenaListSegment segments[MaxThingsPerArena + 1];
+
+ // Convenience functions to get the nth head and tail.
+ Arena* headAt(size_t n) { return segments[n].head; }
+ Arena** tailAt(size_t n) { return segments[n].tailp; }
+
+ public:
+ inline explicit SortedArenaList(size_t thingsPerArena = MaxThingsPerArena);
+
+ inline void setThingsPerArena(size_t thingsPerArena);
+
+ // Resets the first |thingsPerArena| segments of this list for further use.
+ inline void reset(size_t thingsPerArena = MaxThingsPerArena);
+
+ // Inserts an arena, which has room for |nfree| more things, in its segment.
+ inline void insertAt(Arena* arena, size_t nfree);
+
+ // Remove all empty arenas, inserting them as a linked list.
+ inline void extractEmpty(Arena** empty);
+
+ // Links up the tail of each non-empty segment to the head of the next
+ // non-empty segment, creating a contiguous list that is returned as an
+ // ArenaList. This is not a destructive operation: neither the head nor tail
+ // of any segment is modified. However, note that the Arenas in the
+ // resulting ArenaList should be treated as read-only unless the
+ // SortedArenaList is no longer needed: inserting or removing arenas would
+ // invalidate the SortedArenaList.
+ inline ArenaList toArenaList();
+};
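+
+// A rough usage sketch (illustrative only; the real callers are in the
+// finalization code):
+//
+//   SortedArenaList sorted(thingsPerArena);
+//   // for each swept arena with |nfree| free cells remaining:
+//   sorted.insertAt(arena, nfree);
+//   // once all arenas have been inserted:
+//   Arena* empty = nullptr;
+//   sorted.extractEmpty(&empty);
+//   ArenaList swept = sorted.toArenaList();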
+
+enum class ShouldCheckThresholds {
+ DontCheckThresholds = 0,
+ CheckThresholds = 1
+};
+
+// For each alloc kind the free list is represented as the first span with free
+// things. Initially all the spans are empty. When we find a new arena with
+// available things we move its first free span into the list and mark the
+// arena as fully allocated, so we do not need to update the arena after the
+// initial allocation. When starting a GC we move the head of the list of spans
+// back to its arena; this is only needed for the arena that was not fully
+// allocated.
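+//
+// A rough sketch of the resulting allocation fast path (using the names
+// declared below; the real control flow lives elsewhere and may differ):
+//
+//   TenuredCell* cell = freeLists.allocate(kind);  // bump within the span
+//   if (!cell) {
+//     cell = arenaLists.refillFreeListAndAllocate(freeLists, kind,
+//                                                 checkThresholds);
+//   }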
+class FreeLists {
+ AllAllocKindArray<FreeSpan*> freeLists_;
+
+ public:
+ // Because the JITs can allocate from the free lists, they cannot be null.
+  // We use a placeholder FreeSpan that is empty (and without an associated
+ // Arena) so the JITs can fall back gracefully.
+ static FreeSpan emptySentinel;
+
+ FreeLists();
+
+#ifdef DEBUG
+ inline bool allEmpty() const;
+ inline bool isEmpty(AllocKind kind) const;
+#endif
+
+ inline void clear();
+
+ MOZ_ALWAYS_INLINE TenuredCell* allocate(AllocKind kind);
+
+ inline TenuredCell* setArenaAndAllocate(Arena* arena, AllocKind kind);
+
+ inline void unmarkPreMarkedFreeCells(AllocKind kind);
+
+ FreeSpan** addressOfFreeList(AllocKind thingKind) {
+ return &freeLists_[thingKind];
+ }
+};
+
+class ArenaLists {
+ enum class ConcurrentUse : uint32_t {
+ None,
+ BackgroundFinalize,
+ ParallelAlloc,
+ ParallelUnmark
+ };
+
+ using ConcurrentUseState =
+ mozilla::Atomic<ConcurrentUse, mozilla::SequentiallyConsistent>;
+
+ JS::Zone* zone_;
+
+ // Whether this structure can be accessed by other threads.
+ UnprotectedData<AllAllocKindArray<ConcurrentUseState>> concurrentUseState_;
+
+ ZoneData<FreeLists> freeLists_;
+
+ /* The main list of arenas for each alloc kind. */
+ ArenaListData<AllAllocKindArray<ArenaList>> arenaLists_;
+
+ /* For each arena kind, a list of arenas allocated during marking. */
+ ArenaListData<AllAllocKindArray<ArenaList>> newArenasInMarkPhase_;
+
+ /* For each arena kind, a list of arenas remaining to be swept. */
+ MainThreadOrGCTaskData<AllAllocKindArray<Arena*>> arenasToSweep_;
+
+ /* During incremental sweeping, a list of the arenas already swept. */
+ ZoneOrGCTaskData<AllocKind> incrementalSweptArenaKind;
+ ZoneOrGCTaskData<ArenaList> incrementalSweptArenas;
+
+ // Arena lists which have yet to be swept, but need additional foreground
+ // processing before they are swept.
+ ZoneData<Arena*> gcShapeArenasToUpdate;
+ ZoneData<Arena*> gcAccessorShapeArenasToUpdate;
+
+ // The list of empty arenas which are collected during the sweep phase and
+ // released at the end of sweeping every sweep group.
+ ZoneOrGCTaskData<Arena*> savedEmptyArenas;
+
+ public:
+ explicit ArenaLists(JS::Zone* zone);
+ ~ArenaLists();
+
+ FreeLists& freeLists() { return freeLists_.ref(); }
+ const FreeLists& freeLists() const { return freeLists_.ref(); }
+
+ FreeSpan** addressOfFreeList(AllocKind thingKind) {
+ return freeLists_.refNoCheck().addressOfFreeList(thingKind);
+ }
+
+ inline Arena* getFirstArena(AllocKind thingKind) const;
+ inline Arena* getFirstArenaToSweep(AllocKind thingKind) const;
+ inline Arena* getFirstSweptArena(AllocKind thingKind) const;
+ inline Arena* getFirstNewArenaInMarkPhase(AllocKind thingKind) const;
+ inline Arena* getArenaAfterCursor(AllocKind thingKind) const;
+
+ inline bool arenaListsAreEmpty() const;
+
+ inline void unmarkAll();
+
+ inline bool doneBackgroundFinalize(AllocKind kind) const;
+ inline bool needBackgroundFinalizeWait(AllocKind kind) const;
+
+ /* Clear the free lists so we won't try to allocate from swept arenas. */
+ inline void clearFreeLists();
+
+ inline void unmarkPreMarkedFreeCells();
+
+ MOZ_ALWAYS_INLINE TenuredCell* allocateFromFreeList(AllocKind thingKind);
+
+ /* Moves all arenas from |fromArenaLists| into |this|. */
+ void adoptArenas(ArenaLists* fromArenaLists, bool targetZoneIsCollecting);
+
+ inline void checkEmptyFreeLists();
+ inline void checkEmptyArenaLists();
+ inline void checkEmptyFreeList(AllocKind kind);
+
+ void checkEmptyArenaList(AllocKind kind);
+
+ bool relocateArenas(Arena*& relocatedListOut, JS::GCReason reason,
+ js::SliceBudget& sliceBudget, gcstats::Statistics& stats);
+
+ void queueForegroundObjectsForSweep(JSFreeOp* fop);
+ void queueForegroundThingsForSweep();
+
+ Arena* takeSweptEmptyArenas();
+
+ bool foregroundFinalize(JSFreeOp* fop, AllocKind thingKind,
+ js::SliceBudget& sliceBudget,
+ SortedArenaList& sweepList);
+ static void backgroundFinalize(JSFreeOp* fop, Arena* listHead, Arena** empty);
+
+ void setParallelAllocEnabled(bool enabled);
+ void setParallelUnmarkEnabled(bool enabled);
+
+ inline void mergeNewArenasInMarkPhase();
+
+ void checkGCStateNotInUse();
+ void checkSweepStateNotInUse();
+ void checkNoArenasToUpdate();
+ void checkNoArenasToUpdateForKind(AllocKind kind);
+
+ private:
+ ArenaList& arenaList(AllocKind i) { return arenaLists_.ref()[i]; }
+ const ArenaList& arenaList(AllocKind i) const { return arenaLists_.ref()[i]; }
+
+ ArenaList& newArenasInMarkPhase(AllocKind i) {
+ return newArenasInMarkPhase_.ref()[i];
+ }
+ const ArenaList& newArenasInMarkPhase(AllocKind i) const {
+ return newArenasInMarkPhase_.ref()[i];
+ }
+
+ ConcurrentUseState& concurrentUse(AllocKind i) {
+ return concurrentUseState_.ref()[i];
+ }
+ ConcurrentUse concurrentUse(AllocKind i) const {
+ return concurrentUseState_.ref()[i];
+ }
+
+ Arena*& arenasToSweep(AllocKind i) { return arenasToSweep_.ref()[i]; }
+ Arena* arenasToSweep(AllocKind i) const { return arenasToSweep_.ref()[i]; }
+
+ inline JSRuntime* runtime();
+ inline JSRuntime* runtimeFromAnyThread();
+
+ inline void queueForForegroundSweep(JSFreeOp* fop,
+ const FinalizePhase& phase);
+ inline void queueForBackgroundSweep(JSFreeOp* fop,
+ const FinalizePhase& phase);
+ inline void queueForForegroundSweep(AllocKind thingKind);
+ inline void queueForBackgroundSweep(AllocKind thingKind);
+
+ TenuredCell* refillFreeListAndAllocate(FreeLists& freeLists,
+ AllocKind thingKind,
+ ShouldCheckThresholds checkThresholds);
+
+ void addNewArena(Arena* arena, AllocKind thingKind);
+
+ friend class GCRuntime;
+ friend class js::Nursery;
+ friend class js::TenuringTracer;
+};
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_ArenaList_h */
diff --git a/js/src/gc/AtomMarking-inl.h b/js/src/gc/AtomMarking-inl.h
new file mode 100644
index 0000000000..6f113138c8
--- /dev/null
+++ b/js/src/gc/AtomMarking-inl.h
@@ -0,0 +1,99 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/AtomMarking.h"
+
+#include "mozilla/Assertions.h"
+
+#include <type_traits>
+
+#include "vm/Realm.h"
+#include "vm/SymbolType.h"
+
+#include "gc/Heap-inl.h"
+
+namespace js {
+namespace gc {
+
+inline size_t GetAtomBit(TenuredCell* thing) {
+ MOZ_ASSERT(thing->zoneFromAnyThread()->isAtomsZone());
+ Arena* arena = thing->arena();
+ size_t arenaBit = (reinterpret_cast<uintptr_t>(thing) - arena->address()) /
+ CellBytesPerMarkBit;
+ return arena->atomBitmapStart() * JS_BITS_PER_WORD + arenaBit;
+}
+
+template <typename T, bool Fallible>
+MOZ_ALWAYS_INLINE bool AtomMarkingRuntime::inlinedMarkAtomInternal(
+ JSContext* cx, T* thing) {
+ static_assert(std::is_same_v<T, JSAtom> || std::is_same_v<T, JS::Symbol>,
+ "Should only be called with JSAtom* or JS::Symbol* argument");
+
+ MOZ_ASSERT(thing);
+ js::gc::TenuredCell* cell = &thing->asTenured();
+ MOZ_ASSERT(cell->zoneFromAnyThread()->isAtomsZone());
+
+ // The context's zone will be null during initialization of the runtime.
+ if (!cx->zone()) {
+ return true;
+ }
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+
+ // This doesn't check for pinned atoms since that might require taking a
+ // lock. This is not required for correctness.
+ if (thing->isPermanentAndMayBeShared()) {
+ return true;
+ }
+
+ size_t bit = GetAtomBit(cell);
+ MOZ_ASSERT(bit / JS_BITS_PER_WORD < allocatedWords);
+
+ if (Fallible) {
+ if (!cx->zone()->markedAtoms().setBitFallible(bit)) {
+ return false;
+ }
+ } else {
+ cx->zone()->markedAtoms().setBit(bit);
+ }
+
+ if (!cx->isHelperThreadContext()) {
+ // Trigger a read barrier on the atom, in case there is an incremental
+ // GC in progress. This is necessary if the atom is being marked
+ // because a reference to it was obtained from another zone which is
+ // not being collected by the incremental GC.
+ ReadBarrier(thing);
+ }
+
+ // Children of the thing also need to be marked in the context's zone.
+  // We don't have a JSTracer for this, so we manually handle the cases in which
+ // an atom can reference other atoms.
+ markChildren(cx, thing);
+
+ return true;
+}
+
+void AtomMarkingRuntime::markChildren(JSContext* cx, JSAtom*) {}
+
+void AtomMarkingRuntime::markChildren(JSContext* cx, JS::Symbol* symbol) {
+ if (JSAtom* description = symbol->description()) {
+ markAtom(cx, description);
+ }
+}
+
+template <typename T>
+MOZ_ALWAYS_INLINE void AtomMarkingRuntime::inlinedMarkAtom(JSContext* cx,
+ T* thing) {
+ MOZ_ALWAYS_TRUE((inlinedMarkAtomInternal<T, false>(cx, thing)));
+}
+
+template <typename T>
+MOZ_ALWAYS_INLINE bool AtomMarkingRuntime::inlinedMarkAtomFallible(
+ JSContext* cx, T* thing) {
+ return inlinedMarkAtomInternal<T, true>(cx, thing);
+}
+
+} // namespace gc
+} // namespace js
diff --git a/js/src/gc/AtomMarking.cpp b/js/src/gc/AtomMarking.cpp
new file mode 100644
index 0000000000..5631cb8d66
--- /dev/null
+++ b/js/src/gc/AtomMarking.cpp
@@ -0,0 +1,305 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/AtomMarking-inl.h"
+
+#include <type_traits>
+
+#include "gc/PublicIterators.h"
+#include "vm/Realm.h"
+
+#include "gc/GC-inl.h"
+#include "gc/Heap-inl.h"
+
+namespace js {
+namespace gc {
+
+// [SMDOC] GC Atom Marking
+//
+// Things in the atoms zone (which includes atomized strings and other things,
+// all of which we will refer to as 'atoms' here) may be pointed to freely by
+// things in other zones. To avoid the need to perform garbage collections of
+// the entire runtime to collect atoms, we compute a separate atom mark bitmap
+// for each zone that is always an overapproximation of the atoms that zone is
+// using. When an atom is not in the mark bitmap for any zone, it can be
+// destroyed.
+//
+// To minimize interference with the rest of the GC, atom marking and sweeping
+// is done by manipulating the mark bitmaps in the chunks used for the atoms.
+// When the atoms zone is being collected, the mark bitmaps for the chunk(s)
+// used by the atoms are updated normally during marking. After marking
+// finishes, the chunk mark bitmaps are translated to a more efficient atom mark
+// bitmap (see below) that is stored on the zones which the GC collected
+// (computeBitmapFromChunkMarkBits). Before sweeping begins, the chunk mark
+// bitmaps are updated with any atoms that might be referenced by zones which
+// weren't collected (markAtomsUsedByUncollectedZones). The GC sweeping will
+// then release all atoms which are not marked by any zone.
+//
+// The representation of atom mark bitmaps is as follows:
+//
+// Each arena in the atoms zone has an atomBitmapStart() value indicating the
+// word index into the bitmap of the first thing in the arena. Each arena uses
+// ArenaBitmapWords of data to store its bitmap, which uses the same
+// representation as chunk mark bitmaps: one bit is allocated per Cell, with
+// bits for space between things being unused when things are larger than a
+// single Cell.
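+//
+// As a worked example (illustrative numbers: one mark bit per 16-byte cell,
+// 64-bit words), an atom at byte offset 256 within an arena whose
+// atomBitmapStart() is W gets bit index W * 64 + 256 / 16 = 64 * W + 16 in a
+// zone's markedAtoms() bitmap; see GetAtomBit() in AtomMarking-inl.h.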
+
+void AtomMarkingRuntime::registerArena(Arena* arena, const AutoLockGC& lock) {
+ MOZ_ASSERT(arena->getThingSize() != 0);
+ MOZ_ASSERT(arena->getThingSize() % CellAlignBytes == 0);
+ MOZ_ASSERT(arena->zone->isAtomsZone());
+
+ // We need to find a range of bits from the atoms bitmap for this arena.
+
+ // Look for a free range of bits compatible with this arena.
+ if (freeArenaIndexes.ref().length()) {
+ arena->atomBitmapStart() = freeArenaIndexes.ref().popCopy();
+ return;
+ }
+
+ // Allocate a range of bits from the end for this arena.
+ arena->atomBitmapStart() = allocatedWords;
+ allocatedWords += ArenaBitmapWords;
+}
+
+void AtomMarkingRuntime::unregisterArena(Arena* arena, const AutoLockGC& lock) {
+ MOZ_ASSERT(arena->zone->isAtomsZone());
+
+ // Leak these atom bits if we run out of memory.
+ mozilla::Unused << freeArenaIndexes.ref().emplaceBack(
+ arena->atomBitmapStart());
+}
+
+bool AtomMarkingRuntime::computeBitmapFromChunkMarkBits(JSRuntime* runtime,
+ DenseBitmap& bitmap) {
+ MOZ_ASSERT(CurrentThreadIsPerformingGC());
+ MOZ_ASSERT(!runtime->hasHelperThreadZones());
+
+ if (!bitmap.ensureSpace(allocatedWords)) {
+ return false;
+ }
+
+ Zone* atomsZone = runtime->unsafeAtomsZone();
+ for (auto thingKind : AllAllocKinds()) {
+ for (ArenaIter aiter(atomsZone, thingKind); !aiter.done(); aiter.next()) {
+ Arena* arena = aiter.get();
+ MarkBitmapWord* chunkWords = arena->chunk()->markBits.arenaBits(arena);
+ bitmap.copyBitsFrom(arena->atomBitmapStart(), ArenaBitmapWords,
+ chunkWords);
+ }
+ }
+
+ return true;
+}
+
+void AtomMarkingRuntime::refineZoneBitmapForCollectedZone(
+ Zone* zone, const DenseBitmap& bitmap) {
+ MOZ_ASSERT(zone->isCollectingFromAnyThread());
+
+ if (zone->isAtomsZone()) {
+ return;
+ }
+
+ // Take the bitwise and between the two mark bitmaps to get the best new
+ // overapproximation we can. |bitmap| might include bits that are not in
+ // the zone's mark bitmap, if additional zones were collected by the GC.
+ zone->markedAtoms().bitwiseAndWith(bitmap);
+}
+
+// Set any bits in the chunk mark bitmaps for atoms which are marked in bitmap.
+template <typename Bitmap>
+static void BitwiseOrIntoChunkMarkBits(JSRuntime* runtime, Bitmap& bitmap) {
+ // Make sure that by copying the mark bits for one arena in word sizes we
+ // do not affect the mark bits for other arenas.
+ static_assert(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
+ "ArenaBitmapWords must evenly divide ArenaBitmapBits");
+
+ Zone* atomsZone = runtime->unsafeAtomsZone();
+ for (auto thingKind : AllAllocKinds()) {
+ for (ArenaIter aiter(atomsZone, thingKind); !aiter.done(); aiter.next()) {
+ Arena* arena = aiter.get();
+ MarkBitmapWord* chunkWords = arena->chunk()->markBits.arenaBits(arena);
+ bitmap.bitwiseOrRangeInto(arena->atomBitmapStart(), ArenaBitmapWords,
+ chunkWords);
+ }
+ }
+}
+
+void AtomMarkingRuntime::markAtomsUsedByUncollectedZones(JSRuntime* runtime) {
+ MOZ_ASSERT(CurrentThreadIsPerformingGC());
+ MOZ_ASSERT(!runtime->hasHelperThreadZones());
+
+ // Try to compute a simple union of the zone atom bitmaps before updating
+ // the chunk mark bitmaps. If this allocation fails then fall back to
+ // updating the chunk mark bitmaps separately for each zone.
+ DenseBitmap markedUnion;
+ if (markedUnion.ensureSpace(allocatedWords)) {
+ for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
+ // We only need to update the chunk mark bits for zones which were
+ // not collected in the current GC. Atoms which are referenced by
+ // collected zones have already been marked.
+ if (!zone->isCollectingFromAnyThread()) {
+ zone->markedAtoms().bitwiseOrInto(markedUnion);
+ }
+ }
+ BitwiseOrIntoChunkMarkBits(runtime, markedUnion);
+ } else {
+ for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
+ if (!zone->isCollectingFromAnyThread()) {
+ BitwiseOrIntoChunkMarkBits(runtime, zone->markedAtoms());
+ }
+ }
+ }
+}
+
+template <typename T>
+void AtomMarkingRuntime::markAtom(JSContext* cx, T* thing) {
+ return inlinedMarkAtom(cx, thing);
+}
+
+template void AtomMarkingRuntime::markAtom(JSContext* cx, JSAtom* thing);
+template void AtomMarkingRuntime::markAtom(JSContext* cx, JS::Symbol* thing);
+
+void AtomMarkingRuntime::markId(JSContext* cx, jsid id) {
+ if (JSID_IS_ATOM(id)) {
+ markAtom(cx, JSID_TO_ATOM(id));
+ return;
+ }
+ if (JSID_IS_SYMBOL(id)) {
+ markAtom(cx, JSID_TO_SYMBOL(id));
+ return;
+ }
+ MOZ_ASSERT(!id.isGCThing());
+}
+
+void AtomMarkingRuntime::markAtomValue(JSContext* cx, const Value& value) {
+ if (value.isString()) {
+ if (value.toString()->isAtom()) {
+ markAtom(cx, &value.toString()->asAtom());
+ }
+ return;
+ }
+ if (value.isSymbol()) {
+ markAtom(cx, value.toSymbol());
+ return;
+ }
+ MOZ_ASSERT_IF(value.isGCThing(), value.isObject() ||
+ value.isPrivateGCThing() ||
+ value.isBigInt());
+}
+
+void AtomMarkingRuntime::adoptMarkedAtoms(Zone* target, Zone* source) {
+ MOZ_ASSERT(CurrentThreadCanAccessZone(source));
+ MOZ_ASSERT(CurrentThreadCanAccessZone(target));
+ target->markedAtoms().bitwiseOrWith(source->markedAtoms());
+}
+
+#ifdef DEBUG
+template <typename T>
+bool AtomMarkingRuntime::atomIsMarked(Zone* zone, T* thing) {
+ static_assert(std::is_same_v<T, JSAtom> || std::is_same_v<T, JS::Symbol>,
+ "Should only be called with JSAtom* or JS::Symbol* argument");
+
+ MOZ_ASSERT(thing);
+ MOZ_ASSERT(!IsInsideNursery(thing));
+ MOZ_ASSERT(thing->zoneFromAnyThread()->isAtomsZone());
+
+ if (!zone->runtimeFromAnyThread()->permanentAtomsPopulated()) {
+ return true;
+ }
+
+ if (thing->isPermanentAndMayBeShared()) {
+ return true;
+ }
+
+ if constexpr (std::is_same_v<T, JSAtom>) {
+ JSRuntime* rt = zone->runtimeFromAnyThread();
+ if (rt->atoms().atomIsPinned(rt, thing)) {
+ return true;
+ }
+ }
+
+ size_t bit = GetAtomBit(&thing->asTenured());
+ return zone->markedAtoms().getBit(bit);
+}
+
+template bool AtomMarkingRuntime::atomIsMarked(Zone* zone, JSAtom* thing);
+template bool AtomMarkingRuntime::atomIsMarked(Zone* zone, JS::Symbol* thing);
+
+template <>
+bool AtomMarkingRuntime::atomIsMarked(Zone* zone, TenuredCell* thing) {
+ if (!thing) {
+ return true;
+ }
+
+ if (thing->is<JSString>()) {
+ JSString* str = thing->as<JSString>();
+ if (!str->isAtom()) {
+ return true;
+ }
+ return atomIsMarked(zone, &str->asAtom());
+ }
+
+ if (thing->is<JS::Symbol>()) {
+ return atomIsMarked(zone, thing->as<JS::Symbol>());
+ }
+
+ return true;
+}
+
+bool AtomMarkingRuntime::idIsMarked(Zone* zone, jsid id) {
+ if (JSID_IS_ATOM(id)) {
+ return atomIsMarked(zone, JSID_TO_ATOM(id));
+ }
+
+ if (JSID_IS_SYMBOL(id)) {
+ return atomIsMarked(zone, JSID_TO_SYMBOL(id));
+ }
+
+ MOZ_ASSERT(!id.isGCThing());
+ return true;
+}
+
+bool AtomMarkingRuntime::valueIsMarked(Zone* zone, const Value& value) {
+ if (value.isString()) {
+ if (value.toString()->isAtom()) {
+ return atomIsMarked(zone, &value.toString()->asAtom());
+ }
+ return true;
+ }
+
+ if (value.isSymbol()) {
+ return atomIsMarked(zone, value.toSymbol());
+ }
+
+ MOZ_ASSERT_IF(value.isGCThing(), value.isObject() ||
+ value.isPrivateGCThing() ||
+ value.isBigInt());
+ return true;
+}
+
+#endif // DEBUG
+
+} // namespace gc
+
+#ifdef DEBUG
+
+bool AtomIsMarked(Zone* zone, JSAtom* atom) {
+ return zone->runtimeFromAnyThread()->gc.atomMarking.atomIsMarked(zone, atom);
+}
+
+bool AtomIsMarked(Zone* zone, jsid id) {
+ return zone->runtimeFromAnyThread()->gc.atomMarking.idIsMarked(zone, id);
+}
+
+bool AtomIsMarked(Zone* zone, const Value& value) {
+ return zone->runtimeFromAnyThread()->gc.atomMarking.valueIsMarked(zone,
+ value);
+}
+
+#endif // DEBUG
+
+} // namespace js
diff --git a/js/src/gc/AtomMarking.h b/js/src/gc/AtomMarking.h
new file mode 100644
index 0000000000..d9186c7c8b
--- /dev/null
+++ b/js/src/gc/AtomMarking.h
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_AtomMarking_h
+#define gc_AtomMarking_h
+
+#include "NamespaceImports.h"
+#include "ds/Bitmap.h"
+#include "threading/ProtectedData.h"
+
+namespace js {
+
+class AutoLockGC;
+
+namespace gc {
+
+class Arena;
+
+// This class manages state used for marking atoms during GCs.
+// See AtomMarking.cpp for details.
+class AtomMarkingRuntime {
+ // Unused arena atom bitmap indexes. Protected by the GC lock.
+ js::GCLockData<Vector<size_t, 0, SystemAllocPolicy>> freeArenaIndexes;
+
+ inline void markChildren(JSContext* cx, JSAtom*);
+ inline void markChildren(JSContext* cx, JS::Symbol* symbol);
+
+ public:
+ // The extent of all allocated and free words in atom mark bitmaps.
+ // This monotonically increases and may be read from without locking.
+ mozilla::Atomic<size_t, mozilla::SequentiallyConsistent> allocatedWords;
+
+ AtomMarkingRuntime() : allocatedWords(0) {}
+
+ // Mark an arena as holding things in the atoms zone.
+ void registerArena(Arena* arena, const AutoLockGC& lock);
+
+ // Mark an arena as no longer holding things in the atoms zone.
+ void unregisterArena(Arena* arena, const AutoLockGC& lock);
+
+ // Fill |bitmap| with an atom marking bitmap based on the things that are
+ // currently marked in the chunks used by atoms zone arenas. This returns
+ // false on an allocation failure (but does not report an exception).
+ bool computeBitmapFromChunkMarkBits(JSRuntime* runtime, DenseBitmap& bitmap);
+
+ // Update the atom marking bitmap in |zone| according to another
+ // overapproximation of the reachable atoms in |bitmap|.
+ void refineZoneBitmapForCollectedZone(Zone* zone, const DenseBitmap& bitmap);
+
+ // Set any bits in the chunk mark bitmaps for atoms which are marked in any
+ // uncollected zone in the runtime.
+ void markAtomsUsedByUncollectedZones(JSRuntime* runtime);
+
+ // Mark an atom or id as being newly reachable by the context's zone.
+ template <typename T>
+ void markAtom(JSContext* cx, T* thing);
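+  //
+  // Illustrative call site (hypothetical; real callers differ): code in
+  // |cx|'s zone that starts using an atom from the atoms zone would do
+  //   cx->runtime()->gc.atomMarking.markAtom(cx, atom);
+  // so the atom stays in this zone's mark bitmap overapproximation.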
+
+ // Version of markAtom that's always inlined, for performance-sensitive
+ // callers.
+ template <typename T, bool Fallible>
+ MOZ_ALWAYS_INLINE bool inlinedMarkAtomInternal(JSContext* cx, T* thing);
+ template <typename T>
+ MOZ_ALWAYS_INLINE void inlinedMarkAtom(JSContext* cx, T* thing);
+ template <typename T>
+ MOZ_ALWAYS_INLINE bool inlinedMarkAtomFallible(JSContext* cx, T* thing);
+
+ void markId(JSContext* cx, jsid id);
+ void markAtomValue(JSContext* cx, const Value& value);
+
+ // Mark all atoms in |source| as being reachable within |target|.
+ void adoptMarkedAtoms(Zone* target, Zone* source);
+
+#ifdef DEBUG
+ // Return whether |thing/id| is in the atom marking bitmap for |zone|.
+ template <typename T>
+ bool atomIsMarked(Zone* zone, T* thing);
+ bool idIsMarked(Zone* zone, jsid id);
+ bool valueIsMarked(Zone* zone, const Value& value);
+#endif
+};
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_AtomMarking_h
diff --git a/js/src/gc/Barrier.cpp b/js/src/gc/Barrier.cpp
new file mode 100644
index 0000000000..74c5b0ff8f
--- /dev/null
+++ b/js/src/gc/Barrier.cpp
@@ -0,0 +1,357 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Barrier.h"
+
+#include "gc/Policy.h"
+#include "jit/Ion.h"
+#include "js/HashTable.h"
+#include "js/shadow/Zone.h" // JS::shadow::Zone
+#include "js/Value.h"
+#include "vm/BigIntType.h" // JS::BigInt
+#include "vm/EnvironmentObject.h"
+#include "vm/GeneratorObject.h"
+#include "vm/JSObject.h"
+#include "vm/Realm.h"
+#include "vm/SharedArrayObject.h"
+#include "vm/SymbolType.h"
+#include "wasm/WasmJS.h"
+
+#include "gc/Zone-inl.h"
+
+namespace js {
+
+bool RuntimeFromMainThreadIsHeapMajorCollecting(JS::shadow::Zone* shadowZone) {
+ MOZ_ASSERT(
+ CurrentThreadCanAccessRuntime(shadowZone->runtimeFromMainThread()));
+ return JS::RuntimeHeapIsMajorCollecting();
+}
+
+#ifdef DEBUG
+
+bool IsMarkedBlack(JSObject* obj) { return obj->isMarkedBlack(); }
+
+bool HeapSlot::preconditionForSet(NativeObject* owner, Kind kind,
+ uint32_t slot) const {
+ if (kind == Slot) {
+ return &owner->getSlotRef(slot) == this;
+ }
+
+ uint32_t numShifted = owner->getElementsHeader()->numShiftedElements();
+ MOZ_ASSERT(slot >= numShifted);
+ return &owner->getDenseElement(slot - numShifted) == (const Value*)this;
+}
+
+void HeapSlot::assertPreconditionForPostWriteBarrier(
+ NativeObject* obj, Kind kind, uint32_t slot, const Value& target) const {
+ if (kind == Slot) {
+ MOZ_ASSERT(obj->getSlotAddressUnchecked(slot)->get() == target);
+ } else {
+ uint32_t numShifted = obj->getElementsHeader()->numShiftedElements();
+ MOZ_ASSERT(slot >= numShifted);
+ MOZ_ASSERT(
+ static_cast<HeapSlot*>(obj->getDenseElements() + (slot - numShifted))
+ ->get() == target);
+ }
+
+ AssertTargetIsNotGray(obj);
+}
+
+bool CurrentThreadIsIonCompiling() {
+ jit::JitContext* jcx = jit::MaybeGetJitContext();
+ return jcx && jcx->inIonBackend();
+}
+
+bool CurrentThreadIsGCMarking() {
+ JSContext* cx = MaybeGetJSContext();
+ return cx && cx->gcUse == JSContext::GCUse::Marking;
+}
+
+bool CurrentThreadIsGCSweeping() {
+ JSContext* cx = MaybeGetJSContext();
+ return cx && cx->gcUse == JSContext::GCUse::Sweeping;
+}
+
+bool CurrentThreadIsGCFinalizing() {
+ JSContext* cx = MaybeGetJSContext();
+ return cx && cx->gcUse == JSContext::GCUse::Finalizing;
+}
+
+bool CurrentThreadIsTouchingGrayThings() {
+ JSContext* cx = MaybeGetJSContext();
+ return cx && cx->isTouchingGrayThings;
+}
+
+AutoTouchingGrayThings::AutoTouchingGrayThings() {
+ TlsContext.get()->isTouchingGrayThings++;
+}
+
+AutoTouchingGrayThings::~AutoTouchingGrayThings() {
+ JSContext* cx = TlsContext.get();
+ MOZ_ASSERT(cx->isTouchingGrayThings);
+ cx->isTouchingGrayThings--;
+}
+
+#endif // DEBUG
+
+// Tagged pointer barriers
+//
+// It's tempting to use ApplyGCThingTyped to dispatch to the typed barrier
+// functions (e.g. gc::ReadBarrier(JSObject*)) but this does not compile well
+// (clang generates 1580 bytes on x64 versus 296 bytes for this implementation
+// of ValueReadBarrier).
+//
+// Instead, check known special cases and call the generic barrier functions.
+
+static MOZ_ALWAYS_INLINE bool ValueIsPermanent(const Value& value) {
+ gc::Cell* cell = value.toGCThing();
+
+ if (value.isString()) {
+ return cell->as<JSString>()->isPermanentAndMayBeShared();
+ }
+
+ if (value.isSymbol()) {
+ return cell->as<JS::Symbol>()->isPermanentAndMayBeShared();
+ }
+
+#ifdef DEBUG
+  // Using mozilla::DebugOnly here still generates code in opt builds.
+ bool isPermanent = MapGCThingTyped(value, [](auto t) {
+ return t->isPermanentAndMayBeShared();
+ }).value();
+ MOZ_ASSERT(!isPermanent);
+#endif
+
+ return false;
+}
+
+void gc::ValueReadBarrier(const Value& v) {
+ MOZ_ASSERT(v.isGCThing());
+
+ if (!ValueIsPermanent(v)) {
+ ReadBarrierImpl(v.toGCThing());
+ }
+}
+
+void gc::ValuePreWriteBarrier(const Value& v) {
+ MOZ_ASSERT(v.isGCThing());
+
+ if (!ValueIsPermanent(v)) {
+ PreWriteBarrierImpl(v.toGCThing());
+ }
+}
+
+static MOZ_ALWAYS_INLINE bool IdIsPermanent(jsid id) {
+ gc::Cell* cell = id.toGCThing();
+
+ if (id.isString()) {
+ return cell->as<JSString>()->isPermanentAndMayBeShared();
+ }
+
+ if (id.isSymbol()) {
+ return cell->as<JS::Symbol>()->isPermanentAndMayBeShared();
+ }
+
+#ifdef DEBUG
+ bool isPermanent = MapGCThingTyped(id, [](auto t) {
+ return t->isPermanentAndMayBeShared();
+ }).value();
+ MOZ_ASSERT(!isPermanent);
+#endif
+
+ return false;
+}
+
+void gc::IdPreWriteBarrier(jsid id) {
+ MOZ_ASSERT(id.isGCThing());
+
+ if (!IdIsPermanent(id)) {
+ PreWriteBarrierImpl(&id.toGCThing()->asTenured());
+ }
+}
+
+static MOZ_ALWAYS_INLINE bool CellPtrIsPermanent(JS::GCCellPtr thing) {
+ if (thing.mayBeOwnedByOtherRuntime()) {
+ return true;
+ }
+
+#ifdef DEBUG
+ bool isPermanent = MapGCThingTyped(
+ thing, [](auto t) { return t->isPermanentAndMayBeShared(); });
+ MOZ_ASSERT(!isPermanent);
+#endif
+
+ return false;
+}
+
+void gc::CellPtrPreWriteBarrier(JS::GCCellPtr thing) {
+ MOZ_ASSERT(thing);
+
+ if (!CellPtrIsPermanent(thing)) {
+ PreWriteBarrierImpl(thing.asCell());
+ }
+}
+
+template <typename T>
+/* static */ bool MovableCellHasher<T>::hasHash(const Lookup& l) {
+ if (!l) {
+ return true;
+ }
+
+ return l->zoneFromAnyThread()->hasUniqueId(l);
+}
+
+template <typename T>
+/* static */ bool MovableCellHasher<T>::ensureHash(const Lookup& l) {
+ if (!l) {
+ return true;
+ }
+
+ uint64_t unusedId;
+ return l->zoneFromAnyThread()->getOrCreateUniqueId(l, &unusedId);
+}
+
+template <typename T>
+/* static */ HashNumber MovableCellHasher<T>::hash(const Lookup& l) {
+ if (!l) {
+ return 0;
+ }
+
+ // We have to access the zone from-any-thread here: a worker thread may be
+  // cloning a self-hosted object from the main runtime's self-hosting zone
+ // into another runtime. The zone's uid lock will protect against multiple
+ // workers doing this simultaneously.
+ MOZ_ASSERT(CurrentThreadCanAccessZone(l->zoneFromAnyThread()) ||
+ l->zoneFromAnyThread()->isSelfHostingZone() ||
+ CurrentThreadIsPerformingGC());
+
+ return l->zoneFromAnyThread()->getHashCodeInfallible(l);
+}
+
+template <typename T>
+/* static */ bool MovableCellHasher<T>::match(const Key& k, const Lookup& l) {
+ // Return true if both are null or false if only one is null.
+ if (!k) {
+ return !l;
+ }
+ if (!l) {
+ return false;
+ }
+
+ MOZ_ASSERT(k);
+ MOZ_ASSERT(l);
+ MOZ_ASSERT(CurrentThreadCanAccessZone(l->zoneFromAnyThread()) ||
+ l->zoneFromAnyThread()->isSelfHostingZone());
+
+ Zone* zone = k->zoneFromAnyThread();
+ if (zone != l->zoneFromAnyThread()) {
+ return false;
+ }
+
+#ifdef DEBUG
+ // Incremental table sweeping means that existing table entries may no
+ // longer have unique IDs. We fail the match in that case and the entry is
+ // removed from the table later on.
+ if (!zone->hasUniqueId(k)) {
+ Key key = k;
+ MOZ_ASSERT(IsAboutToBeFinalizedUnbarriered(&key));
+ }
+ MOZ_ASSERT(zone->hasUniqueId(l));
+#endif
+
+ uint64_t keyId;
+ if (!zone->maybeGetUniqueId(k, &keyId)) {
+ // Key is dead and cannot match lookup which must be live.
+ return false;
+ }
+
+ return keyId == zone->getUniqueIdInfallible(l);
+}
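+
+// Illustrative (hypothetical) use: keying a hash table on objects in a way
+// that survives moving GC, since hashing goes through the zone's unique-ID
+// table rather than the (movable) pointer value:
+//
+//   js::HashMap<JSObject*, uint32_t, js::MovableCellHasher<JSObject*>,
+//               js::SystemAllocPolicy>
+//       liveObjectCounts;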
+
+#if !MOZ_IS_GCC
+template struct JS_PUBLIC_API MovableCellHasher<JSObject*>;
+#endif
+
+template struct JS_PUBLIC_API MovableCellHasher<AbstractGeneratorObject*>;
+template struct JS_PUBLIC_API MovableCellHasher<EnvironmentObject*>;
+template struct JS_PUBLIC_API MovableCellHasher<GlobalObject*>;
+template struct JS_PUBLIC_API MovableCellHasher<JSScript*>;
+template struct JS_PUBLIC_API MovableCellHasher<BaseScript*>;
+template struct JS_PUBLIC_API MovableCellHasher<ScriptSourceObject*>;
+template struct JS_PUBLIC_API MovableCellHasher<SavedFrame*>;
+template struct JS_PUBLIC_API MovableCellHasher<WasmInstanceObject*>;
+
+} // namespace js
+
+// Post-write barrier, used by the C++ Heap<T> implementation.
+
+JS_PUBLIC_API void JS::HeapObjectPostWriteBarrier(JSObject** objp,
+ JSObject* prev,
+ JSObject* next) {
+ MOZ_ASSERT(objp);
+ js::InternalBarrierMethods<JSObject*>::postBarrier(objp, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapStringPostWriteBarrier(JSString** strp,
+ JSString* prev,
+ JSString* next) {
+ MOZ_ASSERT(strp);
+ js::InternalBarrierMethods<JSString*>::postBarrier(strp, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapBigIntPostWriteBarrier(JS::BigInt** bip,
+ JS::BigInt* prev,
+ JS::BigInt* next) {
+ MOZ_ASSERT(bip);
+ js::InternalBarrierMethods<JS::BigInt*>::postBarrier(bip, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapValuePostWriteBarrier(JS::Value* valuep,
+ const Value& prev,
+ const Value& next) {
+ MOZ_ASSERT(valuep);
+ js::InternalBarrierMethods<JS::Value>::postBarrier(valuep, prev, next);
+}
+
+// Combined pre- and post-write barriers, used by the rust Heap<T>
+// implementation.
+
+JS_PUBLIC_API void JS::HeapObjectWriteBarriers(JSObject** objp, JSObject* prev,
+ JSObject* next) {
+ MOZ_ASSERT(objp);
+ js::InternalBarrierMethods<JSObject*>::preBarrier(prev);
+ js::InternalBarrierMethods<JSObject*>::postBarrier(objp, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapStringWriteBarriers(JSString** strp, JSString* prev,
+ JSString* next) {
+ MOZ_ASSERT(strp);
+ js::InternalBarrierMethods<JSString*>::preBarrier(prev);
+ js::InternalBarrierMethods<JSString*>::postBarrier(strp, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapBigIntWriteBarriers(JS::BigInt** bip,
+ JS::BigInt* prev,
+ JS::BigInt* next) {
+ MOZ_ASSERT(bip);
+ js::InternalBarrierMethods<JS::BigInt*>::preBarrier(prev);
+ js::InternalBarrierMethods<JS::BigInt*>::postBarrier(bip, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapScriptWriteBarriers(JSScript** scriptp,
+ JSScript* prev, JSScript* next) {
+ MOZ_ASSERT(scriptp);
+ js::InternalBarrierMethods<JSScript*>::preBarrier(prev);
+ js::InternalBarrierMethods<JSScript*>::postBarrier(scriptp, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapValueWriteBarriers(JS::Value* valuep,
+ const Value& prev,
+ const Value& next) {
+ MOZ_ASSERT(valuep);
+ js::InternalBarrierMethods<JS::Value>::preBarrier(prev);
+ js::InternalBarrierMethods<JS::Value>::postBarrier(valuep, prev, next);
+}
diff --git a/js/src/gc/Barrier.h b/js/src/gc/Barrier.h
new file mode 100644
index 0000000000..7734852bcc
--- /dev/null
+++ b/js/src/gc/Barrier.h
@@ -0,0 +1,1168 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Barrier_h
+#define gc_Barrier_h
+
+#include "mozilla/DebugOnly.h"
+
+#include <type_traits> // std::true_type
+
+#include "NamespaceImports.h"
+
+#include "gc/Cell.h"
+#include "gc/StoreBuffer.h"
+#include "js/ComparisonOperators.h" // JS::detail::DefineComparisonOps
+#include "js/HeapAPI.h"
+#include "js/Id.h"
+#include "js/RootingAPI.h"
+#include "js/Value.h"
+#include "util/Poison.h"
+
+/*
+ * [SMDOC] GC Barriers
+ *
+ * Several kinds of barrier are necessary to allow the GC to function correctly.
+ * These are triggered by reading or writing to GC pointers in the heap and
+ * serve to tell the collector about changes to the graph of reachable GC
+ * things.
+ *
+ * Since it would be awkward to change every write to memory into a function
+ * call, this file contains a bunch of C++ classes and templates that use
+ * operator overloading to take care of barriers automatically. In most cases,
+ * all that's necessary is to replace:
+ *
+ * Type* field;
+ *
+ * with:
+ *
+ * HeapPtr<Type> field;
+ *
+ * All heap-based GC pointers and tagged pointers must use one of these classes,
+ * except in a couple of exceptional cases.
+ *
+ * These classes are designed to be used by the internals of the JS engine.
+ * Barriers designed to be used externally are provided in js/RootingAPI.h.
+ *
+ * Overview
+ * ========
+ *
+ * This file implements the following concrete classes:
+ *
+ * HeapPtr General wrapper for heap-based pointers that provides pre- and
+ * post-write barriers. Most clients should use this.
+ *
+ * GCPtr An optimisation of HeapPtr for objects which are only destroyed
+ * by GC finalization (this rules out use in Vector, for example).
+ *
+ * PreBarriered Provides a pre-barrier but not a post-barrier. Necessary when
+ * generational GC updates are handled manually, e.g. for hash
+ * table keys that don't use MovableCellHasher.
+ *
+ * HeapSlot Provides pre and post-barriers, optimised for use in JSObject
+ * slots and elements.
+ *
+ * WeakHeapPtr Provides read and post-write barriers, for use with weak
+ * pointers.
+ *
+ * The following classes are implemented in js/RootingAPI.h (in the JS
+ * namespace):
+ *
+ * Heap General wrapper for external clients. Like HeapPtr but also
+ * handles cycle collector concerns. Most external clients should
+ * use this.
+ *
+ * TenuredHeap Like Heap but doesn't allow nursery pointers. Allows storing
+ * flags in unused lower bits of the pointer.
+ *
+ * Which class to use?
+ * -------------------
+ *
+ * Answer the following questions to decide which barrier class is right for
+ * your use case:
+ *
+ * Is your code part of the JS engine?
+ * Yes, it's internal =>
+ * Is your pointer weak or strong?
+ * Strong =>
+ * Do you want automatic handling of nursery pointers?
+ * Yes, of course =>
+ * Can your object be destroyed outside of a GC?
+ * Yes => Use HeapPtr<T>
+ * No => Use GCPtr<T> (optimization)
+ * No, I'll do this myself => Use PreBarriered<T>
+ * Weak => Use WeakHeapPtr<T>
+ * No, it's external =>
+ * Can your pointer refer to nursery objects?
+ * Yes => Use JS::Heap<T>
+ * Never => Use JS::TenuredHeap<T> (optimization)
+ *
+ * Write barriers
+ * ==============
+ *
+ * A write barrier is a mechanism used by incremental or generational GCs to
+ * ensure that every value that needs to be marked is marked. In general, the
+ * write barrier should be invoked whenever a write can cause the set of things
+ * traced through by the GC to change. This includes:
+ *
+ * - writes to object properties
+ * - writes to array slots
+ * - writes to fields like JSObject::shape_ that we trace through
+ * - writes to fields in private data
+ * - writes to non-markable fields like JSObject::private that point to
+ * markable data
+ *
+ * The last category is the trickiest. Even though the private pointer does not
+ * point to a GC thing, changing the private pointer may change the set of
+ * objects that are traced by the GC. Therefore it needs a write barrier.
+ *
+ * Every barriered write should have the following form:
+ *
+ * <pre-barrier>
+ * obj->field = value; // do the actual write
+ * <post-barrier>
+ *
+ * The pre-barrier is used for incremental GC and the post-barrier is for
+ * generational GC.
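+ *
+ * As a rough sketch (illustrative, not the exact code generated), an
+ * assignment through one of the wrapper classes defined below expands to
+ * approximately:
+ *
+ *   HeapPtr<JSObject*> field;
+ *   field = newObj;
+ *   //  ~ InternalBarrierMethods<JSObject*>::preBarrier(oldValue);  // <pre>
+ *   //    ... plain store of newObj into the underlying slot ...
+ *   //    InternalBarrierMethods<JSObject*>::postBarrier(&slot, oldValue,
+ *   //                                                   newObj);   // <post>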
+ *
+ * Pre-write barrier
+ * -----------------
+ *
+ * To understand the pre-barrier, let's consider how incremental GC works. The
+ * GC itself is divided into "slices". Between each slice, JS code is allowed to
+ * run. Each slice should be short so that the user doesn't notice the
+ * interruptions. In our GC, the structure of the slices is as follows:
+ *
+ * 1. ... JS work, which leads to a request to do GC ...
+ * 2. [first GC slice, which performs all root marking and (maybe) more marking]
+ * 3. ... more JS work is allowed to run ...
+ * 4. [GC mark slice, which runs entirely in
+ * GCRuntime::markUntilBudgetExhausted]
+ * 5. ... more JS work ...
+ * 6. [GC mark slice, which runs entirely in
+ * GCRuntime::markUntilBudgetExhausted]
+ * 7. ... more JS work ...
+ * 8. [GC marking finishes; sweeping done non-incrementally; GC is done]
+ * 9. ... JS continues uninterrupted now that the GC has finished ...
+ *
+ * Of course, there may be a different number of slices depending on how much
+ * marking is to be done.
+ *
+ * The danger inherent in this scheme is that the JS code in steps 3, 5, and 7
+ * might change the heap in a way that causes the GC to collect an object that
+ * is actually reachable. The write barrier prevents this from happening. We use
+ * a variant of incremental GC called "snapshot at the beginning." This approach
+ * guarantees the invariant that if an object is reachable in step 2, then we
+ * will mark it eventually. The name comes from the idea that we take a
+ * theoretical "snapshot" of all reachable objects in step 2; all objects in
+ * that snapshot should eventually be marked. (Note that the write barrier
+ * verifier code takes an actual snapshot.)
+ *
+ * The basic correctness invariant of a snapshot-at-the-beginning collector is
+ * that any object reachable at the end of the GC (step 9) must either:
+ * (1) have been reachable at the beginning (step 2) and thus in the snapshot
+ * (2) or must have been newly allocated, in steps 3, 5, or 7.
+ * To deal with case (2), any objects allocated during an incremental GC are
+ * automatically marked black.
+ *
+ * This strategy is actually somewhat conservative: if an object becomes
+ * unreachable between steps 2 and 8, it would be safe to collect it. We won't,
+ * mainly for simplicity. (Also, note that the snapshot is entirely
+ * theoretical. We don't actually do anything special in step 2 that we wouldn't
+ * do in a non-incremental GC.)
+ *
+ * It's the pre-barrier's job to maintain the snapshot invariant. Consider the
+ * write "obj->field = value". Let the prior value of obj->field be
+ * value0. Since it's possible that value0 may have been what obj->field
+ * contained in step 2, when the snapshot was taken, the barrier marks
+ * value0. Note that it only does this if we're in the middle of an incremental
+ * GC. Since this is rare, the cost of the write barrier is usually just an
+ * extra branch.
+ *
+ * In practice, we implement the pre-barrier differently based on the type of
+ * value0. E.g., see JSObject::preWriteBarrier, which is used if obj->field is
+ * a JSObject*. It takes value0 as a parameter.
+ *
+ * Post-write barrier
+ * ------------------
+ *
+ * For generational GC, we want to be able to quickly collect the nursery in a
+ * minor collection. Part of the way this is achieved is to only mark the
+ * nursery itself; tenured things, which may form the majority of the heap, are
+ * not traced through or marked. This leads to the problem of what to do about
+ * tenured objects that have pointers into the nursery: if such things are not
+ * marked, they may be discarded while there are still live objects which
+ * reference them. The solution is to maintain information about these pointers,
+ * and mark their targets when we start a minor collection.
+ *
+ * The pointers can be thought of as edges in an object graph, and the set of
+ * edges from the tenured generation into the nursery is known as the remembered
+ * set. Post barriers are used to track this remembered set.
+ *
+ * Whenever a slot which could contain such a pointer is written, we check
+ * whether the pointed-to thing is in the nursery (if storeBuffer() returns a
+ * buffer). If so we add the cell into the store buffer, which is the
+ * collector's representation of the remembered set. This means that when we
+ * come to do a minor collection we can examine the contents of the store buffer
+ * and mark any edge targets that are in the nursery.
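+ *
+ * A minimal sketch of that check, mirroring
+ * InternalBarrierMethods<Value>::postBarrier later in this file:
+ *
+ *   if (js::gc::StoreBuffer* sb = next.toGCThing()->storeBuffer()) {
+ *     sb->putValue(vp);  // record |vp| in the remembered set
+ *   }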
+ *
+ * Read barriers
+ * =============
+ *
+ * Weak pointer read barrier
+ * -------------------------
+ *
+ * Weak pointers must have a read barrier to prevent the referent from being
+ * collected if it is read after the start of an incremental GC.
+ *
+ * The problem happens when, during an incremental GC, some code reads a weak
+ * pointer and writes it somewhere on the heap that has been marked black in a
+ * previous slice. Since the weak pointer will not otherwise be marked and will
+ * be swept and finalized in the last slice, this will leave the pointer just
+ * written dangling after the GC. To solve this, we immediately mark black all
+ * weak pointers that get read between slices so that it is safe to store them
+ * in an already marked part of the heap, e.g. in Rooted.
+ *
+ * Cycle collector read barrier
+ * ----------------------------
+ *
+ * Heap pointers external to the engine may be marked gray. The JS API has an
+ * invariant that no gray pointers may be passed, and this is maintained by a
+ * read barrier that calls ExposeGCThingToActiveJS on such pointers. This is
+ * implemented by JS::Heap<T> in js/RootingAPI.h.
+ *
+ * Implementation Details
+ * ======================
+ *
+ * One additional note: not all object writes need to be pre-barriered. Writes
+ * to newly allocated objects do not need a pre-barrier. In these cases, we use
+ * the "obj->field.init(value)" method instead of "obj->field = value". We use
+ * the init naming idiom in many places to signify that a field is being
+ * assigned for the first time.
+ *
+ * This file implements the following hierarchy of classes:
+ *
+ * BarrieredBase base class of all barriers
+ * | |
+ * | WriteBarriered base class which provides common write operations
+ * | | | | |
+ * | | | | PreBarriered provides pre-barriers only
+ * | | | |
+ * | | | GCPtr provides pre- and post-barriers
+ * | | |
+ * | | HeapPtr provides pre- and post-barriers; is relocatable
+ * | | and deletable for use inside C++ managed memory
+ * | |
+ * | HeapSlot similar to GCPtr, but tailored to slots storage
+ * |
+ * ReadBarriered base class which provides common read operations
+ * |
+ * WeakHeapPtr provides read barriers only
+ *
+ *
+ * The implementation of the barrier logic is implemented in the
+ * Cell/TenuredCell base classes, which are called via:
+ *
+ * WriteBarriered<T>::pre
+ * -> InternalBarrierMethods<T*>::preBarrier
+ * -> Cell::preWriteBarrier
+ * -> InternalBarrierMethods<Value>::preBarrier
+ * -> InternalBarrierMethods<jsid>::preBarrier
+ * -> InternalBarrierMethods<T*>::preBarrier
+ * -> Cell::preWriteBarrier
+ *
+ * GCPtr<T>::post and HeapPtr<T>::post
+ * -> InternalBarrierMethods<T*>::postBarrier
+ * -> gc::PostWriteBarrierImpl
+ * -> InternalBarrierMethods<Value>::postBarrier
+ * -> StoreBuffer::put
+ *
+ * Barriers for use outside of the JS engine call into the same barrier
+ * implementations at InternalBarrierMethods<T>::post via an indirect call to
+ * Heap(.+)PostWriteBarrier.
+ *
+ * These classes are designed to wrap GC thing pointers or values that
+ * act like them (i.e. JS::Value and jsid). It is possible to use them for
+ * other types by supplying the necessary barrier implementations but this
+ * is not usually necessary and should be done with caution.
+ */
+
+namespace js {
+
+class NativeObject;
+
+namespace gc {
+
+void ValueReadBarrier(const Value& v);
+void ValuePreWriteBarrier(const Value& v);
+void IdPreWriteBarrier(jsid id);
+void CellPtrPreWriteBarrier(JS::GCCellPtr thing);
+
+} // namespace gc
+
+#ifdef DEBUG
+
+bool CurrentThreadIsTouchingGrayThings();
+
+bool IsMarkedBlack(JSObject* obj);
+
+#endif
+
+struct MOZ_RAII AutoTouchingGrayThings {
+#ifdef DEBUG
+ AutoTouchingGrayThings();
+ ~AutoTouchingGrayThings();
+#else
+ AutoTouchingGrayThings() {}
+#endif
+};
+
+template <typename T>
+struct InternalBarrierMethods {};
+
+template <typename T>
+struct InternalBarrierMethods<T*> {
+ static bool isMarkable(const T* v) { return v != nullptr; }
+
+ static void preBarrier(T* v) { gc::PreWriteBarrier(v); }
+
+ static void postBarrier(T** vp, T* prev, T* next) {
+ gc::PostWriteBarrier(vp, prev, next);
+ }
+
+ static void readBarrier(T* v) { gc::ReadBarrier(v); }
+
+#ifdef DEBUG
+ static void assertThingIsNotGray(T* v) { return T::assertThingIsNotGray(v); }
+#endif
+};
+
+template <>
+struct InternalBarrierMethods<Value> {
+ static bool isMarkable(const Value& v) { return v.isGCThing(); }
+
+ static void preBarrier(const Value& v) {
+ if (v.isGCThing()) {
+ gc::ValuePreWriteBarrier(v);
+ }
+ }
+
+ static MOZ_ALWAYS_INLINE void postBarrier(Value* vp, const Value& prev,
+ const Value& next) {
+ MOZ_ASSERT(!CurrentThreadIsIonCompiling());
+ MOZ_ASSERT(vp);
+
+ // If the target needs an entry, add it.
+ js::gc::StoreBuffer* sb;
+ if ((next.isObject() || next.isString() || next.isBigInt()) &&
+ (sb = next.toGCThing()->storeBuffer())) {
+ // If we know that the prev has already inserted an entry, we can
+ // skip doing the lookup to add the new entry. Note that we cannot
+ // safely assert the presence of the entry because it may have been
+ // added via a different store buffer.
+ if ((prev.isObject() || prev.isString() || prev.isBigInt()) &&
+ prev.toGCThing()->storeBuffer()) {
+ return;
+ }
+ sb->putValue(vp);
+ return;
+ }
+ // Remove the prev entry if the new value does not need it.
+ if ((prev.isObject() || prev.isString() || prev.isBigInt()) &&
+ (sb = prev.toGCThing()->storeBuffer())) {
+ sb->unputValue(vp);
+ }
+ }
+
+ static void readBarrier(const Value& v) {
+ if (v.isGCThing()) {
+ gc::ValueReadBarrier(v);
+ }
+ }
+
+#ifdef DEBUG
+ static void assertThingIsNotGray(const Value& v) {
+ JS::AssertValueIsNotGray(v);
+ }
+#endif
+};
+
+template <>
+struct InternalBarrierMethods<jsid> {
+ static bool isMarkable(jsid id) { return id.isGCThing(); }
+ static void preBarrier(jsid id) {
+ if (id.isGCThing()) {
+ gc::IdPreWriteBarrier(id);
+ }
+ }
+ static void postBarrier(jsid* idp, jsid prev, jsid next) {}
+#ifdef DEBUG
+ static void assertThingIsNotGray(jsid id) { JS::AssertIdIsNotGray(id); }
+#endif
+};
+
+template <typename T>
+static inline void AssertTargetIsNotGray(const T& v) {
+#ifdef DEBUG
+ if (!CurrentThreadIsTouchingGrayThings()) {
+ InternalBarrierMethods<T>::assertThingIsNotGray(v);
+ }
+#endif
+}
+
+// Base class of all barrier types.
+//
+// This is marked non-memmovable since post barriers added by derived classes
+// can add pointers to class instances to the store buffer.
+template <typename T>
+class MOZ_NON_MEMMOVABLE BarrieredBase {
+ protected:
+ // BarrieredBase is not directly instantiable.
+ explicit BarrieredBase(const T& v) : value(v) {}
+
+ // BarrieredBase subclasses cannot be copy constructed by default.
+ BarrieredBase(const BarrieredBase<T>& other) = default;
+
+ // Storage for all barrier classes. |value| must be a GC thing reference
+ // type: either a direct pointer to a GC thing or a supported tagged
+ // pointer that can reference GC things, such as JS::Value or jsid. Nested
+ // barrier types are NOT supported. See assertTypeConstraints.
+ T value;
+
+ public:
+ using ElementType = T;
+
+ // Note: this is public because C++ cannot friend to a specific template
+ // instantiation. Friending to the generic template leads to a number of
+ // unintended consequences, including template resolution ambiguity and a
+ // circular dependency with Tracing.h.
+ T* unbarrieredAddress() const { return const_cast<T*>(&value); }
+};
+
+// Base class for barriered pointer types that intercept only writes.
+template <class T>
+class WriteBarriered : public BarrieredBase<T>,
+ public WrappedPtrOperations<T, WriteBarriered<T>> {
+ protected:
+ using BarrieredBase<T>::value;
+
+ // WriteBarriered is not directly instantiable.
+ explicit WriteBarriered(const T& v) : BarrieredBase<T>(v) {}
+
+ public:
+ DECLARE_POINTER_CONSTREF_OPS(T);
+
+ // Use this if the automatic coercion to T isn't working.
+ const T& get() const { return this->value; }
+
+ // Use this if you want to change the value without invoking barriers.
+ // Obviously this is dangerous unless you know the barrier is not needed.
+ void unbarrieredSet(const T& v) { this->value = v; }
+
+ // For users who need to manually barrier the raw types.
+ static void preWriteBarrier(const T& v) {
+ InternalBarrierMethods<T>::preBarrier(v);
+ }
+
+ protected:
+ void pre() { InternalBarrierMethods<T>::preBarrier(this->value); }
+ MOZ_ALWAYS_INLINE void post(const T& prev, const T& next) {
+ InternalBarrierMethods<T>::postBarrier(&this->value, prev, next);
+ }
+};
+
+#define DECLARE_POINTER_ASSIGN_AND_MOVE_OPS(Wrapper, T) \
+ DECLARE_POINTER_ASSIGN_OPS(Wrapper, T) \
+ Wrapper<T>& operator=(Wrapper<T>&& other) { \
+ setUnchecked(other.release()); \
+ return *this; \
+ }
+
+/*
+ * PreBarriered only automatically handles pre-barriers. Post-barriers must be
+ * manually implemented when using this class. GCPtr and HeapPtr should be used
+ * in all cases that do not require explicit low-level control of moving
+ * behavior.
+ *
+ * This class is useful for example for HashMap keys where automatically
+ * updating a moved nursery pointer would break the hash table.
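+ *
+ * Illustrative (hypothetical) sketch: a table whose keys the embedder sweeps
+ * and re-keys itself after a minor GC, so only the pre-barrier is wanted:
+ *
+ *   js::HashMap<PreBarriered<JSObject*>, uint32_t, SomeHasher,
+ *               js::SystemAllocPolicy>
+ *       table;  // SomeHasher: a caller-provided hash policy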
+ */
+template <class T>
+class PreBarriered : public WriteBarriered<T> {
+ public:
+ PreBarriered() : WriteBarriered<T>(JS::SafelyInitialized<T>()) {}
+ /*
+ * Allow implicit construction for use in generic contexts, such as
+ * DebuggerWeakMap::markKeys.
+ */
+ MOZ_IMPLICIT PreBarriered(const T& v) : WriteBarriered<T>(v) {}
+
+ explicit PreBarriered(const PreBarriered<T>& other)
+ : WriteBarriered<T>(other.value) {}
+
+ PreBarriered(PreBarriered<T>&& other) : WriteBarriered<T>(other.release()) {}
+
+ ~PreBarriered() { this->pre(); }
+
+ void init(const T& v) { this->value = v; }
+
+ /* Use to set the pointer to nullptr. */
+ void clear() { set(JS::SafelyInitialized<T>()); }
+
+ DECLARE_POINTER_ASSIGN_AND_MOVE_OPS(PreBarriered, T);
+
+ private:
+ void set(const T& v) {
+ AssertTargetIsNotGray(v);
+ setUnchecked(v);
+ }
+
+ void setUnchecked(const T& v) {
+ this->pre();
+ this->value = v;
+ }
+
+ T release() {
+ T tmp = this->value;
+ this->value = JS::SafelyInitialized<T>();
+ return tmp;
+ }
+};
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <typename T>
+struct DefineComparisonOps<js::PreBarriered<T>> : std::true_type {
+ static const T& get(const js::PreBarriered<T>& v) { return v.get(); }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+/*
+ * A pre- and post-barriered heap pointer, for use inside the JS engine.
+ *
+ * It must only be stored in memory that has GC lifetime. GCPtr must not be
+ * used in contexts where it may be implicitly moved or deleted, e.g. most
+ * containers.
+ *
+ * The post-barriers implemented by this class are faster than those
+ * implemented by js::HeapPtr<T> or JS::Heap<T> at the cost of not
+ * automatically handling deletion or movement.
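+ *
+ * A typical (illustrative) use is a pointer field inside a GC-allocated
+ * object, e.g. something along the lines of:
+ *
+ *   class MyGCThing : public js::gc::TenuredCell {  // hypothetical GC thing
+ *     GCPtr<JSObject*> target_;
+ *     // ...
+ *   };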
+ */
+template <class T>
+class GCPtr : public WriteBarriered<T> {
+ public:
+ GCPtr() : WriteBarriered<T>(JS::SafelyInitialized<T>()) {}
+
+ explicit GCPtr(const T& v) : WriteBarriered<T>(v) {
+ this->post(JS::SafelyInitialized<T>(), v);
+ }
+
+ explicit GCPtr(const GCPtr<T>& v) : WriteBarriered<T>(v) {
+ this->post(JS::SafelyInitialized<T>(), v);
+ }
+
+#ifdef DEBUG
+ ~GCPtr() {
+ // No barriers are necessary as this only happens when the GC is sweeping.
+ //
+ // If this assertion fails you may need to make the containing object use a
+ // HeapPtr instead, as this can be deleted from outside of GC.
+ MOZ_ASSERT(CurrentThreadIsGCSweeping() || CurrentThreadIsGCFinalizing());
+
+ Poison(this, JS_FREED_HEAP_PTR_PATTERN, sizeof(*this),
+ MemCheckKind::MakeNoAccess);
+ }
+#endif
+
+ void init(const T& v) {
+ AssertTargetIsNotGray(v);
+ this->value = v;
+ this->post(JS::SafelyInitialized<T>(), v);
+ }
+
+ DECLARE_POINTER_ASSIGN_OPS(GCPtr, T);
+
+ private:
+ void set(const T& v) {
+ AssertTargetIsNotGray(v);
+ setUnchecked(v);
+ }
+
+ void setUnchecked(const T& v) {
+ this->pre();
+ T tmp = this->value;
+ this->value = v;
+ this->post(tmp, this->value);
+ }
+
+ /*
+ * Unlike HeapPtr<T>, GCPtr<T> must be managed with GC lifetimes.
+ * Specifically, the memory used by the pointer itself must be live until
+ * at least the next minor GC. For that reason, move semantics are invalid
+ * and are deleted here. Please note that not all containers support move
+ * semantics, so this does not completely prevent invalid uses.
+ */
+ GCPtr(GCPtr<T>&&) = delete;
+ GCPtr<T>& operator=(GCPtr<T>&&) = delete;
+};
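+
+/*
+ * Illustrative sketch (not part of the original header): GCPtr fields live
+ * directly inside GC things, which are only freed by the GC and never moved
+ * or deleted by C++ code. |MyThing| is hypothetical.
+ *
+ *   class MyThing : public gc::TenuredCell {
+ *     GCPtr<JSObject*> target_;
+ *    public:
+ *     void setTarget(JSObject* obj) { target_ = obj; }  // Pre + post barrier.
+ *   };
+ */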
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <typename T>
+struct DefineComparisonOps<js::GCPtr<T>> : std::true_type {
+ static const T& get(const js::GCPtr<T>& v) { return v.get(); }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+/*
+ * A pre- and post-barriered heap pointer, for use inside the JS engine. These
+ * heap pointers can be stored in C++ containers like GCVector and GCHashMap.
+ *
+ * The GC sometimes keeps pointers to pointers to GC things --- for example, to
+ * track references into the nursery. However, C++ containers like GCVector and
+ * GCHashMap usually reserve the right to relocate their elements any time
+ * they're modified, invalidating all pointers to the elements. HeapPtr
+ * has a move constructor which knows how to keep the GC up to date if it is
+ * moved to a new location.
+ *
+ * However, because of this additional communication with the GC, HeapPtr
+ * is somewhat slower, so it should only be used in contexts where this ability
+ * is necessary.
+ *
+ * Obviously, JSObjects, JSStrings, and the like get tenured and compacted, so
+ * whatever pointers they contain get relocated, in the sense used here.
+ * However, since the GC itself is moving those values, it takes care of its
+ * internal pointers to those pointers itself. HeapPtr is only necessary
+ * when the relocation would otherwise occur without the GC's knowledge.
+ */
+template <class T>
+class HeapPtr : public WriteBarriered<T> {
+ public:
+ HeapPtr() : WriteBarriered<T>(JS::SafelyInitialized<T>()) {}
+
+ // Implicitly adding barriers is a reasonable default.
+ MOZ_IMPLICIT HeapPtr(const T& v) : WriteBarriered<T>(v) {
+ this->post(JS::SafelyInitialized<T>(), this->value);
+ }
+
+ MOZ_IMPLICIT HeapPtr(const HeapPtr<T>& other) : WriteBarriered<T>(other) {
+ this->post(JS::SafelyInitialized<T>(), this->value);
+ }
+
+ HeapPtr(HeapPtr<T>&& other) : WriteBarriered<T>(other.release()) {
+ this->post(JS::SafelyInitialized<T>(), this->value);
+ }
+
+ ~HeapPtr() {
+ this->pre();
+ this->post(this->value, JS::SafelyInitialized<T>());
+ }
+
+ void init(const T& v) {
+ MOZ_ASSERT(this->value == JS::SafelyInitialized<T>());
+ AssertTargetIsNotGray(v);
+ this->value = v;
+ this->post(JS::SafelyInitialized<T>(), this->value);
+ }
+
+ DECLARE_POINTER_ASSIGN_AND_MOVE_OPS(HeapPtr, T);
+
+ /* Make this friend so it can access pre() and post(). */
+ template <class T1, class T2>
+ friend inline void BarrieredSetPair(Zone* zone, HeapPtr<T1*>& v1, T1* val1,
+ HeapPtr<T2*>& v2, T2* val2);
+
+ protected:
+ void set(const T& v) {
+ AssertTargetIsNotGray(v);
+ setUnchecked(v);
+ }
+
+ void setUnchecked(const T& v) {
+ this->pre();
+ postBarrieredSet(v);
+ }
+
+ void postBarrieredSet(const T& v) {
+ T tmp = this->value;
+ this->value = v;
+ this->post(tmp, this->value);
+ }
+
+ T release() {
+ T tmp = this->value;
+ postBarrieredSet(JS::SafelyInitialized<T>());
+ return tmp;
+ }
+};
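+
+/*
+ * Illustrative sketch (not part of the original header): HeapPtr is safe to
+ * store in relocating containers, assuming the container itself is traced by
+ * its owner. |objects| and |obj| are hypothetical.
+ *
+ *   JS::GCVector<HeapPtr<JSObject*>, 0, SystemAllocPolicy> objects;
+ *   if (!objects.append(obj)) {
+ *     // Handle OOM.
+ *   }
+ */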
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <typename T>
+struct DefineComparisonOps<js::HeapPtr<T>> : std::true_type {
+ static const T& get(const js::HeapPtr<T>& v) { return v.get(); }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+// Base class for barriered pointer types that intercept reads and writes.
+template <typename T>
+class ReadBarriered : public BarrieredBase<T> {
+ protected:
+ // ReadBarriered is not directly instantiable.
+ explicit ReadBarriered(const T& v) : BarrieredBase<T>(v) {}
+
+ void read() const { InternalBarrierMethods<T>::readBarrier(this->value); }
+ void post(const T& prev, const T& next) {
+ InternalBarrierMethods<T>::postBarrier(&this->value, prev, next);
+ }
+};
+
+// Incremental GC requires that weak pointers have read barriers. See the block
+// comment at the top of Barrier.h for a complete discussion of why.
+//
+// Note that this class also has post-barriers, so is safe to use with nursery
+// pointers. However, when used as a hashtable key, care must still be taken to
+// insert manual post-barriers on the table for rekeying if the key is based in
+// any way on the address of the object.
+template <typename T>
+class WeakHeapPtr : public ReadBarriered<T>,
+ public WrappedPtrOperations<T, WeakHeapPtr<T>> {
+ protected:
+ using ReadBarriered<T>::value;
+
+ public:
+ WeakHeapPtr() : ReadBarriered<T>(JS::SafelyInitialized<T>()) {}
+
+ // It is okay to add barriers implicitly.
+ MOZ_IMPLICIT WeakHeapPtr(const T& v) : ReadBarriered<T>(v) {
+ this->post(JS::SafelyInitialized<T>(), v);
+ }
+
+ // The copy constructor creates a new weak edge but the wrapped pointer does
+ // not escape, so no read barrier is necessary.
+ explicit WeakHeapPtr(const WeakHeapPtr& other) : ReadBarriered<T>(other) {
+ this->post(JS::SafelyInitialized<T>(), value);
+ }
+
+ // Move retains the lifetime status of the source edge, so does not fire
+ // the read barrier of the defunct edge.
+ WeakHeapPtr(WeakHeapPtr&& other) : ReadBarriered<T>(other.release()) {
+ this->post(JS::SafelyInitialized<T>(), value);
+ }
+
+ ~WeakHeapPtr() { this->post(this->value, JS::SafelyInitialized<T>()); }
+
+ WeakHeapPtr& operator=(const WeakHeapPtr& v) {
+ AssertTargetIsNotGray(v.value);
+ T prior = this->value;
+ this->value = v.value;
+ this->post(prior, v.value);
+ return *this;
+ }
+
+ const T& get() const {
+ if (InternalBarrierMethods<T>::isMarkable(this->value)) {
+ this->read();
+ }
+ return this->value;
+ }
+
+ const T& unbarrieredGet() const { return this->value; }
+
+ explicit operator bool() const { return bool(this->value); }
+
+ operator const T&() const { return get(); }
+
+ const T& operator->() const { return get(); }
+
+ void set(const T& v) {
+ AssertTargetIsNotGray(v);
+ setUnchecked(v);
+ }
+
+ void unbarrieredSet(const T& v) {
+ AssertTargetIsNotGray(v);
+ this->value = v;
+ }
+
+ private:
+ void setUnchecked(const T& v) {
+ T tmp = this->value;
+ this->value = v;
+ this->post(tmp, v);
+ }
+
+ T release() {
+ T tmp = value;
+ set(JS::SafelyInitialized<T>());
+ return tmp;
+ }
+};
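+
+/*
+ * Illustrative sketch (not part of the original header): reading through a
+ * WeakHeapPtr fires the read barrier, which keeps the referent alive for the
+ * rest of an incremental GC. |cachedObject| and use() are hypothetical.
+ *
+ *   WeakHeapPtr<JSObject*> cachedObject;
+ *   if (JSObject* obj = cachedObject) {  // Read barrier fires here.
+ *     use(obj);
+ *   }
+ */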
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <typename T>
+struct DefineComparisonOps<js::WeakHeapPtr<T>> : std::true_type {
+ static const T& get(const js::WeakHeapPtr<T>& v) {
+ return v.unbarrieredGet();
+ }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+// A pre- and post-barriered Value that is specialized to be aware that it
+// resides in a slots or elements vector. This allows it to be relocated in
+// memory, but with substantially less overhead than a HeapPtr.
+class HeapSlot : public WriteBarriered<Value> {
+ public:
+ enum Kind { Slot = 0, Element = 1 };
+
+ void init(NativeObject* owner, Kind kind, uint32_t slot, const Value& v) {
+ value = v;
+ post(owner, kind, slot, v);
+ }
+
+ void destroy() { pre(); }
+
+#ifdef DEBUG
+ bool preconditionForSet(NativeObject* owner, Kind kind, uint32_t slot) const;
+ void assertPreconditionForPostWriteBarrier(NativeObject* obj, Kind kind,
+ uint32_t slot,
+ const Value& target) const;
+#endif
+
+ MOZ_ALWAYS_INLINE void set(NativeObject* owner, Kind kind, uint32_t slot,
+ const Value& v) {
+ MOZ_ASSERT(preconditionForSet(owner, kind, slot));
+ pre();
+ value = v;
+ post(owner, kind, slot, v);
+ }
+
+ private:
+ void post(NativeObject* owner, Kind kind, uint32_t slot,
+ const Value& target) {
+#ifdef DEBUG
+ assertPreconditionForPostWriteBarrier(owner, kind, slot, target);
+#endif
+ if (this->value.isObject() || this->value.isString() ||
+ this->value.isBigInt()) {
+ gc::Cell* cell = this->value.toGCThing();
+ if (cell->storeBuffer()) {
+ cell->storeBuffer()->putSlot(owner, kind, slot, 1);
+ }
+ }
+ }
+};
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <>
+struct DefineComparisonOps<js::HeapSlot> : std::true_type {
+ static const Value& get(const js::HeapSlot& v) { return v.get(); }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+class HeapSlotArray {
+ HeapSlot* array;
+
+ public:
+ explicit HeapSlotArray(HeapSlot* array) : array(array) {}
+
+ HeapSlot* begin() const { return array; }
+
+ operator const Value*() const {
+ static_assert(sizeof(GCPtr<Value>) == sizeof(Value));
+ static_assert(sizeof(HeapSlot) == sizeof(Value));
+ return reinterpret_cast<const Value*>(array);
+ }
+ operator HeapSlot*() const { return begin(); }
+
+ HeapSlotArray operator+(int offset) const {
+ return HeapSlotArray(array + offset);
+ }
+ HeapSlotArray operator+(uint32_t offset) const {
+ return HeapSlotArray(array + offset);
+ }
+};
+
+/*
+ * This is a hack for RegExpStatics::updateFromMatch. It allows us to do two
+ * barriers with only one branch to check if we're in an incremental GC.
+ */
+template <class T1, class T2>
+static inline void BarrieredSetPair(Zone* zone, HeapPtr<T1*>& v1, T1* val1,
+ HeapPtr<T2*>& v2, T2* val2) {
+ AssertTargetIsNotGray(val1);
+ AssertTargetIsNotGray(val2);
+ if (T1::needPreWriteBarrier(zone)) {
+ v1.pre();
+ v2.pre();
+ }
+ v1.postBarrieredSet(val1);
+ v2.postBarrieredSet(val2);
+}
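+
+/*
+ * Illustrative sketch (not part of the original header); the HeapPtr fields
+ * shown are hypothetical members of an object belonging to |zone|:
+ *
+ *   BarrieredSetPair(zone, statics->matchesInput_, newInput,
+ *                    statics->lazySource_, newSource);
+ */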
+
+/*
+ * ImmutableTenuredPtr is designed for one very narrow case: replacing
+ * immutable raw pointers to GC-managed things, implicitly converting to a
+ * handle type for ease of use. Pointers encapsulated by this type must:
+ *
+ * be immutable (no incremental write barriers),
+ * never point into the nursery (no generational write barriers), and
+ * be traced via MarkRuntime (we use fromMarkedLocation).
+ *
+ * In short: you *really* need to know what you're doing before you use this
+ * class!
+ */
+template <typename T>
+class MOZ_HEAP_CLASS ImmutableTenuredPtr {
+ T value;
+
+ public:
+ operator T() const { return value; }
+ T operator->() const { return value; }
+
+ // `ImmutableTenuredPtr<T>` is implicitly convertible to `Handle<T>`.
+ //
+ // In case you need to convert to `Handle<U>` where `U` is base class of `T`,
+ // convert this to `Handle<T>` by `toHandle()` and then use implicit
+ // conversion from `Handle<T>` to `Handle<U>`.
+ operator Handle<T>() const { return toHandle(); }
+ Handle<T> toHandle() const { return Handle<T>::fromMarkedLocation(&value); }
+
+ void init(T ptr) {
+ MOZ_ASSERT(ptr->isTenured());
+ AssertTargetIsNotGray(ptr);
+ value = ptr;
+ }
+
+ T get() const { return value; }
+ const T* address() { return &value; }
+};
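+
+/*
+ * Illustrative sketch (not part of the original header): a runtime-owned
+ * field initialized once with a tenured, non-gray atom and then used as a
+ * handle. |lengthAtom| is hypothetical.
+ *
+ *   ImmutableTenuredPtr<PropertyName*> lengthName;
+ *   lengthName.init(lengthAtom);
+ *   Handle<PropertyName*> handle = lengthName;  // Implicit conversion.
+ */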
+
+#if MOZ_IS_GCC
+template struct JS_PUBLIC_API MovableCellHasher<JSObject*>;
+#endif
+
+template <typename T>
+struct MovableCellHasher<PreBarriered<T>> {
+ using Key = PreBarriered<T>;
+ using Lookup = T;
+
+ static bool hasHash(const Lookup& l) {
+ return MovableCellHasher<T>::hasHash(l);
+ }
+ static bool ensureHash(const Lookup& l) {
+ return MovableCellHasher<T>::ensureHash(l);
+ }
+ static HashNumber hash(const Lookup& l) {
+ return MovableCellHasher<T>::hash(l);
+ }
+ static bool match(const Key& k, const Lookup& l) {
+ return MovableCellHasher<T>::match(k, l);
+ }
+};
+
+template <typename T>
+struct MovableCellHasher<HeapPtr<T>> {
+ using Key = HeapPtr<T>;
+ using Lookup = T;
+
+ static bool hasHash(const Lookup& l) {
+ return MovableCellHasher<T>::hasHash(l);
+ }
+ static bool ensureHash(const Lookup& l) {
+ return MovableCellHasher<T>::ensureHash(l);
+ }
+ static HashNumber hash(const Lookup& l) {
+ return MovableCellHasher<T>::hash(l);
+ }
+ static bool match(const Key& k, const Lookup& l) {
+ return MovableCellHasher<T>::match(k, l);
+ }
+};
+
+template <typename T>
+struct MovableCellHasher<WeakHeapPtr<T>> {
+ using Key = WeakHeapPtr<T>;
+ using Lookup = T;
+
+ static bool hasHash(const Lookup& l) {
+ return MovableCellHasher<T>::hasHash(l);
+ }
+ static bool ensureHash(const Lookup& l) {
+ return MovableCellHasher<T>::ensureHash(l);
+ }
+ static HashNumber hash(const Lookup& l) {
+ return MovableCellHasher<T>::hash(l);
+ }
+ static bool match(const Key& k, const Lookup& l) {
+ return MovableCellHasher<T>::match(k.unbarrieredGet(), l);
+ }
+};
+
+/* Useful for hashtables with a HeapPtr as key. */
+template <class T>
+struct HeapPtrHasher {
+ using Key = HeapPtr<T>;
+ using Lookup = T;
+
+ static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
+ static bool match(const Key& k, Lookup l) { return k.get() == l; }
+ static void rekey(Key& k, const Key& newKey) { k.unbarrieredSet(newKey); }
+};
+
+template <class T>
+struct PreBarrieredHasher {
+ using Key = PreBarriered<T>;
+ using Lookup = T;
+
+ static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
+ static bool match(const Key& k, Lookup l) { return k.get() == l; }
+ static void rekey(Key& k, const Key& newKey) { k.unbarrieredSet(newKey); }
+};
+
+/* Useful for hashtables with a WeakHeapPtr as key. */
+template <class T>
+struct WeakHeapPtrHasher {
+ using Key = WeakHeapPtr<T>;
+ using Lookup = T;
+
+ static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
+ static bool match(const Key& k, Lookup l) { return k.unbarrieredGet() == l; }
+ static void rekey(Key& k, const Key& newKey) {
+ k.set(newKey.unbarrieredGet());
+ }
+};
+
+} // namespace js
+
+namespace mozilla {
+
+template <class T>
+struct DefaultHasher<js::HeapPtr<T>> : js::HeapPtrHasher<T> {};
+
+template <class T>
+struct DefaultHasher<js::GCPtr<T>> {
+ // Not implemented. GCPtr can't be used as a hash table key because it has a
+ // post barrier but doesn't support relocation.
+};
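+
+// Illustrative sketch (not part of the original header): use a HeapPtr or
+// PreBarriered key instead, e.g.
+//   js::HashMap<js::HeapPtr<JSObject*>, uint32_t,
+//               js::DefaultHasher<js::HeapPtr<JSObject*>>,
+//               js::SystemAllocPolicy>
+// which picks up js::HeapPtrHasher via the specialization above.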
+
+template <class T>
+struct DefaultHasher<js::PreBarriered<T>> : js::PreBarrieredHasher<T> {};
+
+template <class T>
+struct DefaultHasher<js::WeakHeapPtr<T>> : js::WeakHeapPtrHasher<T> {};
+
+} // namespace mozilla
+
+namespace js {
+
+class ArrayObject;
+class DebugEnvironmentProxy;
+class GlobalObject;
+class ObjectGroup;
+class PropertyName;
+class Scope;
+class ScriptSourceObject;
+class Shape;
+class BaseShape;
+class UnownedBaseShape;
+class WasmInstanceObject;
+class WasmTableObject;
+
+namespace jit {
+class JitCode;
+} // namespace jit
+
+using PreBarrieredId = PreBarriered<jsid>;
+using PreBarrieredObject = PreBarriered<JSObject*>;
+using PreBarrieredValue = PreBarriered<Value>;
+
+using GCPtrNativeObject = GCPtr<NativeObject*>;
+using GCPtrArrayObject = GCPtr<ArrayObject*>;
+using GCPtrAtom = GCPtr<JSAtom*>;
+using GCPtrBigInt = GCPtr<BigInt*>;
+using GCPtrFunction = GCPtr<JSFunction*>;
+using GCPtrLinearString = GCPtr<JSLinearString*>;
+using GCPtrObject = GCPtr<JSObject*>;
+using GCPtrScript = GCPtr<JSScript*>;
+using GCPtrString = GCPtr<JSString*>;
+using GCPtrShape = GCPtr<Shape*>;
+using GCPtrUnownedBaseShape = GCPtr<UnownedBaseShape*>;
+using GCPtrObjectGroup = GCPtr<ObjectGroup*>;
+using GCPtrValue = GCPtr<Value>;
+using GCPtrId = GCPtr<jsid>;
+
+using ImmutablePropertyNamePtr = ImmutableTenuredPtr<PropertyName*>;
+using ImmutableSymbolPtr = ImmutableTenuredPtr<JS::Symbol*>;
+
+using WeakHeapPtrDebugEnvironmentProxy = WeakHeapPtr<DebugEnvironmentProxy*>;
+using WeakHeapPtrGlobalObject = WeakHeapPtr<GlobalObject*>;
+using WeakHeapPtrObject = WeakHeapPtr<JSObject*>;
+using WeakHeapPtrScript = WeakHeapPtr<JSScript*>;
+using WeakHeapPtrScriptSourceObject = WeakHeapPtr<ScriptSourceObject*>;
+using WeakHeapPtrShape = WeakHeapPtr<Shape*>;
+using WeakHeapPtrJitCode = WeakHeapPtr<jit::JitCode*>;
+using WeakHeapPtrObjectGroup = WeakHeapPtr<ObjectGroup*>;
+using WeakHeapPtrSymbol = WeakHeapPtr<JS::Symbol*>;
+using WeakHeapPtrWasmInstanceObject = WeakHeapPtr<WasmInstanceObject*>;
+using WeakHeapPtrWasmTableObject = WeakHeapPtr<WasmTableObject*>;
+
+using HeapPtrJitCode = HeapPtr<jit::JitCode*>;
+using HeapPtrNativeObject = HeapPtr<NativeObject*>;
+using HeapPtrObject = HeapPtr<JSObject*>;
+using HeapPtrRegExpShared = HeapPtr<RegExpShared*>;
+using HeapPtrValue = HeapPtr<Value>;
+
+} /* namespace js */
+
+#endif /* gc_Barrier_h */
diff --git a/js/src/gc/Cell.h b/js/src/gc/Cell.h
new file mode 100644
index 0000000000..d9ebb3a751
--- /dev/null
+++ b/js/src/gc/Cell.h
@@ -0,0 +1,780 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Cell_h
+#define gc_Cell_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/EndianUtils.h"
+
+#include <type_traits>
+
+#include "gc/GCEnum.h"
+#include "gc/Heap.h"
+#include "js/GCAnnotations.h"
+#include "js/shadow/Zone.h" // JS::shadow::Zone
+#include "js/TraceKind.h"
+#include "js/TypeDecls.h"
+
+namespace JS {
+enum class TraceKind;
+} /* namespace JS */
+
+namespace js {
+
+class GenericPrinter;
+
+extern bool RuntimeFromMainThreadIsHeapMajorCollecting(
+ JS::shadow::Zone* shadowZone);
+
+#ifdef DEBUG
+
+// Barriers can't be triggered during backend Ion compilation, which may run on
+// a helper thread.
+extern bool CurrentThreadIsIonCompiling();
+
+extern bool CurrentThreadIsGCMarking();
+extern bool CurrentThreadIsGCSweeping();
+extern bool CurrentThreadIsGCFinalizing();
+extern bool RuntimeIsVerifyingPreBarriers(JSRuntime* runtime);
+
+#endif
+
+extern void TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc,
+ gc::Cell** thingp,
+ const char* name);
+
+namespace gc {
+
+class Arena;
+enum class AllocKind : uint8_t;
+class StoreBuffer;
+class TenuredCell;
+
+extern void UnmarkGrayGCThingRecursively(TenuredCell* cell);
+
+// Like gc::MarkColor but allows the possibility of the cell being unmarked.
+//
+// This class mimics an enum class, but supports operator overloading.
+class CellColor {
+ public:
+ enum Color { White = 0, Gray = 1, Black = 2 };
+
+ CellColor() : color(White) {}
+
+ MOZ_IMPLICIT CellColor(MarkColor markColor)
+ : color(markColor == MarkColor::Black ? Black : Gray) {}
+
+ MOZ_IMPLICIT constexpr CellColor(Color c) : color(c) {}
+
+ MarkColor asMarkColor() const {
+ MOZ_ASSERT(color != White);
+ return color == Black ? MarkColor::Black : MarkColor::Gray;
+ }
+
+ // Implement a total ordering for CellColor, with white being 'least marked'
+ // and black being 'most marked'.
+ bool operator<(const CellColor other) const { return color < other.color; }
+ bool operator>(const CellColor other) const { return color > other.color; }
+ bool operator<=(const CellColor other) const { return color <= other.color; }
+ bool operator>=(const CellColor other) const { return color >= other.color; }
+ bool operator!=(const CellColor other) const { return color != other.color; }
+ bool operator==(const CellColor other) const { return color == other.color; }
+ explicit operator bool() const { return color != White; }
+
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+ const char* name() const {
+ switch (color) {
+ case CellColor::White:
+ return "white";
+ case CellColor::Black:
+ return "black";
+ case CellColor::Gray:
+ return "gray";
+ default:
+ MOZ_CRASH("Unexpected cell color");
+ }
+ }
+#endif
+
+ private:
+ Color color;
+};
+
+// [SMDOC] GC Cell
+//
+// A GC cell is the ultimate base class for all GC things. All types allocated
+// on the GC heap extend either gc::Cell or gc::TenuredCell. If a type is always
+// tenured, prefer the TenuredCell class as base.
+//
+// The first word of Cell is a uintptr_t that reserves the low three bits for GC
+// purposes. The remaining bits are available to sub-classes and can be used to
+// store a pointer to another gc::Cell. It can also be used for temporary
+// storage (see setTemporaryGCUnsafeData). To make use of the remaining space,
+// sub-classes derive from a helper class such as TenuredCellWithNonGCPointer.
+//
+// During moving GC operation a Cell may be marked as forwarded. This indicates
+// that a gc::RelocationOverlay is currently stored in the Cell's memory and
+// should be used to find the new location of the Cell.
+struct Cell {
+ protected:
+ // Cell header word. Stores GC flags and derived class data.
+ //
+ // This is atomic since it can be read from and written to by different
+ // threads during compacting GC, in a limited way. Specifically, writes that
+ // update the derived class data can race with reads that check the forwarded
+ // flag. The writes do not change the forwarded flag (which is always false in
+ // this situation).
+ mozilla::Atomic<uintptr_t, mozilla::MemoryOrdering::Relaxed> header_;
+
+ public:
+ static_assert(gc::CellFlagBitsReservedForGC >= 3,
+ "Not enough flag bits reserved for GC");
+ static constexpr uintptr_t RESERVED_MASK =
+ BitMask(gc::CellFlagBitsReservedForGC);
+
+ // Indicates whether the cell has been forwarded (moved) by generational or
+ // compacting GC and is now a RelocationOverlay.
+ static constexpr uintptr_t FORWARD_BIT = Bit(0);
+
+ // Bits 1 and 2 are currently unused.
+
+ bool isForwarded() const { return header_ & FORWARD_BIT; }
+ uintptr_t flags() const { return header_ & RESERVED_MASK; }
+
+ MOZ_ALWAYS_INLINE bool isTenured() const { return !IsInsideNursery(this); }
+ MOZ_ALWAYS_INLINE const TenuredCell& asTenured() const;
+ MOZ_ALWAYS_INLINE TenuredCell& asTenured();
+
+ MOZ_ALWAYS_INLINE bool isMarkedAny() const;
+ MOZ_ALWAYS_INLINE bool isMarkedBlack() const;
+ MOZ_ALWAYS_INLINE bool isMarkedGray() const;
+ MOZ_ALWAYS_INLINE bool isMarked(gc::MarkColor color) const;
+ MOZ_ALWAYS_INLINE bool isMarkedAtLeast(gc::MarkColor color) const;
+
+ MOZ_ALWAYS_INLINE CellColor color() const {
+ return isMarkedBlack() ? CellColor::Black
+ : isMarkedGray() ? CellColor::Gray
+ : CellColor::White;
+ }
+
+ inline JSRuntime* runtimeFromMainThread() const;
+
+ // Note: Unrestricted access to the runtime of a GC thing from an arbitrary
+ // thread can easily lead to races. Use this method very carefully.
+ inline JSRuntime* runtimeFromAnyThread() const;
+
+ // May be overridden by GC thing kinds that have a compartment pointer.
+ inline JS::Compartment* maybeCompartment() const { return nullptr; }
+
+ // The StoreBuffer used to record incoming pointers from the tenured heap.
+ // This will return nullptr for a tenured cell.
+ inline StoreBuffer* storeBuffer() const;
+
+ inline JS::TraceKind getTraceKind() const;
+
+ static MOZ_ALWAYS_INLINE bool needPreWriteBarrier(JS::Zone* zone);
+
+ template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
+ inline bool is() const {
+ return getTraceKind() == JS::MapTypeToTraceKind<T>::kind;
+ }
+
+ template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
+ inline T* as() {
+ // |this|-qualify the |is| call below to avoid compile errors with even
+ // fairly recent versions of gcc, e.g. 7.1.1 according to bz.
+ MOZ_ASSERT(this->is<T>());
+ return static_cast<T*>(this);
+ }
+
+ template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
+ inline const T* as() const {
+ // |this|-qualify the |is| call below to avoid compile errors with even
+ // fairly recent versions of gcc, e.g. 7.1.1 according to bz.
+ MOZ_ASSERT(this->is<T>());
+ return static_cast<const T*>(this);
+ }
+
+ inline JS::Zone* zone() const;
+ inline JS::Zone* zoneFromAnyThread() const;
+
+ // Get the zone for a cell known to be in the nursery.
+ inline JS::Zone* nurseryZone() const;
+ inline JS::Zone* nurseryZoneFromAnyThread() const;
+
+ // Default implementation for kinds that cannot be permanent. This may be
+ // overridden by derived classes.
+ MOZ_ALWAYS_INLINE bool isPermanentAndMayBeShared() const { return false; }
+
+#ifdef DEBUG
+ static inline void assertThingIsNotGray(Cell* cell);
+ inline bool isAligned() const;
+ void dump(GenericPrinter& out) const;
+ void dump() const;
+#endif
+
+ protected:
+ uintptr_t address() const;
+ inline TenuredChunk* chunk() const;
+
+ private:
+ // Cells are destroyed by the GC. Do not delete them directly.
+ void operator delete(void*) = delete;
+} JS_HAZ_GC_THING;
+
+// A GC TenuredCell gets behaviors that are valid for things in the Tenured
+// heap, such as access to the arena and mark bits.
+class TenuredCell : public Cell {
+ public:
+ MOZ_ALWAYS_INLINE bool isTenured() const {
+ MOZ_ASSERT(!IsInsideNursery(this));
+ return true;
+ }
+
+ // Mark bit management.
+ MOZ_ALWAYS_INLINE bool isMarkedAny() const;
+ MOZ_ALWAYS_INLINE bool isMarkedBlack() const;
+ MOZ_ALWAYS_INLINE bool isMarkedGray() const;
+
+ // Same as Cell::color, but skips nursery checks.
+ MOZ_ALWAYS_INLINE CellColor color() const {
+ return isMarkedBlack() ? CellColor::Black
+ : isMarkedGray() ? CellColor::Gray
+ : CellColor::White;
+ }
+
+ // The return value indicates if the cell went from unmarked to marked.
+ MOZ_ALWAYS_INLINE bool markIfUnmarked(
+ MarkColor color = MarkColor::Black) const;
+ MOZ_ALWAYS_INLINE void markBlack() const;
+ MOZ_ALWAYS_INLINE void copyMarkBitsFrom(const TenuredCell* src);
+ MOZ_ALWAYS_INLINE void unmark();
+
+ // Access to the arena.
+ inline Arena* arena() const;
+ inline AllocKind getAllocKind() const;
+ inline JS::TraceKind getTraceKind() const;
+ inline JS::Zone* zone() const;
+ inline JS::Zone* zoneFromAnyThread() const;
+ inline bool isInsideZone(JS::Zone* zone) const;
+
+ MOZ_ALWAYS_INLINE JS::shadow::Zone* shadowZone() const {
+ return JS::shadow::Zone::from(zone());
+ }
+ MOZ_ALWAYS_INLINE JS::shadow::Zone* shadowZoneFromAnyThread() const {
+ return JS::shadow::Zone::from(zoneFromAnyThread());
+ }
+
+ template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
+ inline bool is() const {
+ return getTraceKind() == JS::MapTypeToTraceKind<T>::kind;
+ }
+
+ template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
+ inline T* as() {
+ // |this|-qualify the |is| call below to avoid compile errors with even
+ // fairly recent versions of gcc, e.g. 7.1.1 according to bz.
+ MOZ_ASSERT(this->is<T>());
+ return static_cast<T*>(this);
+ }
+
+ template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
+ inline const T* as() const {
+ // |this|-qualify the |is| call below to avoid compile errors with even
+ // fairly recent versions of gcc, e.g. 7.1.1 according to bz.
+ MOZ_ASSERT(this->is<T>());
+ return static_cast<const T*>(this);
+ }
+
+ // Default implementation for kinds that don't require fixup.
+ void fixupAfterMovingGC() {}
+
+#ifdef DEBUG
+ inline bool isAligned() const;
+#endif
+};
+
+MOZ_ALWAYS_INLINE const TenuredCell& Cell::asTenured() const {
+ MOZ_ASSERT(isTenured());
+ return *static_cast<const TenuredCell*>(this);
+}
+
+MOZ_ALWAYS_INLINE TenuredCell& Cell::asTenured() {
+ MOZ_ASSERT(isTenured());
+ return *static_cast<TenuredCell*>(this);
+}
+
+MOZ_ALWAYS_INLINE bool Cell::isMarkedAny() const {
+ return !isTenured() || asTenured().isMarkedAny();
+}
+
+MOZ_ALWAYS_INLINE bool Cell::isMarkedBlack() const {
+ return !isTenured() || asTenured().isMarkedBlack();
+}
+
+MOZ_ALWAYS_INLINE bool Cell::isMarkedGray() const {
+ return isTenured() && asTenured().isMarkedGray();
+}
+
+MOZ_ALWAYS_INLINE bool Cell::isMarked(gc::MarkColor color) const {
+ return color == MarkColor::Gray ? isMarkedGray() : isMarkedBlack();
+}
+
+MOZ_ALWAYS_INLINE bool Cell::isMarkedAtLeast(gc::MarkColor color) const {
+ return color == MarkColor::Gray ? isMarkedAny() : isMarkedBlack();
+}
+
+inline JSRuntime* Cell::runtimeFromMainThread() const {
+ JSRuntime* rt = chunk()->runtime;
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ return rt;
+}
+
+inline JSRuntime* Cell::runtimeFromAnyThread() const {
+ return chunk()->runtime;
+}
+
+inline uintptr_t Cell::address() const {
+ uintptr_t addr = uintptr_t(this);
+ MOZ_ASSERT(addr % CellAlignBytes == 0);
+ MOZ_ASSERT(TenuredChunk::withinValidRange(addr));
+ return addr;
+}
+
+TenuredChunk* Cell::chunk() const {
+ uintptr_t addr = uintptr_t(this);
+ MOZ_ASSERT(addr % CellAlignBytes == 0);
+ addr &= ~ChunkMask;
+ return reinterpret_cast<TenuredChunk*>(addr);
+}
+
+inline StoreBuffer* Cell::storeBuffer() const { return chunk()->storeBuffer; }
+
+JS::Zone* Cell::zone() const {
+ if (isTenured()) {
+ return asTenured().zone();
+ }
+
+ return nurseryZone();
+}
+
+JS::Zone* Cell::zoneFromAnyThread() const {
+ if (isTenured()) {
+ return asTenured().zoneFromAnyThread();
+ }
+
+ return nurseryZoneFromAnyThread();
+}
+
+JS::Zone* Cell::nurseryZone() const {
+ JS::Zone* zone = nurseryZoneFromAnyThread();
+ MOZ_ASSERT(CurrentThreadIsGCMarking() || CurrentThreadCanAccessZone(zone));
+ return zone;
+}
+
+JS::Zone* Cell::nurseryZoneFromAnyThread() const {
+ return NurseryCellHeader::from(this)->zone();
+}
+
+#ifdef DEBUG
+extern Cell* UninlinedForwarded(const Cell* cell);
+#endif
+
+inline JS::TraceKind Cell::getTraceKind() const {
+ if (isTenured()) {
+ MOZ_ASSERT_IF(isForwarded(), UninlinedForwarded(this)->getTraceKind() ==
+ asTenured().getTraceKind());
+ return asTenured().getTraceKind();
+ }
+
+ return NurseryCellHeader::from(this)->traceKind();
+}
+
+/* static */ MOZ_ALWAYS_INLINE bool Cell::needPreWriteBarrier(JS::Zone* zone) {
+ return JS::shadow::Zone::from(zone)->needsIncrementalBarrier();
+}
+
+bool TenuredCell::isMarkedAny() const {
+ MOZ_ASSERT(arena()->allocated());
+ return chunk()->markBits.isMarkedAny(this);
+}
+
+bool TenuredCell::isMarkedBlack() const {
+ MOZ_ASSERT(arena()->allocated());
+ return chunk()->markBits.isMarkedBlack(this);
+}
+
+bool TenuredCell::isMarkedGray() const {
+ MOZ_ASSERT(arena()->allocated());
+ return chunk()->markBits.isMarkedGray(this);
+}
+
+bool TenuredCell::markIfUnmarked(MarkColor color /* = Black */) const {
+ return chunk()->markBits.markIfUnmarked(this, color);
+}
+
+void TenuredCell::markBlack() const { chunk()->markBits.markBlack(this); }
+
+void TenuredCell::copyMarkBitsFrom(const TenuredCell* src) {
+ MarkBitmap& markBits = chunk()->markBits;
+ markBits.copyMarkBit(this, src, ColorBit::BlackBit);
+ markBits.copyMarkBit(this, src, ColorBit::GrayOrBlackBit);
+}
+
+void TenuredCell::unmark() { chunk()->markBits.unmark(this); }
+
+inline Arena* TenuredCell::arena() const {
+ MOZ_ASSERT(isTenured());
+ uintptr_t addr = address();
+ addr &= ~ArenaMask;
+ return reinterpret_cast<Arena*>(addr);
+}
+
+AllocKind TenuredCell::getAllocKind() const { return arena()->getAllocKind(); }
+
+JS::TraceKind TenuredCell::getTraceKind() const {
+ return MapAllocToTraceKind(getAllocKind());
+}
+
+JS::Zone* TenuredCell::zone() const {
+ JS::Zone* zone = arena()->zone;
+ MOZ_ASSERT(CurrentThreadIsGCMarking() || CurrentThreadCanAccessZone(zone));
+ return zone;
+}
+
+JS::Zone* TenuredCell::zoneFromAnyThread() const { return arena()->zone; }
+
+bool TenuredCell::isInsideZone(JS::Zone* zone) const {
+ return zone == arena()->zone;
+}
+
+// Read barrier and pre-write barrier implementation for GC cells.
+
+template <typename T>
+MOZ_ALWAYS_INLINE void ReadBarrier(T* thing) {
+ static_assert(std::is_base_of_v<Cell, T>);
+ static_assert(!std::is_same_v<Cell, T> && !std::is_same_v<TenuredCell, T>);
+
+ if (thing && !thing->isPermanentAndMayBeShared()) {
+ ReadBarrierImpl(thing);
+ }
+}
+
+MOZ_ALWAYS_INLINE void ReadBarrierImpl(TenuredCell* thing) {
+ MOZ_ASSERT(!CurrentThreadIsIonCompiling());
+ MOZ_ASSERT(!CurrentThreadIsGCMarking());
+ MOZ_ASSERT(thing);
+ MOZ_ASSERT(CurrentThreadCanAccessZone(thing->zoneFromAnyThread()));
+
+ // Barriers should not be triggered on main thread while collecting.
+ mozilla::DebugOnly<JSRuntime*> runtime = thing->runtimeFromAnyThread();
+ MOZ_ASSERT_IF(CurrentThreadCanAccessRuntime(runtime),
+ !JS::RuntimeHeapIsCollecting());
+
+ JS::shadow::Zone* shadowZone = thing->shadowZoneFromAnyThread();
+ if (shadowZone->needsIncrementalBarrier()) {
+ // We should only observe barriers being enabled on the main thread.
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime));
+ Cell* tmp = thing;
+ TraceManuallyBarrieredGenericPointerEdge(shadowZone->barrierTracer(), &tmp,
+ "read barrier");
+ MOZ_ASSERT(tmp == thing);
+ return;
+ }
+
+ if (thing->isMarkedGray()) {
+ // There shouldn't be anything marked gray unless we're on the main thread.
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime));
+ UnmarkGrayGCThingRecursively(thing);
+ }
+}
+
+MOZ_ALWAYS_INLINE void ReadBarrierImpl(Cell* thing) {
+ MOZ_ASSERT(!CurrentThreadIsGCMarking());
+ if (thing->isTenured()) {
+ ReadBarrierImpl(&thing->asTenured());
+ }
+}
+
+MOZ_ALWAYS_INLINE void PreWriteBarrierImpl(TenuredCell* thing) {
+ MOZ_ASSERT(!CurrentThreadIsIonCompiling());
+ MOZ_ASSERT(!CurrentThreadIsGCMarking());
+
+ if (!thing) {
+ return;
+ }
+
+ // Barriers can be triggered on the main thread while collecting, but are
+ // disabled. For example, this happens when destroying HeapPtr wrappers.
+
+ JS::shadow::Zone* zone = thing->shadowZoneFromAnyThread();
+ if (!zone->needsIncrementalBarrier()) {
+ return;
+ }
+
+ // Barriers can be triggered off the main thread in two situations:
+ // - background finalization of HeapPtrs to the atoms zone
+ // - while we are verifying pre-barriers for a worker runtime
+ // The barrier is not required in either case.
+ bool checkThread = zone->isAtomsZone();
+#ifdef JS_GC_ZEAL
+ checkThread = checkThread || zone->isSelfHostingZone();
+#endif
+ JSRuntime* runtime = thing->runtimeFromAnyThread();
+ if (checkThread && !CurrentThreadCanAccessRuntime(runtime)) {
+ MOZ_ASSERT(CurrentThreadIsGCFinalizing() ||
+ RuntimeIsVerifyingPreBarriers(runtime));
+ return;
+ }
+
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime));
+ MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(zone));
+ Cell* tmp = thing;
+ TraceManuallyBarrieredGenericPointerEdge(zone->barrierTracer(), &tmp,
+ "pre barrier");
+ MOZ_ASSERT(tmp == thing);
+}
+
+MOZ_ALWAYS_INLINE void PreWriteBarrierImpl(Cell* thing) {
+ MOZ_ASSERT(!CurrentThreadIsGCMarking());
+ if (thing && thing->isTenured()) {
+ PreWriteBarrierImpl(&thing->asTenured());
+ }
+}
+
+template <typename T>
+MOZ_ALWAYS_INLINE void PreWriteBarrier(T* thing) {
+ static_assert(std::is_base_of_v<Cell, T>);
+ static_assert(!std::is_same_v<Cell, T> && !std::is_same_v<TenuredCell, T>);
+
+ if (thing && !thing->isPermanentAndMayBeShared()) {
+ PreWriteBarrierImpl(thing);
+ }
+}
+
+#ifdef DEBUG
+
+/* static */ void Cell::assertThingIsNotGray(Cell* cell) {
+ JS::AssertCellIsNotGray(cell);
+}
+
+bool Cell::isAligned() const {
+ if (!isTenured()) {
+ return true;
+ }
+ return asTenured().isAligned();
+}
+
+bool TenuredCell::isAligned() const {
+ return Arena::isAligned(address(), arena()->getThingSize());
+}
+
+#endif
+
+// Base class for nursery-allocatable GC things that have 32-bit length and
+// 32-bit flags (currently JSString and BigInt).
+//
+// This tries to store both in Cell::header_, but if that isn't large enough the
+// length is stored separately.
+//
+//         32       0
+// ------------------
+// | Length | Flags |
+// ------------------
+//
+// The low bits of the flags word (see CellFlagBitsReservedForGC) are reserved
+// for GC. Derived classes must ensure they don't use these flags for non-GC
+// purposes.
+class alignas(gc::CellAlignBytes) CellWithLengthAndFlags : public Cell {
+#if JS_BITS_PER_WORD == 32
+ // Additional storage for length if |header_| is too small to fit both.
+ uint32_t length_;
+#endif
+
+ protected:
+ uint32_t headerLengthField() const {
+#if JS_BITS_PER_WORD == 32
+ return length_;
+#else
+ return uint32_t(header_ >> 32);
+#endif
+ }
+
+ uint32_t headerFlagsField() const { return uint32_t(header_); }
+
+ void setHeaderFlagBit(uint32_t flag) { header_ |= uintptr_t(flag); }
+ void clearHeaderFlagBit(uint32_t flag) { header_ &= ~uintptr_t(flag); }
+ void toggleHeaderFlagBit(uint32_t flag) { header_ ^= uintptr_t(flag); }
+
+ void setHeaderLengthAndFlags(uint32_t len, uint32_t flags) {
+#if JS_BITS_PER_WORD == 32
+ header_ = flags;
+ length_ = len;
+#else
+ header_ = (uint64_t(len) << 32) | uint64_t(flags);
+#endif
+ }
+
+ // Subclasses can store temporary data in the flags word. This is not GC safe
+ // and users must ensure flags/length are never checked (including by asserts)
+ // while this data is stored. Use of this method is strongly discouraged!
+ void setTemporaryGCUnsafeData(uintptr_t data) { header_ = data; }
+
+ // To get the data back, the caller must provide values that safely
+ // re-initialize the clobbered length and flags.
+ uintptr_t unsetTemporaryGCUnsafeData(uint32_t len, uint32_t flags) {
+ uintptr_t data = header_;
+ setHeaderLengthAndFlags(len, flags);
+ return data;
+ }
+
+ public:
+ // Returns the offset of header_. JIT code should use offsetOfHeaderFlags
+ // below.
+ static constexpr size_t offsetOfRawHeaderFlagsField() {
+ return offsetof(CellWithLengthAndFlags, header_);
+ }
+
+ // Offsets for direct field access from jit code. A number of places directly
+ // access the 32-bit length and flags fields, so do endian trickery here.
+#if JS_BITS_PER_WORD == 32
+ static constexpr size_t offsetOfHeaderFlags() {
+ return offsetof(CellWithLengthAndFlags, header_);
+ }
+ static constexpr size_t offsetOfHeaderLength() {
+ return offsetof(CellWithLengthAndFlags, length_);
+ }
+#elif MOZ_LITTLE_ENDIAN()
+ static constexpr size_t offsetOfHeaderFlags() {
+ return offsetof(CellWithLengthAndFlags, header_);
+ }
+ static constexpr size_t offsetOfHeaderLength() {
+ return offsetof(CellWithLengthAndFlags, header_) + sizeof(uint32_t);
+ }
+#else
+ static constexpr size_t offsetOfHeaderFlags() {
+ return offsetof(CellWithLengthAndFlags, header_) + sizeof(uint32_t);
+ }
+ static constexpr size_t offsetOfHeaderLength() {
+ return offsetof(CellWithLengthAndFlags, header_);
+ }
+#endif
+};
+
+// Base class for non-nursery-allocatable GC things that allows storing a non-GC
+// thing pointer in the first word.
+//
+// The low bits of the word (see CellFlagBitsReservedForGC) are reserved for GC.
+template <class PtrT>
+class alignas(gc::CellAlignBytes) TenuredCellWithNonGCPointer
+ : public TenuredCell {
+ static_assert(!std::is_pointer_v<PtrT>,
+ "PtrT should be the type of the referent, not of the pointer");
+ static_assert(
+ !std::is_base_of_v<Cell, PtrT>,
+ "Don't use TenuredCellWithNonGCPointer for pointers to GC things");
+
+ protected:
+ TenuredCellWithNonGCPointer() = default;
+ explicit TenuredCellWithNonGCPointer(PtrT* initial) {
+ uintptr_t data = uintptr_t(initial);
+ MOZ_ASSERT((data & RESERVED_MASK) == 0);
+ header_ = data;
+ }
+
+ PtrT* headerPtr() const {
+ // Currently we never observe any flags set here because this base class is
+ // only used for JSObject (for which the nursery kind flags are always
+ // clear) or GC things that are always tenured (for which the nursery kind
+ // flags are also always clear). This means we don't need to use masking to
+ // get and set the pointer.
+ MOZ_ASSERT(flags() == 0);
+ return reinterpret_cast<PtrT*>(uintptr_t(header_));
+ }
+
+ void setHeaderPtr(PtrT* newValue) {
+ // As above, no flags are expected to be set here.
+ uintptr_t data = uintptr_t(newValue);
+ MOZ_ASSERT(flags() == 0);
+ MOZ_ASSERT((data & RESERVED_MASK) == 0);
+ header_ = data;
+ }
+
+ public:
+ static constexpr size_t offsetOfHeaderPtr() {
+ return offsetof(TenuredCellWithNonGCPointer, header_);
+ }
+};
+
+// Base class for GC things that have a tenured GC pointer as their first word.
+//
+// The low bits of the first word (see CellFlagBitsReservedForGC) are reserved
+// for GC.
+//
+// This includes a pre-write barrier when the pointer is updated. No post
+// barrier
+// is necessary as the pointer is always tenured.
+template <class BaseCell, class PtrT>
+class alignas(gc::CellAlignBytes) CellWithTenuredGCPointer : public BaseCell {
+ static void staticAsserts() {
+ // These static asserts are not in class scope because the PtrT may not be
+ // defined when this class template is instantiated.
+ static_assert(
+ std::is_same_v<BaseCell, Cell> || std::is_same_v<BaseCell, TenuredCell>,
+ "BaseCell must be either Cell or TenuredCell");
+ static_assert(
+ !std::is_pointer_v<PtrT>,
+ "PtrT should be the type of the referent, not of the pointer");
+ static_assert(
+ std::is_base_of_v<Cell, PtrT>,
+ "Only use CellWithTenuredGCPointer for pointers to GC things");
+ }
+
+ protected:
+ CellWithTenuredGCPointer() = default;
+ explicit CellWithTenuredGCPointer(PtrT* initial) { initHeaderPtr(initial); }
+
+ void initHeaderPtr(PtrT* initial) {
+ MOZ_ASSERT(!IsInsideNursery(initial));
+ uintptr_t data = uintptr_t(initial);
+ MOZ_ASSERT((data & Cell::RESERVED_MASK) == 0);
+ this->header_ = data;
+ }
+
+ void setHeaderPtr(PtrT* newValue) {
+ // As above, no flags are expected to be set here.
+ MOZ_ASSERT(!IsInsideNursery(newValue));
+ PreWriteBarrier(headerPtr());
+ unbarrieredSetHeaderPtr(newValue);
+ }
+
+ public:
+ PtrT* headerPtr() const {
+ // Currently we never observe any flags set here because this base class is
+ // only used for GC things that are always tenured (for which the nursery
+ // kind flags are also always clear). This means we don't need to use
+ // masking to get and set the pointer.
+ staticAsserts();
+ MOZ_ASSERT(this->flags() == 0);
+ return reinterpret_cast<PtrT*>(uintptr_t(this->header_));
+ }
+
+ void unbarrieredSetHeaderPtr(PtrT* newValue) {
+ uintptr_t data = uintptr_t(newValue);
+ MOZ_ASSERT(this->flags() == 0);
+ MOZ_ASSERT((data & Cell::RESERVED_MASK) == 0);
+ this->header_ = data;
+ }
+
+ static constexpr size_t offsetOfHeaderPtr() {
+ return offsetof(CellWithTenuredGCPointer, header_);
+ }
+};
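+
+/*
+ * Illustrative sketch (not part of the original header): a GC thing whose
+ * first word holds a tenured GC pointer, with the reserved low bits left
+ * clear. |MyShape| and |MyBaseShape| are hypothetical.
+ *
+ *   class MyShape : public CellWithTenuredGCPointer<TenuredCell, MyBaseShape> {
+ *    public:
+ *     MyBaseShape* base() const { return headerPtr(); }
+ *     void setBase(MyBaseShape* base) { setHeaderPtr(base); }  // Pre-barrier.
+ *   };
+ */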
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_Cell_h */
diff --git a/js/src/gc/ClearEdgesTracer.h b/js/src/gc/ClearEdgesTracer.h
new file mode 100644
index 0000000000..efed496138
--- /dev/null
+++ b/js/src/gc/ClearEdgesTracer.h
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_ClearEdgesTracer_h
+#define gc_ClearEdgesTracer_h
+
+#include "js/TracingAPI.h"
+
+namespace js {
+namespace gc {
+
+struct ClearEdgesTracer final : public GenericTracer {
+ explicit ClearEdgesTracer(JSRuntime* rt);
+ ClearEdgesTracer();
+
+ template <typename T>
+ inline T* onEdge(T* thing);
+
+ JSObject* onObjectEdge(JSObject* obj) override;
+ JSString* onStringEdge(JSString* str) override;
+ JS::Symbol* onSymbolEdge(JS::Symbol* sym) override;
+ JS::BigInt* onBigIntEdge(JS::BigInt* bi) override;
+ js::BaseScript* onScriptEdge(js::BaseScript* script) override;
+ js::Shape* onShapeEdge(js::Shape* shape) override;
+ js::ObjectGroup* onObjectGroupEdge(js::ObjectGroup* group) override;
+ js::BaseShape* onBaseShapeEdge(js::BaseShape* base) override;
+ js::jit::JitCode* onJitCodeEdge(js::jit::JitCode* code) override;
+ js::Scope* onScopeEdge(js::Scope* scope) override;
+ js::RegExpShared* onRegExpSharedEdge(js::RegExpShared* shared) override;
+};
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_ClearEdgesTracer_h
diff --git a/js/src/gc/FinalizationRegistry.cpp b/js/src/gc/FinalizationRegistry.cpp
new file mode 100644
index 0000000000..1d1575972f
--- /dev/null
+++ b/js/src/gc/FinalizationRegistry.cpp
@@ -0,0 +1,140 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Finalization registry GC implementation.
+ */
+
+#include "builtin/FinalizationRegistryObject.h"
+#include "gc/GCRuntime.h"
+#include "gc/Zone.h"
+#include "vm/JSContext.h"
+
+#include "gc/PrivateIterators-inl.h"
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+bool GCRuntime::addFinalizationRegistry(JSContext* cx,
+ FinalizationRegistryObject* registry) {
+ if (!cx->zone()->finalizationRegistries().put(registry)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool GCRuntime::registerWithFinalizationRegistry(JSContext* cx,
+ HandleObject target,
+ HandleObject record) {
+ MOZ_ASSERT(!IsCrossCompartmentWrapper(target));
+ MOZ_ASSERT(
+ UncheckedUnwrapWithoutExpose(record)->is<FinalizationRecordObject>());
+ MOZ_ASSERT(target->compartment() == record->compartment());
+
+ auto& map = target->zone()->finalizationRecordMap();
+ auto ptr = map.lookupForAdd(target);
+ if (!ptr) {
+ if (!map.add(ptr, target, FinalizationRecordVector(target->zone()))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+ if (!ptr->value().append(record)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ return true;
+}
+
+void GCRuntime::markFinalizationRegistryRoots(JSTracer* trc) {
+ // All finalization records stored in the zone maps are marked as roots.
+ // Records can be removed from these maps during sweeping in which case they
+ // die in the next collection.
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ Zone::FinalizationRecordMap& map = zone->finalizationRecordMap();
+ for (Zone::FinalizationRecordMap::Enum e(map); !e.empty(); e.popFront()) {
+ e.front().value().trace(trc);
+ }
+ }
+}
+
+static FinalizationRecordObject* UnwrapFinalizationRecord(JSObject* obj) {
+ obj = UncheckedUnwrapWithoutExpose(obj);
+ if (!obj->is<FinalizationRecordObject>()) {
+ MOZ_ASSERT(JS_IsDeadWrapper(obj));
+ // CCWs between the compartments have been nuked. The
+ // FinalizationRegistry's callback doesn't run in this case.
+ return nullptr;
+ }
+ return &obj->as<FinalizationRecordObject>();
+}
+
+void GCRuntime::sweepFinalizationRegistries(Zone* zone) {
+ // Sweep finalization registry data, queue finalization records for cleanup
+ // for entries whose target is dying, and remove those entries from the map.
+
+ Zone::FinalizationRegistrySet& set = zone->finalizationRegistries();
+ for (Zone::FinalizationRegistrySet::Enum e(set); !e.empty(); e.popFront()) {
+ if (IsAboutToBeFinalized(&e.mutableFront())) {
+ e.front()->as<FinalizationRegistryObject>().queue()->setHasRegistry(
+ false);
+ e.removeFront();
+ } else {
+ e.front()->as<FinalizationRegistryObject>().sweep();
+ }
+ }
+
+ Zone::FinalizationRecordMap& map = zone->finalizationRecordMap();
+ for (Zone::FinalizationRecordMap::Enum e(map); !e.empty(); e.popFront()) {
+ FinalizationRecordVector& records = e.front().value();
+
+ // Update any pointers moved by the GC.
+ records.sweep();
+
+ // Sweep finalization records and remove records for:
+ records.eraseIf([](JSObject* obj) {
+ FinalizationRecordObject* record = UnwrapFinalizationRecord(obj);
+ return !record || // Nuked CCW to record.
+ !record->isActive() || // Unregistered record.
+ !record->queue()->hasRegistry(); // Dead finalization registry.
+ });
+
+ // Queue finalization records for targets that are dying.
+ if (IsAboutToBeFinalized(&e.front().mutableKey())) {
+ for (JSObject* obj : records) {
+ FinalizationRecordObject* record = UnwrapFinalizationRecord(obj);
+ FinalizationQueueObject* queue = record->queue();
+ queue->queueRecordToBeCleanedUp(record);
+ queueFinalizationRegistryForCleanup(queue);
+ }
+ e.removeFront();
+ }
+ }
+}
+
+void GCRuntime::queueFinalizationRegistryForCleanup(
+ FinalizationQueueObject* queue) {
+ // Prod the embedding to call us back later to run the finalization callbacks,
+ // if necessary.
+
+ if (queue->isQueuedForCleanup()) {
+ return;
+ }
+
+ // Derive the incumbent global by unwrapping the incumbent global object and
+ // then getting its global.
+ JSObject* object = UncheckedUnwrapWithoutExpose(queue->incumbentObject());
+ MOZ_ASSERT(object);
+ GlobalObject* incumbentGlobal = &object->nonCCWGlobal();
+
+ callHostCleanupFinalizationRegistryCallback(queue->doCleanupFunction(),
+ incumbentGlobal);
+
+ queue->setQueuedForCleanup(true);
+}
diff --git a/js/src/gc/FindSCCs.h b/js/src/gc/FindSCCs.h
new file mode 100644
index 0000000000..403b46db71
--- /dev/null
+++ b/js/src/gc/FindSCCs.h
@@ -0,0 +1,204 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_FindSCCs_h
+#define gc_FindSCCs_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include <algorithm> // std::min
+#include <stdint.h> // uintptr_t
+
+#include "js/AllocPolicy.h" // js::SystemAllocPolicy
+#include "js/friend/StackLimits.h" // JS_CHECK_STACK_SIZE
+#include "js/HashTable.h" // js::HashSet, js::DefaultHasher
+
+namespace js {
+namespace gc {
+
+template <typename Node>
+struct GraphNodeBase {
+ using NodeSet =
+ js::HashSet<Node*, js::DefaultHasher<Node*>, js::SystemAllocPolicy>;
+
+ NodeSet gcGraphEdges;
+ Node* gcNextGraphNode = nullptr;
+ Node* gcNextGraphComponent = nullptr;
+ unsigned gcDiscoveryTime = 0;
+ unsigned gcLowLink = 0;
+
+ Node* nextNodeInGroup() const {
+ if (gcNextGraphNode &&
+ gcNextGraphNode->gcNextGraphComponent == gcNextGraphComponent) {
+ return gcNextGraphNode;
+ }
+ return nullptr;
+ }
+
+ Node* nextGroup() const { return gcNextGraphComponent; }
+};
+
+/*
+ * Find the strongly connected components of a graph using Tarjan's algorithm,
+ * and return them in topological order.
+ *
+ * Nodes derive from GraphNodeBase and add target edge pointers to
+ * sourceNode.gcGraphEdges to describe the graph:
+ *
+ * struct MyGraphNode : public GraphNodeBase<MyGraphNode>
+ * {
+ * ...
+ * }
+ *
+ * MyGraphNode node1, node2, node3;
+ * node1.gcGraphEdges.put(node2); // Error checking elided.
+ * node2.gcGraphEdges.put(node3);
+ * node3.gcGraphEdges.put(node2);
+ *
+ * ComponentFinder<MyGraphNode> finder(stackLimit); // Native stack limit.
+ * finder.addNode(node1);
+ * finder.addNode(node2);
+ * finder.addNode(node3);
+ * MyGraphNode* result = finder.getResultsList();
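+ *
+ * // Walk the resulting groups in topological order (illustrative
+ * // continuation, not part of the original comment):
+ * for (MyGraphNode* group = result; group; group = group->nextGroup()) {
+ *   for (MyGraphNode* node = group; node; node = node->nextNodeInGroup()) {
+ *     // Process |node|.
+ *   }
+ * }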
+ */
+
+template <typename Node>
+class ComponentFinder {
+ public:
+ explicit ComponentFinder(uintptr_t sl) : stackLimit(sl) {}
+
+ ~ComponentFinder() {
+ MOZ_ASSERT(!stack);
+ MOZ_ASSERT(!firstComponent);
+ }
+
+ /* Forces all nodes to be added to a single component. */
+ void useOneComponent() { stackFull = true; }
+
+ void addNode(Node* v) {
+ if (v->gcDiscoveryTime == Undefined) {
+ MOZ_ASSERT(v->gcLowLink == Undefined);
+ processNode(v);
+ }
+ }
+
+ Node* getResultsList() {
+ if (stackFull) {
+ /*
+ * All nodes after the stack overflow are in |stack|. Put them all in
+ * one big component of their own.
+ */
+ Node* firstGoodComponent = firstComponent;
+ for (Node* v = stack; v; v = stack) {
+ stack = v->gcNextGraphNode;
+ v->gcNextGraphComponent = firstGoodComponent;
+ v->gcNextGraphNode = firstComponent;
+ firstComponent = v;
+ }
+ stackFull = false;
+ }
+
+ MOZ_ASSERT(!stack);
+
+ Node* result = firstComponent;
+ firstComponent = nullptr;
+
+ for (Node* v = result; v; v = v->gcNextGraphNode) {
+ v->gcDiscoveryTime = Undefined;
+ v->gcLowLink = Undefined;
+ }
+
+ return result;
+ }
+
+ static void mergeGroups(Node* first) {
+ for (Node* v = first; v; v = v->gcNextGraphNode) {
+ v->gcNextGraphComponent = nullptr;
+ }
+ }
+
+ private:
+ // Constant used to indicate an unprocessed vertex.
+ static const unsigned Undefined = 0;
+
+ // Constant used to indicate a processed vertex that is no longer on the
+ // stack.
+ static const unsigned Finished = (unsigned)-1;
+
+ void addEdgeTo(Node* w) {
+ if (w->gcDiscoveryTime == Undefined) {
+ processNode(w);
+ cur->gcLowLink = std::min(cur->gcLowLink, w->gcLowLink);
+ } else if (w->gcDiscoveryTime != Finished) {
+ cur->gcLowLink = std::min(cur->gcLowLink, w->gcDiscoveryTime);
+ }
+ }
+
+ void processNode(Node* v) {
+ v->gcDiscoveryTime = clock;
+ v->gcLowLink = clock;
+ ++clock;
+
+ v->gcNextGraphNode = stack;
+ stack = v;
+
+ int stackDummy;
+ if (stackFull || !JS_CHECK_STACK_SIZE(stackLimit, &stackDummy)) {
+ stackFull = true;
+ return;
+ }
+
+ Node* old = cur;
+ cur = v;
+ for (auto r = cur->gcGraphEdges.all(); !r.empty(); r.popFront()) {
+ addEdgeTo(r.front());
+ }
+ cur = old;
+
+ if (stackFull) {
+ return;
+ }
+
+ if (v->gcLowLink == v->gcDiscoveryTime) {
+ Node* nextComponent = firstComponent;
+ Node* w;
+ do {
+ MOZ_ASSERT(stack);
+ w = stack;
+ stack = w->gcNextGraphNode;
+
+ /*
+ * Record that the element is no longer on the stack by setting the
+ * discovery time to a special value that's not Undefined.
+ */
+ w->gcDiscoveryTime = Finished;
+
+ /* Figure out which group we're in. */
+ w->gcNextGraphComponent = nextComponent;
+
+ /*
+ * Prepend the component to the beginning of the output list to
+ * reverse the list and achieve the desired order.
+ */
+ w->gcNextGraphNode = firstComponent;
+ firstComponent = w;
+ } while (w != v);
+ }
+ }
+
+ private:
+ unsigned clock = 1;
+ Node* stack = nullptr;
+ Node* firstComponent = nullptr;
+ Node* cur = nullptr;
+ uintptr_t stackLimit;
+ bool stackFull = false;
+};
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_FindSCCs_h */
diff --git a/js/src/gc/FreeOp-inl.h b/js/src/gc/FreeOp-inl.h
new file mode 100644
index 0000000000..da325a0aeb
--- /dev/null
+++ b/js/src/gc/FreeOp-inl.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_FreeOp_inl_h
+#define gc_FreeOp_inl_h
+
+#include "gc/FreeOp.h"
+
+#include "gc/ZoneAllocator.h"
+#include "js/RefCounted.h"
+
+inline void JSFreeOp::free_(Cell* cell, void* p, size_t nbytes, MemoryUse use) {
+ if (p) {
+ removeCellMemory(cell, nbytes, use);
+ js_free(p);
+ }
+}
+
+template <class T>
+inline void JSFreeOp::release(Cell* cell, T* p, size_t nbytes, MemoryUse use) {
+ if (p) {
+ removeCellMemory(cell, nbytes, use);
+ p->Release();
+ }
+}
+
+inline void JSFreeOp::removeCellMemory(Cell* cell, size_t nbytes,
+ MemoryUse use) {
+ RemoveCellMemory(cell, nbytes, use, isCollecting());
+}
+
+#endif // gc_FreeOp_inl_h
diff --git a/js/src/gc/FreeOp.h b/js/src/gc/FreeOp.h
new file mode 100644
index 0000000000..f33571d856
--- /dev/null
+++ b/js/src/gc/FreeOp.h
@@ -0,0 +1,153 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_FreeOp_h
+#define gc_FreeOp_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include "jstypes.h" // JS_PUBLIC_API
+#include "gc/GCEnum.h" // js::MemoryUse
+#include "jit/ExecutableAllocator.h" // jit::JitPoisonRangeVector
+#include "js/AllocPolicy.h" // SystemAllocPolicy
+#include "js/MemoryFunctions.h" // JSFreeOp
+#include "js/Utility.h" // AutoEnterOOMUnsafeRegion, js_free
+#include "js/Vector.h" // js::Vector
+
+struct JS_PUBLIC_API JSRuntime;
+
+namespace js {
+namespace gc {
+class AutoSetThreadIsPerformingGC;
+} // namespace gc
+} // namespace js
+
+/*
+ * A JSFreeOp can do one thing: free memory. For convenience, it also has
+ * delete_ methods that call destructors before freeing.
+ *
+ * JSFreeOp is passed to finalizers and other sweep-phase hooks so that we do
+ * not need to pass a JSContext to those hooks.
+ */
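+
+/*
+ * Illustrative sketch (not part of the original header): a finalizer that
+ * frees embedder data whose size was registered against the object. |MyData|
+ * and |kUse| (a js::MemoryUse value) are hypothetical.
+ *
+ *   static void MyFinalize(JSFreeOp* fop, JSObject* obj) {
+ *     if (auto* data = static_cast<MyData*>(JS_GetPrivate(obj))) {
+ *       fop->delete_(obj, data, kUse);
+ *     }
+ *   }
+ */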
+class JSFreeOp {
+ using Cell = js::gc::Cell;
+ using MemoryUse = js::MemoryUse;
+
+ JSRuntime* runtime_;
+
+ js::jit::JitPoisonRangeVector jitPoisonRanges;
+
+ const bool isDefault;
+ bool isCollecting_;
+
+ friend class js::gc::AutoSetThreadIsPerformingGC;
+
+ public:
+ explicit JSFreeOp(JSRuntime* maybeRuntime, bool isDefault = false);
+ ~JSFreeOp();
+
+ JSRuntime* runtime() const {
+ MOZ_ASSERT(runtime_);
+ return runtime_;
+ }
+
+ bool onMainThread() const { return runtime_ != nullptr; }
+
+ bool maybeOnHelperThread() const {
+ // Sometimes background finalization happens on the main thread so
+ // runtime_ being null doesn't always mean we are off thread.
+ return !runtime_;
+ }
+
+ bool isDefaultFreeOp() const { return isDefault; }
+ bool isCollecting() const { return isCollecting_; }
+
+ // Deprecated. Where possible, memory should be tracked against the owning GC
+ // thing by calling js::AddCellMemory and the memory freed with free_() below.
+ void freeUntracked(void* p) { js_free(p); }
+
+ // Free memory associated with a GC thing and update the memory accounting.
+ //
+ // The memory should have been associated with the GC thing using
+ // js::InitReservedSlot or js::InitObjectPrivate, or possibly
+ // js::AddCellMemory.
+ void free_(Cell* cell, void* p, size_t nbytes, MemoryUse use);
+
+ bool appendJitPoisonRange(const js::jit::JitPoisonRange& range) {
+ // JSFreeOps other than the defaultFreeOp() are constructed on the stack,
+ // and won't hold onto the pointers to free indefinitely.
+ MOZ_ASSERT(!isDefaultFreeOp());
+
+ return jitPoisonRanges.append(range);
+ }
+
+ // Deprecated. Where possible, memory should be tracked against the owning GC
+ // thing by calling js::AddCellMemory and the memory freed with delete_()
+ // below.
+ template <class T>
+ void deleteUntracked(T* p) {
+ if (p) {
+ p->~T();
+ js_free(p);
+ }
+ }
+
+ // Delete a C++ object that was associated with a GC thing and update the
+ // memory accounting. The size is determined by the type T.
+ //
+ // The memory should have been associated with the GC thing using
+ // js::InitReservedSlot or js::InitObjectPrivate, or possibly
+ // js::AddCellMemory.
+ template <class T>
+ void delete_(Cell* cell, T* p, MemoryUse use) {
+ delete_(cell, p, sizeof(T), use);
+ }
+
+ // Delete a C++ object that was associated with a GC thing and update the
+ // memory accounting.
+ //
+ // The memory should have been associated with the GC thing using
+ // js::InitReservedSlot or js::InitObjectPrivate, or possibly
+ // js::AddCellMemory.
+ template <class T>
+ void delete_(Cell* cell, T* p, size_t nbytes, MemoryUse use) {
+ if (p) {
+ p->~T();
+ free_(cell, p, nbytes, use);
+ }
+ }
+
+ // Release a RefCounted object that was associated with a GC thing and update
+ // the memory accounting.
+ //
+ // The memory should have been associated with the GC thing using
+ // js::InitReservedSlot or js::InitObjectPrivate, or possibly
+ // js::AddCellMemory.
+ //
+ // This counts the memory once per association with a GC thing. It's not
+ // expected that the same object is associated with more than one GC thing in
+ // each zone. If this is the case then some other form of accounting would be
+ // more appropriate.
+ template <class T>
+ void release(Cell* cell, T* p, MemoryUse use) {
+ release(cell, p, sizeof(T), use);
+ }
+
+  // Release a RefCounted object that was associated with a GC thing and
+ // update the memory accounting.
+ //
+ // The memory should have been associated with the GC thing using
+ // js::InitReservedSlot or js::InitObjectPrivate, or possibly
+ // js::AddCellMemory.
+ template <class T>
+ void release(Cell* cell, T* p, size_t nbytes, MemoryUse use);
+
+  // Update the memory accounting for a GC thing whose associated memory was
+  // freed by some other method.
+ void removeCellMemory(Cell* cell, size_t nbytes, MemoryUse use);
+};
+
+#endif // gc_FreeOp_h
diff --git a/js/src/gc/GC-inl.h b/js/src/gc/GC-inl.h
new file mode 100644
index 0000000000..6b7c1ed30a
--- /dev/null
+++ b/js/src/gc/GC-inl.h
@@ -0,0 +1,341 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GC_inl_h
+#define gc_GC_inl_h
+
+#include "gc/GC.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+
+#include "gc/IteratorUtils.h"
+#include "gc/Zone.h"
+#include "vm/Runtime.h"
+
+#include "gc/ArenaList-inl.h"
+
+namespace js {
+namespace gc {
+
+class AutoAssertEmptyNursery;
+
+class ArenaListIter {
+ Arena* arena;
+
+ public:
+ explicit ArenaListIter(Arena* head) : arena(head) {}
+ bool done() const { return !arena; }
+ Arena* get() const {
+ MOZ_ASSERT(!done());
+ return arena;
+ }
+ void next() {
+ MOZ_ASSERT(!done());
+ arena = arena->next;
+ }
+};
+
+class ArenaIter : public ChainedIterator<ArenaListIter, 4> {
+ public:
+ ArenaIter(JS::Zone* zone, AllocKind kind)
+ : ChainedIterator(zone->arenas.getFirstArena(kind),
+ zone->arenas.getFirstArenaToSweep(kind),
+ zone->arenas.getFirstSweptArena(kind),
+ zone->arenas.getFirstNewArenaInMarkPhase(kind)) {}
+};
+
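+// Iterates over the allocated (in-use) cells of a single arena, skipping
+// cells that are on the arena's free list. Typical usage:
+//
+//   for (ArenaCellIter cell(arena); !cell.done(); cell.next()) {
+//     // |cell| converts to TenuredCell* here.
+//   }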
+class ArenaCellIter {
+ size_t firstThingOffset;
+ size_t thingSize;
+ Arena* arenaAddr;
+ FreeSpan span;
+ uint_fast16_t thing;
+ mozilla::DebugOnly<JS::TraceKind> traceKind;
+
+  // Upon entry, |thing| may point to any thing (free or used). This advances
+  // |thing| to the first used thing, which may be |thing| itself.
+ void settle() {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(thing);
+ // Note: if |span| is empty, this test will fail, which is what we want
+ // -- |span| being empty means that we're past the end of the last free
+ // thing, all the remaining things in the arena are used, and we'll
+ // never need to move forward.
+ if (thing == span.first) {
+ thing = span.last + thingSize;
+ span = *span.nextSpan(arenaAddr);
+ }
+ }
+
+ public:
+ explicit ArenaCellIter(Arena* arena) {
+ MOZ_ASSERT(arena);
+ AllocKind kind = arena->getAllocKind();
+ firstThingOffset = Arena::firstThingOffset(kind);
+ thingSize = Arena::thingSize(kind);
+ traceKind = MapAllocToTraceKind(kind);
+ arenaAddr = arena;
+ span = *arena->getFirstFreeSpan();
+ thing = firstThingOffset;
+ settle();
+ }
+
+ bool done() const {
+ MOZ_ASSERT(thing <= ArenaSize);
+ return thing == ArenaSize;
+ }
+
+ TenuredCell* get() const {
+ MOZ_ASSERT(!done());
+ return reinterpret_cast<TenuredCell*>(uintptr_t(arenaAddr) + thing);
+ }
+
+ template <typename T>
+ T* as() const {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(JS::MapTypeToTraceKind<T>::kind == traceKind);
+ return reinterpret_cast<T*>(get());
+ }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ thing += thingSize;
+ if (thing < ArenaSize) {
+ settle();
+ }
+ }
+
+ operator TenuredCell*() const { return get(); }
+ TenuredCell* operator->() const { return get(); }
+};
+
+template <typename T>
+class ZoneAllCellIter;
+
+template <>
+class ZoneAllCellIter<TenuredCell> {
+ mozilla::Maybe<NestedIterator<ArenaIter, ArenaCellIter>> iter;
+ mozilla::Maybe<JS::AutoAssertNoGC> nogc;
+
+ protected:
+ // For use when a subclass wants to insert some setup before init().
+ ZoneAllCellIter() = default;
+
+ void init(JS::Zone* zone, AllocKind kind) {
+ MOZ_ASSERT_IF(IsNurseryAllocable(kind),
+ (zone->isAtomsZone() ||
+ zone->runtimeFromMainThread()->gc.nursery().isEmpty()));
+ initForTenuredIteration(zone, kind);
+ }
+
+ void initForTenuredIteration(JS::Zone* zone, AllocKind kind) {
+ JSRuntime* rt = zone->runtimeFromAnyThread();
+
+ // If called from outside a GC, ensure that the heap is in a state
+ // that allows us to iterate.
+ if (!JS::RuntimeHeapIsBusy()) {
+ // Assert that no GCs can occur while a ZoneAllCellIter is live.
+ nogc.emplace();
+ }
+
+ // We have a single-threaded runtime, so there's no need to protect
+ // against other threads iterating or allocating. However, we do have
+ // background finalization; we may have to wait for this to finish if
+ // it's currently active.
+ if (IsBackgroundFinalized(kind) &&
+ zone->arenas.needBackgroundFinalizeWait(kind)) {
+ rt->gc.waitBackgroundSweepEnd();
+ }
+ iter.emplace(zone, kind);
+ }
+
+ public:
+ ZoneAllCellIter(JS::Zone* zone, AllocKind kind) {
+ // If we are iterating a nursery-allocated kind then we need to
+ // evict first so that we can see all things.
+ if (IsNurseryAllocable(kind)) {
+ zone->runtimeFromMainThread()->gc.evictNursery();
+ }
+
+ init(zone, kind);
+ }
+
+ ZoneAllCellIter(JS::Zone* zone, AllocKind kind,
+ const js::gc::AutoAssertEmptyNursery&) {
+ // No need to evict the nursery. (This constructor is known statically
+ // to not GC.)
+ init(zone, kind);
+ }
+
+ bool done() const { return iter->done(); }
+
+ template <typename T>
+ T* get() const {
+ return iter->ref().as<T>();
+ }
+
+ TenuredCell* getCell() const { return iter->get(); }
+
+ void next() { iter->next(); }
+};
+
+/* clang-format off */
+//
+// Iterator over the cells in a Zone, where the GC type (JSString, JSObject) is
+// known, for a single AllocKind. Example usages:
+//
+// for (auto obj = zone->cellIter<JSObject>(AllocKind::OBJECT0); !obj.done(); obj.next()) {
+// ...
+// }
+//
+// for (auto script = zone->cellIter<JSScript>(); !script.done(); script.next()) {
+// f(script->code());
+// }
+//
+// As this code demonstrates, you can use 'script' as if it were a JSScript*.
+// Its actual type is ZoneAllCellIter<JSScript>, but for most purposes it will
+// autoconvert to JSScript*.
+//
+// Note that in the JSScript case, ZoneAllCellIter is able to infer the AllocKind
+// from the type 'JSScript', whereas in the JSObject case, the kind must be
+// given (because there are multiple AllocKinds for objects).
+//
+// Also, the static rooting hazard analysis knows that the JSScript case will
+// not GC during construction. The JSObject case needs to GC, or more precisely
+// to empty the nursery and clear out the store buffer, so that it can see all
+// objects to iterate over (the nursery is not iterable) and remove the
+// possibility of having pointers from the store buffer to data hanging off
+// stuff we're iterating over that we are going to delete. (The latter should
+// not be a problem, since such instances should be using RelocatablePtr to
+// remove themselves from the store buffer on deletion, but currently for
+// subtle reasons that isn't good enough.)
+//
+// If the iterator is used within a GC, then there is no need to evict the
+// nursery (again). You may select a variant that will skip the eviction either
+// by specializing on a GCType that is never allocated in the nursery, or
+// explicitly by passing in a trailing AutoAssertEmptyNursery argument.
+//
+// NOTE: This class can return items that are about to be swept/finalized.
+// You must not keep pointers to such items across GCs. Use
+// ZoneCellIter below to filter these out.
+//
+// NOTE: This class also does not read barrier returned items, so may return
+// gray cells. You must not store such items anywhere on the heap without
+// gray-unmarking them. Use ZoneCellIter to automatically unmark them.
+//
+/* clang-format on */
+template <typename GCType>
+class ZoneAllCellIter : public ZoneAllCellIter<TenuredCell> {
+ public:
+ // Non-nursery allocated (equivalent to having an entry in
+ // MapTypeToFinalizeKind). The template declaration here is to discard this
+ // constructor overload if MapTypeToFinalizeKind<GCType>::kind does not
+ // exist. Note that there will be no remaining overloads that will work,
+ // which makes sense given that you haven't specified which of the
+ // AllocKinds to use for GCType.
+ //
+ // If we later add a nursery allocable GCType with a single AllocKind, we
+ // will want to add an overload of this constructor that does the right
+  // thing (i.e., it empties the nursery before iterating).
+ explicit ZoneAllCellIter(JS::Zone* zone) : ZoneAllCellIter<TenuredCell>() {
+ init(zone, MapTypeToFinalizeKind<GCType>::kind);
+ }
+
+ // Non-nursery allocated, nursery is known to be empty: same behavior as
+ // above.
+ ZoneAllCellIter(JS::Zone* zone, const js::gc::AutoAssertEmptyNursery&)
+ : ZoneAllCellIter(zone) {}
+
+ // Arbitrary kind, which will be assumed to be nursery allocable (and
+ // therefore the nursery will be emptied before iterating.)
+ ZoneAllCellIter(JS::Zone* zone, AllocKind kind)
+ : ZoneAllCellIter<TenuredCell>(zone, kind) {}
+
+ // Arbitrary kind, which will be assumed to be nursery allocable, but the
+ // nursery is known to be empty already: same behavior as non-nursery types.
+ ZoneAllCellIter(JS::Zone* zone, AllocKind kind,
+ const js::gc::AutoAssertEmptyNursery& empty)
+ : ZoneAllCellIter<TenuredCell>(zone, kind, empty) {}
+
+ GCType* get() const { return ZoneAllCellIter<TenuredCell>::get<GCType>(); }
+ operator GCType*() const { return get(); }
+ GCType* operator->() const { return get(); }
+};
+
+// Like the above class but filter out cells that are about to be finalized.
+// Also, read barrier all cells returned (unless the Unbarriered variants are
+// used) to prevent gray cells from escaping.
+template <typename T>
+class ZoneCellIter : protected ZoneAllCellIter<T> {
+ using Base = ZoneAllCellIter<T>;
+
+ public:
+ /*
+ * The same constructors as above.
+ */
+ explicit ZoneCellIter(JS::Zone* zone) : ZoneAllCellIter<T>(zone) {
+ skipDying();
+ }
+ ZoneCellIter(JS::Zone* zone, const js::gc::AutoAssertEmptyNursery& empty)
+ : ZoneAllCellIter<T>(zone, empty) {
+ skipDying();
+ }
+ ZoneCellIter(JS::Zone* zone, AllocKind kind)
+ : ZoneAllCellIter<T>(zone, kind) {
+ skipDying();
+ }
+ ZoneCellIter(JS::Zone* zone, AllocKind kind,
+ const js::gc::AutoAssertEmptyNursery& empty)
+ : ZoneAllCellIter<T>(zone, kind, empty) {
+ skipDying();
+ }
+
+ using Base::done;
+
+ void next() {
+ ZoneAllCellIter<T>::next();
+ skipDying();
+ }
+
+ TenuredCell* getCell() const {
+ TenuredCell* cell = Base::getCell();
+
+ // This can result in a new reference being created to an object that an
+ // ongoing incremental GC may find to be unreachable, so we may need a
+ // barrier here.
+ JSRuntime* rt = cell->runtimeFromAnyThread();
+ if (!JS::RuntimeHeapIsCollecting(rt->heapState())) {
+ JS::TraceKind traceKind = JS::MapTypeToTraceKind<T>::kind;
+ ExposeGCThingToActiveJS(JS::GCCellPtr(cell, traceKind));
+ }
+
+ return cell;
+ }
+
+ T* get() const { return reinterpret_cast<T*>(getCell()); }
+
+ TenuredCell* unbarrieredGetCell() const { return Base::getCell(); }
+ T* unbarrieredGet() const { return Base::get(); }
+ operator T*() const { return get(); }
+ T* operator->() const { return get(); }
+
+ private:
+ void skipDying() {
+ while (!ZoneAllCellIter<T>::done()) {
+ T* current = ZoneAllCellIter<T>::get();
+ if (!IsAboutToBeFinalizedUnbarriered(&current)) {
+ return;
+ }
+ ZoneAllCellIter<T>::next();
+ }
+ }
+};
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_GC_inl_h */
diff --git a/js/src/gc/GC.cpp b/js/src/gc/GC.cpp
new file mode 100644
index 0000000000..f280a2aa5b
--- /dev/null
+++ b/js/src/gc/GC.cpp
@@ -0,0 +1,9119 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * [SMDOC] Garbage Collector
+ *
+ * This code implements an incremental mark-and-sweep garbage collector, with
+ * most sweeping carried out in the background on a parallel thread.
+ *
+ * Full vs. zone GC
+ * ----------------
+ *
+ * The collector can collect all zones at once, or a subset. These types of
+ * collection are referred to as a full GC and a zone GC respectively.
+ *
+ * It is possible for an incremental collection that started out as a full GC to
+ * become a zone GC if new zones are created during the course of the
+ * collection.
+ *
+ * Incremental collection
+ * ----------------------
+ *
+ * For a collection to be carried out incrementally the following conditions
+ * must be met:
+ * - the collection must be run by calling js::GCSlice() rather than js::GC()
+ * - the GC parameter JSGC_INCREMENTAL_GC_ENABLED must be true.
+ *
+ * The last condition is an engine-internal mechanism to ensure that incremental
+ * collection is not carried out without the correct barriers being implemented.
+ * For more information see 'Incremental marking' below.
+ *
+ * If the collection is not incremental, all foreground activity happens inside
+ * a single call to GC() or GCSlice(). However the collection is not complete
+ * until the background sweeping activity has finished.
+ *
+ * An incremental collection proceeds as a series of slices, interleaved with
+ * mutator activity, i.e. running JavaScript code. Slices are limited by a time
+ * budget. The slice finishes as soon as possible after the requested time has
+ * passed.
+ *
+ * Collector states
+ * ----------------
+ *
+ * The collector proceeds through the following states, the current state being
+ * held in JSRuntime::gcIncrementalState:
+ *
+ * - Prepare - unmarks GC things, discards JIT code and other setup
+ * - MarkRoots - marks the stack and other roots
+ * - Mark - incrementally marks reachable things
+ * - Sweep - sweeps zones in groups and continues marking unswept zones
+ * - Finalize - performs background finalization, concurrent with mutator
+ * - Compact - incrementally compacts by zone
+ * - Decommit - performs background decommit and chunk removal
+ *
+ * Roots are marked in the first MarkRoots slice; this is the start of the GC
+ * proper. The following states can take place over one or more slices.
+ *
+ * In other words an incremental collection proceeds like this:
+ *
+ * Slice 1: Prepare: Starts background task to unmark GC things
+ *
+ * ... JS code runs, background unmarking finishes ...
+ *
+ * Slice 2: MarkRoots: Roots are pushed onto the mark stack.
+ * Mark: The mark stack is processed by popping an element,
+ * marking it, and pushing its children.
+ *
+ * ... JS code runs ...
+ *
+ * Slice 3: Mark: More mark stack processing.
+ *
+ * ... JS code runs ...
+ *
+ * Slice n-1: Mark: More mark stack processing.
+ *
+ * ... JS code runs ...
+ *
+ * Slice n: Mark: Mark stack is completely drained.
+ * Sweep: Select first group of zones to sweep and sweep them.
+ *
+ * ... JS code runs ...
+ *
+ * Slice n+1: Sweep: Mark objects in unswept zones that were newly
+ * identified as alive (see below). Then sweep more zone
+ * sweep groups.
+ *
+ * ... JS code runs ...
+ *
+ * Slice n+2: Sweep: Mark objects in unswept zones that were newly
+ * identified as alive. Then sweep more zones.
+ *
+ * ... JS code runs ...
+ *
+ * Slice m: Sweep: Sweeping is finished, and background sweeping
+ * started on the helper thread.
+ *
+ * ... JS code runs, remaining sweeping done on background thread ...
+ *
+ * When background sweeping finishes the GC is complete.
+ *
+ * Incremental marking
+ * -------------------
+ *
+ * Incremental collection requires close collaboration with the mutator (i.e.,
+ * JS code) to guarantee correctness.
+ *
+ * - During an incremental GC, if a memory location (except a root) is written
+ * to, then the value it previously held must be marked. Write barriers
+ * ensure this.
+ *
+ * - Any object that is allocated during incremental GC must start out marked.
+ *
+ * - Roots are marked in the first slice and hence don't need write barriers.
+ * Roots are things like the C stack and the VM stack.
+ *
+ * The problem that write barriers solve is that between slices the mutator can
+ * change the object graph. We must ensure that it cannot do this in such a way
+ * that makes us fail to mark a reachable object (marking an unreachable object
+ * is tolerable).
+ *
+ * We use a snapshot-at-the-beginning algorithm to do this. This means that we
+ * promise to mark at least everything that is reachable at the beginning of
+ * collection. To implement it we mark the old contents of every non-root memory
+ * location written to by the mutator while the collection is in progress, using
+ * write barriers. This is described in gc/Barrier.h.
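+ *
+ * For example, if between slices the mutator performs |obj->field = newValue|,
+ * the write barrier marks the value that |obj->field| previously held, so
+ * everything that was reachable in the snapshot taken at the start of the
+ * collection still gets marked.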
+ *
+ * Incremental sweeping
+ * --------------------
+ *
+ * Sweeping is difficult to do incrementally because object finalizers must be
+ * run at the start of sweeping, before any mutator code runs. The reason is
+ * that some objects use their finalizers to remove themselves from caches. If
+ * mutator code was allowed to run after the start of sweeping, it could observe
+ * the state of the cache and create a new reference to an object that was just
+ * about to be destroyed.
+ *
+ * Sweeping all finalizable objects in one go would introduce long pauses, so
+ * instead sweeping is broken up into groups of zones. Zones which are not yet
+ * being swept are still marked, so the issue above does not apply.
+ *
+ * The order of sweeping is restricted by cross compartment pointers - for
+ * example say that object |a| from zone A points to object |b| in zone B and
+ * neither object was marked when we transitioned to the Sweep phase. Imagine we
+ * sweep B first and then return to the mutator. It's possible that the mutator
+ * could cause |a| to become alive through a read barrier (perhaps it was a
+ * shape that was accessed via a shape table). Then we would need to mark |b|,
+ * which |a| points to, but |b| has already been swept.
+ *
+ * So if there is such a pointer then marking of zone B must not finish before
+ * marking of zone A. Pointers which form a cycle between zones therefore
+ * restrict those zones to being swept at the same time, and these are found
+ * using Tarjan's algorithm for finding the strongly connected components of a
+ * graph.
+ *
+ * GC things without finalizers, and things with finalizers that are able to run
+ * in the background, are swept on the background thread. This accounts for most
+ * of the sweeping work.
+ *
+ * Reset
+ * -----
+ *
+ * During incremental collection it is possible, although unlikely, for
+ * conditions to change such that incremental collection is no longer safe. In
+ * this case, the collection is 'reset' by resetIncrementalGC(). If we are in
+ * the mark state, this just stops marking, but if we have started sweeping
+ * already, we continue non-incrementally until we have swept the current sweep
+ * group. Following a reset, a new collection is started.
+ *
+ * Compacting GC
+ * -------------
+ *
+ * Compacting GC happens at the end of a major GC as part of the last slice.
+ * There are three parts:
+ *
+ * - Arenas are selected for compaction.
+ * - The contents of those arenas are moved to new arenas.
+ * - All references to moved things are updated.
+ *
+ * Collecting Atoms
+ * ----------------
+ *
+ * Atoms are collected differently from other GC things. They are contained in
+ * a special zone and things in other zones may have pointers to them that are
+ * not recorded in the cross compartment pointer map. Each zone holds a bitmap
+ * with the atoms it might be keeping alive, and atoms are only collected if
+ * they are not included in any zone's atom bitmap. See AtomMarking.cpp for how
+ * this bitmap is managed.
+ */
+
+#include "gc/GC-inl.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MacroForEach.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Range.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/TextUtils.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/Unused.h"
+
+#include <algorithm>
+#include <initializer_list>
+#include <iterator>
+#include <string.h>
+#include <utility>
+#ifndef XP_WIN
+# include <sys/mman.h>
+# include <unistd.h>
+#endif
+
+#include "jsapi.h"
+#include "jsfriendapi.h"
+#include "jstypes.h"
+
+#include "builtin/FinalizationRegistryObject.h"
+#include "builtin/WeakRefObject.h"
+#include "debugger/DebugAPI.h"
+#include "gc/ClearEdgesTracer.h"
+#include "gc/FindSCCs.h"
+#include "gc/FreeOp.h"
+#include "gc/GCInternals.h"
+#include "gc/GCLock.h"
+#include "gc/GCProbes.h"
+#include "gc/Memory.h"
+#include "gc/ParallelWork.h"
+#include "gc/Policy.h"
+#include "gc/WeakMap.h"
+#include "jit/BaselineJIT.h"
+#include "jit/JitCode.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitRealm.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitZone.h"
+#include "jit/MacroAssembler.h" // js::jit::CodeAlignment
+#include "js/Object.h" // JS::GetClass
+#include "js/SliceBudget.h"
+#include "proxy/DeadObjectProxy.h"
+#include "util/DifferentialTesting.h"
+#include "util/Poison.h"
+#include "util/Windows.h"
+#include "vm/BigIntType.h"
+#include "vm/GeckoProfiler.h"
+#include "vm/HelperThreadState.h"
+#include "vm/JSAtom.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/JSScript.h"
+#include "vm/Printer.h"
+#include "vm/ProxyObject.h"
+#include "vm/Realm.h"
+#include "vm/Shape.h"
+#include "vm/StringType.h"
+#include "vm/SymbolType.h"
+#include "vm/Time.h"
+#include "vm/TraceLogging.h"
+#include "vm/WrapperObject.h"
+
+#include "gc/Heap-inl.h"
+#include "gc/Marking-inl.h"
+#include "gc/Nursery-inl.h"
+#include "gc/PrivateIterators-inl.h"
+#include "gc/Zone-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/JSScript-inl.h"
+#include "vm/Stack-inl.h"
+#include "vm/StringType-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
+using JS::AutoGCRooter;
+
+/* Increase the IGC marking slice time if we are in highFrequencyGC mode. */
+static constexpr int IGC_MARK_SLICE_MULTIPLIER = 2;
+
+const AllocKind gc::slotsToThingKind[] = {
+ // clang-format off
+ /* 0 */ AllocKind::OBJECT0, AllocKind::OBJECT2, AllocKind::OBJECT2, AllocKind::OBJECT4,
+ /* 4 */ AllocKind::OBJECT4, AllocKind::OBJECT8, AllocKind::OBJECT8, AllocKind::OBJECT8,
+ /* 8 */ AllocKind::OBJECT8, AllocKind::OBJECT12, AllocKind::OBJECT12, AllocKind::OBJECT12,
+ /* 12 */ AllocKind::OBJECT12, AllocKind::OBJECT16, AllocKind::OBJECT16, AllocKind::OBJECT16,
+ /* 16 */ AllocKind::OBJECT16
+ // clang-format on
+};
+
+// Check that reserved bits of a Cell are compatible with our typical allocators
+// since most derived classes will store a pointer in the first word.
+static const size_t MinFirstWordAlignment = 1u << CellFlagBitsReservedForGC;
+static_assert(js::detail::LIFO_ALLOC_ALIGN >= MinFirstWordAlignment,
+ "CellFlagBitsReservedForGC should support LifoAlloc");
+static_assert(CellAlignBytes >= MinFirstWordAlignment,
+ "CellFlagBitsReservedForGC should support gc::Cell");
+static_assert(js::jit::CodeAlignment >= MinFirstWordAlignment,
+ "CellFlagBitsReservedForGC should support JIT code");
+static_assert(js::gc::JSClassAlignBytes >= MinFirstWordAlignment,
+ "CellFlagBitsReservedForGC should support JSClass pointers");
+static_assert(js::ScopeDataAlignBytes >= MinFirstWordAlignment,
+ "CellFlagBitsReservedForGC should support scope data pointers");
+
+static_assert(std::size(slotsToThingKind) == SLOTS_TO_THING_KIND_LIMIT,
+ "We have defined a slot count for each kind.");
+
+#define CHECK_THING_SIZE(allocKind, traceKind, type, sizedType, bgFinal, \
+ nursery, compact) \
+ static_assert(sizeof(sizedType) >= SortedArenaList::MinThingSize, \
+ #sizedType " is smaller than SortedArenaList::MinThingSize!"); \
+ static_assert(sizeof(sizedType) >= sizeof(FreeSpan), \
+ #sizedType " is smaller than FreeSpan"); \
+ static_assert(sizeof(sizedType) % CellAlignBytes == 0, \
+ "Size of " #sizedType " is not a multiple of CellAlignBytes"); \
+ static_assert(sizeof(sizedType) >= MinCellSize, \
+ "Size of " #sizedType " is smaller than the minimum size");
+FOR_EACH_ALLOCKIND(CHECK_THING_SIZE);
+#undef CHECK_THING_SIZE
+
+template <typename T>
+struct ArenaLayout {
+ static constexpr size_t thingSize() { return sizeof(T); }
+ static constexpr size_t thingsPerArena() {
+ return (ArenaSize - ArenaHeaderSize) / thingSize();
+ }
+ static constexpr size_t firstThingOffset() {
+ return ArenaSize - thingSize() * thingsPerArena();
+ }
+};
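+
+// For illustration only (these numbers are hypothetical, not the actual
+// constants): if ArenaSize were 4096, ArenaHeaderSize were 40 and sizeof(T)
+// were 32, then thingsPerArena() = (4096 - 40) / 32 = 126 and
+// firstThingOffset() = 4096 - 32 * 126 = 64, leaving the first 64 bytes of
+// the arena for the header plus alignment padding.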
+
+const uint8_t Arena::ThingSizes[] = {
+#define EXPAND_THING_SIZE(_1, _2, _3, sizedType, _4, _5, _6) \
+ ArenaLayout<sizedType>::thingSize(),
+ FOR_EACH_ALLOCKIND(EXPAND_THING_SIZE)
+#undef EXPAND_THING_SIZE
+};
+
+const uint8_t Arena::FirstThingOffsets[] = {
+#define EXPAND_FIRST_THING_OFFSET(_1, _2, _3, sizedType, _4, _5, _6) \
+ ArenaLayout<sizedType>::firstThingOffset(),
+ FOR_EACH_ALLOCKIND(EXPAND_FIRST_THING_OFFSET)
+#undef EXPAND_FIRST_THING_OFFSET
+};
+
+const uint8_t Arena::ThingsPerArena[] = {
+#define EXPAND_THINGS_PER_ARENA(_1, _2, _3, sizedType, _4, _5, _6) \
+ ArenaLayout<sizedType>::thingsPerArena(),
+ FOR_EACH_ALLOCKIND(EXPAND_THINGS_PER_ARENA)
+#undef EXPAND_THINGS_PER_ARENA
+};
+
+FreeSpan FreeLists::emptySentinel;
+
+struct js::gc::FinalizePhase {
+ gcstats::PhaseKind statsPhase;
+ AllocKinds kinds;
+};
+
+/*
+ * Finalization order for objects swept incrementally on the main thread.
+ */
+static constexpr FinalizePhase ForegroundObjectFinalizePhase = {
+ gcstats::PhaseKind::SWEEP_OBJECT,
+ {AllocKind::OBJECT0, AllocKind::OBJECT2, AllocKind::OBJECT4,
+ AllocKind::OBJECT8, AllocKind::OBJECT12, AllocKind::OBJECT16}};
+
+/*
+ * Finalization order for GC things swept incrementally on the main thread.
+ */
+static constexpr FinalizePhase ForegroundNonObjectFinalizePhase = {
+ gcstats::PhaseKind::SWEEP_SCRIPT, {AllocKind::SCRIPT, AllocKind::JITCODE}};
+
+/*
+ * Finalization order for GC things swept on the background thread.
+ */
+static constexpr FinalizePhase BackgroundFinalizePhases[] = {
+ {gcstats::PhaseKind::SWEEP_OBJECT,
+ {AllocKind::FUNCTION, AllocKind::FUNCTION_EXTENDED,
+ AllocKind::OBJECT0_BACKGROUND, AllocKind::OBJECT2_BACKGROUND,
+ AllocKind::ARRAYBUFFER4, AllocKind::OBJECT4_BACKGROUND,
+ AllocKind::ARRAYBUFFER8, AllocKind::OBJECT8_BACKGROUND,
+ AllocKind::ARRAYBUFFER12, AllocKind::OBJECT12_BACKGROUND,
+ AllocKind::ARRAYBUFFER16, AllocKind::OBJECT16_BACKGROUND}},
+ {gcstats::PhaseKind::SWEEP_SCOPE,
+ {
+ AllocKind::SCOPE,
+ }},
+ {gcstats::PhaseKind::SWEEP_REGEXP_SHARED,
+ {
+ AllocKind::REGEXP_SHARED,
+ }},
+ {gcstats::PhaseKind::SWEEP_STRING,
+ {AllocKind::FAT_INLINE_STRING, AllocKind::STRING,
+ AllocKind::EXTERNAL_STRING, AllocKind::FAT_INLINE_ATOM, AllocKind::ATOM,
+ AllocKind::SYMBOL, AllocKind::BIGINT}},
+ {gcstats::PhaseKind::SWEEP_SHAPE,
+ {AllocKind::SHAPE, AllocKind::ACCESSOR_SHAPE, AllocKind::BASE_SHAPE,
+ AllocKind::OBJECT_GROUP}}};
+
+void Arena::unmarkAll() {
+ MarkBitmapWord* arenaBits = chunk()->markBits.arenaBits(this);
+ for (size_t i = 0; i < ArenaBitmapWords; i++) {
+ arenaBits[i] = 0;
+ }
+}
+
+void Arena::unmarkPreMarkedFreeCells() {
+ for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
+ MOZ_ASSERT(cell->isMarkedBlack());
+ cell->unmark();
+ }
+}
+
+#ifdef DEBUG
+
+void Arena::checkNoMarkedFreeCells() {
+ for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
+ MOZ_ASSERT(!cell->isMarkedAny());
+ }
+}
+
+void Arena::checkAllCellsMarkedBlack() {
+ for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
+ MOZ_ASSERT(cell->isMarkedBlack());
+ }
+}
+
+#endif
+
+#if defined(DEBUG) || defined(JS_GC_ZEAL)
+void Arena::checkNoMarkedCells() {
+ for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
+ MOZ_ASSERT(!cell->isMarkedAny());
+ }
+}
+#endif
+
+/* static */
+void Arena::staticAsserts() {
+ static_assert(size_t(AllocKind::LIMIT) <= 255,
+ "All AllocKinds and AllocKind::LIMIT must fit in a uint8_t.");
+ static_assert(std::size(ThingSizes) == AllocKindCount,
+ "We haven't defined all thing sizes.");
+ static_assert(std::size(FirstThingOffsets) == AllocKindCount,
+ "We haven't defined all offsets.");
+ static_assert(std::size(ThingsPerArena) == AllocKindCount,
+ "We haven't defined all counts.");
+}
+
+/* static */
+inline void Arena::checkLookupTables() {
+#ifdef DEBUG
+ for (size_t i = 0; i < AllocKindCount; i++) {
+ MOZ_ASSERT(
+ FirstThingOffsets[i] + ThingsPerArena[i] * ThingSizes[i] == ArenaSize,
+ "Inconsistent arena lookup table data");
+ }
+#endif
+}
+
+template <typename T>
+inline size_t Arena::finalize(JSFreeOp* fop, AllocKind thingKind,
+ size_t thingSize) {
+ /* Enforce requirements on size of T. */
+ MOZ_ASSERT(thingSize % CellAlignBytes == 0);
+ MOZ_ASSERT(thingSize >= MinCellSize);
+ MOZ_ASSERT(thingSize <= 255);
+
+ MOZ_ASSERT(allocated());
+ MOZ_ASSERT(thingKind == getAllocKind());
+ MOZ_ASSERT(thingSize == getThingSize());
+ MOZ_ASSERT(!onDelayedMarkingList_);
+
+ uint_fast16_t firstThing = firstThingOffset(thingKind);
+ uint_fast16_t firstThingOrSuccessorOfLastMarkedThing = firstThing;
+ uint_fast16_t lastThing = ArenaSize - thingSize;
+
+ FreeSpan newListHead;
+ FreeSpan* newListTail = &newListHead;
+ size_t nmarked = 0, nfinalized = 0;
+
+ for (ArenaCellIterUnderFinalize cell(this); !cell.done(); cell.next()) {
+ T* t = cell.as<T>();
+ if (t->asTenured().isMarkedAny()) {
+ uint_fast16_t thing = uintptr_t(t) & ArenaMask;
+ if (thing != firstThingOrSuccessorOfLastMarkedThing) {
+ // We just finished passing over one or more free things,
+ // so record a new FreeSpan.
+ newListTail->initBounds(firstThingOrSuccessorOfLastMarkedThing,
+ thing - thingSize, this);
+ newListTail = newListTail->nextSpanUnchecked(this);
+ }
+ firstThingOrSuccessorOfLastMarkedThing = thing + thingSize;
+ nmarked++;
+ } else {
+ t->finalize(fop);
+ AlwaysPoison(t, JS_SWEPT_TENURED_PATTERN, thingSize,
+ MemCheckKind::MakeUndefined);
+ gcprobes::TenuredFinalize(t);
+ nfinalized++;
+ }
+ }
+
+ if (thingKind == AllocKind::STRING ||
+ thingKind == AllocKind::FAT_INLINE_STRING) {
+ zone->markedStrings += nmarked;
+ zone->finalizedStrings += nfinalized;
+ }
+
+ if (nmarked == 0) {
+ // Do nothing. The caller will update the arena appropriately.
+ MOZ_ASSERT(newListTail == &newListHead);
+ DebugOnlyPoison(data, JS_SWEPT_TENURED_PATTERN, sizeof(data),
+ MemCheckKind::MakeUndefined);
+ return nmarked;
+ }
+
+ MOZ_ASSERT(firstThingOrSuccessorOfLastMarkedThing != firstThing);
+ uint_fast16_t lastMarkedThing =
+ firstThingOrSuccessorOfLastMarkedThing - thingSize;
+ if (lastThing == lastMarkedThing) {
+ // If the last thing was marked, we will have already set the bounds of
+ // the final span, and we just need to terminate the list.
+ newListTail->initAsEmpty();
+ } else {
+ // Otherwise, end the list with a span that covers the final stretch of free
+ // things.
+ newListTail->initFinal(firstThingOrSuccessorOfLastMarkedThing, lastThing,
+ this);
+ }
+
+ firstFreeSpan = newListHead;
+#ifdef DEBUG
+ size_t nfree = numFreeThings(thingSize);
+ MOZ_ASSERT(nfree + nmarked == thingsPerArena(thingKind));
+#endif
+ return nmarked;
+}
+
+// Finalize arenas from the src list and insert them into the appropriate
+// destination size bins; arenas that end up completely empty are reset to
+// fully unused before being binned.
+template <typename T>
+static inline bool FinalizeTypedArenas(JSFreeOp* fop, Arena** src,
+ SortedArenaList& dest,
+ AllocKind thingKind,
+ SliceBudget& budget) {
+ AutoSetThreadIsFinalizing setThreadUse;
+
+ size_t thingSize = Arena::thingSize(thingKind);
+ size_t thingsPerArena = Arena::thingsPerArena(thingKind);
+
+ while (Arena* arena = *src) {
+ Arena* next = arena->next;
+ MOZ_ASSERT_IF(next, next->zone == arena->zone);
+ *src = next;
+
+ size_t nmarked = arena->finalize<T>(fop, thingKind, thingSize);
+ size_t nfree = thingsPerArena - nmarked;
+
+ if (nmarked) {
+ dest.insertAt(arena, nfree);
+ } else {
+ arena->chunk()->recycleArena(arena, dest, thingsPerArena);
+ }
+
+ budget.step(thingsPerArena);
+ if (budget.isOverBudget()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * Finalize the list of arenas.
+ */
+static bool FinalizeArenas(JSFreeOp* fop, Arena** src, SortedArenaList& dest,
+ AllocKind thingKind, SliceBudget& budget) {
+ switch (thingKind) {
+#define EXPAND_CASE(allocKind, traceKind, type, sizedType, bgFinal, nursery, \
+ compact) \
+ case AllocKind::allocKind: \
+ return FinalizeTypedArenas<type>(fop, src, dest, thingKind, budget);
+ FOR_EACH_ALLOCKIND(EXPAND_CASE)
+#undef EXPAND_CASE
+
+ default:
+ MOZ_CRASH("Invalid alloc kind");
+ }
+}
+
+TenuredChunk* ChunkPool::pop() {
+ MOZ_ASSERT(bool(head_) == bool(count_));
+ if (!count_) {
+ return nullptr;
+ }
+ return remove(head_);
+}
+
+void ChunkPool::push(TenuredChunk* chunk) {
+ MOZ_ASSERT(!chunk->info.next);
+ MOZ_ASSERT(!chunk->info.prev);
+
+ chunk->info.next = head_;
+ if (head_) {
+ head_->info.prev = chunk;
+ }
+ head_ = chunk;
+ ++count_;
+}
+
+TenuredChunk* ChunkPool::remove(TenuredChunk* chunk) {
+ MOZ_ASSERT(count_ > 0);
+ MOZ_ASSERT(contains(chunk));
+
+ if (head_ == chunk) {
+ head_ = chunk->info.next;
+ }
+ if (chunk->info.prev) {
+ chunk->info.prev->info.next = chunk->info.next;
+ }
+ if (chunk->info.next) {
+ chunk->info.next->info.prev = chunk->info.prev;
+ }
+ chunk->info.next = chunk->info.prev = nullptr;
+ --count_;
+
+ return chunk;
+}
+
+// We could keep the chunk pool sorted, but that's likely to be more expensive.
+// This sort is O(n log n), whereas keeping the pool sorted as chunks are added
+// and removed would likely be O(m * n), with m being the number of operations
+// (likely higher than n).
+void ChunkPool::sort() {
+ // Only sort if the list isn't already sorted.
+ if (!isSorted()) {
+ head_ = mergeSort(head(), count());
+
+ // Fixup prev pointers.
+ TenuredChunk* prev = nullptr;
+ for (TenuredChunk* cur = head_; cur; cur = cur->info.next) {
+ cur->info.prev = prev;
+ prev = cur;
+ }
+ }
+
+ MOZ_ASSERT(verify());
+ MOZ_ASSERT(isSorted());
+}
+
+TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {
+ MOZ_ASSERT(bool(list) == bool(count));
+
+ if (count < 2) {
+ return list;
+ }
+
+ size_t half = count / 2;
+
+  // Split
+ TenuredChunk* front = list;
+ TenuredChunk* back;
+ {
+ TenuredChunk* cur = list;
+ for (size_t i = 0; i < half - 1; i++) {
+ MOZ_ASSERT(cur);
+ cur = cur->info.next;
+ }
+ back = cur->info.next;
+ cur->info.next = nullptr;
+ }
+
+ front = mergeSort(front, half);
+ back = mergeSort(back, count - half);
+
+ // Merge
+ list = nullptr;
+ TenuredChunk** cur = &list;
+ while (front || back) {
+ if (!front) {
+ *cur = back;
+ break;
+ }
+ if (!back) {
+ *cur = front;
+ break;
+ }
+
+ // Note that the sort is stable due to the <= here. Nothing depends on
+    // this yet, but it could.
+ if (front->info.numArenasFree <= back->info.numArenasFree) {
+ *cur = front;
+ front = front->info.next;
+ cur = &(*cur)->info.next;
+ } else {
+ *cur = back;
+ back = back->info.next;
+ cur = &(*cur)->info.next;
+ }
+ }
+
+ return list;
+}
+
+bool ChunkPool::isSorted() const {
+ uint32_t last = 1;
+ for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
+ if (cursor->info.numArenasFree < last) {
+ return false;
+ }
+ last = cursor->info.numArenasFree;
+ }
+ return true;
+}
+
+#ifdef DEBUG
+bool ChunkPool::contains(TenuredChunk* chunk) const {
+ verify();
+ for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
+ if (cursor == chunk) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool ChunkPool::verify() const {
+ MOZ_ASSERT(bool(head_) == bool(count_));
+ uint32_t count = 0;
+ for (TenuredChunk* cursor = head_; cursor;
+ cursor = cursor->info.next, ++count) {
+ MOZ_ASSERT_IF(cursor->info.prev, cursor->info.prev->info.next == cursor);
+ MOZ_ASSERT_IF(cursor->info.next, cursor->info.next->info.prev == cursor);
+ }
+ MOZ_ASSERT(count_ == count);
+ return true;
+}
+#endif
+
+void ChunkPool::Iter::next() {
+ MOZ_ASSERT(!done());
+ current_ = current_->info.next;
+}
+
+inline bool GCRuntime::tooManyEmptyChunks(const AutoLockGC& lock) {
+ return emptyChunks(lock).count() > tunables.minEmptyChunkCount(lock);
+}
+
+ChunkPool GCRuntime::expireEmptyChunkPool(const AutoLockGC& lock) {
+ MOZ_ASSERT(emptyChunks(lock).verify());
+ MOZ_ASSERT(tunables.minEmptyChunkCount(lock) <=
+ tunables.maxEmptyChunkCount());
+
+ ChunkPool expired;
+ while (tooManyEmptyChunks(lock)) {
+ TenuredChunk* chunk = emptyChunks(lock).pop();
+ prepareToFreeChunk(chunk->info);
+ expired.push(chunk);
+ }
+
+ MOZ_ASSERT(expired.verify());
+ MOZ_ASSERT(emptyChunks(lock).verify());
+ MOZ_ASSERT(emptyChunks(lock).count() <= tunables.maxEmptyChunkCount());
+ MOZ_ASSERT(emptyChunks(lock).count() <= tunables.minEmptyChunkCount(lock));
+ return expired;
+}
+
+static void FreeChunkPool(ChunkPool& pool) {
+ for (ChunkPool::Iter iter(pool); !iter.done();) {
+ TenuredChunk* chunk = iter.get();
+ iter.next();
+ pool.remove(chunk);
+ MOZ_ASSERT(!chunk->info.numArenasFreeCommitted);
+ UnmapPages(static_cast<void*>(chunk), ChunkSize);
+ }
+ MOZ_ASSERT(pool.count() == 0);
+}
+
+void GCRuntime::freeEmptyChunks(const AutoLockGC& lock) {
+ FreeChunkPool(emptyChunks(lock));
+}
+
+inline void GCRuntime::prepareToFreeChunk(TenuredChunkInfo& info) {
+ MOZ_ASSERT(numArenasFreeCommitted >= info.numArenasFreeCommitted);
+ numArenasFreeCommitted -= info.numArenasFreeCommitted;
+ stats().count(gcstats::COUNT_DESTROY_CHUNK);
+#ifdef DEBUG
+ /*
+ * Let FreeChunkPool detect a missing prepareToFreeChunk call before it
+   * frees the chunk.
+ */
+ info.numArenasFreeCommitted = 0;
+#endif
+}
+
+inline void GCRuntime::updateOnArenaFree() { ++numArenasFreeCommitted; }
+
+void TenuredChunk::addArenaToFreeList(GCRuntime* gc, Arena* arena) {
+ MOZ_ASSERT(!arena->allocated());
+ arena->next = info.freeArenasHead;
+ info.freeArenasHead = arena;
+ ++info.numArenasFreeCommitted;
+ ++info.numArenasFree;
+ gc->updateOnArenaFree();
+}
+
+void TenuredChunk::addArenaToDecommittedList(const Arena* arena) {
+ ++info.numArenasFree;
+ decommittedArenas[TenuredChunk::arenaIndex(arena->address())] = true;
+}
+
+void TenuredChunk::recycleArena(Arena* arena, SortedArenaList& dest,
+ size_t thingsPerArena) {
+ arena->setAsFullyUnused();
+ dest.insertAt(arena, thingsPerArena);
+}
+
+void TenuredChunk::releaseArena(GCRuntime* gc, Arena* arena,
+ const AutoLockGC& lock) {
+ addArenaToFreeList(gc, arena);
+ updateChunkListAfterFree(gc, lock);
+}
+
+bool TenuredChunk::decommitOneFreeArena(GCRuntime* gc, AutoLockGC& lock) {
+ MOZ_ASSERT(info.numArenasFreeCommitted > 0);
+ Arena* arena = fetchNextFreeArena(gc);
+ updateChunkListAfterAlloc(gc, lock);
+
+ bool ok;
+ {
+ AutoUnlockGC unlock(lock);
+ ok = MarkPagesUnusedSoft(arena, ArenaSize);
+ }
+
+ if (ok) {
+ addArenaToDecommittedList(arena);
+ } else {
+ addArenaToFreeList(gc, arena);
+ }
+ updateChunkListAfterFree(gc, lock);
+
+ return ok;
+}
+
+void TenuredChunk::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
+ for (size_t i = 0; i < ArenasPerChunk; ++i) {
+ if (decommittedArenas[i] || arenas[i].allocated()) {
+ continue;
+ }
+
+ if (MarkPagesUnusedSoft(&arenas[i], ArenaSize)) {
+ info.numArenasFreeCommitted--;
+ decommittedArenas[i] = true;
+ }
+ }
+}
+
+void TenuredChunk::updateChunkListAfterAlloc(GCRuntime* gc,
+ const AutoLockGC& lock) {
+ if (MOZ_UNLIKELY(!hasAvailableArenas())) {
+ gc->availableChunks(lock).remove(this);
+ gc->fullChunks(lock).push(this);
+ }
+}
+
+void TenuredChunk::updateChunkListAfterFree(GCRuntime* gc,
+ const AutoLockGC& lock) {
+ if (info.numArenasFree == 1) {
+ gc->fullChunks(lock).remove(this);
+ gc->availableChunks(lock).push(this);
+ } else if (!unused()) {
+ MOZ_ASSERT(gc->availableChunks(lock).contains(this));
+ } else {
+ MOZ_ASSERT(unused());
+ gc->availableChunks(lock).remove(this);
+ decommitAllArenas();
+ MOZ_ASSERT(info.numArenasFreeCommitted == 0);
+ gc->recycleChunk(this, lock);
+ }
+}
+
+void GCRuntime::releaseArena(Arena* arena, const AutoLockGC& lock) {
+ MOZ_ASSERT(arena->allocated());
+ MOZ_ASSERT(!arena->onDelayedMarkingList());
+
+ arena->zone->gcHeapSize.removeGCArena();
+ arena->release(lock);
+ arena->chunk()->releaseArena(this, arena, lock);
+}
+
+GCRuntime::GCRuntime(JSRuntime* rt)
+ : rt(rt),
+ systemZone(nullptr),
+ atomsZone(nullptr),
+ heapState_(JS::HeapState::Idle),
+ stats_(this),
+ marker(rt),
+ heapSize(nullptr),
+ helperThreadRatio(TuningDefaults::HelperThreadRatio),
+ maxHelperThreads(TuningDefaults::MaxHelperThreads),
+ helperThreadCount(1),
+ rootsHash(256),
+ nextCellUniqueId_(LargestTaggedNullCellPointer +
+ 1), // Ensure disjoint from null tagged pointers.
+ numArenasFreeCommitted(0),
+ verifyPreData(nullptr),
+ lastGCStartTime_(ReallyNow()),
+ lastGCEndTime_(ReallyNow()),
+ incrementalGCEnabled(TuningDefaults::IncrementalGCEnabled),
+ perZoneGCEnabled(TuningDefaults::PerZoneGCEnabled),
+ numActiveZoneIters(0),
+ cleanUpEverything(false),
+ grayBufferState(GCRuntime::GrayBufferState::Unused),
+ grayBitsValid(false),
+ majorGCTriggerReason(JS::GCReason::NO_REASON),
+ fullGCForAtomsRequested_(false),
+ minorGCNumber(0),
+ majorGCNumber(0),
+ number(0),
+ sliceNumber(0),
+ isFull(false),
+ incrementalState(gc::State::NotActive),
+ initialState(gc::State::NotActive),
+ useZeal(false),
+ lastMarkSlice(false),
+ safeToYield(true),
+ markOnBackgroundThreadDuringSweeping(false),
+ sweepOnBackgroundThread(false),
+ requestSliceAfterBackgroundTask(false),
+ lifoBlocksToFree((size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
+ lifoBlocksToFreeAfterMinorGC(
+ (size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
+ sweepGroupIndex(0),
+ sweepGroups(nullptr),
+ currentSweepGroup(nullptr),
+ sweepZone(nullptr),
+ hasMarkedGrayRoots(false),
+ abortSweepAfterCurrentGroup(false),
+ sweepMarkResult(IncrementalProgress::NotFinished),
+ startedCompacting(false),
+ relocatedArenasToRelease(nullptr),
+ zonesCompacted(0),
+#ifdef JS_GC_ZEAL
+ markingValidator(nullptr),
+#endif
+ defaultTimeBudgetMS_(TuningDefaults::DefaultTimeBudgetMS),
+ incrementalAllowed(true),
+ compactingEnabled(TuningDefaults::CompactingEnabled),
+ rootsRemoved(false),
+#ifdef JS_GC_ZEAL
+ zealModeBits(0),
+ zealFrequency(0),
+ nextScheduled(0),
+ deterministicOnly(false),
+ zealSliceBudget(0),
+ selectedForMarking(rt),
+#endif
+ fullCompartmentChecks(false),
+ gcCallbackDepth(0),
+ alwaysPreserveCode(false),
+ lowMemoryState(false),
+ lock(mutexid::GCLock),
+ allocTask(this, emptyChunks_.ref()),
+ unmarkTask(this),
+ markTask(this),
+ sweepTask(this),
+ freeTask(this),
+ decommitTask(this),
+ nursery_(this),
+ storeBuffer_(rt, nursery()) {
+ marker.setIncrementalGCEnabled(incrementalGCEnabled);
+}
+
+#ifdef JS_GC_ZEAL
+
+void GCRuntime::getZealBits(uint32_t* zealBits, uint32_t* frequency,
+ uint32_t* scheduled) {
+ *zealBits = zealModeBits;
+ *frequency = zealFrequency;
+ *scheduled = nextScheduled;
+}
+
+const char gc::ZealModeHelpText[] =
+ " Specifies how zealous the garbage collector should be. Some of these "
+ "modes can\n"
+ " be set simultaneously, by passing multiple level options, e.g. \"2;4\" "
+ "will activate\n"
+ " both modes 2 and 4. Modes can be specified by name or number.\n"
+ " \n"
+ " Values:\n"
+ " 0: (None) Normal amount of collection (resets all modes)\n"
+ " 1: (RootsChange) Collect when roots are added or removed\n"
+ " 2: (Alloc) Collect when every N allocations (default: 100)\n"
+ " 4: (VerifierPre) Verify pre write barriers between instructions\n"
+ " 6: (YieldBeforeRootMarking) Incremental GC in two slices that yields "
+ "before root marking\n"
+ " 7: (GenerationalGC) Collect the nursery every N nursery allocations\n"
+ " 8: (YieldBeforeMarking) Incremental GC in two slices that yields "
+ "between\n"
+ " the root marking and marking phases\n"
+ " 9: (YieldBeforeSweeping) Incremental GC in two slices that yields "
+ "between\n"
+ " the marking and sweeping phases\n"
+ " 10: (IncrementalMultipleSlices) Incremental GC in many slices\n"
+ " 11: (IncrementalMarkingValidator) Verify incremental marking\n"
+ " 12: (ElementsBarrier) Use the individual element post-write barrier\n"
+ " regardless of elements size\n"
+ " 13: (CheckHashTablesOnMinorGC) Check internal hashtables on minor GC\n"
+ " 14: (Compact) Perform a shrinking collection every N allocations\n"
+ " 15: (CheckHeapAfterGC) Walk the heap to check its integrity after "
+ "every GC\n"
+ " 16: (CheckNursery) Check nursery integrity on minor GC\n"
+ " 17: (YieldBeforeSweepingAtoms) Incremental GC in two slices that "
+ "yields\n"
+ " before sweeping the atoms table\n"
+ " 18: (CheckGrayMarking) Check gray marking invariants after every GC\n"
+ " 19: (YieldBeforeSweepingCaches) Incremental GC in two slices that "
+ "yields\n"
+ " before sweeping weak caches\n"
+ " 21: (YieldBeforeSweepingObjects) Incremental GC in two slices that "
+ "yields\n"
+ " before sweeping foreground finalized objects\n"
+ " 22: (YieldBeforeSweepingNonObjects) Incremental GC in two slices that "
+ "yields\n"
+ " before sweeping non-object GC things\n"
+ " 23: (YieldBeforeSweepingShapeTrees) Incremental GC in two slices that "
+ "yields\n"
+ " before sweeping shape trees\n"
+ " 24: (CheckWeakMapMarking) Check weak map marking invariants after "
+ "every GC\n"
+ " 25: (YieldWhileGrayMarking) Incremental GC in two slices that yields\n"
+ " during gray marking\n";
+
+// The set of zeal modes that control incremental slices. These modes are
+// mutually exclusive.
+static const mozilla::EnumSet<ZealMode> IncrementalSliceZealModes = {
+ ZealMode::YieldBeforeRootMarking,
+ ZealMode::YieldBeforeMarking,
+ ZealMode::YieldBeforeSweeping,
+ ZealMode::IncrementalMultipleSlices,
+ ZealMode::YieldBeforeSweepingAtoms,
+ ZealMode::YieldBeforeSweepingCaches,
+ ZealMode::YieldBeforeSweepingObjects,
+ ZealMode::YieldBeforeSweepingNonObjects,
+ ZealMode::YieldBeforeSweepingShapeTrees};
+
+void GCRuntime::setZeal(uint8_t zeal, uint32_t frequency) {
+ MOZ_ASSERT(zeal <= unsigned(ZealMode::Limit));
+
+ if (verifyPreData) {
+ VerifyBarriers(rt, PreBarrierVerifier);
+ }
+
+ if (zeal == 0) {
+ if (hasZealMode(ZealMode::GenerationalGC)) {
+ evictNursery(JS::GCReason::DEBUG_GC);
+ nursery().leaveZealMode();
+ }
+
+ if (isIncrementalGCInProgress()) {
+ finishGC(JS::GCReason::DEBUG_GC);
+ }
+ }
+
+ ZealMode zealMode = ZealMode(zeal);
+ if (zealMode == ZealMode::GenerationalGC) {
+ evictNursery(JS::GCReason::DEBUG_GC);
+ nursery().enterZealMode();
+ }
+
+ // Some modes are mutually exclusive. If we're setting one of those, we
+ // first reset all of them.
+ if (IncrementalSliceZealModes.contains(zealMode)) {
+ for (auto mode : IncrementalSliceZealModes) {
+ clearZealMode(mode);
+ }
+ }
+
+ bool schedule = zealMode >= ZealMode::Alloc;
+ if (zeal != 0) {
+ zealModeBits |= 1 << unsigned(zeal);
+ } else {
+ zealModeBits = 0;
+ }
+ zealFrequency = frequency;
+ nextScheduled = schedule ? frequency : 0;
+}
+
+void GCRuntime::unsetZeal(uint8_t zeal) {
+ MOZ_ASSERT(zeal <= unsigned(ZealMode::Limit));
+ ZealMode zealMode = ZealMode(zeal);
+
+ if (!hasZealMode(zealMode)) {
+ return;
+ }
+
+ if (verifyPreData) {
+ VerifyBarriers(rt, PreBarrierVerifier);
+ }
+
+ if (zealMode == ZealMode::GenerationalGC) {
+ evictNursery(JS::GCReason::DEBUG_GC);
+ nursery().leaveZealMode();
+ }
+
+ clearZealMode(zealMode);
+
+ if (zealModeBits == 0) {
+ if (isIncrementalGCInProgress()) {
+ finishGC(JS::GCReason::DEBUG_GC);
+ }
+
+ zealFrequency = 0;
+ nextScheduled = 0;
+ }
+}
+
+void GCRuntime::setNextScheduled(uint32_t count) { nextScheduled = count; }
+
+using CharRange = mozilla::Range<const char>;
+using CharRangeVector = Vector<CharRange, 0, SystemAllocPolicy>;
+
+static bool ParseZealModeName(CharRange text, uint32_t* modeOut) {
+ struct ModeInfo {
+ const char* name;
+ size_t length;
+ uint32_t value;
+ };
+
+ static const ModeInfo zealModes[] = {{"None", 0},
+# define ZEAL_MODE(name, value) {# name, strlen(# name), value},
+ JS_FOR_EACH_ZEAL_MODE(ZEAL_MODE)
+# undef ZEAL_MODE
+ };
+
+ for (auto mode : zealModes) {
+ if (text.length() == mode.length &&
+ memcmp(text.begin().get(), mode.name, mode.length) == 0) {
+ *modeOut = mode.value;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool ParseZealModeNumericParam(CharRange text, uint32_t* paramOut) {
+ if (text.length() == 0) {
+ return false;
+ }
+
+ for (auto c : text) {
+ if (!mozilla::IsAsciiDigit(c)) {
+ return false;
+ }
+ }
+
+ *paramOut = atoi(text.begin().get());
+ return true;
+}
+
+static bool SplitStringBy(CharRange text, char delimiter,
+ CharRangeVector* result) {
+ auto start = text.begin();
+ for (auto ptr = start; ptr != text.end(); ptr++) {
+ if (*ptr == delimiter) {
+ if (!result->emplaceBack(start, ptr)) {
+ return false;
+ }
+ start = ptr + 1;
+ }
+ }
+
+ return result->emplaceBack(start, text.end());
+}
+
+static bool PrintZealHelpAndFail() {
+ fprintf(stderr, "Format: JS_GC_ZEAL=level(;level)*[,N]\n");
+ fputs(ZealModeHelpText, stderr);
+ return false;
+}
+
+bool GCRuntime::parseAndSetZeal(const char* str) {
+ // Set the zeal mode from a string consisting of one or more mode specifiers
+ // separated by ';', optionally followed by a ',' and the trigger frequency.
+  // The mode specifiers can be a mode name or its number.
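+  //
+  // For example (illustrative), "10;15,100" enables zeal modes 10
+  // (IncrementalMultipleSlices) and 15 (CheckHeapAfterGC) with a trigger
+  // frequency of 100.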
+
+ auto text = CharRange(str, strlen(str));
+
+ CharRangeVector parts;
+ if (!SplitStringBy(text, ',', &parts)) {
+ return false;
+ }
+
+ if (parts.length() == 0 || parts.length() > 2) {
+ return PrintZealHelpAndFail();
+ }
+
+ uint32_t frequency = JS_DEFAULT_ZEAL_FREQ;
+ if (parts.length() == 2 && !ParseZealModeNumericParam(parts[1], &frequency)) {
+ return PrintZealHelpAndFail();
+ }
+
+ CharRangeVector modes;
+ if (!SplitStringBy(parts[0], ';', &modes)) {
+ return false;
+ }
+
+ for (const auto& descr : modes) {
+ uint32_t mode;
+ if (!ParseZealModeName(descr, &mode) &&
+ !(ParseZealModeNumericParam(descr, &mode) &&
+ mode <= unsigned(ZealMode::Limit))) {
+ return PrintZealHelpAndFail();
+ }
+
+ setZeal(mode, frequency);
+ }
+
+ return true;
+}
+
+const char* js::gc::AllocKindName(AllocKind kind) {
+ static const char* const names[] = {
+# define EXPAND_THING_NAME(allocKind, _1, _2, _3, _4, _5, _6) # allocKind,
+ FOR_EACH_ALLOCKIND(EXPAND_THING_NAME)
+# undef EXPAND_THING_NAME
+ };
+ static_assert(std::size(names) == AllocKindCount,
+ "names array should have an entry for every AllocKind");
+
+ size_t i = size_t(kind);
+ MOZ_ASSERT(i < std::size(names));
+ return names[i];
+}
+
+void js::gc::DumpArenaInfo() {
+ fprintf(stderr, "Arena header size: %zu\n\n", ArenaHeaderSize);
+
+ fprintf(stderr, "GC thing kinds:\n");
+ fprintf(stderr, "%25s %8s %8s %8s\n",
+ "AllocKind:", "Size:", "Count:", "Padding:");
+ for (auto kind : AllAllocKinds()) {
+ fprintf(stderr, "%25s %8zu %8zu %8zu\n", AllocKindName(kind),
+ Arena::thingSize(kind), Arena::thingsPerArena(kind),
+ Arena::firstThingOffset(kind) - ArenaHeaderSize);
+ }
+}
+
+#endif // JS_GC_ZEAL
+
+bool GCRuntime::init(uint32_t maxbytes) {
+ MOZ_ASSERT(SystemPageSize());
+ Arena::checkLookupTables();
+
+ {
+ AutoLockGCBgAlloc lock(this);
+
+ MOZ_ALWAYS_TRUE(tunables.setParameter(JSGC_MAX_BYTES, maxbytes, lock));
+
+ const char* size = getenv("JSGC_MARK_STACK_LIMIT");
+ if (size) {
+ setMarkStackLimit(atoi(size), lock);
+ }
+
+ if (!nursery().init(lock)) {
+ return false;
+ }
+
+ const char* pretenureThresholdStr = getenv("JSGC_PRETENURE_THRESHOLD");
+ if (pretenureThresholdStr && pretenureThresholdStr[0]) {
+ char* last;
+ long pretenureThreshold = strtol(pretenureThresholdStr, &last, 10);
+ if (last[0] || !tunables.setParameter(JSGC_PRETENURE_THRESHOLD,
+ pretenureThreshold, lock)) {
+ fprintf(stderr, "Invalid value for JSGC_PRETENURE_THRESHOLD: %s\n",
+ pretenureThresholdStr);
+ }
+ }
+ }
+
+#ifdef JS_GC_ZEAL
+ const char* zealSpec = getenv("JS_GC_ZEAL");
+ if (zealSpec && zealSpec[0] && !parseAndSetZeal(zealSpec)) {
+ return false;
+ }
+#endif
+
+ if (!marker.init() || !initSweepActions()) {
+ return false;
+ }
+
+ gcprobes::Init(this);
+
+ updateHelperThreadCount();
+
+ return true;
+}
+
+void GCRuntime::freezeSelfHostingZone() {
+ MOZ_ASSERT(!selfHostingZoneFrozen);
+ MOZ_ASSERT(!isIncrementalGCInProgress());
+
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ MOZ_ASSERT(!zone->isGCScheduled());
+ if (zone->isSelfHostingZone()) {
+ zone->scheduleGC();
+ }
+ }
+
+ gc(GC_SHRINK, JS::GCReason::INIT_SELF_HOSTING);
+ selfHostingZoneFrozen = true;
+}
+
+void GCRuntime::finish() {
+ MOZ_ASSERT(inPageLoadCount == 0);
+
+ // Wait for nursery background free to end and disable it to release memory.
+ if (nursery().isEnabled()) {
+ nursery().disable();
+ }
+
+ // Wait until the background finalization and allocation stops and the
+ // helper thread shuts down before we forcefully release any remaining GC
+ // memory.
+ sweepTask.join();
+ freeTask.join();
+ allocTask.cancelAndWait();
+ decommitTask.cancelAndWait();
+
+#ifdef JS_GC_ZEAL
+ // Free memory associated with GC verification.
+ finishVerifier();
+#endif
+
+ // Delete all remaining zones.
+ if (rt->gcInitialized) {
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ AutoSetThreadIsSweeping threadIsSweeping(zone);
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+ for (RealmsInCompartmentIter realm(comp); !realm.done(); realm.next()) {
+ js_delete(realm.get());
+ }
+ comp->realms().clear();
+ js_delete(comp.get());
+ }
+ zone->compartments().clear();
+ js_delete(zone.get());
+ }
+ }
+
+ zones().clear();
+
+ FreeChunkPool(fullChunks_.ref());
+ FreeChunkPool(availableChunks_.ref());
+ FreeChunkPool(emptyChunks_.ref());
+
+ gcprobes::Finish(this);
+
+ nursery().printTotalProfileTimes();
+ stats().printTotalProfileTimes();
+}
+
+bool GCRuntime::setParameter(JSGCParamKey key, uint32_t value) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ waitBackgroundSweepEnd();
+ AutoLockGC lock(this);
+ return setParameter(key, value, lock);
+}
+
+bool GCRuntime::setParameter(JSGCParamKey key, uint32_t value,
+ AutoLockGC& lock) {
+ switch (key) {
+ case JSGC_SLICE_TIME_BUDGET_MS:
+ defaultTimeBudgetMS_ = value ? value : SliceBudget::UnlimitedTimeBudget;
+ break;
+ case JSGC_MARK_STACK_LIMIT:
+ if (value == 0) {
+ return false;
+ }
+ setMarkStackLimit(value, lock);
+ break;
+ case JSGC_INCREMENTAL_GC_ENABLED:
+ setIncrementalGCEnabled(value != 0);
+ break;
+ case JSGC_PER_ZONE_GC_ENABLED:
+ perZoneGCEnabled = value != 0;
+ break;
+ case JSGC_COMPACTING_ENABLED:
+ compactingEnabled = value != 0;
+ break;
+ case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
+ marker.incrementalWeakMapMarkingEnabled = value != 0;
+ break;
+ case JSGC_HELPER_THREAD_RATIO:
+ if (rt->parentRuntime) {
+ // Don't allow this to be set for worker runtimes.
+ return false;
+ }
+ if (value == 0) {
+ return false;
+ }
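+      // The value is a percentage: e.g. 50 yields helperThreadRatio = 0.5;
+      // updateHelperThreadCount() below turns this ratio into a thread count.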
+ helperThreadRatio = double(value) / 100.0;
+ updateHelperThreadCount();
+ break;
+ case JSGC_MAX_HELPER_THREADS:
+ if (rt->parentRuntime) {
+ // Don't allow this to be set for worker runtimes.
+ return false;
+ }
+ if (value == 0) {
+ return false;
+ }
+ maxHelperThreads = value;
+ updateHelperThreadCount();
+ break;
+ default:
+ if (!tunables.setParameter(key, value, lock)) {
+ return false;
+ }
+ updateAllGCStartThresholds(lock);
+ }
+
+ return true;
+}
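+// Illustrative usage (not part of this patch): embedders normally reach the
+// switch above through the public API rather than calling
+// GCRuntime::setParameter() directly, e.g.
+//
+//   JS_SetGCParameter(cx, JSGC_INCREMENTAL_GC_ENABLED, 1);
+//   JS_SetGCParameter(cx, JSGC_SLICE_TIME_BUDGET_MS, 10);
+//
+// Each call takes the GC lock and forwards the key/value pair here.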
+
+void GCRuntime::resetParameter(JSGCParamKey key) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ waitBackgroundSweepEnd();
+ AutoLockGC lock(this);
+ resetParameter(key, lock);
+}
+
+void GCRuntime::resetParameter(JSGCParamKey key, AutoLockGC& lock) {
+ switch (key) {
+ case JSGC_SLICE_TIME_BUDGET_MS:
+ defaultTimeBudgetMS_ = TuningDefaults::DefaultTimeBudgetMS;
+ break;
+ case JSGC_MARK_STACK_LIMIT:
+ setMarkStackLimit(MarkStack::DefaultCapacity, lock);
+ break;
+ case JSGC_INCREMENTAL_GC_ENABLED:
+ setIncrementalGCEnabled(TuningDefaults::IncrementalGCEnabled);
+ break;
+ case JSGC_PER_ZONE_GC_ENABLED:
+ perZoneGCEnabled = TuningDefaults::PerZoneGCEnabled;
+ break;
+ case JSGC_COMPACTING_ENABLED:
+ compactingEnabled = TuningDefaults::CompactingEnabled;
+ break;
+ case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
+ marker.incrementalWeakMapMarkingEnabled =
+ TuningDefaults::IncrementalWeakMapMarkingEnabled;
+ break;
+ case JSGC_HELPER_THREAD_RATIO:
+ if (rt->parentRuntime) {
+ return;
+ }
+ helperThreadRatio = TuningDefaults::HelperThreadRatio;
+ updateHelperThreadCount();
+ break;
+ case JSGC_MAX_HELPER_THREADS:
+ if (rt->parentRuntime) {
+ return;
+ }
+ maxHelperThreads = TuningDefaults::MaxHelperThreads;
+ updateHelperThreadCount();
+ break;
+ default:
+ tunables.resetParameter(key, lock);
+ updateAllGCStartThresholds(lock);
+ }
+}
+
+uint32_t GCRuntime::getParameter(JSGCParamKey key) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ AutoLockGC lock(this);
+ return getParameter(key, lock);
+}
+
+uint32_t GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC& lock) {
+ switch (key) {
+ case JSGC_MAX_BYTES:
+ return uint32_t(tunables.gcMaxBytes());
+ case JSGC_MIN_NURSERY_BYTES:
+ MOZ_ASSERT(tunables.gcMinNurseryBytes() < UINT32_MAX);
+ return uint32_t(tunables.gcMinNurseryBytes());
+ case JSGC_MAX_NURSERY_BYTES:
+ MOZ_ASSERT(tunables.gcMaxNurseryBytes() < UINT32_MAX);
+ return uint32_t(tunables.gcMaxNurseryBytes());
+ case JSGC_BYTES:
+ return uint32_t(heapSize.bytes());
+ case JSGC_NURSERY_BYTES:
+ return nursery().capacity();
+ case JSGC_NUMBER:
+ return uint32_t(number);
+ case JSGC_MAJOR_GC_NUMBER:
+ return uint32_t(majorGCNumber);
+ case JSGC_MINOR_GC_NUMBER:
+ return uint32_t(minorGCNumber);
+ case JSGC_INCREMENTAL_GC_ENABLED:
+ return incrementalGCEnabled;
+ case JSGC_PER_ZONE_GC_ENABLED:
+ return perZoneGCEnabled;
+ case JSGC_UNUSED_CHUNKS:
+ return uint32_t(emptyChunks(lock).count());
+ case JSGC_TOTAL_CHUNKS:
+ return uint32_t(fullChunks(lock).count() + availableChunks(lock).count() +
+ emptyChunks(lock).count());
+ case JSGC_SLICE_TIME_BUDGET_MS:
+ if (defaultTimeBudgetMS_.ref() == SliceBudget::UnlimitedTimeBudget) {
+ return 0;
+ } else {
+ MOZ_RELEASE_ASSERT(defaultTimeBudgetMS_ >= 0);
+ MOZ_RELEASE_ASSERT(defaultTimeBudgetMS_ <= UINT32_MAX);
+ return uint32_t(defaultTimeBudgetMS_);
+ }
+ case JSGC_MARK_STACK_LIMIT:
+ return marker.maxCapacity();
+ case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
+ return tunables.highFrequencyThreshold().ToMilliseconds();
+ case JSGC_SMALL_HEAP_SIZE_MAX:
+ return tunables.smallHeapSizeMaxBytes() / 1024 / 1024;
+ case JSGC_LARGE_HEAP_SIZE_MIN:
+ return tunables.largeHeapSizeMinBytes() / 1024 / 1024;
+ case JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH:
+ return uint32_t(tunables.highFrequencySmallHeapGrowth() * 100);
+ case JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH:
+ return uint32_t(tunables.highFrequencyLargeHeapGrowth() * 100);
+ case JSGC_LOW_FREQUENCY_HEAP_GROWTH:
+ return uint32_t(tunables.lowFrequencyHeapGrowth() * 100);
+ case JSGC_ALLOCATION_THRESHOLD:
+ return tunables.gcZoneAllocThresholdBase() / 1024 / 1024;
+ case JSGC_SMALL_HEAP_INCREMENTAL_LIMIT:
+ return uint32_t(tunables.smallHeapIncrementalLimit() * 100);
+ case JSGC_LARGE_HEAP_INCREMENTAL_LIMIT:
+ return uint32_t(tunables.largeHeapIncrementalLimit() * 100);
+ case JSGC_MIN_EMPTY_CHUNK_COUNT:
+ return tunables.minEmptyChunkCount(lock);
+ case JSGC_MAX_EMPTY_CHUNK_COUNT:
+ return tunables.maxEmptyChunkCount();
+ case JSGC_COMPACTING_ENABLED:
+ return compactingEnabled;
+ case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
+ return marker.incrementalWeakMapMarkingEnabled;
+ case JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION:
+ return tunables.nurseryFreeThresholdForIdleCollection();
+ case JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT:
+ return uint32_t(tunables.nurseryFreeThresholdForIdleCollectionFraction() *
+ 100.0f);
+ case JSGC_PRETENURE_THRESHOLD:
+ return uint32_t(tunables.pretenureThreshold() * 100);
+ case JSGC_PRETENURE_GROUP_THRESHOLD:
+ return tunables.pretenureGroupThreshold();
+ case JSGC_PRETENURE_STRING_THRESHOLD:
+ return uint32_t(tunables.pretenureStringThreshold() * 100);
+ case JSGC_STOP_PRETENURE_STRING_THRESHOLD:
+ return uint32_t(tunables.stopPretenureStringThreshold() * 100);
+ case JSGC_MIN_LAST_DITCH_GC_PERIOD:
+ return tunables.minLastDitchGCPeriod().ToSeconds();
+ case JSGC_ZONE_ALLOC_DELAY_KB:
+ return tunables.zoneAllocDelayBytes() / 1024;
+ case JSGC_MALLOC_THRESHOLD_BASE:
+ return tunables.mallocThresholdBase() / 1024 / 1024;
+ case JSGC_MALLOC_GROWTH_FACTOR:
+ return uint32_t(tunables.mallocGrowthFactor() * 100);
+ case JSGC_CHUNK_BYTES:
+ return ChunkSize;
+ case JSGC_HELPER_THREAD_RATIO:
+ MOZ_ASSERT(helperThreadRatio > 0.0);
+ return uint32_t(helperThreadRatio * 100.0);
+ case JSGC_MAX_HELPER_THREADS:
+ MOZ_ASSERT(maxHelperThreads <= UINT32_MAX);
+ return maxHelperThreads;
+ case JSGC_HELPER_THREAD_COUNT:
+ return helperThreadCount;
+ default:
+ MOZ_CRASH("Unknown parameter key");
+ }
+}
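+// Illustrative usage (not part of this patch): the corresponding read path is
+// the public API, e.g.
+//
+//   uint32_t gcNumber = JS_GetGCParameter(cx, JSGC_NUMBER);
+//
+// which dispatches through the switch above under the GC lock.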
+
+void GCRuntime::setMarkStackLimit(size_t limit, AutoLockGC& lock) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
+ AutoUnlockGC unlock(lock);
+ AutoStopVerifyingBarriers pauseVerification(rt, false);
+ marker.setMaxCapacity(limit);
+}
+
+void GCRuntime::setIncrementalGCEnabled(bool enabled) {
+ incrementalGCEnabled = enabled;
+ marker.setIncrementalGCEnabled(enabled);
+}
+
+void GCRuntime::updateHelperThreadCount() {
+ if (!CanUseExtraThreads()) {
+ // startTask will run the work on the main thread if the count is 1.
+ MOZ_ASSERT(helperThreadCount == 1);
+ return;
+ }
+
+ // The count of helper threads used for GC tasks is process wide. Don't set it
+ // for worker JS runtimes.
+ if (rt->parentRuntime) {
+ helperThreadCount = rt->parentRuntime->gc.helperThreadCount;
+ return;
+ }
+
+ double cpuCount = HelperThreadState().cpuCount;
+ size_t target = size_t(cpuCount * helperThreadRatio.ref());
+ helperThreadCount = mozilla::Clamp(target, size_t(1), maxHelperThreads.ref());
+
+ HelperThreadState().ensureThreadCount(helperThreadCount);
+
+ AutoLockHelperThreadState lock;
+ HelperThreadState().setGCParallelThreadCount(helperThreadCount, lock);
+}
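+// For example (figures purely illustrative): a helperThreadRatio of 0.5 on an
+// 8-core machine gives a target of size_t(8 * 0.5) = 4 helper threads, which
+// is then clamped to the range [1, maxHelperThreads].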
+
+bool GCRuntime::addBlackRootsTracer(JSTraceDataOp traceOp, void* data) {
+ AssertHeapIsIdle();
+ return !!blackRootTracers.ref().append(
+ Callback<JSTraceDataOp>(traceOp, data));
+}
+
+void GCRuntime::removeBlackRootsTracer(JSTraceDataOp traceOp, void* data) {
+ // Can be called from finalizers
+ for (size_t i = 0; i < blackRootTracers.ref().length(); i++) {
+ Callback<JSTraceDataOp>* e = &blackRootTracers.ref()[i];
+ if (e->op == traceOp && e->data == data) {
+ blackRootTracers.ref().erase(e);
+ break;
+ }
+ }
+}
+
+void GCRuntime::setGrayRootsTracer(JSTraceDataOp traceOp, void* data) {
+ AssertHeapIsIdle();
+ grayRootTracer.ref() = {traceOp, data};
+}
+
+void GCRuntime::clearBlackAndGrayRootTracers() {
+ MOZ_ASSERT(rt->isBeingDestroyed());
+ blackRootTracers.ref().clear();
+ setGrayRootsTracer(nullptr, nullptr);
+}
+
+void GCRuntime::setGCCallback(JSGCCallback callback, void* data) {
+ gcCallback.ref() = {callback, data};
+}
+
+void GCRuntime::callGCCallback(JSGCStatus status, JS::GCReason reason) const {
+ const auto& callback = gcCallback.ref();
+ MOZ_ASSERT(callback.op);
+ callback.op(rt->mainContextFromOwnThread(), status, reason, callback.data);
+}
+
+void GCRuntime::setObjectsTenuredCallback(JSObjectsTenuredCallback callback,
+ void* data) {
+ tenuredCallback.ref() = {callback, data};
+}
+
+void GCRuntime::callObjectsTenuredCallback() {
+ JS::AutoSuppressGCAnalysis nogc;
+ const auto& callback = tenuredCallback.ref();
+ if (callback.op) {
+ callback.op(rt->mainContextFromOwnThread(), callback.data);
+ }
+}
+
+bool GCRuntime::addFinalizeCallback(JSFinalizeCallback callback, void* data) {
+ return finalizeCallbacks.ref().append(
+ Callback<JSFinalizeCallback>(callback, data));
+}
+
+template <typename F>
+static void EraseCallback(CallbackVector<F>& vector, F callback) {
+ for (Callback<F>* p = vector.begin(); p != vector.end(); p++) {
+ if (p->op == callback) {
+ vector.erase(p);
+ return;
+ }
+ }
+}
+
+void GCRuntime::removeFinalizeCallback(JSFinalizeCallback callback) {
+ EraseCallback(finalizeCallbacks.ref(), callback);
+}
+
+void GCRuntime::callFinalizeCallbacks(JSFreeOp* fop,
+ JSFinalizeStatus status) const {
+ for (auto& p : finalizeCallbacks.ref()) {
+ p.op(fop, status, p.data);
+ }
+}
+
+void GCRuntime::setHostCleanupFinalizationRegistryCallback(
+ JSHostCleanupFinalizationRegistryCallback callback, void* data) {
+ hostCleanupFinalizationRegistryCallback.ref() = {callback, data};
+}
+
+void GCRuntime::callHostCleanupFinalizationRegistryCallback(
+ JSFunction* doCleanup, GlobalObject* incumbentGlobal) {
+ JS::AutoSuppressGCAnalysis nogc;
+ const auto& callback = hostCleanupFinalizationRegistryCallback.ref();
+ if (callback.op) {
+ callback.op(doCleanup, incumbentGlobal, callback.data);
+ }
+}
+
+bool GCRuntime::addWeakPointerZonesCallback(JSWeakPointerZonesCallback callback,
+ void* data) {
+ return updateWeakPointerZonesCallbacks.ref().append(
+ Callback<JSWeakPointerZonesCallback>(callback, data));
+}
+
+void GCRuntime::removeWeakPointerZonesCallback(
+ JSWeakPointerZonesCallback callback) {
+ EraseCallback(updateWeakPointerZonesCallbacks.ref(), callback);
+}
+
+void GCRuntime::callWeakPointerZonesCallbacks() const {
+ JSContext* cx = rt->mainContextFromOwnThread();
+ for (auto const& p : updateWeakPointerZonesCallbacks.ref()) {
+ p.op(cx, p.data);
+ }
+}
+
+bool GCRuntime::addWeakPointerCompartmentCallback(
+ JSWeakPointerCompartmentCallback callback, void* data) {
+ return updateWeakPointerCompartmentCallbacks.ref().append(
+ Callback<JSWeakPointerCompartmentCallback>(callback, data));
+}
+
+void GCRuntime::removeWeakPointerCompartmentCallback(
+ JSWeakPointerCompartmentCallback callback) {
+ EraseCallback(updateWeakPointerCompartmentCallbacks.ref(), callback);
+}
+
+void GCRuntime::callWeakPointerCompartmentCallbacks(
+ JS::Compartment* comp) const {
+ JSContext* cx = rt->mainContextFromOwnThread();
+ for (auto const& p : updateWeakPointerCompartmentCallbacks.ref()) {
+ p.op(cx, comp, p.data);
+ }
+}
+
+JS::GCSliceCallback GCRuntime::setSliceCallback(JS::GCSliceCallback callback) {
+ return stats().setSliceCallback(callback);
+}
+
+JS::GCNurseryCollectionCallback GCRuntime::setNurseryCollectionCallback(
+ JS::GCNurseryCollectionCallback callback) {
+ return stats().setNurseryCollectionCallback(callback);
+}
+
+JS::DoCycleCollectionCallback GCRuntime::setDoCycleCollectionCallback(
+ JS::DoCycleCollectionCallback callback) {
+ const auto prior = gcDoCycleCollectionCallback.ref();
+ gcDoCycleCollectionCallback.ref() = {callback, nullptr};
+ return prior.op;
+}
+
+void GCRuntime::callDoCycleCollectionCallback(JSContext* cx) {
+ const auto& callback = gcDoCycleCollectionCallback.ref();
+ if (callback.op) {
+ callback.op(cx);
+ }
+}
+
+bool GCRuntime::addRoot(Value* vp, const char* name) {
+ /*
+ * Sometimes Firefox will hold weak references to objects and then convert
+ * them to strong references by calling AddRoot (e.g., via PreserveWrapper,
+ * or ModifyBusyCount in workers). We need a read barrier to cover these
+ * cases.
+ */
+ MOZ_ASSERT(vp);
+ Value value = *vp;
+ if (value.isGCThing()) {
+ ValuePreWriteBarrier(value);
+ }
+
+ return rootsHash.ref().put(vp, name);
+}
+
+void GCRuntime::removeRoot(Value* vp) {
+ rootsHash.ref().remove(vp);
+ notifyRootsRemoved();
+}
+
+extern JS_FRIEND_API bool js::AddRawValueRoot(JSContext* cx, Value* vp,
+ const char* name) {
+ MOZ_ASSERT(vp);
+ MOZ_ASSERT(name);
+ bool ok = cx->runtime()->gc.addRoot(vp, name);
+ if (!ok) {
+ JS_ReportOutOfMemory(cx);
+ }
+ return ok;
+}
+
+extern JS_FRIEND_API void js::RemoveRawValueRoot(JSContext* cx, Value* vp) {
+ cx->runtime()->gc.removeRoot(vp);
+}
+
+/* Compacting GC */
+
+bool js::gc::IsCurrentlyAnimating(const TimeStamp& lastAnimationTime,
+ const TimeStamp& currentTime) {
+ // Assume that we're currently animating if js::NotifyAnimationActivity has
+ // been called in the last second.
+ static const auto oneSecond = TimeDuration::FromSeconds(1);
+ return !lastAnimationTime.IsNull() &&
+ currentTime < (lastAnimationTime + oneSecond);
+}
+
+bool GCRuntime::shouldCompact() {
+ // Compact on shrinking GC if enabled. Skip compacting in incremental GCs
+ // if we are currently animating, unless the user is inactive or we're
+ // responding to memory pressure.
+
+ if (invocationKind != GC_SHRINK || !isCompactingGCEnabled()) {
+ return false;
+ }
+
+ if (initialReason == JS::GCReason::USER_INACTIVE ||
+ initialReason == JS::GCReason::MEM_PRESSURE) {
+ return true;
+ }
+
+ return !isIncremental ||
+ !IsCurrentlyAnimating(rt->lastAnimationTime, TimeStamp::Now());
+}
+
+bool GCRuntime::isCompactingGCEnabled() const {
+ return compactingEnabled &&
+ rt->mainContextFromOwnThread()->compactingDisabledCount == 0;
+}
+
+AutoDisableCompactingGC::AutoDisableCompactingGC(JSContext* cx) : cx(cx) {
+ ++cx->compactingDisabledCount;
+ if (cx->runtime()->gc.isIncrementalGCInProgress() &&
+ cx->runtime()->gc.isCompactingGc()) {
+ FinishGC(cx);
+ }
+}
+
+AutoDisableCompactingGC::~AutoDisableCompactingGC() {
+ MOZ_ASSERT(cx->compactingDisabledCount > 0);
+ --cx->compactingDisabledCount;
+}
+
+bool GCRuntime::canRelocateZone(Zone* zone) const {
+ if (zone->isAtomsZone()) {
+ return false;
+ }
+
+ if (zone->isSelfHostingZone() && selfHostingZoneFrozen) {
+ return false;
+ }
+
+ return true;
+}
+
+#ifdef DEBUG
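+// Print the list to stderr; a '*' marks the current cursor position.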
+void js::gc::ArenaList::dump() {
+ fprintf(stderr, "ArenaList %p:", this);
+ if (cursorp_ == &head_) {
+ fprintf(stderr, " *");
+ }
+ for (Arena* arena = head(); arena; arena = arena->next) {
+ fprintf(stderr, " %p", arena);
+ if (cursorp_ == &arena->next) {
+ fprintf(stderr, " *");
+ }
+ }
+ fprintf(stderr, "\n");
+}
+#endif
+
+Arena* ArenaList::removeRemainingArenas(Arena** arenap) {
+ // This is only ever called to remove arenas that are after the cursor, so
+ // we don't need to update it.
+#ifdef DEBUG
+ for (Arena* arena = *arenap; arena; arena = arena->next) {
+ MOZ_ASSERT(cursorp_ != &arena->next);
+ }
+#endif
+ Arena* remainingArenas = *arenap;
+ *arenap = nullptr;
+ check();
+ return remainingArenas;
+}
+
+static bool ShouldRelocateAllArenas(JS::GCReason reason) {
+ return reason == JS::GCReason::DEBUG_GC;
+}
+
+/*
+ * Choose which arenas to relocate all cells from. Return an arena cursor that
+ * can be passed to removeRemainingArenas().
+ */
+Arena** ArenaList::pickArenasToRelocate(size_t& arenaTotalOut,
+ size_t& relocTotalOut) {
+ // Relocate the greatest number of arenas such that the number of used cells
+ // in relocated arenas is less than or equal to the number of free cells in
+ // unrelocated arenas. In other words we only relocate cells we can move
+  // into existing arenas, and we choose the least full arenas to relocate.
+ //
+ // This is made easier by the fact that the arena list has been sorted in
+ // descending order of number of used cells, so we will always relocate a
+ // tail of the arena list. All we need to do is find the point at which to
+ // start relocating.
+
+ check();
+
+ if (isCursorAtEnd()) {
+ return nullptr;
+ }
+
+ Arena** arenap = cursorp_; // Next arena to consider for relocation.
+ size_t previousFreeCells = 0; // Count of free cells before arenap.
+ size_t followingUsedCells = 0; // Count of used cells after arenap.
+ size_t fullArenaCount = 0; // Number of full arenas (not relocated).
+ size_t nonFullArenaCount =
+ 0; // Number of non-full arenas (considered for relocation).
+ size_t arenaIndex = 0; // Index of the next arena to consider.
+
+ for (Arena* arena = head_; arena != *cursorp_; arena = arena->next) {
+ fullArenaCount++;
+ }
+
+ for (Arena* arena = *cursorp_; arena; arena = arena->next) {
+ followingUsedCells += arena->countUsedCells();
+ nonFullArenaCount++;
+ }
+
+ mozilla::DebugOnly<size_t> lastFreeCells(0);
+ size_t cellsPerArena = Arena::thingsPerArena((*arenap)->getAllocKind());
+
+ while (*arenap) {
+ Arena* arena = *arenap;
+ if (followingUsedCells <= previousFreeCells) {
+ break;
+ }
+
+ size_t freeCells = arena->countFreeCells();
+ size_t usedCells = cellsPerArena - freeCells;
+ followingUsedCells -= usedCells;
+#ifdef DEBUG
+ MOZ_ASSERT(freeCells >= lastFreeCells);
+ lastFreeCells = freeCells;
+#endif
+ previousFreeCells += freeCells;
+ arenap = &arena->next;
+ arenaIndex++;
+ }
+
+ size_t relocCount = nonFullArenaCount - arenaIndex;
+ MOZ_ASSERT(relocCount < nonFullArenaCount);
+ MOZ_ASSERT((relocCount == 0) == (!*arenap));
+ arenaTotalOut += fullArenaCount + nonFullArenaCount;
+ relocTotalOut += relocCount;
+
+ return arenap;
+}
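+// Worked example (illustrative numbers): with 10 cells per arena and non-full
+// arenas holding [9, 7, 3, 1] used cells (i.e. [1, 3, 7, 9] free), the loop
+// above stops after the second arena, when the 4 used cells that remain no
+// longer exceed the 4 free cells seen so far. The last two arenas are then
+// relocated into the free space of the first two.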
+
+#ifdef DEBUG
+inline bool PtrIsInRange(const void* ptr, const void* start, size_t length) {
+ return uintptr_t(ptr) - uintptr_t(start) < length;
+}
+#endif
+
+static void RelocateCell(Zone* zone, TenuredCell* src, AllocKind thingKind,
+ size_t thingSize) {
+ JS::AutoSuppressGCAnalysis nogc(TlsContext.get());
+
+ // Allocate a new cell.
+ MOZ_ASSERT(zone == src->zone());
+ TenuredCell* dst = AllocateCellInGC(zone, thingKind);
+
+ // Copy source cell contents to destination.
+ memcpy(dst, src, thingSize);
+
+ // Move any uid attached to the object.
+ src->zone()->transferUniqueId(dst, src);
+
+ if (IsObjectAllocKind(thingKind)) {
+ auto* srcObj = static_cast<JSObject*>(static_cast<Cell*>(src));
+ auto* dstObj = static_cast<JSObject*>(static_cast<Cell*>(dst));
+
+ if (srcObj->isNative()) {
+ NativeObject* srcNative = &srcObj->as<NativeObject>();
+ NativeObject* dstNative = &dstObj->as<NativeObject>();
+
+ // Fixup the pointer to inline object elements if necessary.
+ if (srcNative->hasFixedElements()) {
+ uint32_t numShifted =
+ srcNative->getElementsHeader()->numShiftedElements();
+ dstNative->setFixedElements(numShifted);
+ }
+ } else if (srcObj->is<ProxyObject>()) {
+ if (srcObj->as<ProxyObject>().usingInlineValueArray()) {
+ dstObj->as<ProxyObject>().setInlineValueArray();
+ }
+ }
+
+ // Call object moved hook if present.
+ if (JSObjectMovedOp op = srcObj->getClass()->extObjectMovedOp()) {
+ op(dstObj, srcObj);
+ }
+
+ MOZ_ASSERT_IF(
+ dstObj->isNative(),
+ !PtrIsInRange(
+ (const Value*)dstObj->as<NativeObject>().getDenseElements(), src,
+ thingSize));
+ }
+
+ // Copy the mark bits.
+ dst->copyMarkBitsFrom(src);
+
+ // Poison the source cell contents except for the forwarding flag and pointer
+  // which will be stored in the first word. We can't do this for native objects
+ // with fixed elements because this would overwrite the element flags and
+ // these are needed when updating COW elements referred to by other objects.
+#ifdef DEBUG
+ JSObject* srcObj = IsObjectAllocKind(thingKind)
+ ? static_cast<JSObject*>(static_cast<Cell*>(src))
+ : nullptr;
+ if (!srcObj || !srcObj->isNative() ||
+ !srcObj->as<NativeObject>().hasFixedElements()) {
+ AlwaysPoison(reinterpret_cast<uint8_t*>(src) + sizeof(uintptr_t),
+ JS_MOVED_TENURED_PATTERN, thingSize - sizeof(uintptr_t),
+ MemCheckKind::MakeNoAccess);
+ }
+#endif
+
+ // Mark source cell as forwarded and leave a pointer to the destination.
+ RelocationOverlay::forwardCell(src, dst);
+}
+
+static void RelocateArena(Arena* arena, SliceBudget& sliceBudget) {
+ MOZ_ASSERT(arena->allocated());
+ MOZ_ASSERT(!arena->onDelayedMarkingList());
+ MOZ_ASSERT(arena->bufferedCells()->isEmpty());
+
+ Zone* zone = arena->zone;
+
+ AllocKind thingKind = arena->getAllocKind();
+ size_t thingSize = arena->getThingSize();
+
+ for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
+ RelocateCell(zone, cell, thingKind, thingSize);
+ sliceBudget.step();
+ }
+
+#ifdef DEBUG
+ for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
+ TenuredCell* src = cell;
+ MOZ_ASSERT(src->isForwarded());
+ TenuredCell* dest = Forwarded(src);
+ MOZ_ASSERT(src->isMarkedBlack() == dest->isMarkedBlack());
+ MOZ_ASSERT(src->isMarkedGray() == dest->isMarkedGray());
+ }
+#endif
+}
+
+#ifdef DEBUG
+static inline bool CanProtectArenas() {
+ // On some systems the page size is larger than the size of an arena so we
+ // can't change the mapping permissions per arena.
+ return SystemPageSize() <= ArenaSize;
+}
+#endif
+
+static inline bool ShouldProtectRelocatedArenas(JS::GCReason reason) {
+ // For zeal mode collections we don't release the relocated arenas
+ // immediately. Instead we protect them and keep them around until the next
+ // collection so we can catch any stray accesses to them.
+#ifdef DEBUG
+ return reason == JS::GCReason::DEBUG_GC && CanProtectArenas();
+#else
+ return false;
+#endif
+}
+
+/*
+ * Relocate all arenas identified by pickArenasToRelocate: for each arena,
+ * relocate each cell within it, then add it to a list of relocated arenas.
+ */
+Arena* ArenaList::relocateArenas(Arena* toRelocate, Arena* relocated,
+ SliceBudget& sliceBudget,
+ gcstats::Statistics& stats) {
+ check();
+
+ while (Arena* arena = toRelocate) {
+ toRelocate = arena->next;
+ RelocateArena(arena, sliceBudget);
+ // Prepend to list of relocated arenas
+ arena->next = relocated;
+ relocated = arena;
+ stats.count(gcstats::COUNT_ARENA_RELOCATED);
+ }
+
+ check();
+
+ return relocated;
+}
+
+// Skip compacting zones unless we can free a certain proportion of their GC
+// heap memory.
+static const float MIN_ZONE_RECLAIM_PERCENT = 2.0;
+
+static bool ShouldRelocateZone(size_t arenaCount, size_t relocCount,
+ JS::GCReason reason) {
+ if (relocCount == 0) {
+ return false;
+ }
+
+ if (IsOOMReason(reason)) {
+ return true;
+ }
+
+ return (relocCount * 100.0f) / arenaCount >= MIN_ZONE_RECLAIM_PERCENT;
+}
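+// For example, with MIN_ZONE_RECLAIM_PERCENT at 2% a zone with 1000 arenas is
+// only compacted if at least 20 of them were picked for relocation, unless the
+// GC was triggered by an OOM reason.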
+
+static AllocKinds CompactingAllocKinds() {
+ AllocKinds result;
+ for (AllocKind kind : AllAllocKinds()) {
+ if (IsCompactingKind(kind)) {
+ result += kind;
+ }
+ }
+ return result;
+}
+
+bool ArenaLists::relocateArenas(Arena*& relocatedListOut, JS::GCReason reason,
+ SliceBudget& sliceBudget,
+ gcstats::Statistics& stats) {
+ // This is only called from the main thread while we are doing a GC, so
+ // there is no need to lock.
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
+ MOZ_ASSERT(runtime()->gc.isHeapCompacting());
+ MOZ_ASSERT(!runtime()->gc.isBackgroundSweeping());
+
+ // Relocate all compatible kinds
+ AllocKinds allocKindsToRelocate = CompactingAllocKinds();
+
+ // Clear all the free lists.
+ clearFreeLists();
+
+ if (ShouldRelocateAllArenas(reason)) {
+ zone_->prepareForCompacting();
+ for (auto kind : allocKindsToRelocate) {
+ ArenaList& al = arenaList(kind);
+ Arena* allArenas = al.head();
+ al.clear();
+ relocatedListOut =
+ al.relocateArenas(allArenas, relocatedListOut, sliceBudget, stats);
+ }
+ } else {
+ size_t arenaCount = 0;
+ size_t relocCount = 0;
+ AllAllocKindArray<Arena**> toRelocate;
+
+ for (auto kind : allocKindsToRelocate) {
+ toRelocate[kind] =
+ arenaList(kind).pickArenasToRelocate(arenaCount, relocCount);
+ }
+
+ if (!ShouldRelocateZone(arenaCount, relocCount, reason)) {
+ return false;
+ }
+
+ zone_->prepareForCompacting();
+ for (auto kind : allocKindsToRelocate) {
+ if (toRelocate[kind]) {
+ ArenaList& al = arenaList(kind);
+ Arena* arenas = al.removeRemainingArenas(toRelocate[kind]);
+ relocatedListOut =
+ al.relocateArenas(arenas, relocatedListOut, sliceBudget, stats);
+ }
+ }
+ }
+
+ return true;
+}
+
+bool GCRuntime::relocateArenas(Zone* zone, JS::GCReason reason,
+ Arena*& relocatedListOut,
+ SliceBudget& sliceBudget) {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT_MOVE);
+
+ MOZ_ASSERT(!zone->isPreservingCode());
+ MOZ_ASSERT(canRelocateZone(zone));
+
+ js::CancelOffThreadIonCompile(rt, JS::Zone::Compact);
+
+ if (!zone->arenas.relocateArenas(relocatedListOut, reason, sliceBudget,
+ stats())) {
+ return false;
+ }
+
+#ifdef DEBUG
+ // Check that we did as much compaction as we should have. There
+ // should always be less than one arena's worth of free cells.
+ for (auto kind : CompactingAllocKinds()) {
+ ArenaList& al = zone->arenas.arenaList(kind);
+ size_t freeCells = 0;
+ for (Arena* arena = al.arenaAfterCursor(); arena; arena = arena->next) {
+ freeCells += arena->countFreeCells();
+ }
+ MOZ_ASSERT(freeCells < Arena::thingsPerArena(kind));
+ }
+#endif
+
+ return true;
+}
+
+template <typename T>
+inline T* MovingTracer::onEdge(T* thing) {
+ if (thing->runtimeFromAnyThread() == runtime() && IsForwarded(thing)) {
+ thing = Forwarded(thing);
+ }
+
+ return thing;
+}
+
+JSObject* MovingTracer::onObjectEdge(JSObject* obj) { return onEdge(obj); }
+Shape* MovingTracer::onShapeEdge(Shape* shape) { return onEdge(shape); }
+JSString* MovingTracer::onStringEdge(JSString* string) {
+ return onEdge(string);
+}
+js::BaseScript* MovingTracer::onScriptEdge(js::BaseScript* script) {
+ return onEdge(script);
+}
+BaseShape* MovingTracer::onBaseShapeEdge(BaseShape* base) {
+ return onEdge(base);
+}
+Scope* MovingTracer::onScopeEdge(Scope* scope) { return onEdge(scope); }
+RegExpShared* MovingTracer::onRegExpSharedEdge(RegExpShared* shared) {
+ return onEdge(shared);
+}
+BigInt* MovingTracer::onBigIntEdge(BigInt* bi) { return onEdge(bi); }
+ObjectGroup* MovingTracer::onObjectGroupEdge(ObjectGroup* group) {
+ return onEdge(group);
+}
+JS::Symbol* MovingTracer::onSymbolEdge(JS::Symbol* sym) {
+ MOZ_ASSERT(!sym->isForwarded());
+ return sym;
+}
+jit::JitCode* MovingTracer::onJitCodeEdge(jit::JitCode* jit) {
+ MOZ_ASSERT(!jit->isForwarded());
+ return jit;
+}
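+// The Symbol and JitCode overloads above only assert: compacting GC never
+// relocates cells of these kinds, so they should never be forwarded.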
+
+void Zone::prepareForCompacting() {
+ JSFreeOp* fop = runtimeFromMainThread()->defaultFreeOp();
+ discardJitCode(fop);
+}
+
+void GCRuntime::sweepZoneAfterCompacting(MovingTracer* trc, Zone* zone) {
+ MOZ_ASSERT(zone->isCollecting());
+ sweepFinalizationRegistries(zone);
+ zone->weakRefMap().sweep(&storeBuffer());
+
+ {
+ zone->sweepWeakMaps();
+ for (auto* cache : zone->weakCaches()) {
+ cache->sweep(nullptr);
+ }
+ }
+
+ if (jit::JitZone* jitZone = zone->jitZone()) {
+ jitZone->traceWeak(trc);
+ }
+
+ for (RealmsInZoneIter r(zone); !r.done(); r.next()) {
+ r->traceWeakRegExps(trc);
+ r->traceWeakSavedStacks(trc);
+ r->tracekWeakVarNames(trc);
+ r->traceWeakObjects(trc);
+ r->traceWeakSelfHostingScriptSource(trc);
+ r->sweepDebugEnvironments();
+ r->traceWeakEdgesInJitRealm(trc);
+ r->traceWeakObjectRealm(trc);
+ r->traceWeakTemplateObjects(trc);
+ }
+}
+
+template <typename T>
+static inline void UpdateCellPointers(MovingTracer* trc, T* cell) {
+ // We only update unmoved GC things or the new copy of moved GC things, never
+  // the old copy. Updating the old copy could clear the forwarded flag, which
+  // could lead to pointers to the old copy not being updated.
+ MOZ_ASSERT(!cell->isForwarded());
+
+ cell->fixupAfterMovingGC();
+ cell->traceChildren(trc);
+}
+
+template <typename T>
+static void UpdateArenaPointersTyped(MovingTracer* trc, Arena* arena) {
+ for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
+ UpdateCellPointers(trc, cell.as<T>());
+ }
+}
+
+static bool CanUpdateKindInBackground(AllocKind kind) {
+ // We try to update as many GC things in parallel as we can, but there are
+ // kinds for which this might not be safe:
+ // - we assume JSObjects that are foreground finalized are not safe to
+ // update in parallel
+ // - updating a shape touches child shapes in fixupShapeTreeAfterMovingGC()
+ return js::gc::IsBackgroundFinalized(kind) && !IsShapeAllocKind(kind) &&
+ kind != AllocKind::BASE_SHAPE;
+}
+
+/*
+ * Update the internal pointers for all cells in an arena.
+ */
+static void UpdateArenaPointers(MovingTracer* trc, Arena* arena) {
+ AllocKind kind = arena->getAllocKind();
+
+ MOZ_ASSERT_IF(!CanUpdateKindInBackground(kind),
+ CurrentThreadCanAccessRuntime(trc->runtime()));
+
+ switch (kind) {
+#define EXPAND_CASE(allocKind, traceKind, type, sizedType, bgFinal, nursery, \
+ compact) \
+ case AllocKind::allocKind: \
+ UpdateArenaPointersTyped<type>(trc, arena); \
+ return;
+ FOR_EACH_ALLOCKIND(EXPAND_CASE)
+#undef EXPAND_CASE
+
+ default:
+ MOZ_CRASH("Invalid alloc kind for UpdateArenaPointers");
+ }
+}
+
+struct ArenaListSegment {
+ Arena* begin;
+ Arena* end;
+};
+
+/*
+ * Update the internal pointers for all arenas in a segment of an arena list.
+ *
+ * Returns the number of steps to count against the slice budget.
+ */
+static size_t UpdateArenaListSegmentPointers(GCRuntime* gc,
+ const ArenaListSegment& arenas) {
+ MOZ_ASSERT(arenas.begin);
+ MovingTracer trc(gc->rt);
+ size_t count = 0;
+ for (Arena* arena = arenas.begin; arena != arenas.end; arena = arena->next) {
+ UpdateArenaPointers(&trc, arena);
+ count++;
+ }
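+  // Charge 256 slice-budget steps per updated arena; the multiplier is a
+  // heuristic weighting of the per-arena work.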
+ return count * 256;
+}
+
+class ArenasToUpdate {
+ // Maximum number of arenas to update in one block.
+#ifdef DEBUG
+ static const unsigned MaxArenasToProcess = 16;
+#else
+ static const unsigned MaxArenasToProcess = 256;
+#endif
+
+ public:
+ explicit ArenasToUpdate(Zone* zone);
+ ArenasToUpdate(Zone* zone, const AllocKinds& kinds);
+
+ bool done() const { return !segmentBegin; }
+
+ ArenaListSegment get() const {
+ MOZ_ASSERT(!done());
+ return {segmentBegin, segmentEnd};
+ }
+
+ void next();
+
+ private:
+ Maybe<AllocKinds> kinds; // Selects which thing kinds to update.
+ Zone* zone; // Zone to process.
+ AllocKind kind = AllocKind::FIRST; // Current alloc kind to process.
+ Arena* segmentBegin = nullptr;
+ Arena* segmentEnd = nullptr;
+
+ static AllocKind nextAllocKind(AllocKind i) {
+ return AllocKind(uint8_t(i) + 1);
+ }
+
+ void settle();
+ void findSegmentEnd();
+};
+
+ArenasToUpdate::ArenasToUpdate(Zone* zone) : zone(zone) { settle(); }
+
+ArenasToUpdate::ArenasToUpdate(Zone* zone, const AllocKinds& kinds)
+ : kinds(Some(kinds)), zone(zone) {
+ settle();
+}
+
+void ArenasToUpdate::settle() {
+  // Called when we have set |kind| to a new kind. Sets |segmentBegin| to the
+  // first arena of the next kind that has arenas to update, or leaves it null
+  // if there are no more arenas to update.
+
+ MOZ_ASSERT(!segmentBegin);
+
+ for (; kind < AllocKind::LIMIT; kind = nextAllocKind(kind)) {
+ if (kinds && !kinds.ref().contains(kind)) {
+ continue;
+ }
+
+ Arena* arena = zone->arenas.getFirstArena(kind);
+ if (arena) {
+ segmentBegin = arena;
+ findSegmentEnd();
+ break;
+ }
+ }
+}
+
+void ArenasToUpdate::findSegmentEnd() {
+ // Take up to MaxArenasToProcess arenas from the list starting at
+ // |segmentBegin| and set |segmentEnd|.
+ Arena* arena = segmentBegin;
+ for (size_t i = 0; arena && i < MaxArenasToProcess; i++) {
+ arena = arena->next;
+ }
+ segmentEnd = arena;
+}
+
+void ArenasToUpdate::next() {
+ MOZ_ASSERT(!done());
+
+ segmentBegin = segmentEnd;
+ if (segmentBegin) {
+ findSegmentEnd();
+ return;
+ }
+
+ kind = nextAllocKind(kind);
+ settle();
+}
+
+static AllocKinds ForegroundUpdateKinds(AllocKinds kinds) {
+ AllocKinds result;
+ for (AllocKind kind : kinds) {
+ if (!CanUpdateKindInBackground(kind)) {
+ result += kind;
+ }
+ }
+ return result;
+}
+
+void GCRuntime::updateTypeDescrObjects(MovingTracer* trc, Zone* zone) {
+ // We need to update each type descriptor object and any objects stored in
+ // its reserved slots, since some of these contain array objects that also
+ // need to be updated. Do not update any non-reserved slots, since they might
+ // point back to unprocessed descriptor objects.
+
+ zone->typeDescrObjects().sweep(nullptr);
+
+ for (auto r = zone->typeDescrObjects().all(); !r.empty(); r.popFront()) {
+ MOZ_ASSERT(MaybeForwardedObjectClass(r.front())->isNative());
+ NativeObject* obj = static_cast<NativeObject*>(r.front());
+ UpdateCellPointers(trc, obj);
+ MOZ_ASSERT(JSCLASS_RESERVED_SLOTS(MaybeForwardedObjectClass(obj)) ==
+ TypeDescr::SlotCount);
+ for (size_t i = 0; i < TypeDescr::SlotCount; i++) {
+ Value value = obj->getSlot(i);
+ if (value.isObject()) {
+ UpdateCellPointers(trc, &value.toObject());
+ }
+ }
+ }
+}
+
+void GCRuntime::updateCellPointers(Zone* zone, AllocKinds kinds) {
+ AllocKinds fgKinds = ForegroundUpdateKinds(kinds);
+ AllocKinds bgKinds = kinds - fgKinds;
+
+ ArenasToUpdate fgArenas(zone, fgKinds);
+ ArenasToUpdate bgArenas(zone, bgKinds);
+
+ AutoLockHelperThreadState lock;
+
+ AutoRunParallelWork bgTasks(this, UpdateArenaListSegmentPointers,
+ gcstats::PhaseKind::COMPACT_UPDATE_CELLS,
+ bgArenas, SliceBudget::unlimited(), lock);
+
+ AutoUnlockHelperThreadState unlock(lock);
+
+ for (; !fgArenas.done(); fgArenas.next()) {
+ UpdateArenaListSegmentPointers(this, fgArenas.get());
+ }
+}
+
+// After cells have been relocated any pointers to a cell's old locations must
+// be updated to point to the new location. This happens by iterating through
+// all cells in heap and tracing their children (non-recursively) to update
+// them.
+//
+// This is complicated by the fact that updating a GC thing sometimes depends on
+// making use of other GC things. After a moving GC these things may not be in
+// a valid state since they may contain pointers which have not been updated
+// yet.
+//
+// The main dependencies are:
+//
+// - Updating a JSObject makes use of its shape
+// - Updating a typed object makes use of its type descriptor object
+//
+// This means we require at least three phases for update:
+//
+// 1) shapes
+// 2) typed object type descriptor objects
+// 3) all other objects
+//
+// Also, there can be data races calling IsForwarded() on the new location of a
+// cell whose first word is being updated in parallel on another thread. This
+// easiest way to avoid this is to not store a GC pointer in the first word of a
+// cell. Otherwise this can be avoided by updating different kinds of cell in
+// different phases.
+//
+// Since we want to minimize the number of phases, arrange kinds into three
+// arbitrary phases.
+
+static constexpr AllocKinds UpdatePhaseOne{
+ AllocKind::SCRIPT, AllocKind::BASE_SHAPE, AllocKind::SHAPE,
+ AllocKind::ACCESSOR_SHAPE, AllocKind::OBJECT_GROUP, AllocKind::STRING,
+ AllocKind::JITCODE, AllocKind::REGEXP_SHARED, AllocKind::SCOPE};
+
+// UpdatePhaseTwo is typed object descriptor objects.
+
+static constexpr AllocKinds UpdatePhaseThree{AllocKind::FUNCTION,
+ AllocKind::FUNCTION_EXTENDED,
+ AllocKind::OBJECT0,
+ AllocKind::OBJECT0_BACKGROUND,
+ AllocKind::OBJECT2,
+ AllocKind::OBJECT2_BACKGROUND,
+ AllocKind::ARRAYBUFFER4,
+ AllocKind::OBJECT4,
+ AllocKind::OBJECT4_BACKGROUND,
+ AllocKind::ARRAYBUFFER8,
+ AllocKind::OBJECT8,
+ AllocKind::OBJECT8_BACKGROUND,
+ AllocKind::ARRAYBUFFER12,
+ AllocKind::OBJECT12,
+ AllocKind::OBJECT12_BACKGROUND,
+ AllocKind::ARRAYBUFFER16,
+ AllocKind::OBJECT16,
+ AllocKind::OBJECT16_BACKGROUND};
+
+void GCRuntime::updateAllCellPointers(MovingTracer* trc, Zone* zone) {
+ updateCellPointers(zone, UpdatePhaseOne);
+
+ // UpdatePhaseTwo: Update TypeDescrs before all other objects as typed
+ // objects access these objects when we trace them.
+ updateTypeDescrObjects(trc, zone);
+
+ updateCellPointers(zone, UpdatePhaseThree);
+}
+
+/*
+ * Update pointers to relocated cells in a single zone by doing a traversal of
+ * that zone's arenas and calling per-zone sweep hooks.
+ *
+ * The latter is necessary to update weak references which are not marked as
+ * part of the traversal.
+ */
+void GCRuntime::updateZonePointersToRelocatedCells(Zone* zone) {
+ MOZ_ASSERT(!rt->isBeingDestroyed());
+ MOZ_ASSERT(zone->isGCCompacting());
+
+ AutoTouchingGrayThings tgt;
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT_UPDATE);
+ MovingTracer trc(rt);
+
+ zone->fixupAfterMovingGC();
+ zone->fixupScriptMapsAfterMovingGC(&trc);
+
+ // Fixup compartment global pointers as these get accessed during marking.
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+ comp->fixupAfterMovingGC(&trc);
+ }
+
+ zone->externalStringCache().purge();
+ zone->functionToStringCache().purge();
+ rt->caches().stringToAtomCache.purge();
+
+ // Iterate through all cells that can contain relocatable pointers to update
+ // them. Since updating each cell is independent we try to parallelize this
+ // as much as possible.
+ updateAllCellPointers(&trc, zone);
+
+ // Mark roots to update them.
+ {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
+
+ WeakMapBase::traceZone(zone, &trc);
+ }
+
+ // Sweep everything to fix up weak pointers.
+ sweepZoneAfterCompacting(&trc, zone);
+
+ // Call callbacks to get the rest of the system to fixup other untraced
+ // pointers.
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+ callWeakPointerCompartmentCallbacks(comp);
+ }
+}
+
+/*
+ * Update runtime-wide pointers to relocated cells.
+ */
+void GCRuntime::updateRuntimePointersToRelocatedCells(AutoGCSession& session) {
+ MOZ_ASSERT(!rt->isBeingDestroyed());
+
+ gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::COMPACT_UPDATE);
+ MovingTracer trc(rt);
+
+ Zone::fixupAllCrossCompartmentWrappersAfterMovingGC(&trc);
+
+ rt->geckoProfiler().fixupStringsMapAfterMovingGC();
+
+ // Mark roots to update them.
+
+ traceRuntimeForMajorGC(&trc, session);
+
+ {
+ gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::MARK_ROOTS);
+ DebugAPI::traceAllForMovingGC(&trc);
+ DebugAPI::traceCrossCompartmentEdges(&trc);
+
+ // Mark all gray roots. We call the trace callback to get the current set.
+ traceEmbeddingGrayRoots(&trc);
+ Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
+ &trc, Compartment::GrayEdges);
+ }
+
+ // Sweep everything to fix up weak pointers.
+ DebugAPI::sweepAll(rt->defaultFreeOp());
+ jit::JitRuntime::TraceWeakJitcodeGlobalTable(rt, &trc);
+ for (JS::detail::WeakCacheBase* cache : rt->weakCaches()) {
+ cache->sweep(nullptr);
+ }
+
+ // Type inference may put more blocks here to free.
+ {
+ AutoLockHelperThreadState lock;
+ lifoBlocksToFree.ref().freeAll();
+ }
+
+ // Call callbacks to get the rest of the system to fixup other untraced
+ // pointers.
+ callWeakPointerZonesCallbacks();
+}
+
+void GCRuntime::clearRelocatedArenas(Arena* arenaList, JS::GCReason reason) {
+ AutoLockGC lock(this);
+ clearRelocatedArenasWithoutUnlocking(arenaList, reason, lock);
+}
+
+void GCRuntime::clearRelocatedArenasWithoutUnlocking(Arena* arenaList,
+ JS::GCReason reason,
+ const AutoLockGC& lock) {
+ // Clear the relocated arenas, now containing only forwarding pointers
+ while (arenaList) {
+ Arena* arena = arenaList;
+ arenaList = arenaList->next;
+
+ // Clear the mark bits
+ arena->unmarkAll();
+
+ // Mark arena as empty
+ arena->setAsFullyUnused();
+
+#ifdef DEBUG
+    // The cell contents were partially marked as no-access in RelocateCell, so
+    // mark the region as undefined again before we poison it.
+ SetMemCheckKind(reinterpret_cast<void*>(arena->thingsStart()),
+ arena->getThingsSpan(), MemCheckKind::MakeUndefined);
+#endif
+
+ AlwaysPoison(reinterpret_cast<void*>(arena->thingsStart()),
+ JS_MOVED_TENURED_PATTERN, arena->getThingsSpan(),
+ MemCheckKind::MakeNoAccess);
+
+ // Don't count arenas as being freed by the GC if we purposely moved
+ // everything to new arenas, as that will already have allocated a similar
+ // number of arenas. This only happens for collections triggered by GC zeal.
+ bool allArenasRelocated = ShouldRelocateAllArenas(reason);
+ arena->zone->gcHeapSize.removeBytes(ArenaSize, !allArenasRelocated);
+
+ // Release the arena but don't return it to the chunk yet.
+ arena->release(lock);
+ }
+}
+
+void GCRuntime::protectAndHoldArenas(Arena* arenaList) {
+ for (Arena* arena = arenaList; arena;) {
+ MOZ_ASSERT(!arena->allocated());
+ Arena* next = arena->next;
+ if (!next) {
+ // Prepend to hold list before we protect the memory.
+ arena->next = relocatedArenasToRelease;
+ relocatedArenasToRelease = arenaList;
+ }
+ ProtectPages(arena, ArenaSize);
+ arena = next;
+ }
+}
+
+void GCRuntime::unprotectHeldRelocatedArenas() {
+ for (Arena* arena = relocatedArenasToRelease; arena; arena = arena->next) {
+ UnprotectPages(arena, ArenaSize);
+ MOZ_ASSERT(!arena->allocated());
+ }
+}
+
+void GCRuntime::releaseRelocatedArenas(Arena* arenaList) {
+ AutoLockGC lock(this);
+ releaseRelocatedArenasWithoutUnlocking(arenaList, lock);
+}
+
+void GCRuntime::releaseRelocatedArenasWithoutUnlocking(Arena* arenaList,
+ const AutoLockGC& lock) {
+ // Release relocated arenas previously cleared with clearRelocatedArenas().
+ while (arenaList) {
+ Arena* arena = arenaList;
+ arenaList = arenaList->next;
+
+ // We already updated the memory accounting so just call
+ // Chunk::releaseArena.
+ arena->chunk()->releaseArena(this, arena, lock);
+ }
+}
+
+// In debug mode we don't always release relocated arenas straight away.
+// Sometimes we protect them instead and hold onto them until the next GC sweep
+// phase to catch any pointers to them that didn't get forwarded.
+
+void GCRuntime::releaseHeldRelocatedArenas() {
+#ifdef DEBUG
+ unprotectHeldRelocatedArenas();
+ Arena* arenas = relocatedArenasToRelease;
+ relocatedArenasToRelease = nullptr;
+ releaseRelocatedArenas(arenas);
+#endif
+}
+
+void GCRuntime::releaseHeldRelocatedArenasWithoutUnlocking(
+ const AutoLockGC& lock) {
+#ifdef DEBUG
+ unprotectHeldRelocatedArenas();
+ releaseRelocatedArenasWithoutUnlocking(relocatedArenasToRelease, lock);
+ relocatedArenasToRelease = nullptr;
+#endif
+}
+
+FreeLists::FreeLists() {
+ for (auto i : AllAllocKinds()) {
+ freeLists_[i] = &emptySentinel;
+ }
+}
+
+ArenaLists::ArenaLists(Zone* zone)
+ : zone_(zone),
+ freeLists_(zone),
+ arenaLists_(zone),
+ newArenasInMarkPhase_(zone),
+ arenasToSweep_(),
+ incrementalSweptArenaKind(zone, AllocKind::LIMIT),
+ incrementalSweptArenas(zone),
+ gcShapeArenasToUpdate(zone, nullptr),
+ gcAccessorShapeArenasToUpdate(zone, nullptr),
+ savedEmptyArenas(zone, nullptr) {
+ for (auto i : AllAllocKinds()) {
+ concurrentUse(i) = ConcurrentUse::None;
+ arenasToSweep(i) = nullptr;
+ }
+}
+
+void ReleaseArenas(JSRuntime* rt, Arena* arena, const AutoLockGC& lock) {
+ Arena* next;
+ for (; arena; arena = next) {
+ next = arena->next;
+ rt->gc.releaseArena(arena, lock);
+ }
+}
+
+void ReleaseArenaList(JSRuntime* rt, ArenaList& arenaList,
+ const AutoLockGC& lock) {
+ ReleaseArenas(rt, arenaList.head(), lock);
+ arenaList.clear();
+}
+
+ArenaLists::~ArenaLists() {
+ AutoLockGC lock(runtime());
+
+ for (auto i : AllAllocKinds()) {
+ /*
+ * We can only call this during the shutdown after the last GC when
+ * the background finalization is disabled.
+ */
+ MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
+ ReleaseArenaList(runtime(), arenaList(i), lock);
+ }
+ ReleaseArenaList(runtime(), incrementalSweptArenas.ref(), lock);
+
+ ReleaseArenas(runtime(), savedEmptyArenas, lock);
+}
+
+void ArenaLists::queueForForegroundSweep(JSFreeOp* fop,
+ const FinalizePhase& phase) {
+ gcstats::AutoPhase ap(fop->runtime()->gc.stats(), phase.statsPhase);
+ for (auto kind : phase.kinds) {
+ queueForForegroundSweep(kind);
+ }
+}
+
+void ArenaLists::queueForForegroundSweep(AllocKind thingKind) {
+ MOZ_ASSERT(!IsBackgroundFinalized(thingKind));
+ MOZ_ASSERT(concurrentUse(thingKind) == ConcurrentUse::None);
+ MOZ_ASSERT(!arenasToSweep(thingKind));
+
+ arenasToSweep(thingKind) = arenaList(thingKind).head();
+ arenaList(thingKind).clear();
+}
+
+void ArenaLists::queueForBackgroundSweep(JSFreeOp* fop,
+ const FinalizePhase& phase) {
+ gcstats::AutoPhase ap(fop->runtime()->gc.stats(), phase.statsPhase);
+ for (auto kind : phase.kinds) {
+ queueForBackgroundSweep(kind);
+ }
+}
+
+inline void ArenaLists::queueForBackgroundSweep(AllocKind thingKind) {
+ MOZ_ASSERT(IsBackgroundFinalized(thingKind));
+ MOZ_ASSERT(concurrentUse(thingKind) == ConcurrentUse::None);
+
+ ArenaList* al = &arenaList(thingKind);
+ arenasToSweep(thingKind) = al->head();
+ arenaList(thingKind).clear();
+
+ if (arenasToSweep(thingKind)) {
+ concurrentUse(thingKind) = ConcurrentUse::BackgroundFinalize;
+ } else {
+ arenaList(thingKind) = std::move(newArenasInMarkPhase(thingKind));
+ }
+}
+
+/*static*/
+void ArenaLists::backgroundFinalize(JSFreeOp* fop, Arena* listHead,
+ Arena** empty) {
+ MOZ_ASSERT(listHead);
+ MOZ_ASSERT(empty);
+
+ AllocKind thingKind = listHead->getAllocKind();
+ Zone* zone = listHead->zone;
+
+ size_t thingsPerArena = Arena::thingsPerArena(thingKind);
+ SortedArenaList finalizedSorted(thingsPerArena);
+
+ auto unlimited = SliceBudget::unlimited();
+ FinalizeArenas(fop, &listHead, finalizedSorted, thingKind, unlimited);
+ MOZ_ASSERT(!listHead);
+
+ finalizedSorted.extractEmpty(empty);
+
+ // When arenas are queued for background finalization, all arenas are moved to
+ // arenasToSweep, leaving the arena list empty. However, new arenas may be
+ // allocated before background finalization finishes; now that finalization is
+ // complete, we want to merge these lists back together.
+ ArenaLists* lists = &zone->arenas;
+ ArenaList& al = lists->arenaList(thingKind);
+
+ // Flatten |finalizedSorted| into a regular ArenaList.
+ ArenaList finalized = finalizedSorted.toArenaList();
+
+ // We must take the GC lock to be able to safely modify the ArenaList;
+ // however, this does not by itself make the changes visible to all threads,
+ // as not all threads take the GC lock to read the ArenaLists.
+ // That safety is provided by the ReleaseAcquire memory ordering of the
+ // background finalize state, which we explicitly set as the final step.
+ {
+ AutoLockGC lock(lists->runtimeFromAnyThread());
+ MOZ_ASSERT(lists->concurrentUse(thingKind) ==
+ ConcurrentUse::BackgroundFinalize);
+
+ // Join |al| and |finalized| into a single list.
+ ArenaList allocatedDuringSweep = std::move(al);
+ al = std::move(finalized);
+ al.insertListWithCursorAtEnd(lists->newArenasInMarkPhase(thingKind));
+ al.insertListWithCursorAtEnd(allocatedDuringSweep);
+
+ lists->newArenasInMarkPhase(thingKind).clear();
+ lists->arenasToSweep(thingKind) = nullptr;
+ }
+
+ lists->concurrentUse(thingKind) = ConcurrentUse::None;
+}
+
+Arena* ArenaLists::takeSweptEmptyArenas() {
+ Arena* arenas = savedEmptyArenas;
+ savedEmptyArenas = nullptr;
+ return arenas;
+}
+
+void ArenaLists::queueForegroundThingsForSweep() {
+ gcShapeArenasToUpdate = arenasToSweep(AllocKind::SHAPE);
+ gcAccessorShapeArenasToUpdate = arenasToSweep(AllocKind::ACCESSOR_SHAPE);
+}
+
+void ArenaLists::checkGCStateNotInUse() {
+ // Called before and after collection to check the state is as expected.
+#ifdef DEBUG
+ checkSweepStateNotInUse();
+ for (auto i : AllAllocKinds()) {
+ MOZ_ASSERT(newArenasInMarkPhase(i).isEmpty());
+ }
+#endif
+}
+
+void ArenaLists::checkSweepStateNotInUse() {
+#ifdef DEBUG
+ checkNoArenasToUpdate();
+ MOZ_ASSERT(incrementalSweptArenaKind == AllocKind::LIMIT);
+ MOZ_ASSERT(incrementalSweptArenas.ref().isEmpty());
+ MOZ_ASSERT(!savedEmptyArenas);
+ for (auto i : AllAllocKinds()) {
+ MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
+ MOZ_ASSERT(!arenasToSweep(i));
+ }
+#endif
+}
+
+void ArenaLists::checkNoArenasToUpdate() {
+ MOZ_ASSERT(!gcShapeArenasToUpdate);
+ MOZ_ASSERT(!gcAccessorShapeArenasToUpdate);
+}
+
+void ArenaLists::checkNoArenasToUpdateForKind(AllocKind kind) {
+#ifdef DEBUG
+ switch (kind) {
+ case AllocKind::SHAPE:
+ MOZ_ASSERT(!gcShapeArenasToUpdate);
+ break;
+ case AllocKind::ACCESSOR_SHAPE:
+      MOZ_ASSERT(!gcAccessorShapeArenasToUpdate);
+ break;
+ default:
+ break;
+ }
+#endif
+}
+
+TimeStamp SliceBudget::unlimitedDeadline;
+
+void SliceBudget::Init() {
+ MOZ_ASSERT(!unlimitedDeadline);
+ uint64_t oneYearsInSeconds = 365 * 24 * 60 * 60;
+ unlimitedDeadline =
+ ReallyNow() + TimeDuration::FromSeconds(100 * oneYearsInSeconds);
+}
+
+SliceBudget::SliceBudget()
+ : timeBudget(UnlimitedTimeBudget), workBudget(UnlimitedWorkBudget) {
+ makeUnlimited();
+}
+
+SliceBudget::SliceBudget(TimeBudget time)
+ : timeBudget(time), workBudget(UnlimitedWorkBudget) {
+ if (time.budget < 0) {
+ makeUnlimited();
+ } else {
+ // Note: TimeBudget(0) is equivalent to WorkBudget(CounterReset).
+ deadline = ReallyNow() + TimeDuration::FromMilliseconds(time.budget);
+ counter = CounterReset;
+ }
+}
+
+SliceBudget::SliceBudget(WorkBudget work)
+ : timeBudget(UnlimitedTimeBudget), workBudget(work) {
+ if (work.budget < 0) {
+ makeUnlimited();
+ } else {
+ deadline = TimeStamp();
+ counter = work.budget;
+ }
+}
+
+int SliceBudget::describe(char* buffer, size_t maxlen) const {
+ if (isUnlimited()) {
+ return snprintf(buffer, maxlen, "unlimited");
+ } else if (isWorkBudget()) {
+ return snprintf(buffer, maxlen, "work(%" PRId64 ")", workBudget.budget);
+ } else {
+ return snprintf(buffer, maxlen, "%" PRId64 "ms", timeBudget.budget);
+ }
+}
+
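+// Called once the step counter has been used up. A work budget (which has a
+// null deadline) is then exhausted; a time budget re-checks the clock and, if
+// the deadline has not passed, resets the counter and continues.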
+bool SliceBudget::checkOverBudget() {
+ if (deadline.IsNull()) {
+ return true;
+ }
+
+ bool over = ReallyNow() >= deadline;
+ if (!over) {
+ counter = CounterReset;
+ }
+ return over;
+}
+
+void GCRuntime::requestMajorGC(JS::GCReason reason) {
+ MOZ_ASSERT_IF(reason != JS::GCReason::BG_TASK_FINISHED,
+ !CurrentThreadIsPerformingGC());
+
+ if (majorGCRequested()) {
+ return;
+ }
+
+ majorGCTriggerReason = reason;
+ rt->mainContextFromAnyThread()->requestInterrupt(InterruptReason::GC);
+}
+
+void Nursery::requestMinorGC(JS::GCReason reason) const {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
+
+ if (minorGCRequested()) {
+ return;
+ }
+
+ minorGCTriggerReason_ = reason;
+ runtime()->mainContextFromOwnThread()->requestInterrupt(InterruptReason::GC);
+}
+
+bool GCRuntime::triggerGC(JS::GCReason reason) {
+ /*
+ * Don't trigger GCs if this is being called off the main thread from
+ * onTooMuchMalloc().
+ */
+ if (!CurrentThreadCanAccessRuntime(rt)) {
+ return false;
+ }
+
+ /* GC is already running. */
+ if (JS::RuntimeHeapIsCollecting()) {
+ return false;
+ }
+
+ JS::PrepareForFullGC(rt->mainContextFromOwnThread());
+ requestMajorGC(reason);
+ return true;
+}
+
+void GCRuntime::maybeTriggerGCAfterAlloc(Zone* zone) {
+ if (!CurrentThreadCanAccessRuntime(rt)) {
+ // Zones in use by a helper thread can't be collected.
+ MOZ_ASSERT(zone->usedByHelperThread() || zone->isAtomsZone());
+ return;
+ }
+
+ MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
+
+ TriggerResult trigger =
+ checkHeapThreshold(zone, zone->gcHeapSize, zone->gcHeapThreshold);
+
+ if (trigger.shouldTrigger) {
+ // Start or continue an in progress incremental GC. We do this to try to
+ // avoid performing non-incremental GCs on zones which allocate a lot of
+ // data, even when incremental slices can't be triggered via scheduling in
+ // the event loop.
+ triggerZoneGC(zone, JS::GCReason::ALLOC_TRIGGER, trigger.usedBytes,
+ trigger.thresholdBytes);
+ }
+}
+
+void js::gc::MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
+ const HeapSize& heap,
+ const HeapThreshold& threshold,
+ JS::GCReason reason) {
+ rt->gc.maybeTriggerGCAfterMalloc(Zone::from(zoneAlloc), heap, threshold,
+ reason);
+}
+
+void GCRuntime::maybeTriggerGCAfterMalloc(Zone* zone) {
+ if (maybeTriggerGCAfterMalloc(zone, zone->mallocHeapSize,
+ zone->mallocHeapThreshold,
+ JS::GCReason::TOO_MUCH_MALLOC)) {
+ return;
+ }
+
+ maybeTriggerGCAfterMalloc(zone, zone->jitHeapSize, zone->jitHeapThreshold,
+ JS::GCReason::TOO_MUCH_JIT_CODE);
+}
+
+bool GCRuntime::maybeTriggerGCAfterMalloc(Zone* zone, const HeapSize& heap,
+ const HeapThreshold& threshold,
+ JS::GCReason reason) {
+ if (!CurrentThreadCanAccessRuntime(rt)) {
+ // Zones in use by a helper thread can't be collected. Also ignore malloc
+ // during sweeping, for example when we resize hash tables.
+ MOZ_ASSERT(zone->usedByHelperThread() || zone->isAtomsZone() ||
+ JS::RuntimeHeapIsBusy());
+ return false;
+ }
+
+ if (rt->heapState() != JS::HeapState::Idle) {
+ return false;
+ }
+
+ TriggerResult trigger = checkHeapThreshold(zone, heap, threshold);
+ if (!trigger.shouldTrigger) {
+ return false;
+ }
+
+ // Trigger a zone GC. budgetIncrementalGC() will work out whether to do an
+ // incremental or non-incremental collection.
+ triggerZoneGC(zone, reason, trigger.usedBytes, trigger.thresholdBytes);
+ return true;
+}
+
+TriggerResult GCRuntime::checkHeapThreshold(
+ Zone* zone, const HeapSize& heapSize, const HeapThreshold& heapThreshold) {
+ MOZ_ASSERT_IF(heapThreshold.hasSliceThreshold(), zone->wasGCStarted());
+
+ size_t usedBytes = heapSize.bytes();
+ size_t thresholdBytes = zone->gcState() > Zone::Prepare
+ ? heapThreshold.sliceBytes()
+ : heapThreshold.startBytes();
+ size_t niThreshold = heapThreshold.incrementalLimitBytes();
+ MOZ_ASSERT(niThreshold >= thresholdBytes);
+
+ if (usedBytes < thresholdBytes) {
+ return TriggerResult{false, 0, 0};
+ }
+
+ // Don't trigger incremental slices during background sweeping or decommit, as
+ // these will have no effect. A slice will be triggered automatically when
+ // these tasks finish.
+ if (usedBytes < niThreshold && zone->wasGCStarted() &&
+ (state() == State::Finalize || state() == State::Decommit)) {
+ return TriggerResult{false, 0, 0};
+ }
+
+ // Start or continue an in progress incremental GC.
+ return TriggerResult{true, usedBytes, thresholdBytes};
+}
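+// Illustrative numbers: with a threshold of 30 MB and an incremental limit of
+// 45 MB, a zone at 32 MB normally triggers a collection, but while background
+// finalization or decommit is running the trigger is deferred until usage
+// reaches the 45 MB incremental limit.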
+
+bool GCRuntime::triggerZoneGC(Zone* zone, JS::GCReason reason, size_t used,
+ size_t threshold) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+
+ /* GC is already running. */
+ if (JS::RuntimeHeapIsBusy()) {
+ return false;
+ }
+
+#ifdef JS_GC_ZEAL
+ if (hasZealMode(ZealMode::Alloc)) {
+ MOZ_RELEASE_ASSERT(triggerGC(reason));
+ return true;
+ }
+#endif
+
+ if (zone->isAtomsZone()) {
+ /* We can't do a zone GC of just the atoms zone. */
+ if (rt->hasHelperThreadZones()) {
+ /* We can't collect atoms while off-thread parsing is allocating. */
+ fullGCForAtomsRequested_ = true;
+ return false;
+ }
+ stats().recordTrigger(used, threshold);
+ MOZ_RELEASE_ASSERT(triggerGC(reason));
+ return true;
+ }
+
+ stats().recordTrigger(used, threshold);
+ zone->scheduleGC();
+ requestMajorGC(reason);
+ return true;
+}
+
+void GCRuntime::maybeGC() {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+
+#ifdef JS_GC_ZEAL
+ if (hasZealMode(ZealMode::Alloc) || hasZealMode(ZealMode::RootsChange)) {
+ JS::PrepareForFullGC(rt->mainContextFromOwnThread());
+ gc(GC_NORMAL, JS::GCReason::DEBUG_GC);
+ return;
+ }
+#endif
+
+ if (gcIfRequested()) {
+ return;
+ }
+
+ if (isIncrementalGCInProgress()) {
+ return;
+ }
+
+ bool scheduledZones = false;
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ if (checkEagerAllocTrigger(zone->gcHeapSize, zone->gcHeapThreshold) ||
+ checkEagerAllocTrigger(zone->mallocHeapSize,
+ zone->mallocHeapThreshold)) {
+ zone->scheduleGC();
+ scheduledZones = true;
+ }
+ }
+
+ if (scheduledZones) {
+ startGC(GC_NORMAL, JS::GCReason::EAGER_ALLOC_TRIGGER);
+ }
+}
+
+bool GCRuntime::checkEagerAllocTrigger(const HeapSize& size,
+ const HeapThreshold& threshold) {
+ double thresholdBytes =
+ threshold.eagerAllocTrigger(schedulingState.inHighFrequencyGCMode());
+ double usedBytes = size.bytes();
+ if (usedBytes <= 1024 * 1024 || usedBytes < thresholdBytes) {
+ return false;
+ }
+
+ stats().recordTrigger(usedBytes, thresholdBytes);
+ return true;
+}
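+// Note that a zone using at most 1 MiB never schedules an eager collection
+// here, regardless of how low its eager-trigger threshold is.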
+
+void GCRuntime::triggerFullGCForAtoms(JSContext* cx) {
+ MOZ_ASSERT(fullGCForAtomsRequested_);
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
+ MOZ_ASSERT(cx->canCollectAtoms());
+ fullGCForAtomsRequested_ = false;
+ MOZ_RELEASE_ASSERT(triggerGC(JS::GCReason::DELAYED_ATOMS_GC));
+}
+
+void GCRuntime::startDecommit() {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::DECOMMIT);
+
+#ifdef DEBUG
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ MOZ_ASSERT(decommitTask.isIdle());
+
+ {
+ AutoLockGC lock(this);
+ MOZ_ASSERT(fullChunks(lock).verify());
+ MOZ_ASSERT(availableChunks(lock).verify());
+ MOZ_ASSERT(emptyChunks(lock).verify());
+
+ // Verify that all entries in the empty chunks pool are already decommitted.
+ for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done();
+ chunk.next()) {
+ MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
+ }
+ }
+#endif
+
+ // If we are allocating heavily enough to trigger "high frequency" GC, then
+ // skip decommit so that we do not compete with the mutator. However if we're
+ // doing a shrinking GC we always decommit to release as much memory as
+ // possible.
+ if (schedulingState.inHighFrequencyGCMode() && !cleanUpEverything) {
+ return;
+ }
+
+ {
+ AutoLockGC lock(this);
+ if (availableChunks(lock).empty() && !tooManyEmptyChunks(lock)) {
+ return; // Nothing to do.
+ }
+ }
+
+#ifdef DEBUG
+ {
+ AutoLockHelperThreadState lock;
+ MOZ_ASSERT(!requestSliceAfterBackgroundTask);
+ }
+#endif
+
+ if (sweepOnBackgroundThread) {
+ decommitTask.start();
+ return;
+ }
+
+ decommitTask.runFromMainThread();
+}
+
+void js::gc::BackgroundDecommitTask::run(AutoLockHelperThreadState& lock) {
+ {
+ AutoUnlockHelperThreadState unlock(lock);
+
+ ChunkPool emptyChunksToFree;
+ {
+ AutoLockGC gcLock(gc);
+
+ // To help minimize the total number of chunks needed over time, sort the
+ // available chunks list so that we allocate into more-used chunks first.
+ gc->availableChunks(gcLock).sort();
+
+ gc->decommitFreeArenas(cancel_, gcLock);
+
+ emptyChunksToFree = gc->expireEmptyChunkPool(gcLock);
+ }
+
+ FreeChunkPool(emptyChunksToFree);
+ }
+
+ gc->maybeRequestGCAfterBackgroundTask(lock);
+}
+
+// Called from a background thread to decommit free arenas. Releases the GC
+// lock.
+void GCRuntime::decommitFreeArenas(const bool& cancel, AutoLockGC& lock) {
+ // Since we release the GC lock while doing the decommit syscall below,
+ // it is dangerous to iterate the available list directly, as the active
+ // thread could modify it concurrently. Instead, we build and pass an
+ // explicit Vector containing the Chunks we want to visit.
+ Vector<TenuredChunk*, 0, SystemAllocPolicy> chunksToDecommit;
+ for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done();
+ chunk.next()) {
+ if (chunk->info.numArenasFreeCommitted != 0 &&
+ !chunksToDecommit.append(chunk)) {
+ onOutOfMallocMemory(lock);
+ return;
+ }
+ }
+
+ for (TenuredChunk* chunk : chunksToDecommit) {
+ // The arena list is not doubly-linked, so we have to work in the free
+ // list order and not in the natural order.
+
+ while (chunk->info.numArenasFreeCommitted && !cancel) {
+ if (!chunk->decommitOneFreeArena(this, lock)) {
+ // If we are low enough on memory that we can't update the page
+ // tables, break out of the loop.
+ break;
+ }
+ }
+ }
+}
+
+// Do all possible decommit immediately from the current thread without
+// releasing the GC lock or allocating any memory.
+void GCRuntime::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
+ for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done();
+ chunk.next()) {
+ chunk->decommitFreeArenasWithoutUnlocking(lock);
+ }
+ MOZ_ASSERT(availableChunks(lock).verify());
+}
+
+void GCRuntime::maybeRequestGCAfterBackgroundTask(
+ const AutoLockHelperThreadState& lock) {
+ if (requestSliceAfterBackgroundTask) {
+ // Trigger a slice so the main thread can continue the collection
+ // immediately.
+ requestSliceAfterBackgroundTask = false;
+ requestMajorGC(JS::GCReason::BG_TASK_FINISHED);
+ }
+}
+
+void GCRuntime::cancelRequestedGCAfterBackgroundTask() {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+
+#ifdef DEBUG
+ {
+ AutoLockHelperThreadState lock;
+ MOZ_ASSERT(!requestSliceAfterBackgroundTask);
+ }
+#endif
+
+ majorGCTriggerReason.compareExchange(JS::GCReason::BG_TASK_FINISHED,
+ JS::GCReason::NO_REASON);
+}
+
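+// Finalize the queued zones in order and release any arenas that become
+// empty, taking the GC lock in batches while doing so.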
+void GCRuntime::sweepBackgroundThings(ZoneList& zones) {
+ if (zones.isEmpty()) {
+ return;
+ }
+
+ JSFreeOp fop(nullptr);
+
+ // Sweep zones in order. The atoms zone must be finalized last as other
+ // zones may have direct pointers into it.
+ while (!zones.isEmpty()) {
+ Zone* zone = zones.removeFront();
+ MOZ_ASSERT(zone->isGCFinished());
+
+ Arena* emptyArenas = zone->arenas.takeSweptEmptyArenas();
+
+ AutoSetThreadIsSweeping threadIsSweeping(zone);
+
+ // We must finalize thing kinds in the order specified by
+ // BackgroundFinalizePhases.
+ for (auto phase : BackgroundFinalizePhases) {
+ for (auto kind : phase.kinds) {
+ Arena* arenas = zone->arenas.arenasToSweep(kind);
+ MOZ_RELEASE_ASSERT(uintptr_t(arenas) != uintptr_t(-1));
+ if (arenas) {
+ ArenaLists::backgroundFinalize(&fop, arenas, &emptyArenas);
+ }
+ }
+ }
+
+ // Release any arenas that are now empty.
+ //
+ // Empty arenas are only released after everything has been finalized so
+ // that it's still possible to get a thing's zone after the thing has been
+ // finalized. The HeapPtr destructor depends on this, and this allows
+ // HeapPtrs between things of different alloc kind regardless of
+ // finalization order.
+ //
+    // Periodically drop and reacquire the GC lock to avoid blocking the main
+    // thread from allocating chunks.
+ static const size_t LockReleasePeriod = 32;
+
+ while (emptyArenas) {
+ AutoLockGC lock(this);
+ for (size_t i = 0; i < LockReleasePeriod && emptyArenas; i++) {
+ Arena* arena = emptyArenas;
+ emptyArenas = emptyArenas->next;
+ releaseArena(arena, lock);
+ }
+ }
+ }
+}
+
+void GCRuntime::assertBackgroundSweepingFinished() {
+#ifdef DEBUG
+ {
+ AutoLockHelperThreadState lock;
+ MOZ_ASSERT(backgroundSweepZones.ref().isEmpty());
+ }
+
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ for (auto i : AllAllocKinds()) {
+ MOZ_ASSERT(!zone->arenas.arenasToSweep(i));
+ MOZ_ASSERT(zone->arenas.doneBackgroundFinalize(i));
+ }
+ }
+#endif
+}
+
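+// Queue finished zones for background finalization and kick the sweep task,
+// or sweep them synchronously if background sweeping is disabled.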
+void GCRuntime::queueZonesAndStartBackgroundSweep(ZoneList& zones) {
+ {
+ AutoLockHelperThreadState lock;
+ MOZ_ASSERT(!requestSliceAfterBackgroundTask);
+ backgroundSweepZones.ref().transferFrom(zones);
+ if (sweepOnBackgroundThread) {
+ sweepTask.startOrRunIfIdle(lock);
+ }
+ }
+ if (!sweepOnBackgroundThread) {
+ sweepTask.join();
+ sweepTask.runFromMainThread();
+ }
+}
+
+void BackgroundSweepTask::run(AutoLockHelperThreadState& lock) {
+ AutoTraceLog logSweeping(TraceLoggerForCurrentThread(),
+ TraceLogger_GCSweeping);
+
+ gc->sweepFromBackgroundThread(lock);
+}
+
+void GCRuntime::sweepFromBackgroundThread(AutoLockHelperThreadState& lock) {
+ do {
+ ZoneList zones;
+ zones.transferFrom(backgroundSweepZones.ref());
+
+ AutoUnlockHelperThreadState unlock(lock);
+ sweepBackgroundThings(zones);
+
+ // The main thread may call queueZonesAndStartBackgroundSweep() while this
+ // is running so we must check there is no more work after releasing the
+ // lock.
+ } while (!backgroundSweepZones.ref().isEmpty());
+
+ maybeRequestGCAfterBackgroundTask(lock);
+}
+
+void GCRuntime::waitBackgroundSweepEnd() {
+ sweepTask.join();
+ if (state() != State::Sweep) {
+ assertBackgroundSweepingFinished();
+ }
+}
+
+void GCRuntime::queueUnusedLifoBlocksForFree(LifoAlloc* lifo) {
+ MOZ_ASSERT(JS::RuntimeHeapIsBusy());
+ AutoLockHelperThreadState lock;
+ lifoBlocksToFree.ref().transferUnusedFrom(lifo);
+}
+
+void GCRuntime::queueAllLifoBlocksForFree(LifoAlloc* lifo) {
+ MOZ_ASSERT(JS::RuntimeHeapIsBusy());
+ AutoLockHelperThreadState lock;
+ lifoBlocksToFree.ref().transferFrom(lifo);
+}
+
+void GCRuntime::queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc* lifo) {
+ lifoBlocksToFreeAfterMinorGC.ref().transferFrom(lifo);
+}
+
+void GCRuntime::queueBuffersForFreeAfterMinorGC(Nursery::BufferSet& buffers) {
+ AutoLockHelperThreadState lock;
+
+ if (!buffersToFreeAfterMinorGC.ref().empty()) {
+    // In the rare case that the buffers from a previous minor GC have not yet
+    // been processed, wait for the free task here.
+ MOZ_ASSERT(!freeTask.isIdle(lock));
+ freeTask.joinWithLockHeld(lock);
+ }
+
+ MOZ_ASSERT(buffersToFreeAfterMinorGC.ref().empty());
+ std::swap(buffersToFreeAfterMinorGC.ref(), buffers);
+}
+
+void GCRuntime::startBackgroundFree() {
+ AutoLockHelperThreadState lock;
+ freeTask.startOrRunIfIdle(lock);
+}
+
+void BackgroundFreeTask::run(AutoLockHelperThreadState& lock) {
+ AutoTraceLog logFreeing(TraceLoggerForCurrentThread(), TraceLogger_GCFree);
+
+ gc->freeFromBackgroundThread(lock);
+}
+
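+// Background free task body: free queued LifoAlloc blocks and nursery malloc
+// buffers, looping while more work arrives after the helper lock is released.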
+void GCRuntime::freeFromBackgroundThread(AutoLockHelperThreadState& lock) {
+ do {
+ LifoAlloc lifoBlocks(JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
+ lifoBlocks.transferFrom(&lifoBlocksToFree.ref());
+
+ Nursery::BufferSet buffers;
+ std::swap(buffers, buffersToFreeAfterMinorGC.ref());
+
+ AutoUnlockHelperThreadState unlock(lock);
+
+ lifoBlocks.freeAll();
+
+ JSFreeOp* fop = TlsContext.get()->defaultFreeOp();
+ for (Nursery::BufferSet::Range r = buffers.all(); !r.empty();
+ r.popFront()) {
+ // Malloc memory associated with nursery objects is not tracked as these
+ // are assumed to be short lived.
+ fop->freeUntracked(r.front());
+ }
+ } while (!lifoBlocksToFree.ref().isEmpty() ||
+ !buffersToFreeAfterMinorGC.ref().empty());
+}
+
+void GCRuntime::waitBackgroundFreeEnd() { freeTask.join(); }
+
+/* static */
+bool UniqueIdGCPolicy::needsSweep(Cell** cellp, uint64_t*) {
+ Cell* cell = *cellp;
+ return MapGCThingTyped(cell, cell->getTraceKind(), [](auto t) {
+ mozilla::DebugOnly<const Cell*> prior = t;
+ bool result = IsAboutToBeFinalizedUnbarriered(&t);
+ // Sweep should not have to deal with moved pointers, since moving GC
+ // handles updating the UID table manually.
+ MOZ_ASSERT(t == prior);
+ return result;
+ });
+}
+
+void JS::Zone::sweepUniqueIds() { uniqueIds().sweep(); }
+
+void Realm::destroy(JSFreeOp* fop) {
+ JSRuntime* rt = fop->runtime();
+ if (auto callback = rt->destroyRealmCallback) {
+ callback(fop, this);
+ }
+ if (principals()) {
+ JS_DropPrincipals(rt->mainContextFromOwnThread(), principals());
+ }
+ // Bug 1560019: Malloc memory associated with a zone but not with a specific
+ // GC thing is not currently tracked.
+ fop->deleteUntracked(this);
+}
+
+void Compartment::destroy(JSFreeOp* fop) {
+ JSRuntime* rt = fop->runtime();
+ if (auto callback = rt->destroyCompartmentCallback) {
+ callback(fop, this);
+ }
+ // Bug 1560019: Malloc memory associated with a zone but not with a specific
+ // GC thing is not currently tracked.
+ fop->deleteUntracked(this);
+ rt->gc.stats().sweptCompartment();
+}
+
+void Zone::destroy(JSFreeOp* fop) {
+ MOZ_ASSERT(compartments().empty());
+ JSRuntime* rt = fop->runtime();
+ if (auto callback = rt->destroyZoneCallback) {
+ callback(fop, this);
+ }
+ // Bug 1560019: Malloc memory associated with a zone but not with a specific
+ // GC thing is not currently tracked.
+ fop->deleteUntracked(this);
+ fop->runtime()->gc.stats().sweptZone();
+}
+
+/*
+ * It's simpler if we preserve the invariant that every zone (except the atoms
+ * zone) has at least one compartment, and every compartment has at least one
+ * realm. If we know we're deleting the entire zone, then sweepCompartments is
+ * allowed to delete all compartments. In this case, |keepAtleastOne| is false.
+ * If any cells remain alive in the zone, set |keepAtleastOne| true to prohibit
+ * sweepCompartments from deleting every compartment. Instead, it preserves an
+ * arbitrary compartment in the zone.
+ */
+void Zone::sweepCompartments(JSFreeOp* fop, bool keepAtleastOne,
+ bool destroyingRuntime) {
+ MOZ_ASSERT(!compartments().empty());
+ MOZ_ASSERT_IF(destroyingRuntime, !keepAtleastOne);
+
+ Compartment** read = compartments().begin();
+ Compartment** end = compartments().end();
+ Compartment** write = read;
+ while (read < end) {
+ Compartment* comp = *read++;
+
+ /*
+ * Don't delete the last compartment and realm if keepAtleastOne is
+ * still true, meaning all the other compartments were deleted.
+ */
+ bool keepAtleastOneRealm = read == end && keepAtleastOne;
+ comp->sweepRealms(fop, keepAtleastOneRealm, destroyingRuntime);
+
+ if (!comp->realms().empty()) {
+ *write++ = comp;
+ keepAtleastOne = false;
+ } else {
+ comp->destroy(fop);
+ }
+ }
+ compartments().shrinkTo(write - compartments().begin());
+ MOZ_ASSERT_IF(keepAtleastOne, !compartments().empty());
+ MOZ_ASSERT_IF(destroyingRuntime, compartments().empty());
+}
+
+void Compartment::sweepRealms(JSFreeOp* fop, bool keepAtleastOne,
+ bool destroyingRuntime) {
+ MOZ_ASSERT(!realms().empty());
+ MOZ_ASSERT_IF(destroyingRuntime, !keepAtleastOne);
+
+ Realm** read = realms().begin();
+ Realm** end = realms().end();
+ Realm** write = read;
+ while (read < end) {
+ Realm* realm = *read++;
+
+ /*
+ * Don't delete the last realm if keepAtleastOne is still true, meaning
+ * all the other realms were deleted.
+ */
+ bool dontDelete = read == end && keepAtleastOne;
+ if ((realm->marked() || dontDelete) && !destroyingRuntime) {
+ *write++ = realm;
+ keepAtleastOne = false;
+ } else {
+ realm->destroy(fop);
+ }
+ }
+ realms().shrinkTo(write - realms().begin());
+ MOZ_ASSERT_IF(keepAtleastOne, !realms().empty());
+ MOZ_ASSERT_IF(destroyingRuntime, realms().empty());
+}
+
+void GCRuntime::deleteEmptyZone(Zone* zone) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ MOZ_ASSERT(zone->compartments().empty());
+ for (auto& i : zones()) {
+ if (i == zone) {
+ zones().erase(&i);
+ zone->destroy(rt->defaultFreeOp());
+ return;
+ }
+ }
+ MOZ_CRASH("Zone not found");
+}
+
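+// Sweep the runtime's zone list after collection: zones with no live arenas
+// and no marked realms are destroyed; other collected zones just have their
+// dead compartments swept.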
+void GCRuntime::sweepZones(JSFreeOp* fop, bool destroyingRuntime) {
+ MOZ_ASSERT_IF(destroyingRuntime, numActiveZoneIters == 0);
+
+ if (numActiveZoneIters) {
+ return;
+ }
+
+ assertBackgroundSweepingFinished();
+
+ Zone** read = zones().begin();
+ Zone** end = zones().end();
+ Zone** write = read;
+
+ while (read < end) {
+ Zone* zone = *read++;
+
+ if (zone->wasGCStarted()) {
+ MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
+ const bool zoneIsDead =
+ zone->arenas.arenaListsAreEmpty() && !zone->hasMarkedRealms();
+ MOZ_ASSERT_IF(destroyingRuntime, zoneIsDead);
+ if (zoneIsDead) {
+ AutoSetThreadIsSweeping threadIsSweeping(zone);
+ zone->arenas.checkEmptyFreeLists();
+ zone->sweepCompartments(fop, false, destroyingRuntime);
+ MOZ_ASSERT(zone->compartments().empty());
+ MOZ_ASSERT(zone->typeDescrObjects().empty());
+ zone->destroy(fop);
+ continue;
+ }
+ zone->sweepCompartments(fop, true, destroyingRuntime);
+ }
+ *write++ = zone;
+ }
+ zones().shrinkTo(write - zones().begin());
+}
+
+void ArenaLists::checkEmptyArenaList(AllocKind kind) {
+ MOZ_ASSERT(arenaList(kind).isEmpty());
+}
+
+class MOZ_RAII AutoRunParallelTask : public GCParallelTask {
+ // This class takes a pointer to a member function of GCRuntime.
+ using TaskFunc = JS_MEMBER_FN_PTR_TYPE(GCRuntime, void);
+
+ TaskFunc func_;
+ gcstats::PhaseKind phase_;
+ AutoLockHelperThreadState& lock_;
+
+ public:
+ AutoRunParallelTask(GCRuntime* gc, TaskFunc func, gcstats::PhaseKind phase,
+ AutoLockHelperThreadState& lock)
+ : GCParallelTask(gc), func_(func), phase_(phase), lock_(lock) {
+ gc->startTask(*this, phase_, lock_);
+ }
+
+ ~AutoRunParallelTask() { gc->joinTask(*this, phase_, lock_); }
+
+ void run(AutoLockHelperThreadState& lock) override {
+ AutoUnlockHelperThreadState unlock(lock);
+
+ // The hazard analysis can't tell what the call to func_ will do but it's
+ // not allowed to GC.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ // Call pointer to member function on |gc|.
+ JS_CALL_MEMBER_FN_PTR(gc, func_);
+ }
+};
+
+void GCRuntime::purgeRuntimeForMinorGC() {
+ // If external strings become nursery allocable, remember to call
+ // zone->externalStringCache().purge() (and delete this assert.)
+ MOZ_ASSERT(!IsNurseryAllocable(AllocKind::EXTERNAL_STRING));
+
+ for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
+ zone->functionToStringCache().purge();
+ }
+
+ rt->caches().purgeForMinorGC(rt);
+}
+
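+// Discard runtime-wide caches and transient data at the start of a major GC:
+// realm caches, zone atom and string caches, the interpreter stack reserve,
+// unused temp LifoAlloc blocks, and, for the top-level runtime, unused helper
+// thread memory.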
+void GCRuntime::purgeRuntime() {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE);
+
+ for (GCRealmsIter realm(rt); !realm.done(); realm.next()) {
+ realm->purge();
+ }
+
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->purgeAtomCache();
+ zone->externalStringCache().purge();
+ zone->functionToStringCache().purge();
+ }
+
+ JSContext* cx = rt->mainContextFromOwnThread();
+ queueUnusedLifoBlocksForFree(&cx->tempLifoAlloc());
+ cx->interpreterStack().purge(rt);
+ cx->frontendCollectionPool().purge();
+
+ rt->caches().purge();
+
+ if (auto cache = rt->maybeThisRuntimeSharedImmutableStrings()) {
+ cache->purge();
+ }
+
+ MOZ_ASSERT(unmarkGrayStack.empty());
+ unmarkGrayStack.clearAndFree();
+
+ // If we're the main runtime, tell helper threads to free their unused
+ // memory when they are next idle.
+ if (!rt->parentRuntime) {
+ HelperThreadState().triggerFreeUnusedMemory();
+ }
+}
+
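+// Decide whether to keep a realm's JIT code alive over this GC: code is kept
+// if the realm requests it, if we always preserve code, if the realm is
+// currently animating, or for debug GCs, but never when we are low on
+// executable memory or cleaning up everything.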
+bool GCRuntime::shouldPreserveJITCode(Realm* realm,
+ const TimeStamp& currentTime,
+ JS::GCReason reason,
+ bool canAllocateMoreCode) {
+ if (cleanUpEverything) {
+ return false;
+ }
+ if (!canAllocateMoreCode) {
+ return false;
+ }
+
+ if (alwaysPreserveCode) {
+ return true;
+ }
+ if (realm->preserveJitCode()) {
+ return true;
+ }
+
+ if (IsCurrentlyAnimating(realm->lastAnimationTime, currentTime)) {
+ return true;
+ }
+
+ if (reason == JS::GCReason::DEBUG_GC) {
+ return true;
+ }
+
+ return false;
+}
+
+#ifdef DEBUG
+class CompartmentCheckTracer final : public JS::CallbackTracer {
+ void onChild(const JS::GCCellPtr& thing) override;
+
+ public:
+ explicit CompartmentCheckTracer(JSRuntime* rt)
+ : JS::CallbackTracer(rt, JS::TracerKind::Callback,
+ JS::WeakEdgeTraceAction::Skip),
+ src(nullptr),
+ zone(nullptr),
+ compartment(nullptr) {}
+
+ Cell* src;
+ JS::TraceKind srcKind;
+ Zone* zone;
+ Compartment* compartment;
+};
+
+static bool InCrossCompartmentMap(JSRuntime* rt, JSObject* src,
+ JS::GCCellPtr dst) {
+ // Cross compartment edges are either in the cross compartment map or in a
+ // debugger weakmap.
+
+ Compartment* srccomp = src->compartment();
+
+ if (dst.is<JSObject>()) {
+ if (ObjectWrapperMap::Ptr p = srccomp->lookupWrapper(&dst.as<JSObject>())) {
+ if (*p->value().unsafeGet() == src) {
+ return true;
+ }
+ }
+ }
+
+ if (DebugAPI::edgeIsInDebuggerWeakmap(rt, src, dst)) {
+ return true;
+ }
+
+ return false;
+}
+
+void CompartmentCheckTracer::onChild(const JS::GCCellPtr& thing) {
+ Compartment* comp =
+ MapGCThingTyped(thing, [](auto t) { return t->maybeCompartment(); });
+ if (comp && compartment) {
+ MOZ_ASSERT(
+ comp == compartment ||
+ (srcKind == JS::TraceKind::Object &&
+ InCrossCompartmentMap(runtime(), static_cast<JSObject*>(src), thing)));
+ } else {
+ TenuredCell* tenured = &thing.asCell()->asTenured();
+ Zone* thingZone = tenured->zoneFromAnyThread();
+ MOZ_ASSERT(thingZone == zone || thingZone->isAtomsZone());
+ }
+}
+
+void GCRuntime::checkForCompartmentMismatches() {
+ JSContext* cx = rt->mainContextFromOwnThread();
+ if (cx->disableStrictProxyCheckingCount) {
+ return;
+ }
+
+ CompartmentCheckTracer trc(rt);
+ AutoAssertEmptyNursery empty(cx);
+ for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
+ trc.zone = zone;
+ for (auto thingKind : AllAllocKinds()) {
+ for (auto i = zone->cellIterUnsafe<TenuredCell>(thingKind, empty);
+ !i.done(); i.next()) {
+ trc.src = i.getCell();
+ trc.srcKind = MapAllocToTraceKind(thingKind);
+ trc.compartment = MapGCThingTyped(
+ trc.src, trc.srcKind, [](auto t) { return t->maybeCompartment(); });
+ JS::TraceChildren(&trc, JS::GCCellPtr(trc.src, trc.srcKind));
+ }
+ }
+ }
+}
+#endif
+
+static void RelazifyFunctions(Zone* zone, AllocKind kind) {
+ MOZ_ASSERT(kind == AllocKind::FUNCTION ||
+ kind == AllocKind::FUNCTION_EXTENDED);
+
+ JSRuntime* rt = zone->runtimeFromMainThread();
+ AutoAssertEmptyNursery empty(rt->mainContextFromOwnThread());
+
+ for (auto i = zone->cellIterUnsafe<JSObject>(kind, empty); !i.done();
+ i.next()) {
+ JSFunction* fun = &i->as<JSFunction>();
+ // When iterating over the GC-heap, we may encounter function objects that
+ // are incomplete (missing a BaseScript when we expect one). We must check
+ // for this case before we can call JSFunction::hasBytecode().
+ if (fun->isIncomplete()) {
+ continue;
+ }
+ if (fun->hasBytecode()) {
+ fun->maybeRelazify(rt);
+ }
+ }
+}
+
+static bool ShouldCollectZone(Zone* zone, JS::GCReason reason) {
+ // If we are repeating a GC because we noticed dead compartments haven't
+ // been collected, then only collect zones containing those compartments.
+ if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+ if (comp->gcState.scheduledForDestruction) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ // Otherwise we only collect scheduled zones.
+ if (!zone->isGCScheduled()) {
+ return false;
+ }
+
+ // If canCollectAtoms() is false then parsing is currently happening on
+ // another thread, in which case we don't have information about which atoms
+ // are roots, so we must skip collecting atoms.
+ //
+  // Note that this only affects the first slice of an incremental GC since
+  // root marking is completed before we return to the mutator.
+ //
+ // Off-thread parsing is inhibited after the start of GC which prevents
+ // races between creating atoms during parsing and sweeping atoms on the
+ // main thread.
+ //
+ // Otherwise, we always schedule a GC in the atoms zone so that atoms which
+ // the other collected zones are using are marked, and we can update the
+ // set of atoms in use by the other collected zones at the end of the GC.
+ if (zone->isAtomsZone()) {
+ return TlsContext.get()->canCollectAtoms();
+ }
+
+ return zone->canCollect();
+}
+
+bool GCRuntime::prepareZonesForCollection(JS::GCReason reason,
+ bool* isFullOut) {
+#ifdef DEBUG
+ /* Assert that zone state is as we expect */
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ MOZ_ASSERT(!zone->isCollecting());
+ MOZ_ASSERT_IF(!zone->isAtomsZone(), !zone->compartments().empty());
+ for (auto i : AllAllocKinds()) {
+ MOZ_ASSERT(!zone->arenas.arenasToSweep(i));
+ }
+ }
+#endif
+
+ *isFullOut = true;
+ bool any = false;
+
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ /* Set up which zones will be collected. */
+ bool shouldCollect = ShouldCollectZone(zone, reason);
+ if (shouldCollect) {
+ MOZ_ASSERT(zone->canCollect());
+ any = true;
+ zone->changeGCState(Zone::NoGC, Zone::Prepare);
+ } else if (zone->canCollect()) {
+ *isFullOut = false;
+ }
+
+ zone->setWasCollected(shouldCollect);
+ }
+
+ /*
+ * Check that we do collect the atoms zone if we triggered a GC for that
+ * purpose.
+ */
+ MOZ_ASSERT_IF(reason == JS::GCReason::DELAYED_ATOMS_GC,
+ atomsZone->isGCPreparing());
+
+ /* Check that at least one zone is scheduled for collection. */
+ return any;
+}
+
+void GCRuntime::discardJITCodeForGC() {
+ js::CancelOffThreadIonCompile(rt, JS::Zone::Prepare);
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_DISCARD_CODE);
+ zone->discardJitCode(rt->defaultFreeOp(), Zone::DiscardBaselineCode,
+ Zone::DiscardJitScripts);
+ }
+}
+
+void GCRuntime::relazifyFunctionsForShrinkingGC() {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::RELAZIFY_FUNCTIONS);
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ if (zone->isSelfHostingZone()) {
+ continue;
+ }
+ RelazifyFunctions(zone, AllocKind::FUNCTION);
+ RelazifyFunctions(zone, AllocKind::FUNCTION_EXTENDED);
+ }
+}
+
+void GCRuntime::purgeShapeCachesForShrinkingGC() {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE_SHAPE_CACHES);
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ if (!canRelocateZone(zone) || zone->keepShapeCaches()) {
+ continue;
+ }
+ for (auto baseShape = zone->cellIterUnsafe<BaseShape>(); !baseShape.done();
+ baseShape.next()) {
+ baseShape->maybePurgeCache(rt->defaultFreeOp());
+ }
+ }
+}
+
+// The debugger keeps track of the URLs for the sources of each realm's scripts.
+// These URLs are purged on shrinking GCs.
+void GCRuntime::purgeSourceURLsForShrinkingGC() {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE_SOURCE_URLS);
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ // URLs are not tracked for realms in the system zone.
+ if (!canRelocateZone(zone) || zone->isSystemZone()) {
+ continue;
+ }
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+ for (RealmsInCompartmentIter realm(comp); !realm.done(); realm.next()) {
+ GlobalObject* global = realm.get()->unsafeUnbarrieredMaybeGlobal();
+ if (global) {
+ global->clearSourceURLSHolder();
+ }
+ }
+ }
+ }
+}
+
+void GCRuntime::unmarkWeakMaps() {
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ /* Unmark all weak maps in the zones being collected. */
+ WeakMapBase::unmarkZone(zone);
+ }
+}
+
+bool GCRuntime::beginPreparePhase(JS::GCReason reason, AutoGCSession& session) {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PREPARE);
+
+ if (!prepareZonesForCollection(reason, &isFull.ref())) {
+ return false;
+ }
+
+ /* Check it's safe to access the atoms zone if we are collecting it. */
+ if (atomsZone->isCollecting()) {
+ session.maybeCheckAtomsAccess.emplace(rt);
+ }
+
+ /*
+ * Start a parallel task to clear all mark state for the zones we are
+ * collecting. This is linear in the size of the heap we are collecting and so
+ * can be slow. This happens concurrently with the mutator and GC proper does
+ * not start until this is complete.
+ */
+ setParallelUnmarkEnabled(true);
+ unmarkTask.initZones();
+ unmarkTask.start();
+
+ /*
+ * Process any queued source compressions during the start of a major
+ * GC.
+ */
+ if (!IsShutdownReason(reason) && reason != JS::GCReason::ROOTS_REMOVED &&
+ reason != JS::GCReason::XPCONNECT_SHUTDOWN) {
+ StartHandlingCompressionsOnGC(rt);
+ }
+
+ return true;
+}
+
+void BackgroundUnmarkTask::initZones() {
+ MOZ_ASSERT(isIdle());
+ MOZ_ASSERT(zones.empty());
+ MOZ_ASSERT(!isCancelled());
+
+ // We can't safely iterate the zones vector from another thread so we copy the
+ // zones to be collected into another vector.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ if (!zones.append(zone.get())) {
+ oomUnsafe.crash("BackgroundUnmarkTask::initZones");
+ }
+ }
+}
+
+void BackgroundUnmarkTask::run(AutoLockHelperThreadState& helperThreadLock) {
+  AutoUnlockHelperThreadState unlock(helperThreadLock);
+
+ AutoTraceLog log(TraceLoggerForCurrentThread(), TraceLogger_GCUnmarking);
+
+ // We need to hold the GC lock while traversing the arena lists.
+ AutoLockGC gcLock(gc);
+
+ unmarkZones(gcLock);
+ zones.clear();
+}
+
+void BackgroundUnmarkTask::unmarkZones(AutoLockGC& lock) {
+ for (Zone* zone : zones) {
+ for (auto kind : AllAllocKinds()) {
+ for (ArenaIter arena(zone, kind); !arena.done(); arena.next()) {
+ AutoUnlockGC unlock(lock);
+ arena->unmarkAll();
+ if (isCancelled()) {
+ return;
+ }
+ }
+ }
+ }
+}
+
+void GCRuntime::endPreparePhase(JS::GCReason reason) {
+ MOZ_ASSERT(unmarkTask.isIdle());
+ setParallelUnmarkEnabled(false);
+
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ /*
+     * In an incremental GC, clear the arena free lists to ensure that
+     * subsequent allocations refill them and end up marking new cells black.
+     * See arenaAllocatedDuringGC().
+ */
+ zone->arenas.clearFreeLists();
+
+ zone->arenas.checkGCStateNotInUse();
+
+ zone->markedStrings = 0;
+ zone->finalizedStrings = 0;
+
+ zone->setPreservingCode(false);
+
+#ifdef JS_GC_ZEAL
+ if (hasZealMode(ZealMode::YieldBeforeRootMarking)) {
+ for (auto kind : AllAllocKinds()) {
+ for (ArenaIter arena(zone, kind); !arena.done(); arena.next()) {
+ arena->checkNoMarkedCells();
+ }
+ }
+ }
+#endif
+ }
+
+ // Discard JIT code more aggressively if the process is approaching its
+ // executable code limit.
+ bool canAllocateMoreCode = jit::CanLikelyAllocateMoreExecutableMemory();
+ auto currentTime = ReallyNow();
+
+ for (CompartmentsIter c(rt); !c.done(); c.next()) {
+ c->gcState.scheduledForDestruction = false;
+ c->gcState.maybeAlive = false;
+ c->gcState.hasEnteredRealm = false;
+ for (RealmsInCompartmentIter r(c); !r.done(); r.next()) {
+ if (r->shouldTraceGlobal() || !r->zone()->isGCScheduled()) {
+ c->gcState.maybeAlive = true;
+ }
+ if (shouldPreserveJITCode(r, currentTime, reason, canAllocateMoreCode)) {
+ r->zone()->setPreservingCode(true);
+ }
+ if (r->hasBeenEnteredIgnoringJit()) {
+ c->gcState.hasEnteredRealm = true;
+ }
+ }
+ }
+
+ if (!cleanUpEverything && canAllocateMoreCode) {
+ jit::JitActivationIterator activation(rt->mainContextFromOwnThread());
+ if (!activation.done()) {
+ activation->compartment()->zone()->setPreservingCode(true);
+ }
+ }
+
+ /*
+ * Perform remaining preparation work that must take place in the first true
+ * GC slice.
+ */
+
+ {
+ gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::PREPARE);
+
+ AutoLockHelperThreadState helperLock;
+
+ /* Clear mark state for WeakMaps in parallel with other work. */
+ AutoRunParallelTask unmarkWeakMaps(this, &GCRuntime::unmarkWeakMaps,
+ gcstats::PhaseKind::UNMARK_WEAKMAPS,
+ helperLock);
+
+ /*
+ * Buffer gray roots for incremental collections. This is linear in the
+     * number of roots, which can be in the tens of thousands. Do this in
+ * parallel with the rest of this block.
+ */
+ Maybe<AutoRunParallelTask> bufferGrayRootsTask;
+ if (isIncremental) {
+ bufferGrayRootsTask.emplace(this, &GCRuntime::bufferGrayRoots,
+ gcstats::PhaseKind::BUFFER_GRAY_ROOTS,
+ helperLock);
+ }
+ AutoUnlockHelperThreadState unlock(helperLock);
+
+ // Discard JIT code. For incremental collections, the sweep phase will
+ // also discard JIT code.
+ discardJITCodeForGC();
+ startBackgroundFreeAfterMinorGC();
+
+ /*
+ * Relazify functions after discarding JIT code (we can't relazify
+ * functions with JIT code) and before the actual mark phase, so that
+ * the current GC can collect the JSScripts we're unlinking here. We do
+ * this only when we're performing a shrinking GC, as too much
+ * relazification can cause performance issues when we have to reparse
+ * the same functions over and over.
+ */
+ if (invocationKind == GC_SHRINK) {
+ relazifyFunctionsForShrinkingGC();
+ purgeShapeCachesForShrinkingGC();
+ purgeSourceURLsForShrinkingGC();
+ }
+
+ /*
+ * We must purge the runtime at the beginning of an incremental GC. The
+ * danger if we purge later is that the snapshot invariant of
+ * incremental GC will be broken, as follows. If some object is
+ * reachable only through some cache (say the dtoaCache) then it will
+ * not be part of the snapshot. If we purge after root marking, then
+ * the mutator could obtain a pointer to the object and start using
+ * it. This object might never be marked, so a GC hazard would exist.
+ */
+ purgeRuntime();
+
+ if (IsShutdownReason(reason)) {
+ /* Clear any engine roots that may hold external data live. */
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->clearRootsForShutdownGC();
+ }
+ }
+ }
+
+#ifdef DEBUG
+ if (fullCompartmentChecks) {
+ checkForCompartmentMismatches();
+ }
+#endif
+}
+
+void GCRuntime::beginMarkPhase(AutoGCSession& session) {
+ /*
+ * Mark phase.
+ */
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
+
+ // This is the slice we actually start collecting. The number can be used to
+ // check whether a major GC has started so we must not increment it until we
+ // get here.
+ incMajorGcNumber();
+
+ marker.start();
+ GCMarker* gcmarker = &marker;
+ gcmarker->clearMarkCount();
+
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ // Incremental marking barriers are enabled at this point.
+ zone->changeGCState(Zone::Prepare, Zone::MarkBlackOnly);
+ }
+
+ if (rt->isBeingDestroyed()) {
+ checkNoRuntimeRoots(session);
+ } else {
+ traceRuntimeForMajorGC(gcmarker, session);
+ }
+
+ if (isIncremental) {
+ findDeadCompartments();
+ }
+
+ updateMemoryCountersOnGCStart();
+ stats().measureInitialHeapSize();
+}
+
+void GCRuntime::findDeadCompartments() {
+ gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::MARK_ROOTS);
+ gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::MARK_COMPARTMENTS);
+
+ /*
+ * This code ensures that if a compartment is "dead", then it will be
+ * collected in this GC. A compartment is considered dead if its maybeAlive
+ * flag is false. The maybeAlive flag is set if:
+ *
+ * (1) the compartment has been entered (set in beginMarkPhase() above)
+ * (2) the compartment's zone is not being collected (set in
+ * beginMarkPhase() above)
+ * (3) an object in the compartment was marked during root marking, either
+ * as a black root or a gray root (set in RootMarking.cpp), or
+ * (4) the compartment has incoming cross-compartment edges from another
+ * compartment that has maybeAlive set (set by this method).
+ *
+   * If maybeAlive is false, then we set the scheduledForDestruction flag.
+ * At the end of the GC, we look for compartments where
+ * scheduledForDestruction is true. These are compartments that were somehow
+ * "revived" during the incremental GC. If any are found, we do a special,
+ * non-incremental GC of those compartments to try to collect them.
+ *
+   * Compartments can be revived for a variety of reasons. One reason is bug
+ * 811587, where a reflector that was dead can be revived by DOM code that
+ * still refers to the underlying DOM node.
+ *
+ * Read barriers and allocations can also cause revival. This might happen
+ * during a function like JS_TransplantObject, which iterates over all
+ * compartments, live or dead, and operates on their objects. See bug 803376
+ * for details on this problem. To avoid the problem, we try to avoid
+ * allocation and read barriers during JS_TransplantObject and the like.
+ */
+
+ // Propagate the maybeAlive flag via cross-compartment edges.
+
+ Vector<Compartment*, 0, js::SystemAllocPolicy> workList;
+
+ for (CompartmentsIter comp(rt); !comp.done(); comp.next()) {
+ if (comp->gcState.maybeAlive) {
+ if (!workList.append(comp)) {
+ return;
+ }
+ }
+ }
+
+ while (!workList.empty()) {
+ Compartment* comp = workList.popCopy();
+ for (Compartment::WrappedObjectCompartmentEnum e(comp); !e.empty();
+ e.popFront()) {
+ Compartment* dest = e.front();
+ if (!dest->gcState.maybeAlive) {
+ dest->gcState.maybeAlive = true;
+ if (!workList.append(dest)) {
+ return;
+ }
+ }
+ }
+ }
+
+ // Set scheduledForDestruction based on maybeAlive.
+
+ for (GCCompartmentsIter comp(rt); !comp.done(); comp.next()) {
+ MOZ_ASSERT(!comp->gcState.scheduledForDestruction);
+ if (!comp->gcState.maybeAlive) {
+ comp->gcState.scheduledForDestruction = true;
+ }
+ }
+}
+
+void GCRuntime::updateMemoryCountersOnGCStart() {
+ heapSize.updateOnGCStart();
+
+ // Update memory counters for the zones we are collecting.
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->updateMemoryCountersOnGCStart();
+ }
+}
+
+template <class ZoneIterT>
+IncrementalProgress GCRuntime::markWeakReferences(
+ SliceBudget& incrementalBudget) {
+ gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP_MARK_WEAK);
+
+ auto unlimited = SliceBudget::unlimited();
+ SliceBudget& budget =
+ marker.incrementalWeakMapMarkingEnabled ? incrementalBudget : unlimited;
+
+ // We may have already entered weak marking mode.
+ if (!marker.isWeakMarking() && marker.enterWeakMarkingMode()) {
+ // Do not rely on the information about not-yet-marked weak keys that have
+ // been collected by barriers. Clear out the gcWeakKeys entries and rebuild
+    // the full table. Note that this is a cross-zone operation; delegate zone
+ // entries will be populated by map zone traversals, so everything needs to
+ // be cleared first, then populated.
+ if (!marker.incrementalWeakMapMarkingEnabled) {
+ for (ZoneIterT zone(this); !zone.done(); zone.next()) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!zone->gcWeakKeys().clear()) {
+ oomUnsafe.crash("clearing weak keys when entering weak marking mode");
+ }
+ }
+ }
+
+ for (ZoneIterT zone(this); !zone.done(); zone.next()) {
+ if (zone->enterWeakMarkingMode(&marker, budget) == NotFinished) {
+ MOZ_ASSERT(marker.incrementalWeakMapMarkingEnabled);
+ marker.leaveWeakMarkingMode();
+ return NotFinished;
+ }
+ }
+ }
+
+#ifdef DEBUG
+ for (ZoneIterT zone(this); !zone.done(); zone.next()) {
+ zone->checkWeakMarkingMode();
+ }
+#endif
+
+ // This is not strictly necessary; if we yield here, we could run the mutator
+ // in weak marking mode and unmark gray would end up doing the key lookups.
+ // But it seems better to not slow down barriers. Re-entering weak marking
+ // mode will be fast since already-processed markables have been removed.
+ auto leaveOnExit =
+ mozilla::MakeScopeExit([&] { marker.leaveWeakMarkingMode(); });
+
+ bool markedAny = true;
+ while (markedAny) {
+ if (!marker.markUntilBudgetExhausted(budget)) {
+ MOZ_ASSERT(marker.incrementalWeakMapMarkingEnabled);
+ return NotFinished;
+ }
+
+ markedAny = false;
+
+ if (!marker.isWeakMarking()) {
+ for (ZoneIterT zone(this); !zone.done(); zone.next()) {
+ markedAny |= WeakMapBase::markZoneIteratively(zone, &marker);
+ }
+ }
+
+ markedAny |= jit::JitRuntime::MarkJitcodeGlobalTableIteratively(&marker);
+ }
+ MOZ_ASSERT(marker.isDrained());
+
+ return Finished;
+}
+
+IncrementalProgress GCRuntime::markWeakReferencesInCurrentGroup(
+ SliceBudget& budget) {
+ return markWeakReferences<SweepGroupZonesIter>(budget);
+}
+
+template <class ZoneIterT>
+void GCRuntime::markGrayRoots(gcstats::PhaseKind phase) {
+ MOZ_ASSERT(marker.markColor() == MarkColor::Gray);
+
+ gcstats::AutoPhase ap(stats(), phase);
+ if (hasValidGrayRootsBuffer()) {
+ for (ZoneIterT zone(this); !zone.done(); zone.next()) {
+ markBufferedGrayRoots(zone);
+ }
+ } else {
+ MOZ_ASSERT(!isIncremental);
+ traceEmbeddingGrayRoots(&marker);
+ Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
+ &marker, Compartment::GrayEdges);
+ }
+}
+
+IncrementalProgress GCRuntime::markAllWeakReferences() {
+ SliceBudget budget = SliceBudget::unlimited();
+ return markWeakReferences<GCZonesIter>(budget);
+}
+
+void GCRuntime::markAllGrayReferences(gcstats::PhaseKind phase) {
+ markGrayRoots<GCZonesIter>(phase);
+ drainMarkStack();
+}
+
+void GCRuntime::dropStringWrappers() {
+ /*
+ * String "wrappers" are dropped on GC because their presence would require
+ * us to sweep the wrappers in all compartments every time we sweep a
+ * compartment group.
+ */
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ zone->dropStringWrappersOnGC();
+ }
+}
+
+/*
+ * Group zones that must be swept at the same time.
+ *
+ * From the point of view of the mutator, groups of zones transition atomically
+ * from marking to sweeping. If compartment A has an edge to an unmarked object
+ * in compartment B, then we must not start sweeping A in a later slice than we
+ * start sweeping B. That's because a write barrier in A could lead to the
+ * unmarked object in B becoming marked. However, if we had already swept that
+ * object, we would be in trouble.
+ *
+ * If we consider these dependencies as a graph, then all the compartments in
+ * any strongly-connected component of this graph must start sweeping in the
+ * same slice.
+ *
+ * Tarjan's algorithm is used to calculate the components.
+ */
+
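+// For example: a wrapper in zone A for an object in zone B that is not yet
+// marked black adds a sweep group edge A -> B below, so that A cannot start
+// sweeping in a later slice than B; if edges exist in both directions the two
+// zones end up in one strongly-connected component and are swept together.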
+bool Compartment::findSweepGroupEdges() {
+ Zone* source = zone();
+ for (WrappedObjectCompartmentEnum e(this); !e.empty(); e.popFront()) {
+ Compartment* targetComp = e.front();
+ Zone* target = targetComp->zone();
+
+ if (!target->isGCMarking() || source->hasSweepGroupEdgeTo(target)) {
+ continue;
+ }
+
+ for (ObjectWrapperEnum e(this, targetComp); !e.empty(); e.popFront()) {
+ JSObject* key = e.front().mutableKey();
+ MOZ_ASSERT(key->zone() == target);
+
+ // Add an edge to the wrapped object's zone to ensure that the wrapper
+ // zone is not still being marked when we start sweeping the wrapped zone.
+ // As an optimization, if the wrapped object is already marked black there
+ // is no danger of later marking and we can skip this.
+ if (key->isMarkedBlack()) {
+ continue;
+ }
+
+ if (!source->addSweepGroupEdgeTo(target)) {
+ return false;
+ }
+
+ // We don't need to consider any more wrappers for this target
+ // compartment since we already added an edge.
+ break;
+ }
+ }
+
+ return true;
+}
+
+bool Zone::findSweepGroupEdges(Zone* atomsZone) {
+ // Any zone may have a pointer to an atom in the atoms zone, and these aren't
+ // in the cross compartment map.
+ if (atomsZone->wasGCStarted() && !addSweepGroupEdgeTo(atomsZone)) {
+ return false;
+ }
+
+ for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
+ if (!comp->findSweepGroupEdges()) {
+ return false;
+ }
+ }
+
+ return WeakMapBase::findSweepGroupEdgesForZone(this);
+}
+
+static bool AddEdgesForMarkQueue(GCMarker& marker) {
+#ifdef DEBUG
+ // For testing only.
+ //
+ // Add edges between all objects mentioned in the test mark queue, since
+ // otherwise they will get marked in a different order than their sweep
+ // groups. Note that this is only done at the beginning of an incremental
+ // collection, so it is possible for objects to be added later that do not
+ // follow the sweep group ordering. These objects will wait until their sweep
+ // group comes up, or will be skipped if their sweep group is already past.
+ JS::Zone* prevZone = nullptr;
+ for (size_t i = 0; i < marker.markQueue.length(); i++) {
+ Value val = marker.markQueue[i].get().unbarrieredGet();
+ if (!val.isObject()) {
+ continue;
+ }
+ JSObject* obj = &val.toObject();
+ JS::Zone* zone = obj->zone();
+ if (!zone->isGCMarking()) {
+ continue;
+ }
+ if (prevZone && prevZone != zone) {
+ if (!prevZone->addSweepGroupEdgeTo(zone)) {
+ return false;
+ }
+ }
+ prevZone = zone;
+ }
+#endif
+ return true;
+}
+
+bool GCRuntime::findSweepGroupEdges() {
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ if (!zone->findSweepGroupEdges(atomsZone)) {
+ return false;
+ }
+ }
+
+ if (!AddEdgesForMarkQueue(marker)) {
+ return false;
+ }
+
+ return DebugAPI::findSweepGroupEdges(rt);
+}
+
+void GCRuntime::groupZonesForSweeping(JS::GCReason reason) {
+#ifdef DEBUG
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ MOZ_ASSERT(zone->gcSweepGroupEdges().empty());
+ }
+#endif
+
+ JSContext* cx = rt->mainContextFromOwnThread();
+ ZoneComponentFinder finder(cx->nativeStackLimit[JS::StackForSystemCode]);
+ if (!isIncremental || !findSweepGroupEdges()) {
+ finder.useOneComponent();
+ }
+
+ // Use one component for two-slice zeal modes.
+ if (useZeal && hasIncrementalTwoSliceZealMode()) {
+ finder.useOneComponent();
+ }
+
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ MOZ_ASSERT(zone->isGCMarking());
+ finder.addNode(zone);
+ }
+ sweepGroups = finder.getResultsList();
+ currentSweepGroup = sweepGroups;
+ sweepGroupIndex = 1;
+
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->clearSweepGroupEdges();
+ }
+
+#ifdef DEBUG
+ unsigned idx = sweepGroupIndex;
+ for (Zone* head = currentSweepGroup; head; head = head->nextGroup()) {
+ for (Zone* zone = head; zone; zone = zone->nextNodeInGroup()) {
+ MOZ_ASSERT(zone->isGCMarking());
+ zone->gcSweepGroupIndex = idx;
+ }
+ idx++;
+ }
+
+ MOZ_ASSERT_IF(!isIncremental, !currentSweepGroup->nextGroup());
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ MOZ_ASSERT(zone->gcSweepGroupEdges().empty());
+ }
+#endif
+}
+
+static void ResetGrayList(Compartment* comp);
+
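+// Advance to the next sweep group. For non-incremental collections the
+// remaining groups are merged into one; if an abort was requested the
+// remaining groups are returned to the NoGC state instead.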
+void GCRuntime::getNextSweepGroup() {
+ currentSweepGroup = currentSweepGroup->nextGroup();
+ ++sweepGroupIndex;
+ if (!currentSweepGroup) {
+ abortSweepAfterCurrentGroup = false;
+ return;
+ }
+
+ MOZ_ASSERT_IF(abortSweepAfterCurrentGroup, !isIncremental);
+ if (!isIncremental) {
+ ZoneComponentFinder::mergeGroups(currentSweepGroup);
+ }
+
+ for (Zone* zone = currentSweepGroup; zone; zone = zone->nextNodeInGroup()) {
+ MOZ_ASSERT(zone->isGCMarkingBlackOnly());
+ MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
+ }
+
+ if (abortSweepAfterCurrentGroup) {
+ joinTask(markTask, gcstats::PhaseKind::SWEEP_MARK);
+
+ // Abort collection of subsequent sweep groups.
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ MOZ_ASSERT(!zone->gcNextGraphComponent);
+ zone->changeGCState(Zone::MarkBlackOnly, Zone::NoGC);
+ zone->arenas.unmarkPreMarkedFreeCells();
+ zone->arenas.mergeNewArenasInMarkPhase();
+ zone->gcGrayRoots().Clear();
+ zone->clearGCSliceThresholds();
+ }
+
+ for (SweepGroupCompartmentsIter comp(rt); !comp.done(); comp.next()) {
+ ResetGrayList(comp);
+ }
+
+ abortSweepAfterCurrentGroup = false;
+ currentSweepGroup = nullptr;
+ }
+
+ hasMarkedGrayRoots = false;
+}
+
+/*
+ * Gray marking:
+ *
+ * At the end of collection, anything reachable from a gray root that has not
+ * otherwise been marked black must be marked gray.
+ *
+ * This means that when marking things gray we must not allow marking to leave
+ * the current compartment group, as that could result in things being marked
+ * gray when they might subsequently be marked black. To achieve this, when we
+ * find a cross compartment pointer we don't mark the referent but add it to a
+ * singly-linked list of incoming gray pointers that is stored with each
+ * compartment.
+ *
+ * The list head is stored in Compartment::gcIncomingGrayPointers and contains
+ * cross compartment wrapper objects. The next pointer is stored in the second
+ * extra slot of the cross compartment wrapper.
+ *
+ * The list is created during gray marking when one of the
+ * MarkCrossCompartmentXXX functions is called for a pointer that leaves the
+ * current compartment group. This calls DelayCrossCompartmentGrayMarking to
+ * push the referring object onto the list.
+ *
+ * The list is traversed and then unlinked in
+ * GCRuntime::markIncomingCrossCompartmentPointers.
+ */
+
+static bool IsGrayListObject(JSObject* obj) {
+ MOZ_ASSERT(obj);
+ return obj->is<CrossCompartmentWrapperObject>() && !IsDeadProxyObject(obj);
+}
+
+/* static */
+unsigned ProxyObject::grayLinkReservedSlot(JSObject* obj) {
+ MOZ_ASSERT(IsGrayListObject(obj));
+ return CrossCompartmentWrapperObject::GrayLinkReservedSlot;
+}
+
+#ifdef DEBUG
+static void AssertNotOnGrayList(JSObject* obj) {
+ MOZ_ASSERT_IF(
+ IsGrayListObject(obj),
+ GetProxyReservedSlot(obj, ProxyObject::grayLinkReservedSlot(obj))
+ .isUndefined());
+}
+#endif
+
+static void AssertNoWrappersInGrayList(JSRuntime* rt) {
+#ifdef DEBUG
+ for (CompartmentsIter c(rt); !c.done(); c.next()) {
+ MOZ_ASSERT(!c->gcIncomingGrayPointers);
+ for (Compartment::ObjectWrapperEnum e(c); !e.empty(); e.popFront()) {
+ AssertNotOnGrayList(e.front().value().unbarrieredGet());
+ }
+ }
+#endif
+}
+
+static JSObject* CrossCompartmentPointerReferent(JSObject* obj) {
+ MOZ_ASSERT(IsGrayListObject(obj));
+ return &obj->as<ProxyObject>().private_().toObject();
+}
+
+static JSObject* NextIncomingCrossCompartmentPointer(JSObject* prev,
+ bool unlink) {
+ unsigned slot = ProxyObject::grayLinkReservedSlot(prev);
+ JSObject* next = GetProxyReservedSlot(prev, slot).toObjectOrNull();
+ MOZ_ASSERT_IF(next, IsGrayListObject(next));
+
+ if (unlink) {
+ SetProxyReservedSlot(prev, slot, UndefinedValue());
+ }
+
+ return next;
+}
+
+void js::gc::DelayCrossCompartmentGrayMarking(JSObject* src) {
+ MOZ_ASSERT(IsGrayListObject(src));
+ MOZ_ASSERT(src->isMarkedGray());
+
+ AutoTouchingGrayThings tgt;
+
+ /* Called from MarkCrossCompartmentXXX functions. */
+ unsigned slot = ProxyObject::grayLinkReservedSlot(src);
+ JSObject* dest = CrossCompartmentPointerReferent(src);
+ Compartment* comp = dest->compartment();
+
+ if (GetProxyReservedSlot(src, slot).isUndefined()) {
+ SetProxyReservedSlot(src, slot,
+ ObjectOrNullValue(comp->gcIncomingGrayPointers));
+ comp->gcIncomingGrayPointers = src;
+ } else {
+ MOZ_ASSERT(GetProxyReservedSlot(src, slot).isObjectOrNull());
+ }
+
+#ifdef DEBUG
+ /*
+ * Assert that the object is in our list, also walking the list to check its
+ * integrity.
+ */
+ JSObject* obj = comp->gcIncomingGrayPointers;
+ bool found = false;
+ while (obj) {
+ if (obj == src) {
+ found = true;
+ }
+ obj = NextIncomingCrossCompartmentPointer(obj, false);
+ }
+ MOZ_ASSERT(found);
+#endif
+}
+
+void GCRuntime::markIncomingCrossCompartmentPointers(MarkColor color) {
+ gcstats::AutoPhase ap(stats(),
+ color == MarkColor::Black
+ ? gcstats::PhaseKind::SWEEP_MARK_INCOMING_BLACK
+ : gcstats::PhaseKind::SWEEP_MARK_INCOMING_GRAY);
+
+ bool unlinkList = color == MarkColor::Gray;
+
+ for (SweepGroupCompartmentsIter c(rt); !c.done(); c.next()) {
+ MOZ_ASSERT(c->zone()->isGCMarking());
+ MOZ_ASSERT_IF(color == MarkColor::Gray,
+ c->zone()->isGCMarkingBlackAndGray());
+ MOZ_ASSERT_IF(c->gcIncomingGrayPointers,
+ IsGrayListObject(c->gcIncomingGrayPointers));
+
+ for (JSObject* src = c->gcIncomingGrayPointers; src;
+ src = NextIncomingCrossCompartmentPointer(src, unlinkList)) {
+ JSObject* dst = CrossCompartmentPointerReferent(src);
+ MOZ_ASSERT(dst->compartment() == c);
+
+ if (color == MarkColor::Gray) {
+ if (src->asTenured().isMarkedGray()) {
+ TraceManuallyBarrieredEdge(&marker, &dst,
+ "cross-compartment gray pointer");
+ }
+ } else {
+ if (src->asTenured().isMarkedBlack()) {
+ TraceManuallyBarrieredEdge(&marker, &dst,
+ "cross-compartment black pointer");
+ }
+ }
+ }
+
+ if (unlinkList) {
+ c->gcIncomingGrayPointers = nullptr;
+ }
+ }
+}
+
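+// Unlink a wrapper from its referent compartment's incoming gray pointer
+// list. Returns true if the wrapper was on the list.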
+static bool RemoveFromGrayList(JSObject* wrapper) {
+ AutoTouchingGrayThings tgt;
+
+ if (!IsGrayListObject(wrapper)) {
+ return false;
+ }
+
+ unsigned slot = ProxyObject::grayLinkReservedSlot(wrapper);
+ if (GetProxyReservedSlot(wrapper, slot).isUndefined()) {
+ return false; /* Not on our list. */
+ }
+
+ JSObject* tail = GetProxyReservedSlot(wrapper, slot).toObjectOrNull();
+ SetProxyReservedSlot(wrapper, slot, UndefinedValue());
+
+ Compartment* comp = CrossCompartmentPointerReferent(wrapper)->compartment();
+ JSObject* obj = comp->gcIncomingGrayPointers;
+ if (obj == wrapper) {
+ comp->gcIncomingGrayPointers = tail;
+ return true;
+ }
+
+ while (obj) {
+ unsigned slot = ProxyObject::grayLinkReservedSlot(obj);
+ JSObject* next = GetProxyReservedSlot(obj, slot).toObjectOrNull();
+ if (next == wrapper) {
+ js::detail::SetProxyReservedSlotUnchecked(obj, slot,
+ ObjectOrNullValue(tail));
+ return true;
+ }
+ obj = next;
+ }
+
+ MOZ_CRASH("object not found in gray link list");
+}
+
+static void ResetGrayList(Compartment* comp) {
+ JSObject* src = comp->gcIncomingGrayPointers;
+ while (src) {
+ src = NextIncomingCrossCompartmentPointer(src, true);
+ }
+ comp->gcIncomingGrayPointers = nullptr;
+}
+
+#ifdef DEBUG
+static bool HasIncomingCrossCompartmentPointers(JSRuntime* rt) {
+ for (SweepGroupCompartmentsIter c(rt); !c.done(); c.next()) {
+ if (c->gcIncomingGrayPointers) {
+ return true;
+ }
+ }
+
+ return false;
+}
+#endif
+
+void js::NotifyGCNukeWrapper(JSObject* wrapper) {
+ MOZ_ASSERT(IsCrossCompartmentWrapper(wrapper));
+
+ /*
+ * References to target of wrapper are being removed, we no longer have to
+ * remember to mark it.
+ */
+ RemoveFromGrayList(wrapper);
+
+ /*
+ * Clean up WeakRef maps which might include this wrapper.
+ */
+ JSObject* target = UncheckedUnwrapWithoutExpose(wrapper);
+ if (target->is<WeakRefObject>()) {
+ WeakRefObject* weakRef = &target->as<WeakRefObject>();
+ GCRuntime* gc = &weakRef->runtimeFromMainThread()->gc;
+ if (weakRef->target() && gc->unregisterWeakRefWrapper(wrapper)) {
+ weakRef->setTarget(nullptr);
+ }
+ }
+
+ /*
+ * Clean up FinalizationRecord record objects which might be the target of
+ * this wrapper.
+ */
+ if (target->is<FinalizationRecordObject>()) {
+ auto* record = &target->as<FinalizationRecordObject>();
+ FinalizationRegistryObject::unregisterRecord(record);
+ }
+}
+
+enum {
+ JS_GC_SWAP_OBJECT_A_REMOVED = 1 << 0,
+ JS_GC_SWAP_OBJECT_B_REMOVED = 1 << 1
+};
+
+unsigned js::NotifyGCPreSwap(JSObject* a, JSObject* b) {
+ /*
+   * Two objects in the same compartment are about to have their contents
+   * swapped. If either of them is on a gray pointer list, we remove it from
+   * the list and return a bitset indicating which objects were removed.
+ */
+ return (RemoveFromGrayList(a) ? JS_GC_SWAP_OBJECT_A_REMOVED : 0) |
+ (RemoveFromGrayList(b) ? JS_GC_SWAP_OBJECT_B_REMOVED : 0);
+}
+
+void js::NotifyGCPostSwap(JSObject* a, JSObject* b, unsigned removedFlags) {
+ /*
+   * Two objects in the same compartment have had their contents swapped. If
+   * either of them was on a gray pointer list, we add the object that now
+   * holds its contents back to the list.
+ */
+ if (removedFlags & JS_GC_SWAP_OBJECT_A_REMOVED) {
+ DelayCrossCompartmentGrayMarking(b);
+ }
+ if (removedFlags & JS_GC_SWAP_OBJECT_B_REMOVED) {
+ DelayCrossCompartmentGrayMarking(a);
+ }
+}
+
+static inline void MaybeCheckWeakMapMarking(GCRuntime* gc) {
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+
+ bool shouldCheck;
+# if defined(DEBUG)
+ shouldCheck = true;
+# else
+ shouldCheck = gc->hasZealMode(ZealMode::CheckWeakMapMarking);
+# endif
+
+ if (shouldCheck) {
+ for (SweepGroupZonesIter zone(gc); !zone.done(); zone.next()) {
+ MOZ_RELEASE_ASSERT(WeakMapBase::checkMarkingForZone(zone));
+ }
+ }
+
+#endif
+}
+
+IncrementalProgress GCRuntime::markGrayReferencesInCurrentGroup(
+ JSFreeOp* fop, SliceBudget& budget) {
+ MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
+ MOZ_ASSERT(marker.isDrained());
+
+ MOZ_ASSERT(marker.markColor() == MarkColor::Black);
+
+ if (hasMarkedGrayRoots) {
+ return Finished;
+ }
+
+ MOZ_ASSERT(cellsToAssertNotGray.ref().empty());
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_MARK);
+
+ // Mark any incoming gray pointers from previously swept compartments that
+ // have subsequently been marked black. This can occur when gray cells
+ // become black by the action of UnmarkGray.
+ markIncomingCrossCompartmentPointers(MarkColor::Black);
+ drainMarkStack();
+
+ // Change state of current group to MarkGray to restrict marking to this
+ // group. Note that there may be pointers to the atoms zone, and
+ // these will be marked through, as they are not marked with
+ // TraceCrossCompartmentEdge.
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->changeGCState(Zone::MarkBlackOnly, Zone::MarkBlackAndGray);
+ }
+
+ AutoSetMarkColor setColorGray(marker, MarkColor::Gray);
+ marker.setMainStackColor(MarkColor::Gray);
+
+ // Mark incoming gray pointers from previously swept compartments.
+ markIncomingCrossCompartmentPointers(MarkColor::Gray);
+
+ markGrayRoots<SweepGroupZonesIter>(gcstats::PhaseKind::SWEEP_MARK_GRAY);
+
+ hasMarkedGrayRoots = true;
+
+#ifdef JS_GC_ZEAL
+ if (shouldYieldForZeal(ZealMode::YieldWhileGrayMarking)) {
+ return NotFinished;
+ }
+#endif
+
+ if (markUntilBudgetExhausted(budget) == NotFinished) {
+ return NotFinished;
+ }
+ marker.setMainStackColor(MarkColor::Black);
+ return Finished;
+}
+
+IncrementalProgress GCRuntime::endMarkingSweepGroup(JSFreeOp* fop,
+ SliceBudget& budget) {
+ MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
+ MOZ_ASSERT(marker.isDrained());
+
+ MOZ_ASSERT(marker.markColor() == MarkColor::Black);
+ MOZ_ASSERT(!HasIncomingCrossCompartmentPointers(rt));
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_MARK);
+
+ if (markWeakReferencesInCurrentGroup(budget) == NotFinished) {
+ return NotFinished;
+ }
+
+ AutoSetMarkColor setColorGray(marker, MarkColor::Gray);
+ marker.setMainStackColor(MarkColor::Gray);
+
+ // Mark transitively inside the current compartment group.
+ if (markWeakReferencesInCurrentGroup(budget) == NotFinished) {
+ return NotFinished;
+ }
+
+ MOZ_ASSERT(marker.isDrained());
+
+ // We must not yield after this point before we start sweeping the group.
+ safeToYield = false;
+
+ MaybeCheckWeakMapMarking(this);
+
+ return Finished;
+}
+
+// Causes the given WeakCache to be swept when run.
+class ImmediateSweepWeakCacheTask : public GCParallelTask {
+ Zone* zone;
+ JS::detail::WeakCacheBase& cache;
+
+ ImmediateSweepWeakCacheTask(const ImmediateSweepWeakCacheTask&) = delete;
+
+ public:
+ ImmediateSweepWeakCacheTask(GCRuntime* gc, Zone* zone,
+ JS::detail::WeakCacheBase& wc)
+ : GCParallelTask(gc), zone(zone), cache(wc) {}
+
+ ImmediateSweepWeakCacheTask(ImmediateSweepWeakCacheTask&& other)
+ : GCParallelTask(std::move(other)),
+ zone(other.zone),
+ cache(other.cache) {}
+
+ void run(AutoLockHelperThreadState& lock) override {
+ AutoUnlockHelperThreadState unlock(lock);
+ AutoSetThreadIsSweeping threadIsSweeping(zone);
+ cache.sweep(&gc->storeBuffer());
+ }
+};
+
+void GCRuntime::updateAtomsBitmap() {
+ DenseBitmap marked;
+ if (atomMarking.computeBitmapFromChunkMarkBits(rt, marked)) {
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ atomMarking.refineZoneBitmapForCollectedZone(zone, marked);
+ }
+ } else {
+ // Ignore OOM in computeBitmapFromChunkMarkBits. The
+ // refineZoneBitmapForCollectedZone call can only remove atoms from the
+ // zone bitmap, so it is conservative to just not call it.
+ }
+
+ atomMarking.markAtomsUsedByUncollectedZones(rt);
+
+ // For convenience sweep these tables non-incrementally as part of bitmap
+ // sweeping; they are likely to be much smaller than the main atoms table.
+ rt->symbolRegistry().sweep();
+ SweepingTracer trc(rt);
+ for (RealmsIter realm(this); !realm.done(); realm.next()) {
+ realm->tracekWeakVarNames(&trc);
+ }
+}
+
+void GCRuntime::sweepCCWrappers() {
+ AutoSetThreadIsSweeping threadIsSweeping; // This can touch all zones.
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->sweepAllCrossCompartmentWrappers();
+ }
+}
+
+void GCRuntime::sweepMisc() {
+ SweepingTracer trc(rt);
+ for (SweepGroupRealmsIter r(this); !r.done(); r.next()) {
+ AutoSetThreadIsSweeping threadIsSweeping(r->zone());
+ r->traceWeakObjects(&trc);
+ r->traceWeakTemplateObjects(&trc);
+ r->traceWeakSavedStacks(&trc);
+ r->traceWeakSelfHostingScriptSource(&trc);
+ r->traceWeakObjectRealm(&trc);
+ r->traceWeakRegExps(&trc);
+ }
+}
+
+void GCRuntime::sweepCompressionTasks() {
+ JSRuntime* runtime = rt;
+
+ // Attach finished compression tasks.
+ AutoLockHelperThreadState lock;
+ AttachFinishedCompressions(runtime, lock);
+ SweepPendingCompressions(lock);
+}
+
+void GCRuntime::sweepWeakMaps() {
+ AutoSetThreadIsSweeping threadIsSweeping; // This may touch any zone.
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ /* No need to look up any more weakmap keys from this sweep group. */
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!zone->gcWeakKeys().clear()) {
+ oomUnsafe.crash("clearing weak keys in beginSweepingSweepGroup()");
+ }
+
+    // Lock the store buffer since sweeping the weak maps may access it when
+    // rehashing or resizing the tables.
+ AutoLockStoreBuffer lock(&storeBuffer());
+ zone->sweepWeakMaps();
+ }
+}
+
+void GCRuntime::sweepUniqueIds() {
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ AutoSetThreadIsSweeping threadIsSweeping(zone);
+ zone->sweepUniqueIds();
+ }
+}
+
+void GCRuntime::sweepWeakRefs() {
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ AutoSetThreadIsSweeping threadIsSweeping(zone);
+ zone->weakRefMap().sweep(&storeBuffer());
+ }
+}
+
+void GCRuntime::sweepFinalizationRegistriesOnMainThread() {
+  // This calls back into the browser, which expects to be called from the
+  // main thread.
+ gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
+ gcstats::AutoPhase ap2(stats(),
+ gcstats::PhaseKind::SWEEP_FINALIZATION_REGISTRIES);
+ AutoLockStoreBuffer lock(&storeBuffer());
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ sweepFinalizationRegistries(zone);
+ }
+}
+
+void GCRuntime::startTask(GCParallelTask& task, gcstats::PhaseKind phase,
+ AutoLockHelperThreadState& lock) {
+ if (!CanUseExtraThreads()) {
+ AutoUnlockHelperThreadState unlock(lock);
+ task.runFromMainThread();
+ stats().recordParallelPhase(phase, task.duration());
+ return;
+ }
+
+ task.startWithLockHeld(lock);
+}
+
+void GCRuntime::joinTask(GCParallelTask& task, gcstats::PhaseKind phase,
+ AutoLockHelperThreadState& lock) {
+ // This is similar to GCParallelTask::joinWithLockHeld but handles recording
+ // execution and wait time.
+
+ if (task.isIdle(lock)) {
+ return;
+ }
+
+ if (task.isDispatched(lock)) {
+ // If the task was dispatched but has not yet started then cancel the task
+ // and run it from the main thread. This stops us from blocking here when
+ // the helper threads are busy with other tasks.
+ task.cancelDispatchedTask(lock);
+ AutoUnlockHelperThreadState unlock(lock);
+ task.runFromMainThread();
+ } else {
+ // Otherwise wait for the task to complete.
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::JOIN_PARALLEL_TASKS);
+ task.joinRunningOrFinishedTask(lock);
+ }
+
+ stats().recordParallelPhase(phase, task.duration());
+}
+
+void GCRuntime::joinTask(GCParallelTask& task, gcstats::PhaseKind phase) {
+ AutoLockHelperThreadState lock;
+ joinTask(task, phase, lock);
+}
+
+void GCRuntime::sweepDebuggerOnMainThread(JSFreeOp* fop) {
+ AutoLockStoreBuffer lock(&storeBuffer());
+
+ // Detach unreachable debuggers and global objects from each other.
+ // This can modify weakmaps and so must happen before weakmap sweeping.
+ DebugAPI::sweepAll(fop);
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
+
+ // Sweep debug environment information. This performs lookups in the Zone's
+ // unique IDs table and so must not happen in parallel with sweeping that
+ // table.
+ {
+ gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::SWEEP_MISC);
+ for (SweepGroupRealmsIter r(rt); !r.done(); r.next()) {
+ r->sweepDebugEnvironments();
+ }
+ }
+}
+
+void GCRuntime::sweepJitDataOnMainThread(JSFreeOp* fop) {
+ SweepingTracer trc(rt);
+ {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_JIT_DATA);
+
+ if (initialState != State::NotActive) {
+ // Cancel any active or pending off thread compilations. We also did
+ // this before marking (in DiscardJITCodeForGC) so this is a no-op
+ // for non-incremental GCs.
+ js::CancelOffThreadIonCompile(rt, JS::Zone::Sweep);
+ }
+
+ // Bug 1071218: the following method has not yet been refactored to
+ // work on a single zone-group at once.
+
+ // Sweep entries containing about-to-be-finalized JitCode and
+ // update relocated TypeSet::Types inside the JitcodeGlobalTable.
+ jit::JitRuntime::TraceWeakJitcodeGlobalTable(rt, &trc);
+ }
+
+ if (initialState != State::NotActive) {
+ gcstats::AutoPhase apdc(stats(), gcstats::PhaseKind::SWEEP_DISCARD_CODE);
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->discardJitCode(fop);
+ }
+ }
+
+ // JitZone/JitRealm must be swept *after* discarding JIT code, because
+ // Zone::discardJitCode might access CacheIRStubInfos deleted here.
+ {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_JIT_DATA);
+
+ for (SweepGroupRealmsIter r(rt); !r.done(); r.next()) {
+ r->traceWeakEdgesInJitRealm(&trc);
+ }
+
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ if (jit::JitZone* jitZone = zone->jitZone()) {
+ jitZone->traceWeak(&trc);
+ }
+ }
+ }
+}
+
+using WeakCacheTaskVector =
+ mozilla::Vector<ImmediateSweepWeakCacheTask, 0, SystemAllocPolicy>;
+
+// Call a functor for all weak caches that need to be swept in the current
+// sweep group.
+template <typename Functor>
+static inline bool IterateWeakCaches(JSRuntime* rt, Functor f) {
+ for (SweepGroupZonesIter zone(rt); !zone.done(); zone.next()) {
+ for (JS::detail::WeakCacheBase* cache : zone->weakCaches()) {
+ if (!f(cache, zone.get())) {
+ return false;
+ }
+ }
+ }
+
+ for (JS::detail::WeakCacheBase* cache : rt->weakCaches()) {
+ if (!f(cache, nullptr)) {
+ return false;
+ }
+ }
+
+ return true;
+}
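+
+// The same early-exit callback shape, written out as a standalone sketch over
+// plain nested containers. The container types and the callback here are
+// illustrative only, not the real zone/runtime cache lists:
+//
+//   template <typename Functor>
+//   bool ForEachNested(const std::vector<std::vector<int>>& groups,
+//                      const std::vector<int>& extras, Functor f) {
+//     for (const auto& group : groups) {
+//       for (int item : group) {
+//         if (!f(item)) {
+//           return false;  // Stop at the first failure (e.g. OOM).
+//         }
+//       }
+//     }
+//     for (int item : extras) {
+//       if (!f(item)) {
+//         return false;
+//       }
+//     }
+//     return true;
+//   }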
+
+static bool PrepareWeakCacheTasks(JSRuntime* rt,
+ WeakCacheTaskVector* immediateTasks) {
+ // Start incremental sweeping for caches that support it or add to a vector
+ // of sweep tasks to run on a helper thread.
+
+ MOZ_ASSERT(immediateTasks->empty());
+
+ bool ok =
+ IterateWeakCaches(rt, [&](JS::detail::WeakCacheBase* cache, Zone* zone) {
+ if (!cache->needsSweep()) {
+ return true;
+ }
+
+ // Caches that support incremental sweeping will be swept later.
+ if (zone && cache->setNeedsIncrementalBarrier(true)) {
+ return true;
+ }
+
+ return immediateTasks->emplaceBack(&rt->gc, zone, *cache);
+ });
+
+ if (!ok) {
+ immediateTasks->clearAndFree();
+ }
+
+ return ok;
+}
+
+static void SweepAllWeakCachesOnMainThread(JSRuntime* rt) {
+ // If we ran out of memory, do all the work on the main thread.
+ gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::SWEEP_WEAK_CACHES);
+ IterateWeakCaches(rt, [&](JS::detail::WeakCacheBase* cache, Zone* zone) {
+ if (cache->needsIncrementalBarrier()) {
+ cache->setNeedsIncrementalBarrier(false);
+ }
+ cache->sweep(&rt->gc.storeBuffer());
+ return true;
+ });
+}
+
+IncrementalProgress GCRuntime::beginSweepingSweepGroup(JSFreeOp* fop,
+ SliceBudget& budget) {
+ /*
+ * Begin sweeping the group of zones in currentSweepGroup, performing
+   * actions that must be done before yielding to the caller.
+ */
+
+ using namespace gcstats;
+
+ AutoSCC scc(stats(), sweepGroupIndex);
+
+ bool sweepingAtoms = false;
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ /* Set the GC state to sweeping. */
+ zone->changeGCState(Zone::MarkBlackAndGray, Zone::Sweep);
+
+ /* Purge the ArenaLists before sweeping. */
+ zone->arenas.checkSweepStateNotInUse();
+ zone->arenas.unmarkPreMarkedFreeCells();
+ zone->arenas.clearFreeLists();
+
+ if (zone->isAtomsZone()) {
+ sweepingAtoms = true;
+ }
+ }
+
+#ifdef JS_GC_ZEAL
+ validateIncrementalMarking();
+#endif
+
+#ifdef DEBUG
+ for (auto cell : cellsToAssertNotGray.ref()) {
+ JS::AssertCellIsNotGray(cell);
+ }
+ cellsToAssertNotGray.ref().clearAndFree();
+#endif
+
+ {
+ AutoLockStoreBuffer lock(&storeBuffer());
+
+ AutoPhase ap(stats(), PhaseKind::FINALIZE_START);
+ callFinalizeCallbacks(fop, JSFINALIZE_GROUP_PREPARE);
+ {
+ AutoPhase ap2(stats(), PhaseKind::WEAK_ZONES_CALLBACK);
+ callWeakPointerZonesCallbacks();
+ }
+ {
+ AutoPhase ap2(stats(), PhaseKind::WEAK_COMPARTMENT_CALLBACK);
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+ callWeakPointerCompartmentCallbacks(comp);
+ }
+ }
+ }
+ callFinalizeCallbacks(fop, JSFINALIZE_GROUP_START);
+ }
+
+  // Update the atom marking bitmaps. This marks atoms referenced by
+  // uncollected zones, so it cannot be done in parallel with the other
+  // sweeping work below.
+ if (sweepingAtoms) {
+ AutoPhase ap(stats(), PhaseKind::UPDATE_ATOMS_BITMAP);
+ updateAtomsBitmap();
+ }
+
+ AutoSetThreadIsSweeping threadIsSweeping;
+
+ sweepDebuggerOnMainThread(fop);
+
+ {
+ AutoLockHelperThreadState lock;
+
+ AutoPhase ap(stats(), PhaseKind::SWEEP_COMPARTMENTS);
+
+ AutoRunParallelTask sweepCCWrappers(this, &GCRuntime::sweepCCWrappers,
+ PhaseKind::SWEEP_CC_WRAPPER, lock);
+ AutoRunParallelTask sweepMisc(this, &GCRuntime::sweepMisc,
+ PhaseKind::SWEEP_MISC, lock);
+ AutoRunParallelTask sweepCompTasks(this, &GCRuntime::sweepCompressionTasks,
+ PhaseKind::SWEEP_COMPRESSION, lock);
+ AutoRunParallelTask sweepWeakMaps(this, &GCRuntime::sweepWeakMaps,
+ PhaseKind::SWEEP_WEAKMAPS, lock);
+ AutoRunParallelTask sweepUniqueIds(this, &GCRuntime::sweepUniqueIds,
+ PhaseKind::SWEEP_UNIQUEIDS, lock);
+ AutoRunParallelTask sweepWeakRefs(this, &GCRuntime::sweepWeakRefs,
+ PhaseKind::SWEEP_WEAKREFS, lock);
+
+ WeakCacheTaskVector sweepCacheTasks;
+ bool canSweepWeakCachesOffThread =
+ PrepareWeakCacheTasks(rt, &sweepCacheTasks);
+ if (canSweepWeakCachesOffThread) {
+ weakCachesToSweep.ref().emplace(currentSweepGroup);
+ for (auto& task : sweepCacheTasks) {
+ startTask(task, PhaseKind::SWEEP_WEAK_CACHES, lock);
+ }
+ }
+
+ {
+ AutoUnlockHelperThreadState unlock(lock);
+ sweepJitDataOnMainThread(fop);
+
+ if (!canSweepWeakCachesOffThread) {
+ MOZ_ASSERT(sweepCacheTasks.empty());
+ SweepAllWeakCachesOnMainThread(rt);
+ }
+ }
+
+ for (auto& task : sweepCacheTasks) {
+ joinTask(task, PhaseKind::SWEEP_WEAK_CACHES, lock);
+ }
+ }
+
+ if (sweepingAtoms) {
+ startSweepingAtomsTable();
+ }
+
+  // FinalizationRegistry sweeping touches weak maps and so must not run in
+  // parallel with weak map sweeping. It triggers a read barrier and can add
+  // marking work for zones that are still marking.
+ sweepFinalizationRegistriesOnMainThread();
+
+ // Queue all GC things in all zones for sweeping, either on the foreground
+ // or on the background thread.
+
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->arenas.queueForForegroundSweep(fop, ForegroundObjectFinalizePhase);
+ zone->arenas.queueForForegroundSweep(fop, ForegroundNonObjectFinalizePhase);
+ for (const auto& phase : BackgroundFinalizePhases) {
+ zone->arenas.queueForBackgroundSweep(fop, phase);
+ }
+
+ zone->arenas.queueForegroundThingsForSweep();
+ }
+
+ MOZ_ASSERT(!sweepZone);
+
+ safeToYield = true;
+ markOnBackgroundThreadDuringSweeping = CanUseExtraThreads();
+
+ return Finished;
+}
+
+#ifdef JS_GC_ZEAL
+bool GCRuntime::shouldYieldForZeal(ZealMode mode) {
+ bool yield = useZeal && hasZealMode(mode);
+
+ // Only yield on the first sweep slice for this mode.
+ bool firstSweepSlice = initialState != State::Sweep;
+ if (mode == ZealMode::IncrementalMultipleSlices && !firstSweepSlice) {
+ yield = false;
+ }
+
+ return yield;
+}
+#endif
+
+IncrementalProgress GCRuntime::endSweepingSweepGroup(JSFreeOp* fop,
+ SliceBudget& budget) {
+ // This is to prevent a race between markTask checking the zone state and
+ // us changing it below.
+ if (joinBackgroundMarkTask() == NotFinished) {
+ return NotFinished;
+ }
+
+ MOZ_ASSERT(marker.isDrained());
+
+ // Disable background marking during sweeping until we start sweeping the next
+ // zone group.
+ markOnBackgroundThreadDuringSweeping = false;
+
+ {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
+ AutoLockStoreBuffer lock(&storeBuffer());
+ JSFreeOp fop(rt);
+ callFinalizeCallbacks(&fop, JSFINALIZE_GROUP_END);
+ }
+
+ /* Free LIFO blocks on a background thread if possible. */
+ startBackgroundFree();
+
+ /* Update the GC state for zones we have swept. */
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ if (jit::JitZone* jitZone = zone->jitZone()) {
+ // Clear out any small pools that we're hanging on to.
+ jitZone->execAlloc().purge();
+ }
+ AutoLockGC lock(this);
+ zone->changeGCState(Zone::Sweep, Zone::Finished);
+ zone->arenas.unmarkPreMarkedFreeCells();
+ zone->arenas.checkNoArenasToUpdate();
+ }
+
+ /*
+ * Start background thread to sweep zones if required, sweeping the atoms
+ * zone last if present.
+ */
+ bool sweepAtomsZone = false;
+ ZoneList zones;
+ for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+ if (zone->isAtomsZone()) {
+ sweepAtomsZone = true;
+ } else {
+ zones.append(zone);
+ }
+ }
+ if (sweepAtomsZone) {
+ zones.append(atomsZone);
+ }
+
+ queueZonesAndStartBackgroundSweep(zones);
+
+ return Finished;
+}
+
+IncrementalProgress GCRuntime::markDuringSweeping(JSFreeOp* fop,
+ SliceBudget& budget) {
+ MOZ_ASSERT(markTask.isIdle());
+
+ if (marker.isDrained()) {
+ return Finished;
+ }
+
+ if (markOnBackgroundThreadDuringSweeping) {
+ AutoLockHelperThreadState lock;
+ MOZ_ASSERT(markTask.isIdle(lock));
+ markTask.setBudget(budget);
+ markTask.startOrRunIfIdle(lock);
+ return Finished; // This means don't yield to the mutator here.
+ }
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_MARK);
+ return markUntilBudgetExhausted(budget);
+}
+
+void GCRuntime::beginSweepPhase(JS::GCReason reason, AutoGCSession& session) {
+ /*
+ * Sweep phase.
+ *
+ * Finalize as we sweep, outside of lock but with RuntimeHeapIsBusy()
+ * true so that any attempt to allocate a GC-thing from a finalizer will
+ * fail, rather than nest badly and leave the unmarked newborn to be swept.
+ */
+
+ MOZ_ASSERT(!abortSweepAfterCurrentGroup);
+ MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
+
+ releaseHeldRelocatedArenas();
+
+#ifdef JS_GC_ZEAL
+ computeNonIncrementalMarkingForValidation(session);
+#endif
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);
+
+ hasMarkedGrayRoots = false;
+
+ AssertNoWrappersInGrayList(rt);
+ dropStringWrappers();
+
+ groupZonesForSweeping(reason);
+
+ sweepActions->assertFinished();
+}
+
+bool ArenaLists::foregroundFinalize(JSFreeOp* fop, AllocKind thingKind,
+ SliceBudget& sliceBudget,
+ SortedArenaList& sweepList) {
+ checkNoArenasToUpdateForKind(thingKind);
+
+ // Arenas are released for use for new allocations as soon as the finalizers
+ // for that allocation kind have run. This means that a cell's finalizer can
+ // safely use IsAboutToBeFinalized to check other cells of the same alloc
+ // kind, but not of different alloc kinds: the other arena may have already
+ // had new objects allocated in it, and since we allocate black,
+ // IsAboutToBeFinalized will return false even though the referent we intended
+ // to check is long gone.
+ if (!FinalizeArenas(fop, &arenasToSweep(thingKind), sweepList, thingKind,
+ sliceBudget)) {
+ // Copy the current contents of sweepList so that ArenaIter can find them.
+ incrementalSweptArenaKind = thingKind;
+ incrementalSweptArenas.ref().clear();
+ incrementalSweptArenas = sweepList.toArenaList();
+ return false;
+ }
+
+  // Clear the list of swept arenas now that these are moving back to the main
+  // arena lists.
+ incrementalSweptArenaKind = AllocKind::LIMIT;
+ incrementalSweptArenas.ref().clear();
+
+ sweepList.extractEmpty(&savedEmptyArenas.ref());
+
+ ArenaList& al = arenaList(thingKind);
+ ArenaList allocatedDuringSweep = std::move(al);
+ al = sweepList.toArenaList();
+ al.insertListWithCursorAtEnd(newArenasInMarkPhase(thingKind));
+ al.insertListWithCursorAtEnd(allocatedDuringSweep);
+
+ newArenasInMarkPhase(thingKind).clear();
+
+ return true;
+}
+
+void js::gc::BackgroundMarkTask::run(AutoLockHelperThreadState& lock) {
+ AutoUnlockHelperThreadState unlock(lock);
+
+ // Time reporting is handled separately for parallel tasks.
+ gc->sweepMarkResult =
+ gc->markUntilBudgetExhausted(this->budget, GCMarker::DontReportMarkTime);
+}
+
+IncrementalProgress GCRuntime::joinBackgroundMarkTask() {
+ AutoLockHelperThreadState lock;
+ if (markTask.isIdle(lock)) {
+ return Finished;
+ }
+
+ joinTask(markTask, gcstats::PhaseKind::SWEEP_MARK, lock);
+
+ IncrementalProgress result = sweepMarkResult;
+ sweepMarkResult = Finished;
+ return result;
+}
+
+IncrementalProgress GCRuntime::markUntilBudgetExhausted(
+ SliceBudget& sliceBudget, GCMarker::ShouldReportMarkTime reportTime) {
+ // Run a marking slice and return whether the stack is now empty.
+
+ AutoMajorGCProfilerEntry s(this);
+
+#ifdef DEBUG
+ AutoSetThreadIsMarking threadIsMarking;
+#endif // DEBUG
+
+ if (marker.processMarkQueue() == GCMarker::QueueYielded) {
+ return NotFinished;
+ }
+
+ return marker.markUntilBudgetExhausted(sliceBudget, reportTime) ? Finished
+ : NotFinished;
+}
+
+void GCRuntime::drainMarkStack() {
+ auto unlimited = SliceBudget::unlimited();
+ MOZ_RELEASE_ASSERT(marker.markUntilBudgetExhausted(unlimited));
+}
+
+static void SweepThing(JSFreeOp* fop, Shape* shape) {
+ if (!shape->isMarkedAny()) {
+ shape->sweep(fop);
+ }
+}
+
+template <typename T>
+static bool SweepArenaList(JSFreeOp* fop, Arena** arenasToSweep,
+ SliceBudget& sliceBudget) {
+ while (Arena* arena = *arenasToSweep) {
+ MOZ_ASSERT(arena->zone->isGCSweeping());
+
+ for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
+ SweepThing(fop, cell.as<T>());
+ }
+
+ Arena* next = arena->next;
+ MOZ_ASSERT_IF(next, next->zone == arena->zone);
+ *arenasToSweep = next;
+
+ AllocKind kind = MapTypeToFinalizeKind<T>::kind;
+ sliceBudget.step(Arena::thingsPerArena(kind));
+ if (sliceBudget.isOverBudget()) {
+ return false;
+ }
+ }
+
+ return true;
+}
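+
+// The budgeted, resumable list walk above, sketched over a plain singly-linked
+// list. The node type, the step budget and the Visit callback are illustrative
+// only, not SpiderMonkey API:
+//
+//   struct Node {
+//     Node* next;
+//     int value;
+//   };
+//
+//   // Returns true when the whole list has been processed, false if the step
+//   // budget ran out first. *listHead is updated so that a later call resumes
+//   // where this one stopped.
+//   template <typename Visit>
+//   bool SweepListWithBudget(Node** listHead, size_t* stepsLeft,
+//                            Visit visit) {
+//     while (Node* node = *listHead) {
+//       visit(node);
+//       *listHead = node->next;
+//       if (*stepsLeft == 0) {
+//         return false;  // Over budget: yield and resume later.
+//       }
+//       (*stepsLeft)--;
+//     }
+//     return true;
+//   }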
+
+void GCRuntime::startSweepingAtomsTable() {
+ auto& maybeAtoms = maybeAtomsToSweep.ref();
+ MOZ_ASSERT(maybeAtoms.isNothing());
+
+ AtomsTable* atomsTable = rt->atomsForSweeping();
+ if (!atomsTable) {
+ return;
+ }
+
+ // Create secondary tables to hold new atoms added while we're sweeping the
+ // main tables incrementally.
+ if (!atomsTable->startIncrementalSweep()) {
+ SweepingTracer trc(rt);
+ atomsTable->traceWeak(&trc);
+ return;
+ }
+
+ // Initialize remaining atoms to sweep.
+ maybeAtoms.emplace(*atomsTable);
+}
+
+IncrementalProgress GCRuntime::sweepAtomsTable(JSFreeOp* fop,
+ SliceBudget& budget) {
+ if (!atomsZone->isGCSweeping()) {
+ return Finished;
+ }
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_ATOMS_TABLE);
+
+ auto& maybeAtoms = maybeAtomsToSweep.ref();
+ if (!maybeAtoms) {
+ return Finished;
+ }
+
+ if (!rt->atomsForSweeping()->sweepIncrementally(maybeAtoms.ref(), budget)) {
+ return NotFinished;
+ }
+
+ maybeAtoms.reset();
+
+ return Finished;
+}
+
+static size_t IncrementalSweepWeakCache(GCRuntime* gc,
+ const WeakCacheToSweep& item) {
+ AutoSetThreadIsSweeping threadIsSweeping(item.zone);
+
+ JS::detail::WeakCacheBase* cache = item.cache;
+ MOZ_ASSERT(cache->needsIncrementalBarrier());
+ size_t steps = cache->sweep(&gc->storeBuffer());
+ cache->setNeedsIncrementalBarrier(false);
+
+ return steps;
+}
+
+WeakCacheSweepIterator::WeakCacheSweepIterator(JS::Zone* sweepGroup)
+ : sweepZone(sweepGroup), sweepCache(sweepZone->weakCaches().getFirst()) {
+ settle();
+}
+
+bool WeakCacheSweepIterator::done() const { return !sweepZone; }
+
+WeakCacheToSweep WeakCacheSweepIterator::get() const {
+ MOZ_ASSERT(!done());
+
+ return {sweepCache, sweepZone};
+}
+
+void WeakCacheSweepIterator::next() {
+ MOZ_ASSERT(!done());
+
+ sweepCache = sweepCache->getNext();
+ settle();
+}
+
+void WeakCacheSweepIterator::settle() {
+ while (sweepZone) {
+ while (sweepCache && !sweepCache->needsIncrementalBarrier()) {
+ sweepCache = sweepCache->getNext();
+ }
+
+ if (sweepCache) {
+ break;
+ }
+
+ sweepZone = sweepZone->nextNodeInGroup();
+ if (sweepZone) {
+ sweepCache = sweepZone->weakCaches().getFirst();
+ }
+ }
+
+ MOZ_ASSERT((!sweepZone && !sweepCache) ||
+ (sweepCache && sweepCache->needsIncrementalBarrier()));
+}
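+
+// The same two-level settle() pattern, sketched over plain containers. The
+// types and the needsWork flag are illustrative only:
+//
+//   struct Item { bool needsWork; };
+//   struct Group { std::vector<Item> items; Group* next; };
+//
+//   struct GroupItemIter {
+//     Group* group;
+//     size_t index = 0;
+//
+//     explicit GroupItemIter(Group* first) : group(first) { settle(); }
+//     bool done() const { return !group; }
+//     Item& get() const { return group->items[index]; }
+//     void next() { index++; settle(); }
+//
+//     // Skip items that don't need work, moving on to the next group (and
+//     // resetting the index) whenever the current group is exhausted.
+//     void settle() {
+//       while (group) {
+//         while (index < group->items.size() &&
+//                !group->items[index].needsWork) {
+//           index++;
+//         }
+//         if (index < group->items.size()) {
+//           return;
+//         }
+//         group = group->next;
+//         index = 0;
+//       }
+//     }
+//   };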
+
+IncrementalProgress GCRuntime::sweepWeakCaches(JSFreeOp* fop,
+ SliceBudget& budget) {
+ if (weakCachesToSweep.ref().isNothing()) {
+ return Finished;
+ }
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
+
+ WeakCacheSweepIterator& work = weakCachesToSweep.ref().ref();
+
+ AutoLockHelperThreadState lock;
+
+ {
+ AutoRunParallelWork runWork(this, IncrementalSweepWeakCache,
+ gcstats::PhaseKind::SWEEP_WEAK_CACHES, work,
+ budget, lock);
+ AutoUnlockHelperThreadState unlock(lock);
+ }
+
+ if (work.done()) {
+ weakCachesToSweep.ref().reset();
+ return Finished;
+ }
+
+ return NotFinished;
+}
+
+IncrementalProgress GCRuntime::finalizeAllocKind(JSFreeOp* fop,
+ SliceBudget& budget) {
+ MOZ_ASSERT(sweepZone->isGCSweeping());
+
+ // Set the number of things per arena for this AllocKind.
+ size_t thingsPerArena = Arena::thingsPerArena(sweepAllocKind);
+ auto& sweepList = incrementalSweepList.ref();
+ sweepList.setThingsPerArena(thingsPerArena);
+
+ AutoSetThreadIsSweeping threadIsSweeping(sweepZone);
+
+ if (!sweepZone->arenas.foregroundFinalize(fop, sweepAllocKind, budget,
+ sweepList)) {
+ return NotFinished;
+ }
+
+ // Reset the slots of the sweep list that we used.
+ sweepList.reset(thingsPerArena);
+
+ return Finished;
+}
+
+IncrementalProgress GCRuntime::sweepShapeTree(JSFreeOp* fop,
+ SliceBudget& budget) {
+ // Remove dead shapes from the shape tree, but don't finalize them yet.
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_SHAPE);
+
+ ArenaLists& al = sweepZone->arenas;
+
+ if (!SweepArenaList<Shape>(fop, &al.gcShapeArenasToUpdate.ref(), budget)) {
+ return NotFinished;
+ }
+
+ if (!SweepArenaList<AccessorShape>(
+ fop, &al.gcAccessorShapeArenasToUpdate.ref(), budget)) {
+ return NotFinished;
+ }
+
+ return Finished;
+}
+
+// Adapts a standard container that provides an STL-like begin()/end()
+// interface into an iterator with a done()/get()/next() style interface.
+template <typename Container>
+class ContainerIter {
+ using Iter = decltype(std::declval<const Container>().begin());
+ using Elem = decltype(*std::declval<Iter>());
+
+ Iter iter;
+ const Iter end;
+
+ public:
+ explicit ContainerIter(const Container& container)
+ : iter(container.begin()), end(container.end()) {}
+
+ bool done() const { return iter == end; }
+
+ Elem get() const { return *iter; }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ ++iter;
+ }
+};
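+
+// Example usage, as a standalone sketch over a std::vector (any container
+// with begin()/end() works the same way):
+//
+//   std::vector<int> values = {1, 2, 3};
+//   for (ContainerIter<std::vector<int>> iter(values); !iter.done();
+//        iter.next()) {
+//     printf("%d\n", iter.get());
+//   }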
+
+// IncrementalIter is a template class that makes a normal iterator into one
+// that can be used to perform incremental work by using external state that
+// persists between instantiations. The state is only initialised on the first
+// use and subsequent uses carry on from the previous state.
+template <typename Iter>
+struct IncrementalIter {
+ using State = Maybe<Iter>;
+ using Elem = decltype(std::declval<Iter>().get());
+
+ private:
+ State& maybeIter;
+
+ public:
+ template <typename... Args>
+ explicit IncrementalIter(State& maybeIter, Args&&... args)
+ : maybeIter(maybeIter) {
+ if (maybeIter.isNothing()) {
+ maybeIter.emplace(std::forward<Args>(args)...);
+ }
+ }
+
+ ~IncrementalIter() {
+ if (done()) {
+ maybeIter.reset();
+ }
+ }
+
+ bool done() const { return maybeIter.ref().done(); }
+
+ Elem get() const { return maybeIter.ref().get(); }
+
+ void next() { maybeIter.ref().next(); }
+};
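+
+// A standalone sketch of resuming iteration across calls by keeping the
+// Maybe<Iter> state outside the function. The container, the budget and the
+// names are illustrative only:
+//
+//   using IntIter = ContainerIter<std::vector<int>>;
+//
+//   // Processes up to |budget| elements per call. Returns true when the
+//   // whole container has been processed; call again with the same |state|
+//   // to continue from where the previous call stopped.
+//   bool ProcessSome(IncrementalIter<IntIter>::State& state,
+//                    const std::vector<int>& values, size_t budget) {
+//     size_t processed = 0;
+//     for (IncrementalIter<IntIter> iter(state, values); !iter.done();
+//          iter.next()) {
+//       if (processed == budget) {
+//         return false;  // Out of budget; |state| remembers our position.
+//       }
+//       printf("%d\n", iter.get());
+//       processed++;
+//     }
+//     return true;  // Done; the destructor has reset |state|.
+//   }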
+
+// Iterate through the sweep groups created by
+// GCRuntime::groupZonesForSweeping().
+class js::gc::SweepGroupsIter {
+ GCRuntime* gc;
+
+ public:
+ explicit SweepGroupsIter(JSRuntime* rt) : gc(&rt->gc) {
+ MOZ_ASSERT(gc->currentSweepGroup);
+ }
+
+ bool done() const { return !gc->currentSweepGroup; }
+
+ Zone* get() const { return gc->currentSweepGroup; }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ gc->getNextSweepGroup();
+ }
+};
+
+namespace sweepaction {
+
+// Implementation of the SweepAction interface that calls a method on GCRuntime.
+class SweepActionCall final : public SweepAction {
+ using Method = IncrementalProgress (GCRuntime::*)(JSFreeOp* fop,
+ SliceBudget& budget);
+
+ Method method;
+
+ public:
+ explicit SweepActionCall(Method m) : method(m) {}
+ IncrementalProgress run(Args& args) override {
+ return (args.gc->*method)(args.fop, args.budget);
+ }
+ void assertFinished() const override {}
+};
+
+// Implementation of the SweepAction interface that yields in a specified zeal
+// mode.
+class SweepActionMaybeYield final : public SweepAction {
+#ifdef JS_GC_ZEAL
+ ZealMode mode;
+ bool isYielding;
+#endif
+
+ public:
+ explicit SweepActionMaybeYield(ZealMode mode)
+#ifdef JS_GC_ZEAL
+ : mode(mode),
+ isYielding(false)
+#endif
+ {
+ }
+
+ IncrementalProgress run(Args& args) override {
+#ifdef JS_GC_ZEAL
+ if (!isYielding && args.gc->shouldYieldForZeal(mode)) {
+ isYielding = true;
+ return NotFinished;
+ }
+
+ isYielding = false;
+#endif
+ return Finished;
+ }
+
+ void assertFinished() const override { MOZ_ASSERT(!isYielding); }
+
+ // These actions should be skipped if GC zeal is not configured.
+#ifndef JS_GC_ZEAL
+ bool shouldSkip() override { return true; }
+#endif
+};
+
+// Implementation of the SweepAction interface that calls a list of actions in
+// sequence.
+class SweepActionSequence final : public SweepAction {
+ using ActionVector = Vector<UniquePtr<SweepAction>, 0, SystemAllocPolicy>;
+ using Iter = IncrementalIter<ContainerIter<ActionVector>>;
+
+ ActionVector actions;
+ typename Iter::State iterState;
+
+ public:
+ bool init(UniquePtr<SweepAction>* acts, size_t count) {
+ for (size_t i = 0; i < count; i++) {
+ auto& action = acts[i];
+ if (!action) {
+ return false;
+ }
+ if (action->shouldSkip()) {
+ continue;
+ }
+ if (!actions.emplaceBack(std::move(action))) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ IncrementalProgress run(Args& args) override {
+ for (Iter iter(iterState, actions); !iter.done(); iter.next()) {
+ if (iter.get()->run(args) == NotFinished) {
+ return NotFinished;
+ }
+ }
+ return Finished;
+ }
+
+ void assertFinished() const override {
+ MOZ_ASSERT(iterState.isNothing());
+ for (const auto& action : actions) {
+ action->assertFinished();
+ }
+ }
+};
+
+template <typename Iter, typename Init>
+class SweepActionForEach final : public SweepAction {
+ using Elem = decltype(std::declval<Iter>().get());
+ using IncrIter = IncrementalIter<Iter>;
+
+ Init iterInit;
+ Elem* elemOut;
+ UniquePtr<SweepAction> action;
+ typename IncrIter::State iterState;
+
+ public:
+ SweepActionForEach(const Init& init, Elem* maybeElemOut,
+ UniquePtr<SweepAction> action)
+ : iterInit(init), elemOut(maybeElemOut), action(std::move(action)) {}
+
+ IncrementalProgress run(Args& args) override {
+ MOZ_ASSERT_IF(elemOut, *elemOut == Elem());
+ auto clearElem = mozilla::MakeScopeExit([&] { setElem(Elem()); });
+ for (IncrIter iter(iterState, iterInit); !iter.done(); iter.next()) {
+ setElem(iter.get());
+ if (action->run(args) == NotFinished) {
+ return NotFinished;
+ }
+ }
+ return Finished;
+ }
+
+ void assertFinished() const override {
+ MOZ_ASSERT(iterState.isNothing());
+ MOZ_ASSERT_IF(elemOut, *elemOut == Elem());
+ action->assertFinished();
+ }
+
+ private:
+ void setElem(const Elem& value) {
+ if (elemOut) {
+ *elemOut = value;
+ }
+ }
+};
+
+static UniquePtr<SweepAction> Call(IncrementalProgress (GCRuntime::*method)(
+ JSFreeOp* fop, SliceBudget& budget)) {
+ return MakeUnique<SweepActionCall>(method);
+}
+
+static UniquePtr<SweepAction> MaybeYield(ZealMode zealMode) {
+ return MakeUnique<SweepActionMaybeYield>(zealMode);
+}
+
+template <typename... Rest>
+static UniquePtr<SweepAction> Sequence(UniquePtr<SweepAction> first,
+ Rest... rest) {
+ UniquePtr<SweepAction> actions[] = {std::move(first), std::move(rest)...};
+ auto seq = MakeUnique<SweepActionSequence>();
+ if (!seq || !seq->init(actions, std::size(actions))) {
+ return nullptr;
+ }
+
+ return UniquePtr<SweepAction>(std::move(seq));
+}
+
+static UniquePtr<SweepAction> RepeatForSweepGroup(
+ JSRuntime* rt, UniquePtr<SweepAction> action) {
+ if (!action) {
+ return nullptr;
+ }
+
+ using Action = SweepActionForEach<SweepGroupsIter, JSRuntime*>;
+ return js::MakeUnique<Action>(rt, nullptr, std::move(action));
+}
+
+static UniquePtr<SweepAction> ForEachZoneInSweepGroup(
+ JSRuntime* rt, Zone** zoneOut, UniquePtr<SweepAction> action) {
+ if (!action) {
+ return nullptr;
+ }
+
+ using Action = SweepActionForEach<SweepGroupZonesIter, JSRuntime*>;
+ return js::MakeUnique<Action>(rt, zoneOut, std::move(action));
+}
+
+static UniquePtr<SweepAction> ForEachAllocKind(AllocKinds kinds,
+ AllocKind* kindOut,
+ UniquePtr<SweepAction> action) {
+ if (!action) {
+ return nullptr;
+ }
+
+ using Action = SweepActionForEach<ContainerIter<AllocKinds>, AllocKinds>;
+ return js::MakeUnique<Action>(kinds, kindOut, std::move(action));
+}
+
+} // namespace sweepaction
+
+bool GCRuntime::initSweepActions() {
+ using namespace sweepaction;
+ using sweepaction::Call;
+
+ sweepActions.ref() = RepeatForSweepGroup(
+ rt,
+ Sequence(
+ Call(&GCRuntime::markGrayReferencesInCurrentGroup),
+ Call(&GCRuntime::endMarkingSweepGroup),
+ Call(&GCRuntime::beginSweepingSweepGroup),
+ MaybeYield(ZealMode::IncrementalMultipleSlices),
+ MaybeYield(ZealMode::YieldBeforeSweepingAtoms),
+ Call(&GCRuntime::sweepAtomsTable),
+ MaybeYield(ZealMode::YieldBeforeSweepingCaches),
+ Call(&GCRuntime::sweepWeakCaches),
+ ForEachZoneInSweepGroup(
+ rt, &sweepZone.ref(),
+ Sequence(MaybeYield(ZealMode::YieldBeforeSweepingObjects),
+ ForEachAllocKind(ForegroundObjectFinalizePhase.kinds,
+ &sweepAllocKind.ref(),
+ Call(&GCRuntime::finalizeAllocKind)),
+ MaybeYield(ZealMode::YieldBeforeSweepingNonObjects),
+ ForEachAllocKind(ForegroundNonObjectFinalizePhase.kinds,
+ &sweepAllocKind.ref(),
+ Call(&GCRuntime::finalizeAllocKind)),
+ MaybeYield(ZealMode::YieldBeforeSweepingShapeTrees),
+ Call(&GCRuntime::sweepShapeTree))),
+ Call(&GCRuntime::endSweepingSweepGroup)));
+
+ return sweepActions != nullptr;
+}
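+
+// A cut-down illustration of how the composed actions behave as a resumable
+// program: each slice re-runs the tree and picks up at the first unfinished
+// step. This is only a sketch; the real SweepAction classes above also carry
+// per-iterator state, zeal yields and alloc-kind loops:
+//
+//   using Step = std::function<bool()>;  // Returns true when finished.
+//
+//   // Runs steps in order, restarting from the first unfinished step on each
+//   // call. Returns true once every step has finished.
+//   bool RunSequence(const std::vector<Step>& steps, size_t& nextStep) {
+//     while (nextStep < steps.size()) {
+//       if (!steps[nextStep]()) {
+//         return false;  // This step yielded; resume here next slice.
+//       }
+//       nextStep++;
+//     }
+//     nextStep = 0;  // Reset so the sequence can run again next time.
+//     return true;
+//   }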
+
+IncrementalProgress GCRuntime::performSweepActions(SliceBudget& budget) {
+ AutoMajorGCProfilerEntry s(this);
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);
+ JSFreeOp fop(rt);
+
+ // Don't trigger pre-barriers when finalizing.
+ AutoDisableBarriers disableBarriers(this);
+
+ // Drain the mark stack, possibly in a parallel task if we're in a part of
+ // sweeping that allows it.
+ //
+  // In the first sweep slice we must not yield to the mutator until we've
+  // started sweeping a sweep group, but in that case the mark stack must
+  // already be empty.
+
+ MOZ_ASSERT(initialState <= State::Sweep);
+ MOZ_ASSERT_IF(initialState != State::Sweep, marker.isDrained());
+ if (initialState == State::Sweep &&
+ markDuringSweeping(&fop, budget) == NotFinished) {
+ return NotFinished;
+ }
+
+ // Then continue running sweep actions.
+
+ SweepAction::Args args{this, &fop, budget};
+ IncrementalProgress sweepProgress = sweepActions->run(args);
+ IncrementalProgress markProgress = joinBackgroundMarkTask();
+
+ if (sweepProgress == Finished && markProgress == Finished) {
+ return Finished;
+ }
+
+ MOZ_ASSERT(isIncremental);
+ return NotFinished;
+}
+
+bool GCRuntime::allCCVisibleZonesWereCollected() {
+ // Calculate whether the gray marking state is now valid.
+ //
+ // The gray bits change from invalid to valid if we finished a full GC from
+ // the point of view of the cycle collector. We ignore the following:
+ //
+ // - Helper thread zones, as these are not reachable from the main heap.
+ // - The atoms zone, since strings and symbols are never marked gray.
+ // - Empty zones.
+ //
+ // These exceptions ensure that when the CC requests a full GC the gray mark
+  // state ends up valid even if we don't collect all of the zones.
+
+ for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
+ if (!zone->isCollecting() && !zone->usedByHelperThread() &&
+ !zone->arenas.arenaListsAreEmpty()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void GCRuntime::endSweepPhase(bool destroyingRuntime) {
+ MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
+
+ sweepActions->assertFinished();
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);
+ JSFreeOp fop(rt);
+
+ MOZ_ASSERT_IF(destroyingRuntime, !sweepOnBackgroundThread);
+
+ {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::DESTROY);
+
+ /*
+ * Sweep script filenames after sweeping functions in the generic loop
+ * above. In this way when a scripted function's finalizer destroys the
+ * script and calls rt->destroyScriptHook, the hook can still access the
+ * script's filename. See bug 323267.
+ */
+ SweepScriptData(rt);
+ }
+
+ {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
+ AutoLockStoreBuffer lock(&storeBuffer());
+ callFinalizeCallbacks(&fop, JSFINALIZE_COLLECTION_END);
+
+ if (allCCVisibleZonesWereCollected()) {
+ grayBitsValid = true;
+ }
+ }
+
+#ifdef JS_GC_ZEAL
+ finishMarkingValidation();
+#endif
+
+#ifdef DEBUG
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ for (auto i : AllAllocKinds()) {
+ MOZ_ASSERT_IF(!IsBackgroundFinalized(i) || !sweepOnBackgroundThread,
+ !zone->arenas.arenasToSweep(i));
+ }
+ }
+#endif
+
+ AssertNoWrappersInGrayList(rt);
+}
+
+void GCRuntime::beginCompactPhase() {
+ MOZ_ASSERT(!isBackgroundSweeping());
+ assertBackgroundSweepingFinished();
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT);
+
+ MOZ_ASSERT(zonesToMaybeCompact.ref().isEmpty());
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ if (canRelocateZone(zone)) {
+ zonesToMaybeCompact.ref().append(zone);
+ }
+ }
+
+ MOZ_ASSERT(!relocatedArenasToRelease);
+ startedCompacting = true;
+ zonesCompacted = 0;
+}
+
+IncrementalProgress GCRuntime::compactPhase(JS::GCReason reason,
+ SliceBudget& sliceBudget,
+ AutoGCSession& session) {
+ assertBackgroundSweepingFinished();
+ MOZ_ASSERT(startedCompacting);
+
+ AutoMajorGCProfilerEntry s(this);
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT);
+
+ // TODO: JSScripts can move. If the sampler interrupts the GC in the
+ // middle of relocating an arena, invalid JSScript pointers may be
+ // accessed. Suppress all sampling until a finer-grained solution can be
+ // found. See bug 1295775.
+ AutoSuppressProfilerSampling suppressSampling(rt->mainContextFromOwnThread());
+
+ ZoneList relocatedZones;
+ Arena* relocatedArenas = nullptr;
+ while (!zonesToMaybeCompact.ref().isEmpty()) {
+ Zone* zone = zonesToMaybeCompact.ref().front();
+ zonesToMaybeCompact.ref().removeFront();
+
+ MOZ_ASSERT(nursery().isEmpty());
+ zone->changeGCState(Zone::Finished, Zone::Compact);
+
+ if (relocateArenas(zone, reason, relocatedArenas, sliceBudget)) {
+ updateZonePointersToRelocatedCells(zone);
+ relocatedZones.append(zone);
+ zonesCompacted++;
+ } else {
+ zone->changeGCState(Zone::Compact, Zone::Finished);
+ }
+
+ if (sliceBudget.isOverBudget()) {
+ break;
+ }
+ }
+
+ if (!relocatedZones.isEmpty()) {
+ updateRuntimePointersToRelocatedCells(session);
+
+ do {
+ Zone* zone = relocatedZones.front();
+ relocatedZones.removeFront();
+ zone->changeGCState(Zone::Compact, Zone::Finished);
+ } while (!relocatedZones.isEmpty());
+ }
+
+ clearRelocatedArenas(relocatedArenas, reason);
+
+ if (ShouldProtectRelocatedArenas(reason)) {
+ protectAndHoldArenas(relocatedArenas);
+ } else {
+ releaseRelocatedArenas(relocatedArenas);
+ }
+
+ // Clear caches that can contain cell pointers.
+ rt->caches().purgeForCompaction();
+
+#ifdef DEBUG
+ checkHashTablesAfterMovingGC();
+#endif
+
+ return zonesToMaybeCompact.ref().isEmpty() ? Finished : NotFinished;
+}
+
+void GCRuntime::endCompactPhase() { startedCompacting = false; }
+
+void GCRuntime::finishCollection() {
+ assertBackgroundSweepingFinished();
+
+ MOZ_ASSERT(marker.isDrained());
+ marker.stop();
+
+ clearBufferedGrayRoots();
+
+ maybeStopStringPretenuring();
+
+ {
+ AutoLockGC lock(this);
+ updateGCThresholdsAfterCollection(lock);
+ }
+
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->changeGCState(Zone::Finished, Zone::NoGC);
+ zone->notifyObservingDebuggers();
+ }
+
+#ifdef JS_GC_ZEAL
+ clearSelectedForMarking();
+#endif
+
+ auto currentTime = ReallyNow();
+ schedulingState.updateHighFrequencyMode(lastGCEndTime_, currentTime,
+ tunables);
+ lastGCEndTime_ = currentTime;
+
+ checkGCStateNotInUse();
+}
+
+void GCRuntime::checkGCStateNotInUse() {
+#ifdef DEBUG
+ MOZ_ASSERT(!marker.isActive());
+
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ if (zone->wasCollected()) {
+ zone->arenas.checkGCStateNotInUse();
+ }
+ MOZ_ASSERT(!zone->wasGCStarted());
+ MOZ_ASSERT(!zone->needsIncrementalBarrier());
+ MOZ_ASSERT(!zone->isOnList());
+ }
+
+ MOZ_ASSERT(zonesToMaybeCompact.ref().isEmpty());
+ MOZ_ASSERT(cellsToAssertNotGray.ref().empty());
+
+ AutoLockHelperThreadState lock;
+ MOZ_ASSERT(!requestSliceAfterBackgroundTask);
+#endif
+}
+
+void GCRuntime::maybeStopStringPretenuring() {
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ if (zone->allocNurseryStrings) {
+ continue;
+ }
+
+ // Count the number of strings before the major GC.
+ size_t numStrings = zone->markedStrings + zone->finalizedStrings;
+ double rate = double(zone->finalizedStrings) / double(numStrings);
+ if (rate > tunables.stopPretenureStringThreshold()) {
+ CancelOffThreadIonCompile(zone);
+ bool preserving = zone->isPreservingCode();
+ zone->setPreservingCode(false);
+ zone->discardJitCode(rt->defaultFreeOp());
+ zone->setPreservingCode(preserving);
+ for (RealmsInZoneIter r(zone); !r.done(); r.next()) {
+ if (jit::JitRealm* jitRealm = r->jitRealm()) {
+ jitRealm->discardStubs();
+ jitRealm->setStringsCanBeInNursery(true);
+ }
+ }
+
+ zone->markedStrings = 0;
+ zone->finalizedStrings = 0;
+ zone->allocNurseryStrings = true;
+ }
+ }
+}
+
+void GCRuntime::updateGCThresholdsAfterCollection(const AutoLockGC& lock) {
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->clearGCSliceThresholds();
+ zone->updateGCStartThresholds(*this, invocationKind, lock);
+ }
+}
+
+void GCRuntime::updateAllGCStartThresholds(const AutoLockGC& lock) {
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ zone->updateGCStartThresholds(*this, GC_NORMAL, lock);
+ }
+}
+
+static const char* GCHeapStateToLabel(JS::HeapState heapState) {
+ switch (heapState) {
+ case JS::HeapState::MinorCollecting:
+ return "js::Nursery::collect";
+ case JS::HeapState::MajorCollecting:
+ return "js::GCRuntime::collect";
+ default:
+ MOZ_CRASH("Unexpected heap state when pushing GC profiling stack frame");
+ }
+ MOZ_ASSERT_UNREACHABLE("Should have exhausted every JS::HeapState variant!");
+ return nullptr;
+}
+
+static JS::ProfilingCategoryPair GCHeapStateToProfilingCategory(
+ JS::HeapState heapState) {
+ return heapState == JS::HeapState::MinorCollecting
+ ? JS::ProfilingCategoryPair::GCCC_MinorGC
+ : JS::ProfilingCategoryPair::GCCC_MajorGC;
+}
+
+/* Start a new heap session. */
+AutoHeapSession::AutoHeapSession(GCRuntime* gc, JS::HeapState heapState)
+ : gc(gc), prevState(gc->heapState_) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(gc->rt));
+ MOZ_ASSERT(prevState == JS::HeapState::Idle ||
+ (prevState == JS::HeapState::MajorCollecting &&
+ heapState == JS::HeapState::MinorCollecting));
+ MOZ_ASSERT(heapState != JS::HeapState::Idle);
+
+ gc->heapState_ = heapState;
+
+ if (heapState == JS::HeapState::MinorCollecting ||
+ heapState == JS::HeapState::MajorCollecting) {
+ profilingStackFrame.emplace(gc->rt->mainContextFromOwnThread(),
+ GCHeapStateToLabel(heapState),
+ GCHeapStateToProfilingCategory(heapState));
+ }
+}
+
+AutoHeapSession::~AutoHeapSession() {
+ MOZ_ASSERT(JS::RuntimeHeapIsBusy());
+ gc->heapState_ = prevState;
+}
+
+static const char* MajorGCStateToLabel(State state) {
+ switch (state) {
+ case State::Mark:
+ return "js::GCRuntime::markUntilBudgetExhausted";
+ case State::Sweep:
+ return "js::GCRuntime::performSweepActions";
+ case State::Compact:
+ return "js::GCRuntime::compactPhase";
+ default:
+ MOZ_CRASH("Unexpected heap state when pushing GC profiling stack frame");
+ }
+
+ MOZ_ASSERT_UNREACHABLE("Should have exhausted every State variant!");
+ return nullptr;
+}
+
+static JS::ProfilingCategoryPair MajorGCStateToProfilingCategory(State state) {
+ switch (state) {
+ case State::Mark:
+ return JS::ProfilingCategoryPair::GCCC_MajorGC_Mark;
+ case State::Sweep:
+ return JS::ProfilingCategoryPair::GCCC_MajorGC_Sweep;
+ case State::Compact:
+ return JS::ProfilingCategoryPair::GCCC_MajorGC_Compact;
+ default:
+ MOZ_CRASH("Unexpected heap state when pushing GC profiling stack frame");
+ }
+}
+
+AutoMajorGCProfilerEntry::AutoMajorGCProfilerEntry(GCRuntime* gc)
+ : AutoGeckoProfilerEntry(gc->rt->mainContextFromAnyThread(),
+ MajorGCStateToLabel(gc->state()),
+ MajorGCStateToProfilingCategory(gc->state())) {
+ MOZ_ASSERT(gc->heapState() == JS::HeapState::MajorCollecting);
+}
+
+JS_PUBLIC_API JS::HeapState JS::RuntimeHeapState() {
+ return TlsContext.get()->runtime()->gc.heapState();
+}
+
+GCRuntime::IncrementalResult GCRuntime::resetIncrementalGC(
+ GCAbortReason reason) {
+ // Drop as much work as possible from an ongoing incremental GC so
+ // we can start a new GC after it has finished.
+ if (incrementalState == State::NotActive) {
+ return IncrementalResult::Ok;
+ }
+
+ AutoGCSession session(this, JS::HeapState::MajorCollecting);
+
+ switch (incrementalState) {
+ case State::NotActive:
+ case State::MarkRoots:
+ case State::Finish:
+ MOZ_CRASH("Unexpected GC state in resetIncrementalGC");
+ break;
+
+ case State::Prepare:
+ unmarkTask.cancelAndWait();
+ setParallelUnmarkEnabled(false);
+
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->changeGCState(Zone::Prepare, Zone::NoGC);
+ }
+
+ incrementalState = State::NotActive;
+ checkGCStateNotInUse();
+ break;
+
+ case State::Mark: {
+ // Cancel any ongoing marking.
+ marker.reset();
+ clearBufferedGrayRoots();
+
+ for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
+ ResetGrayList(c);
+ }
+
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->changeGCState(Zone::MarkBlackOnly, Zone::NoGC);
+ zone->clearGCSliceThresholds();
+ zone->arenas.unmarkPreMarkedFreeCells();
+ zone->arenas.mergeNewArenasInMarkPhase();
+ }
+
+ {
+ AutoLockHelperThreadState lock;
+ lifoBlocksToFree.ref().freeAll();
+ }
+
+ lastMarkSlice = false;
+ incrementalState = State::Finish;
+
+ MOZ_ASSERT(!marker.shouldCheckCompartments());
+
+ break;
+ }
+
+ case State::Sweep: {
+ // Finish sweeping the current sweep group, then abort.
+ for (CompartmentsIter c(rt); !c.done(); c.next()) {
+ c->gcState.scheduledForDestruction = false;
+ }
+
+ abortSweepAfterCurrentGroup = true;
+ isCompacting = false;
+
+ break;
+ }
+
+ case State::Finalize: {
+ isCompacting = false;
+ break;
+ }
+
+ case State::Compact: {
+ // Skip any remaining zones that would have been compacted.
+ MOZ_ASSERT(isCompacting);
+ startedCompacting = true;
+ zonesToMaybeCompact.ref().clear();
+ break;
+ }
+
+ case State::Decommit: {
+ break;
+ }
+ }
+
+ stats().reset(reason);
+
+ return IncrementalResult::ResetIncremental;
+}
+
+AutoDisableBarriers::AutoDisableBarriers(GCRuntime* gc) : gc(gc) {
+ /*
+ * Clear needsIncrementalBarrier early so we don't do any write barriers
+ * during sweeping.
+ */
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ if (zone->isGCMarking()) {
+ MOZ_ASSERT(zone->needsIncrementalBarrier());
+ zone->setNeedsIncrementalBarrier(false);
+ }
+ MOZ_ASSERT(!zone->needsIncrementalBarrier());
+ }
+}
+
+AutoDisableBarriers::~AutoDisableBarriers() {
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ MOZ_ASSERT(!zone->needsIncrementalBarrier());
+ if (zone->isGCMarking()) {
+ zone->setNeedsIncrementalBarrier(true);
+ }
+ }
+}
+
+static bool ShouldCleanUpEverything(JS::GCReason reason,
+ JSGCInvocationKind gckind) {
+ // During shutdown, we must clean everything up, for the sake of leak
+ // detection. When a runtime has no contexts, or we're doing a GC before a
+ // shutdown CC, those are strong indications that we're shutting down.
+ return IsShutdownReason(reason) || gckind == GC_SHRINK;
+}
+
+static bool ShouldSweepOnBackgroundThread(JS::GCReason reason) {
+ return reason != JS::GCReason::DESTROY_RUNTIME && CanUseExtraThreads();
+}
+
+static bool NeedToCollectNursery(GCRuntime* gc) {
+ return !gc->nursery().isEmpty() || !gc->storeBuffer().isEmpty();
+}
+
+#ifdef DEBUG
+static const char* DescribeBudget(const SliceBudget& budget) {
+ MOZ_ASSERT(TlsContext.get()->isMainThreadContext());
+ constexpr size_t length = 32;
+ static char buffer[length];
+ budget.describe(buffer, length);
+ return buffer;
+}
+#endif
+
+void GCRuntime::incrementalSlice(SliceBudget& budget,
+ const MaybeInvocationKind& gckind,
+ JS::GCReason reason) {
+ MOZ_ASSERT_IF(isIncrementalGCInProgress(), isIncremental);
+
+ AutoSetThreadIsPerformingGC performingGC;
+
+ AutoGCSession session(this, JS::HeapState::MajorCollecting);
+
+ // We don't allow off-thread parsing to start while we're doing an
+ // incremental GC of the atoms zone.
+ if (rt->activeGCInAtomsZone()) {
+ session.maybeCheckAtomsAccess.emplace(rt);
+ }
+
+ bool destroyingRuntime = (reason == JS::GCReason::DESTROY_RUNTIME);
+
+ initialState = incrementalState;
+ isIncremental = !budget.isUnlimited();
+
+#ifdef JS_GC_ZEAL
+ // Do the incremental collection type specified by zeal mode if the collection
+ // was triggered by runDebugGC() and incremental GC has not been cancelled by
+ // resetIncrementalGC().
+ useZeal = isIncremental && reason == JS::GCReason::DEBUG_GC;
+#endif
+
+#ifdef DEBUG
+ stats().log("Incremental: %d, lastMarkSlice: %d, useZeal: %d, budget: %s",
+ bool(isIncremental), bool(lastMarkSlice), bool(useZeal),
+ DescribeBudget(budget));
+#endif
+
+ if (useZeal && hasIncrementalTwoSliceZealMode()) {
+    // Yielding between slices occurs at predetermined points in these modes;
+    // the budget is not used. |isIncremental| is still true.
+ stats().log("Using unlimited budget for two-slice zeal mode");
+ budget.makeUnlimited();
+ }
+
+ switch (incrementalState) {
+ case State::NotActive:
+ invocationKind = gckind.valueOr(GC_NORMAL);
+ initialReason = reason;
+ cleanUpEverything = ShouldCleanUpEverything(reason, invocationKind);
+ sweepOnBackgroundThread = ShouldSweepOnBackgroundThread(reason);
+ isCompacting = shouldCompact();
+ MOZ_ASSERT(!lastMarkSlice);
+ rootsRemoved = false;
+ lastGCStartTime_ = ReallyNow();
+
+#ifdef DEBUG
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ zone->gcSweepGroupIndex = 0;
+ }
+#endif
+
+ incrementalState = State::Prepare;
+ if (!beginPreparePhase(reason, session)) {
+ incrementalState = State::NotActive;
+ break;
+ }
+
+ if (useZeal && hasZealMode(ZealMode::YieldBeforeRootMarking)) {
+ break;
+ }
+
+ [[fallthrough]];
+
+ case State::Prepare:
+ if (waitForBackgroundTask(unmarkTask, budget,
+ DontTriggerSliceWhenFinished) == NotFinished) {
+ break;
+ }
+
+ incrementalState = State::MarkRoots;
+ [[fallthrough]];
+
+ case State::MarkRoots:
+ if (NeedToCollectNursery(this)) {
+ collectNurseryFromMajorGC(gckind, reason);
+ }
+
+ endPreparePhase(reason);
+
+ beginMarkPhase(session);
+
+ // If we needed delayed marking for gray roots, then collect until done.
+ if (isIncremental && !hasValidGrayRootsBuffer()) {
+ budget.makeUnlimited();
+ isIncremental = false;
+ stats().nonincremental(GCAbortReason::GrayRootBufferingFailed);
+ }
+
+ incrementalState = State::Mark;
+
+ if (useZeal && hasZealMode(ZealMode::YieldBeforeMarking)) {
+ break;
+ }
+
+ [[fallthrough]];
+
+ case State::Mark:
+ if (mightSweepInThisSlice(budget.isUnlimited())) {
+ // Trace wrapper rooters before marking if we might start sweeping in
+ // this slice.
+ rt->mainContextFromOwnThread()->traceWrapperGCRooters(&marker);
+ }
+
+ {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
+ if (markUntilBudgetExhausted(budget) == NotFinished) {
+ break;
+ }
+ }
+
+ MOZ_ASSERT(marker.isDrained());
+
+ /*
+       * There are a number of reasons why we break out of collection here:
+       * either to end the slice or to run a new iteration of the loop in
+       * GCRuntime::collect().
+ */
+
+ /*
+ * In incremental GCs where we have already performed more than one
+ * slice we yield after marking with the aim of starting the sweep in
+ * the next slice, since the first slice of sweeping can be expensive.
+ *
+ * This is modified by the various zeal modes. We don't yield in
+ * YieldBeforeMarking mode and we always yield in YieldBeforeSweeping
+ * mode.
+ *
+ * We will need to mark anything new on the stack when we resume, so
+ * we stay in Mark state.
+ */
+ if (isIncremental && !lastMarkSlice) {
+ if ((initialState == State::Mark &&
+ !(useZeal && hasZealMode(ZealMode::YieldBeforeMarking))) ||
+ (useZeal && hasZealMode(ZealMode::YieldBeforeSweeping))) {
+ lastMarkSlice = true;
+ stats().log("Yielding before starting sweeping");
+ break;
+ }
+ }
+
+ incrementalState = State::Sweep;
+ lastMarkSlice = false;
+
+ beginSweepPhase(reason, session);
+
+ [[fallthrough]];
+
+ case State::Sweep:
+ if (storeBuffer().mayHavePointersToDeadCells()) {
+ collectNurseryFromMajorGC(gckind, reason);
+ }
+
+ if (initialState == State::Sweep) {
+ rt->mainContextFromOwnThread()->traceWrapperGCRooters(&marker);
+ }
+
+ if (performSweepActions(budget) == NotFinished) {
+ break;
+ }
+
+ endSweepPhase(destroyingRuntime);
+
+ incrementalState = State::Finalize;
+
+ [[fallthrough]];
+
+ case State::Finalize:
+ if (waitForBackgroundTask(sweepTask, budget, TriggerSliceWhenFinished) ==
+ NotFinished) {
+ break;
+ }
+
+ assertBackgroundSweepingFinished();
+
+ {
+ // Sweep the zones list now that background finalization is finished to
+ // remove and free dead zones, compartments and realms.
+ gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP);
+ gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::DESTROY);
+ JSFreeOp fop(rt);
+ sweepZones(&fop, destroyingRuntime);
+ }
+
+ MOZ_ASSERT(!startedCompacting);
+ incrementalState = State::Compact;
+
+ // Always yield before compacting since it is not incremental.
+ if (isCompacting && !budget.isUnlimited()) {
+ break;
+ }
+
+ [[fallthrough]];
+
+ case State::Compact:
+ if (isCompacting) {
+ if (NeedToCollectNursery(this)) {
+ collectNurseryFromMajorGC(gckind, reason);
+ }
+
+ storeBuffer().checkEmpty();
+ if (!startedCompacting) {
+ beginCompactPhase();
+ }
+
+ if (compactPhase(reason, budget, session) == NotFinished) {
+ break;
+ }
+
+ endCompactPhase();
+ }
+
+ startDecommit();
+ incrementalState = State::Decommit;
+
+ [[fallthrough]];
+
+ case State::Decommit:
+ if (waitForBackgroundTask(decommitTask, budget,
+ TriggerSliceWhenFinished) == NotFinished) {
+ break;
+ }
+
+ incrementalState = State::Finish;
+
+ [[fallthrough]];
+
+ case State::Finish:
+ finishCollection();
+ incrementalState = State::NotActive;
+ break;
+ }
+
+ MOZ_ASSERT(safeToYield);
+ MOZ_ASSERT(marker.markColor() == MarkColor::Black);
+}
+
+void GCRuntime::collectNurseryFromMajorGC(const MaybeInvocationKind& gckind,
+ JS::GCReason reason) {
+ collectNursery(gckind.valueOr(GC_NORMAL), reason,
+ gcstats::PhaseKind::EVICT_NURSERY_FOR_MAJOR_GC);
+}
+
+bool GCRuntime::hasForegroundWork() const {
+ switch (incrementalState) {
+ case State::NotActive:
+ // Incremental GC is not running and no work is pending.
+ return false;
+ case State::Prepare:
+ // We yield in the Prepare state after starting unmarking.
+ return !unmarkTask.wasStarted();
+ case State::Finalize:
+ // We yield in the Finalize state to wait for background sweeping.
+ return !isBackgroundSweeping();
+ case State::Decommit:
+ // We yield in the Decommit state to wait for background decommit.
+ return !decommitTask.wasStarted();
+ default:
+ // In all other states there is still work to do.
+ return true;
+ }
+}
+
+IncrementalProgress GCRuntime::waitForBackgroundTask(
+ GCParallelTask& task, const SliceBudget& budget,
+ ShouldTriggerSliceWhenFinished triggerSlice) {
+ // In incremental collections, yield if the task has not finished and request
+ // a slice to notify us when this happens.
+ if (!budget.isUnlimited()) {
+ AutoLockHelperThreadState lock;
+ if (task.wasStarted(lock)) {
+ if (triggerSlice) {
+ requestSliceAfterBackgroundTask = true;
+ }
+ return NotFinished;
+ }
+ }
+
+ // Otherwise in non-incremental collections, wait here.
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
+ task.join();
+ if (triggerSlice) {
+ cancelRequestedGCAfterBackgroundTask();
+ }
+
+ return Finished;
+}
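+
+// The same yield-or-block decision, sketched with std::future standing in for
+// the helper-thread task. This is only an analogy; the real GCParallelTask API
+// and the slice-request mechanism are different:
+//
+//   enum class Progress { NotFinished, Finished };
+//
+//   Progress WaitOrYield(std::future<void>& task, bool incremental) {
+//     if (incremental && task.wait_for(std::chrono::seconds(0)) !=
+//                            std::future_status::ready) {
+//       return Progress::NotFinished;  // Yield; check again in a later slice.
+//     }
+//     task.wait();  // Block until the background work is done.
+//     return Progress::Finished;
+//   }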
+
+GCAbortReason gc::IsIncrementalGCUnsafe(JSRuntime* rt) {
+ MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
+
+ if (!rt->gc.isIncrementalGCAllowed()) {
+ return GCAbortReason::IncrementalDisabled;
+ }
+
+ return GCAbortReason::None;
+}
+
+inline void GCRuntime::checkZoneIsScheduled(Zone* zone, JS::GCReason reason,
+ const char* trigger) {
+#ifdef DEBUG
+ if (zone->isGCScheduled()) {
+ return;
+ }
+
+ fprintf(stderr,
+ "checkZoneIsScheduled: Zone %p not scheduled as expected in %s GC "
+ "for %s trigger\n",
+ zone, JS::ExplainGCReason(reason), trigger);
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ fprintf(stderr, " Zone %p:%s%s\n", zone.get(),
+ zone->isAtomsZone() ? " atoms" : "",
+ zone->isGCScheduled() ? " scheduled" : "");
+ }
+ fflush(stderr);
+ MOZ_CRASH("Zone not scheduled");
+#endif
+}
+
+GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
+ bool nonincrementalByAPI, JS::GCReason reason, SliceBudget& budget) {
+ if (nonincrementalByAPI) {
+ stats().nonincremental(GCAbortReason::NonIncrementalRequested);
+ budget.makeUnlimited();
+
+    // Reset any in-progress incremental GC if this was triggered via the
+ // API. This isn't required for correctness, but sometimes during tests
+ // the caller expects this GC to collect certain objects, and we need
+ // to make sure to collect everything possible.
+ if (reason != JS::GCReason::ALLOC_TRIGGER) {
+ return resetIncrementalGC(GCAbortReason::NonIncrementalRequested);
+ }
+
+ return IncrementalResult::Ok;
+ }
+
+ if (reason == JS::GCReason::ABORT_GC) {
+ budget.makeUnlimited();
+ stats().nonincremental(GCAbortReason::AbortRequested);
+ return resetIncrementalGC(GCAbortReason::AbortRequested);
+ }
+
+ if (!budget.isUnlimited()) {
+ GCAbortReason unsafeReason = IsIncrementalGCUnsafe(rt);
+ if (unsafeReason == GCAbortReason::None) {
+ if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
+ unsafeReason = GCAbortReason::CompartmentRevived;
+ } else if (!incrementalGCEnabled) {
+ unsafeReason = GCAbortReason::ModeChange;
+ }
+ }
+
+ if (unsafeReason != GCAbortReason::None) {
+ budget.makeUnlimited();
+ stats().nonincremental(unsafeReason);
+ return resetIncrementalGC(unsafeReason);
+ }
+ }
+
+ GCAbortReason resetReason = GCAbortReason::None;
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ if (!zone->canCollect()) {
+ continue;
+ }
+
+ if (zone->gcHeapSize.bytes() >=
+ zone->gcHeapThreshold.incrementalLimitBytes()) {
+ checkZoneIsScheduled(zone, reason, "GC bytes");
+ budget.makeUnlimited();
+ stats().nonincremental(GCAbortReason::GCBytesTrigger);
+ if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
+ resetReason = GCAbortReason::GCBytesTrigger;
+ }
+ }
+
+ if (zone->mallocHeapSize.bytes() >=
+ zone->mallocHeapThreshold.incrementalLimitBytes()) {
+ checkZoneIsScheduled(zone, reason, "malloc bytes");
+ budget.makeUnlimited();
+ stats().nonincremental(GCAbortReason::MallocBytesTrigger);
+ if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
+ resetReason = GCAbortReason::MallocBytesTrigger;
+ }
+ }
+
+ if (zone->jitHeapSize.bytes() >=
+ zone->jitHeapThreshold.incrementalLimitBytes()) {
+ checkZoneIsScheduled(zone, reason, "JIT code bytes");
+ budget.makeUnlimited();
+ stats().nonincremental(GCAbortReason::JitCodeBytesTrigger);
+ if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
+ resetReason = GCAbortReason::JitCodeBytesTrigger;
+ }
+ }
+
+ if (isIncrementalGCInProgress() &&
+ zone->isGCScheduled() != zone->wasGCStarted()) {
+ budget.makeUnlimited();
+ resetReason = GCAbortReason::ZoneChange;
+ }
+ }
+
+ if (resetReason != GCAbortReason::None) {
+ return resetIncrementalGC(resetReason);
+ }
+
+ return IncrementalResult::Ok;
+}
+
+void GCRuntime::maybeIncreaseSliceBudget(SliceBudget& budget) {
+ if (js::SupportDifferentialTesting()) {
+ return;
+ }
+
+  // Increase the time budget for long-running incremental collections. Enforce
+  // a minimum time budget that increases linearly with the total time spent
+  // collecting so far, up to a maximum.
+
+ if (budget.isTimeBudget() && !budget.isUnlimited() &&
+ isIncrementalGCInProgress()) {
+ // All times are in milliseconds.
+ struct BudgetAtTime {
+ double time;
+ double budget;
+ };
+ const BudgetAtTime MinBudgetStart{1500, 0.0};
+ const BudgetAtTime MinBudgetEnd{2500, 100.0};
+
+ double totalTime = (ReallyNow() - lastGCStartTime()).ToMilliseconds();
+
+ double minBudget =
+ LinearInterpolate(totalTime, MinBudgetStart.time, MinBudgetStart.budget,
+ MinBudgetEnd.time, MinBudgetEnd.budget);
+
+ if (budget.timeBudget.budget < minBudget) {
+ budget = SliceBudget(TimeBudget(minBudget));
+ }
+ }
+}
+
+static void ScheduleZones(GCRuntime* gc) {
+ for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
+ if (!zone->canCollect()) {
+ continue;
+ }
+
+ if (!gc->isPerZoneGCEnabled()) {
+ zone->scheduleGC();
+ }
+
+ // To avoid resets, continue to collect any zones that were being
+ // collected in a previous slice.
+ if (gc->isIncrementalGCInProgress() && zone->wasGCStarted()) {
+ zone->scheduleGC();
+ }
+
+ // This is a heuristic to reduce the total number of collections.
+ bool inHighFrequencyMode = gc->schedulingState.inHighFrequencyGCMode();
+ if (zone->gcHeapSize.bytes() >=
+ zone->gcHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
+ zone->mallocHeapSize.bytes() >=
+ zone->mallocHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
+ zone->jitHeapSize.bytes() >= zone->jitHeapThreshold.startBytes()) {
+ zone->scheduleGC();
+ }
+ }
+}
+
+static void UnscheduleZones(GCRuntime* gc) {
+ for (ZonesIter zone(gc->rt, WithAtoms); !zone.done(); zone.next()) {
+ zone->unscheduleGC();
+ }
+}
+
+class js::gc::AutoCallGCCallbacks {
+ GCRuntime& gc_;
+ JS::GCReason reason_;
+
+ public:
+ explicit AutoCallGCCallbacks(GCRuntime& gc, JS::GCReason reason)
+ : gc_(gc), reason_(reason) {
+ gc_.maybeCallGCCallback(JSGC_BEGIN, reason);
+ }
+ ~AutoCallGCCallbacks() { gc_.maybeCallGCCallback(JSGC_END, reason_); }
+};
+
+void GCRuntime::maybeCallGCCallback(JSGCStatus status, JS::GCReason reason) {
+ if (!gcCallback.ref().op) {
+ return;
+ }
+
+ if (isIncrementalGCInProgress()) {
+ return;
+ }
+
+ if (gcCallbackDepth == 0) {
+ // Save scheduled zone information in case the callback clears it.
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ zone->gcScheduledSaved_ = zone->gcScheduled_;
+ }
+ }
+
+ gcCallbackDepth++;
+
+ callGCCallback(status, reason);
+
+ MOZ_ASSERT(gcCallbackDepth != 0);
+ gcCallbackDepth--;
+
+ if (gcCallbackDepth == 0) {
+ // Ensure any zone that was originally scheduled stays scheduled.
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ zone->gcScheduled_ = zone->gcScheduled_ || zone->gcScheduledSaved_;
+ }
+ }
+}
+
+/*
+ * We disable inlining to ensure that the bottom of the stack with possible GC
+ * roots recorded in MarkRuntime excludes any pointers we use during the marking
+ * implementation.
+ */
+MOZ_NEVER_INLINE GCRuntime::IncrementalResult GCRuntime::gcCycle(
+ bool nonincrementalByAPI, SliceBudget budget,
+ const MaybeInvocationKind& gckind, JS::GCReason reason) {
+ // Assert if this is a GC unsafe region.
+ rt->mainContextFromOwnThread()->verifyIsSafeToGC();
+
+ // It's ok if threads other than the main thread have suppressGC set, as
+ // they are operating on zones which will not be collected from here.
+ MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
+
+ // This reason is used internally. See below.
+ MOZ_ASSERT(reason != JS::GCReason::RESET);
+
+ // Background finalization and decommit are finished by definition before we
+ // can start a new major GC. Background allocation may still be running, but
+ // that's OK because chunk pools are protected by the GC lock.
+ if (!isIncrementalGCInProgress()) {
+ assertBackgroundSweepingFinished();
+ MOZ_ASSERT(decommitTask.isIdle());
+ }
+
+ // Note that GC callbacks are allowed to re-enter GC.
+ AutoCallGCCallbacks callCallbacks(*this, reason);
+
+  // Increase the slice budget for long-running collections before it is
+  // recorded by AutoGCSlice.
+ maybeIncreaseSliceBudget(budget);
+
+ ScheduleZones(this);
+ gcstats::AutoGCSlice agc(stats(), scanZonesBeforeGC(),
+ gckind.valueOr(invocationKind), budget, reason);
+
+ IncrementalResult result =
+ budgetIncrementalGC(nonincrementalByAPI, reason, budget);
+ if (result == IncrementalResult::ResetIncremental) {
+ if (incrementalState == State::NotActive) {
+ // The collection was reset and has finished.
+ return result;
+ }
+
+ // The collection was reset but we must finish up some remaining work.
+ reason = JS::GCReason::RESET;
+ }
+
+ majorGCTriggerReason = JS::GCReason::NO_REASON;
+ MOZ_ASSERT(!stats().hasTrigger());
+
+ incGcNumber();
+ incGcSliceNumber();
+
+ gcprobes::MajorGCStart();
+ incrementalSlice(budget, gckind, reason);
+ gcprobes::MajorGCEnd();
+
+ MOZ_ASSERT_IF(result == IncrementalResult::ResetIncremental,
+ !isIncrementalGCInProgress());
+ return result;
+}
+
+void GCRuntime::waitForBackgroundTasksBeforeSlice() {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
+
+ // Background finalization and decommit are finished by definition before we
+ // can start a new major GC.
+ if (!isIncrementalGCInProgress()) {
+ assertBackgroundSweepingFinished();
+ MOZ_ASSERT(decommitTask.isIdle());
+ }
+
+ // We must also wait for background allocation to finish so we can avoid
+ // taking the GC lock when manipulating the chunks during the GC. The
+ // background alloc task can run between slices, so we must wait for it at the
+ // start of every slice.
+ //
+ // TODO: Is this still necessary?
+ allocTask.cancelAndWait();
+}
+
+inline bool GCRuntime::mightSweepInThisSlice(bool nonIncremental) {
+ MOZ_ASSERT(incrementalState < State::Sweep);
+ return nonIncremental || lastMarkSlice || hasIncrementalTwoSliceZealMode();
+}
+
+#ifdef JS_GC_ZEAL
+static bool IsDeterministicGCReason(JS::GCReason reason) {
+ switch (reason) {
+ case JS::GCReason::API:
+ case JS::GCReason::DESTROY_RUNTIME:
+ case JS::GCReason::LAST_DITCH:
+ case JS::GCReason::TOO_MUCH_MALLOC:
+ case JS::GCReason::TOO_MUCH_WASM_MEMORY:
+ case JS::GCReason::TOO_MUCH_JIT_CODE:
+ case JS::GCReason::ALLOC_TRIGGER:
+ case JS::GCReason::DEBUG_GC:
+ case JS::GCReason::CC_FORCED:
+ case JS::GCReason::SHUTDOWN_CC:
+ case JS::GCReason::ABORT_GC:
+ case JS::GCReason::DISABLE_GENERATIONAL_GC:
+ case JS::GCReason::FINISH_GC:
+ case JS::GCReason::PREPARE_FOR_TRACING:
+ return true;
+
+ default:
+ return false;
+ }
+}
+#endif
+
+gcstats::ZoneGCStats GCRuntime::scanZonesBeforeGC() {
+ gcstats::ZoneGCStats zoneStats;
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ zoneStats.zoneCount++;
+ zoneStats.compartmentCount += zone->compartments().length();
+ if (zone->canCollect()) {
+ zoneStats.collectableZoneCount++;
+ if (zone->isGCScheduled()) {
+ zoneStats.collectedZoneCount++;
+ zoneStats.collectedCompartmentCount += zone->compartments().length();
+ }
+ }
+ }
+
+ return zoneStats;
+}
+
+// The GC can only clean up scheduledForDestruction realms that were marked live
+// by a barrier (e.g. by RemapWrappers from a navigation event). It is also
+// common to have realms held live because they are part of a cycle in gecko,
+// e.g. involving the HTMLDocument wrapper. In this case, we need to run the
+// CycleCollector in order to remove these edges before the realm can be freed.
+void GCRuntime::maybeDoCycleCollection() {
+ const static float ExcessiveGrayRealms = 0.8f;
+ const static size_t LimitGrayRealms = 200;
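+  // For example (illustrative numbers only): 170 gray realms out of 200 gives
+  // a gray fraction of 0.85, which exceeds ExcessiveGrayRealms, so the cycle
+  // collection callback is invoked even though the absolute gray count is
+  // below LimitGrayRealms.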
+
+ size_t realmsTotal = 0;
+ size_t realmsGray = 0;
+ for (RealmsIter realm(rt); !realm.done(); realm.next()) {
+ ++realmsTotal;
+ GlobalObject* global = realm->unsafeUnbarrieredMaybeGlobal();
+ if (global && global->isMarkedGray()) {
+ ++realmsGray;
+ }
+ }
+ float grayFraction = float(realmsGray) / float(realmsTotal);
+ if (grayFraction > ExcessiveGrayRealms || realmsGray > LimitGrayRealms) {
+ callDoCycleCollectionCallback(rt->mainContextFromOwnThread());
+ }
+}
+
+void GCRuntime::checkCanCallAPI() {
+ MOZ_RELEASE_ASSERT(CurrentThreadCanAccessRuntime(rt));
+
+ /* If we attempt to invoke the GC while we are running in the GC, assert. */
+ MOZ_RELEASE_ASSERT(!JS::RuntimeHeapIsBusy());
+}
+
+bool GCRuntime::checkIfGCAllowedInCurrentState(JS::GCReason reason) {
+ if (rt->mainContextFromOwnThread()->suppressGC) {
+ return false;
+ }
+
+  // While the runtime is being destroyed, only allow shutdown GCs. This keeps
+  // the GC callback from triggering a nested GC and resetting global state.
+ if (rt->isBeingDestroyed() && !IsShutdownReason(reason)) {
+ return false;
+ }
+
+#ifdef JS_GC_ZEAL
+ if (deterministicOnly && !IsDeterministicGCReason(reason)) {
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+bool GCRuntime::shouldRepeatForDeadZone(JS::GCReason reason) {
+ MOZ_ASSERT_IF(reason == JS::GCReason::COMPARTMENT_REVIVED, !isIncremental);
+ MOZ_ASSERT(!isIncrementalGCInProgress());
+
+ if (!isIncremental) {
+ return false;
+ }
+
+ for (CompartmentsIter c(rt); !c.done(); c.next()) {
+ if (c->gcState.scheduledForDestruction) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+struct MOZ_RAII AutoSetZoneSliceThresholds {
+ explicit AutoSetZoneSliceThresholds(GCRuntime* gc) : gc(gc) {
+ // On entry, zones that are already collecting should have a slice threshold
+ // set.
+ for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
+ MOZ_ASSERT((zone->gcState() > Zone::Prepare) ==
+ zone->gcHeapThreshold.hasSliceThreshold());
+ MOZ_ASSERT((zone->gcState() > Zone::Prepare) ==
+ zone->mallocHeapThreshold.hasSliceThreshold());
+ }
+ }
+
+ ~AutoSetZoneSliceThresholds() {
+ // On exit, update the thresholds for all collecting zones.
+ for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
+ if (zone->gcState() > Zone::Prepare) {
+ zone->setGCSliceThresholds(*gc);
+ } else {
+ MOZ_ASSERT(!zone->gcHeapThreshold.hasSliceThreshold());
+ MOZ_ASSERT(!zone->mallocHeapThreshold.hasSliceThreshold());
+ }
+ }
+ }
+
+ GCRuntime* gc;
+};
+
+void GCRuntime::collect(bool nonincrementalByAPI, SliceBudget budget,
+ const MaybeInvocationKind& gckindArg,
+ JS::GCReason reason) {
+ MOZ_ASSERT(reason != JS::GCReason::NO_REASON);
+
+ MaybeInvocationKind gckind = gckindArg;
+ MOZ_ASSERT_IF(!isIncrementalGCInProgress(), gckind.isSome());
+
+ // Checks run for each request, even if we do not actually GC.
+ checkCanCallAPI();
+
+ // Check if we are allowed to GC at this time before proceeding.
+ if (!checkIfGCAllowedInCurrentState(reason)) {
+ return;
+ }
+
+ stats().log("GC starting in state %s", StateName(incrementalState));
+
+ AutoTraceLog logGC(TraceLoggerForCurrentThread(), TraceLogger_GC);
+ AutoStopVerifyingBarriers av(rt, IsShutdownReason(reason));
+ AutoEnqueuePendingParseTasksAfterGC aept(*this);
+ AutoMaybeLeaveAtomsZone leaveAtomsZone(rt->mainContextFromOwnThread());
+ AutoSetZoneSliceThresholds sliceThresholds(this);
+
+#ifdef DEBUG
+ if (IsShutdownReason(reason)) {
+ marker.markQueue.clear();
+ marker.queuePos = 0;
+ }
+#endif
+
+ bool repeat;
+ do {
+ IncrementalResult cycleResult =
+ gcCycle(nonincrementalByAPI, budget, gckind, reason);
+
+ if (reason == JS::GCReason::ABORT_GC) {
+ MOZ_ASSERT(!isIncrementalGCInProgress());
+ stats().log("GC aborted by request");
+ break;
+ }
+
+ /*
+ * Sometimes when we finish a GC we need to immediately start a new one.
+ * This happens in the following cases:
+ * - when we reset the current GC
+ * - when finalizers drop roots during shutdown
+ * - when zones that we thought were dead at the start of GC are
+ * not collected (see the large comment in beginMarkPhase)
+ */
+ repeat = false;
+ if (!isIncrementalGCInProgress()) {
+ if (cycleResult == ResetIncremental) {
+ repeat = true;
+ } else if (rootsRemoved && IsShutdownReason(reason)) {
+ /* Need to re-schedule all zones for GC. */
+ JS::PrepareForFullGC(rt->mainContextFromOwnThread());
+ repeat = true;
+ reason = JS::GCReason::ROOTS_REMOVED;
+ } else if (shouldRepeatForDeadZone(reason)) {
+ repeat = true;
+ reason = JS::GCReason::COMPARTMENT_REVIVED;
+ }
+ }
+
+ if (repeat) {
+ gckind = Some(invocationKind);
+ }
+ } while (repeat);
+
+ if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
+ maybeDoCycleCollection();
+ }
+
+#ifdef JS_GC_ZEAL
+ if (hasZealMode(ZealMode::CheckHeapAfterGC)) {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::TRACE_HEAP);
+ CheckHeapAfterGC(rt);
+ }
+ if (hasZealMode(ZealMode::CheckGrayMarking) && !isIncrementalGCInProgress()) {
+ MOZ_RELEASE_ASSERT(CheckGrayMarkingState(rt));
+ }
+#endif
+ stats().log("GC ending in state %s", StateName(incrementalState));
+
+ UnscheduleZones(this);
+}
+
+js::AutoEnqueuePendingParseTasksAfterGC::
+ ~AutoEnqueuePendingParseTasksAfterGC() {
+ if (!OffThreadParsingMustWaitForGC(gc_.rt)) {
+ EnqueuePendingParseTasksAfterGC(gc_.rt);
+ }
+}
+
+SliceBudget GCRuntime::defaultBudget(JS::GCReason reason, int64_t millis) {
+ if (millis == 0) {
+ if (reason == JS::GCReason::ALLOC_TRIGGER) {
+ millis = defaultSliceBudgetMS();
+ } else if (schedulingState.inHighFrequencyGCMode()) {
+ millis = defaultSliceBudgetMS() * IGC_MARK_SLICE_MULTIPLIER;
+ } else {
+ millis = defaultSliceBudgetMS();
+ }
+ }
+
+ return SliceBudget(TimeBudget(millis));
+}
+
+void GCRuntime::gc(JSGCInvocationKind gckind, JS::GCReason reason) {
+ collect(true, SliceBudget::unlimited(), mozilla::Some(gckind), reason);
+}
+
+void GCRuntime::startGC(JSGCInvocationKind gckind, JS::GCReason reason,
+ int64_t millis) {
+ MOZ_ASSERT(!isIncrementalGCInProgress());
+ if (!JS::IsIncrementalGCEnabled(rt->mainContextFromOwnThread())) {
+ gc(gckind, reason);
+ return;
+ }
+ collect(false, defaultBudget(reason, millis), Some(gckind), reason);
+}
+
+void GCRuntime::gcSlice(JS::GCReason reason, int64_t millis) {
+ MOZ_ASSERT(isIncrementalGCInProgress());
+ collect(false, defaultBudget(reason, millis), Nothing(), reason);
+}
+
+void GCRuntime::finishGC(JS::GCReason reason) {
+ MOZ_ASSERT(isIncrementalGCInProgress());
+
+  // Unless this collection was triggered by running out of memory, skip the
+  // compacting phase when we have to finish an ongoing incremental GC
+  // non-incrementally, to avoid janking the browser.
+ if (!IsOOMReason(initialReason)) {
+ if (incrementalState == State::Compact) {
+ abortGC();
+ return;
+ }
+
+ isCompacting = false;
+ }
+
+ collect(false, SliceBudget::unlimited(), Nothing(), reason);
+}
+
+void GCRuntime::abortGC() {
+ MOZ_ASSERT(isIncrementalGCInProgress());
+ checkCanCallAPI();
+ MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
+
+ collect(false, SliceBudget::unlimited(), Nothing(), JS::GCReason::ABORT_GC);
+}
+
+static bool ZonesSelected(GCRuntime* gc) {
+ for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
+ if (zone->isGCScheduled()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void GCRuntime::startDebugGC(JSGCInvocationKind gckind, SliceBudget& budget) {
+ MOZ_ASSERT(!isIncrementalGCInProgress());
+ if (!ZonesSelected(this)) {
+ JS::PrepareForFullGC(rt->mainContextFromOwnThread());
+ }
+ collect(false, budget, Some(gckind), JS::GCReason::DEBUG_GC);
+}
+
+void GCRuntime::debugGCSlice(SliceBudget& budget) {
+ MOZ_ASSERT(isIncrementalGCInProgress());
+ if (!ZonesSelected(this)) {
+ JS::PrepareForIncrementalGC(rt->mainContextFromOwnThread());
+ }
+ collect(false, budget, Nothing(), JS::GCReason::DEBUG_GC);
+}
+
+/* Schedule a full GC unless a zone will already be collected. */
+void js::PrepareForDebugGC(JSRuntime* rt) {
+ if (!ZonesSelected(&rt->gc)) {
+ JS::PrepareForFullGC(rt->mainContextFromOwnThread());
+ }
+}
+
+void GCRuntime::onOutOfMallocMemory() {
+ // Stop allocating new chunks.
+ allocTask.cancelAndWait();
+
+ // Make sure we release anything queued for release.
+ decommitTask.join();
+ nursery().joinDecommitTask();
+
+ // Wait for background free of nursery huge slots to finish.
+ sweepTask.join();
+
+ AutoLockGC lock(this);
+ onOutOfMallocMemory(lock);
+}
+
+void GCRuntime::onOutOfMallocMemory(const AutoLockGC& lock) {
+ // Release any relocated arenas we may be holding on to, without releasing
+ // the GC lock.
+ releaseHeldRelocatedArenasWithoutUnlocking(lock);
+
+ // Throw away any excess chunks we have lying around.
+ freeEmptyChunks(lock);
+
+ // Immediately decommit as many arenas as possible in the hopes that this
+ // might let the OS scrape together enough pages to satisfy the failing
+ // malloc request.
+ decommitFreeArenasWithoutUnlocking(lock);
+}
+
+void GCRuntime::minorGC(JS::GCReason reason, gcstats::PhaseKind phase) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
+
+ MOZ_ASSERT_IF(reason == JS::GCReason::EVICT_NURSERY,
+ !rt->mainContextFromOwnThread()->suppressGC);
+ if (rt->mainContextFromOwnThread()->suppressGC) {
+ return;
+ }
+
+ incGcNumber();
+
+ collectNursery(GC_NORMAL, reason, phase);
+
+#ifdef JS_GC_ZEAL
+ if (hasZealMode(ZealMode::CheckHeapAfterGC)) {
+ gcstats::AutoPhase ap(stats(), phase);
+ CheckHeapAfterGC(rt);
+ }
+#endif
+
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ maybeTriggerGCAfterAlloc(zone);
+ maybeTriggerGCAfterMalloc(zone);
+ }
+}
+
+void GCRuntime::collectNursery(JSGCInvocationKind kind, JS::GCReason reason,
+ gcstats::PhaseKind phase) {
+ AutoMaybeLeaveAtomsZone leaveAtomsZone(rt->mainContextFromOwnThread());
+
+ // Note that we aren't collecting the updated alloc counts from any helper
+  // threads. We should be, but I'm not sure where to add that
+ // synchronisation.
+ uint32_t numAllocs =
+ rt->mainContextFromOwnThread()->getAndResetAllocsThisZoneSinceMinorGC();
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ numAllocs += zone->getAndResetTenuredAllocsSinceMinorGC();
+ }
+ stats().setAllocsSinceMinorGCTenured(numAllocs);
+
+ gcstats::AutoPhase ap(stats(), phase);
+
+ nursery().clearMinorGCRequest();
+ TraceLoggerThread* logger = TraceLoggerForCurrentThread();
+ AutoTraceLog logMinorGC(logger, TraceLogger_MinorGC);
+ nursery().collect(kind, reason);
+ MOZ_ASSERT(nursery().isEmpty());
+
+ startBackgroundFreeAfterMinorGC();
+}
+
+void GCRuntime::startBackgroundFreeAfterMinorGC() {
+ MOZ_ASSERT(nursery().isEmpty());
+
+ {
+ AutoLockHelperThreadState lock;
+
+ lifoBlocksToFree.ref().transferFrom(&lifoBlocksToFreeAfterMinorGC.ref());
+
+ if (lifoBlocksToFree.ref().isEmpty() &&
+ buffersToFreeAfterMinorGC.ref().empty()) {
+ return;
+ }
+ }
+
+ startBackgroundFree();
+}
+
+JS::AutoDisableGenerationalGC::AutoDisableGenerationalGC(JSContext* cx)
+ : cx(cx) {
+ if (!cx->generationalDisabled) {
+ cx->runtime()->gc.evictNursery(JS::GCReason::DISABLE_GENERATIONAL_GC);
+ cx->nursery().disable();
+ }
+ ++cx->generationalDisabled;
+}
+
+JS::AutoDisableGenerationalGC::~AutoDisableGenerationalGC() {
+ if (--cx->generationalDisabled == 0 &&
+ cx->runtime()->gc.tunables.gcMaxNurseryBytes() > 0) {
+ cx->nursery().enable();
+ }
+}
+
+JS_PUBLIC_API bool JS::IsGenerationalGCEnabled(JSRuntime* rt) {
+ return !rt->mainContextFromOwnThread()->generationalDisabled;
+}
+
+bool GCRuntime::gcIfRequested() {
+ // This method returns whether a major GC was performed.
+
+ if (nursery().minorGCRequested()) {
+ minorGC(nursery().minorGCTriggerReason());
+ }
+
+ if (majorGCRequested()) {
+ if (majorGCTriggerReason == JS::GCReason::DELAYED_ATOMS_GC &&
+ !rt->mainContextFromOwnThread()->canCollectAtoms()) {
+ // A GC was requested to collect the atoms zone, but it's no longer
+ // possible. Skip this collection.
+ majorGCTriggerReason = JS::GCReason::NO_REASON;
+ return false;
+ }
+
+ if (!isIncrementalGCInProgress()) {
+ startGC(GC_NORMAL, majorGCTriggerReason);
+ } else {
+ gcSlice(majorGCTriggerReason);
+ }
+ return true;
+ }
+
+ return false;
+}
+
+void js::gc::FinishGC(JSContext* cx, JS::GCReason reason) {
+ // Calling this when GC is suppressed won't have any effect.
+ MOZ_ASSERT(!cx->suppressGC);
+
+ // GC callbacks may run arbitrary code, including JS. Check this regardless of
+ // whether we GC for this invocation.
+ MOZ_ASSERT(cx->isNurseryAllocAllowed());
+
+ if (JS::IsIncrementalGCInProgress(cx)) {
+ JS::PrepareForIncrementalGC(cx);
+ JS::FinishIncrementalGC(cx, reason);
+ }
+}
+
+void js::gc::WaitForBackgroundTasks(JSContext* cx) {
+ cx->runtime()->gc.waitForBackgroundTasks();
+}
+
+void GCRuntime::waitForBackgroundTasks() {
+ MOZ_ASSERT(!isIncrementalGCInProgress());
+ MOZ_ASSERT(sweepTask.isIdle());
+ MOZ_ASSERT(decommitTask.isIdle());
+ MOZ_ASSERT(markTask.isIdle());
+
+ allocTask.join();
+ freeTask.join();
+ nursery().joinDecommitTask();
+}
+
+Realm* js::NewRealm(JSContext* cx, JSPrincipals* principals,
+ const JS::RealmOptions& options) {
+ JSRuntime* rt = cx->runtime();
+ JS_AbortIfWrongThread(cx);
+
+ UniquePtr<Zone> zoneHolder;
+ UniquePtr<Compartment> compHolder;
+
+ Compartment* comp = nullptr;
+ Zone* zone = nullptr;
+ JS::CompartmentSpecifier compSpec =
+ options.creationOptions().compartmentSpecifier();
+ switch (compSpec) {
+ case JS::CompartmentSpecifier::NewCompartmentInSystemZone:
+ // systemZone might be null here, in which case we'll make a zone and
+ // set this field below.
+ zone = rt->gc.systemZone;
+ break;
+ case JS::CompartmentSpecifier::NewCompartmentInExistingZone:
+ zone = options.creationOptions().zone();
+ MOZ_ASSERT(zone);
+ break;
+ case JS::CompartmentSpecifier::ExistingCompartment:
+ comp = options.creationOptions().compartment();
+ zone = comp->zone();
+ break;
+ case JS::CompartmentSpecifier::NewCompartmentAndZone:
+ case JS::CompartmentSpecifier::NewCompartmentInSelfHostingZone:
+ break;
+ }
+
+ if (!zone) {
+ Zone::Kind kind = Zone::NormalZone;
+ const JSPrincipals* trusted = rt->trustedPrincipals();
+ if (compSpec == JS::CompartmentSpecifier::NewCompartmentInSelfHostingZone) {
+ MOZ_ASSERT(!rt->hasInitializedSelfHosting());
+ kind = Zone::SelfHostingZone;
+ } else if (compSpec ==
+ JS::CompartmentSpecifier::NewCompartmentInSystemZone ||
+ (principals && principals == trusted)) {
+ kind = Zone::SystemZone;
+ }
+
+ zoneHolder = MakeUnique<Zone>(cx->runtime(), kind);
+ if (!zoneHolder || !zoneHolder->init()) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ zone = zoneHolder.get();
+ }
+
+ bool invisibleToDebugger = options.creationOptions().invisibleToDebugger();
+ if (comp) {
+ // Debugger visibility is per-compartment, not per-realm, so make sure the
+ // new realm's visibility matches its compartment's.
+ MOZ_ASSERT(comp->invisibleToDebugger() == invisibleToDebugger);
+ } else {
+ compHolder = cx->make_unique<JS::Compartment>(zone, invisibleToDebugger);
+ if (!compHolder) {
+ return nullptr;
+ }
+
+ comp = compHolder.get();
+ }
+
+ UniquePtr<Realm> realm(cx->new_<Realm>(comp, options));
+ if (!realm || !realm->init(cx, principals)) {
+ return nullptr;
+ }
+
+ // Make sure we don't put system and non-system realms in the same
+ // compartment.
+ if (!compHolder) {
+ MOZ_RELEASE_ASSERT(realm->isSystem() == IsSystemCompartment(comp));
+ }
+
+ AutoLockGC lock(rt);
+
+ // Reserve space in the Vectors before we start mutating them.
+ if (!comp->realms().reserve(comp->realms().length() + 1) ||
+ (compHolder &&
+ !zone->compartments().reserve(zone->compartments().length() + 1)) ||
+ (zoneHolder && !rt->gc.zones().reserve(rt->gc.zones().length() + 1))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ // After this everything must be infallible.
+
+ comp->realms().infallibleAppend(realm.get());
+
+ if (compHolder) {
+ zone->compartments().infallibleAppend(compHolder.release());
+ }
+
+ if (zoneHolder) {
+ rt->gc.zones().infallibleAppend(zoneHolder.release());
+
+ // Lazily set the runtime's system zone.
+ if (compSpec == JS::CompartmentSpecifier::NewCompartmentInSystemZone) {
+ MOZ_RELEASE_ASSERT(!rt->gc.systemZone);
+ MOZ_ASSERT(zone->isSystemZone());
+ rt->gc.systemZone = zone;
+ }
+ }
+
+ return realm.release();
+}
+
+void gc::MergeRealms(Realm* source, Realm* target) {
+ JSRuntime* rt = source->runtimeFromMainThread();
+ rt->gc.mergeRealms(source, target);
+ rt->gc.maybeTriggerGCAfterAlloc(target->zone());
+ rt->gc.maybeTriggerGCAfterMalloc(target->zone());
+}
+
+void GCRuntime::mergeRealms(Realm* source, Realm* target) {
+  // The source realm must be specifically flagged as mergeable. This
+  // also implies that the realm is not visible to the debugger.
+ MOZ_ASSERT(source->creationOptions().mergeable());
+ MOZ_ASSERT(source->creationOptions().invisibleToDebugger());
+
+ MOZ_ASSERT(!source->hasBeenEnteredIgnoringJit());
+ MOZ_ASSERT(source->zone()->compartments().length() == 1);
+
+ JSContext* cx = rt->mainContextFromOwnThread();
+
+ MOZ_ASSERT(!source->zone()->wasGCStarted());
+ JS::AutoAssertNoGC nogc(cx);
+
+ AutoTraceSession session(rt);
+
+ // Cleanup tables and other state in the source realm/zone that will be
+ // meaningless after merging into the target realm/zone.
+
+ source->clearTables();
+ source->zone()->clearTables();
+ source->unsetIsDebuggee();
+
+  // Release any relocated arenas that we may be holding on to, as they might
+  // be in the source zone.
+ releaseHeldRelocatedArenas();
+
+ // Fixup realm pointers in source to refer to target, and make sure
+ // type information generations are in sync.
+
+ GlobalObject* global = target->maybeGlobal();
+ MOZ_ASSERT(global);
+ AssertTargetIsNotGray(global);
+
+ for (auto group = source->zone()->cellIterUnsafe<ObjectGroup>();
+ !group.done(); group.next()) {
+ // Replace placeholder object prototypes with the correct prototype in
+ // the target realm.
+ TaggedProto proto(group->proto());
+ if (proto.isObject()) {
+ JSObject* obj = proto.toObject();
+ if (GlobalObject::isOffThreadPrototypePlaceholder(obj)) {
+ JSObject* targetProto =
+ global->getPrototypeForOffThreadPlaceholder(obj);
+ MOZ_ASSERT(targetProto->isDelegate());
+ group->setProtoUnchecked(TaggedProto(targetProto));
+ }
+ }
+
+ group->realm_ = target;
+ }
+
+ // Fixup zone pointers in source's zone to refer to target's zone.
+
+ bool targetZoneIsCollecting = target->zone()->gcState() > Zone::Prepare;
+ for (auto thingKind : AllAllocKinds()) {
+ for (ArenaIter aiter(source->zone(), thingKind); !aiter.done();
+ aiter.next()) {
+ Arena* arena = aiter.get();
+ arena->zone = target->zone();
+ if (MOZ_UNLIKELY(targetZoneIsCollecting)) {
+ // If we are currently collecting the target zone then we must
+ // treat all merged things as if they were allocated during the
+ // collection.
+ for (ArenaCellIter cell(arena); !cell.done(); cell.next()) {
+ MOZ_ASSERT(!cell->isMarkedAny());
+ cell->markBlack();
+ }
+ }
+ }
+ }
+
+ // The source should be the only realm in its zone.
+ for (RealmsInZoneIter r(source->zone()); !r.done(); r.next()) {
+ MOZ_ASSERT(r.get() == source);
+ }
+
+ // Merge the allocator, stats and UIDs in source's zone into target's zone.
+ target->zone()->arenas.adoptArenas(&source->zone()->arenas,
+ targetZoneIsCollecting);
+ target->zone()->addTenuredAllocsSinceMinorGC(
+ source->zone()->getAndResetTenuredAllocsSinceMinorGC());
+ target->zone()->gcHeapSize.adopt(source->zone()->gcHeapSize);
+ target->zone()->adoptUniqueIds(source->zone());
+ target->zone()->adoptMallocBytes(source->zone());
+
+ // Atoms which are marked in source's zone are now marked in target's zone.
+ atomMarking.adoptMarkedAtoms(target->zone(), source->zone());
+
+ // The source Realm is a parse-only realm and should not have collected any
+ // zone-tracked metadata.
+ Zone* sourceZone = source->zone();
+ MOZ_ASSERT(!sourceZone->scriptLCovMap);
+ MOZ_ASSERT(!sourceZone->scriptCountsMap);
+ MOZ_ASSERT(!sourceZone->debugScriptMap);
+#ifdef MOZ_VTUNE
+ MOZ_ASSERT(!sourceZone->scriptVTuneIdMap);
+#endif
+#ifdef JS_CACHEIR_SPEW
+ MOZ_ASSERT(!sourceZone->scriptFinalWarmUpCountMap);
+#endif
+
+ // The source realm is now completely empty, and is the only realm in its
+ // compartment, which is the only compartment in its zone. Delete realm,
+ // compartment and zone without waiting for this to be cleaned up by a full
+ // GC.
+
+ sourceZone->deleteEmptyCompartment(source->compartment());
+ deleteEmptyZone(sourceZone);
+}
+
+void GCRuntime::runDebugGC() {
+#ifdef JS_GC_ZEAL
+ if (rt->mainContextFromOwnThread()->suppressGC) {
+ return;
+ }
+
+ if (hasZealMode(ZealMode::GenerationalGC)) {
+ return minorGC(JS::GCReason::DEBUG_GC);
+ }
+
+ PrepareForDebugGC(rt);
+
+ auto budget = SliceBudget::unlimited();
+ if (hasZealMode(ZealMode::IncrementalMultipleSlices)) {
+ /*
+ * Start with a small slice limit and double it every slice. This
+     * ensures that we get multiple slices and that the collection runs to
+ * completion.
+ */
+ if (!isIncrementalGCInProgress()) {
+ zealSliceBudget = zealFrequency / 2;
+ } else {
+ zealSliceBudget *= 2;
+ }
+ budget = SliceBudget(WorkBudget(zealSliceBudget));
+
+ js::gc::State initialState = incrementalState;
+ Maybe<JSGCInvocationKind> gckind =
+ isIncrementalGCInProgress() ? Nothing() : Some(GC_SHRINK);
+ collect(false, budget, gckind, JS::GCReason::DEBUG_GC);
+
+ /* Reset the slice size when we get to the sweep or compact phases. */
+ if ((initialState == State::Mark && incrementalState == State::Sweep) ||
+ (initialState == State::Sweep && incrementalState == State::Compact)) {
+ zealSliceBudget = zealFrequency / 2;
+ }
+ } else if (hasIncrementalTwoSliceZealMode()) {
+ // These modes trigger incremental GC that happens in two slices and the
+ // supplied budget is ignored by incrementalSlice.
+ budget = SliceBudget(WorkBudget(1));
+
+ Maybe<JSGCInvocationKind> gckind =
+ isIncrementalGCInProgress() ? Nothing() : Some(GC_NORMAL);
+ collect(false, budget, gckind, JS::GCReason::DEBUG_GC);
+ } else if (hasZealMode(ZealMode::Compact)) {
+ gc(GC_SHRINK, JS::GCReason::DEBUG_GC);
+ } else {
+ gc(GC_NORMAL, JS::GCReason::DEBUG_GC);
+ }
+
+#endif
+}
+
+void GCRuntime::setFullCompartmentChecks(bool enabled) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
+ fullCompartmentChecks = enabled;
+}
+
+void GCRuntime::notifyRootsRemoved() {
+ rootsRemoved = true;
+
+#ifdef JS_GC_ZEAL
+ /* Schedule a GC to happen "soon". */
+ if (hasZealMode(ZealMode::RootsChange)) {
+ nextScheduled = 1;
+ }
+#endif
+}
+
+#ifdef JS_GC_ZEAL
+bool GCRuntime::selectForMarking(JSObject* object) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
+ return selectedForMarking.ref().get().append(object);
+}
+
+void GCRuntime::clearSelectedForMarking() {
+ selectedForMarking.ref().get().clearAndFree();
+}
+
+void GCRuntime::setDeterministic(bool enabled) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
+ deterministicOnly = enabled;
+}
+#endif
+
+#ifdef DEBUG
+
+/* Should only be called manually under gdb */
+void PreventGCDuringInteractiveDebug() { TlsContext.get()->suppressGC++; }
+
+#endif
+
+void js::ReleaseAllJITCode(JSFreeOp* fop) {
+ js::CancelOffThreadIonCompile(fop->runtime());
+
+ for (ZonesIter zone(fop->runtime(), SkipAtoms); !zone.done(); zone.next()) {
+ zone->setPreservingCode(false);
+ zone->discardJitCode(fop);
+ }
+
+ for (RealmsIter realm(fop->runtime()); !realm.done(); realm.next()) {
+ if (jit::JitRealm* jitRealm = realm->jitRealm()) {
+ jitRealm->discardStubs();
+ }
+ }
+}
+
+void ArenaLists::adoptArenas(ArenaLists* fromArenaLists,
+ bool targetZoneIsCollecting) {
+ // GC may be active so take the lock here so we can mutate the arena lists.
+ AutoLockGC lock(runtime());
+
+ fromArenaLists->clearFreeLists();
+
+ for (auto thingKind : AllAllocKinds()) {
+ MOZ_ASSERT(fromArenaLists->concurrentUse(thingKind) == ConcurrentUse::None);
+ ArenaList* fromList = &fromArenaLists->arenaList(thingKind);
+ ArenaList* toList = &arenaList(thingKind);
+ fromList->check();
+ toList->check();
+ Arena* next;
+ for (Arena* fromArena = fromList->head(); fromArena; fromArena = next) {
+ // Copy fromArena->next before releasing/reinserting.
+ next = fromArena->next;
+
+#ifdef DEBUG
+ MOZ_ASSERT(!fromArena->isEmpty());
+ if (targetZoneIsCollecting) {
+ fromArena->checkAllCellsMarkedBlack();
+ } else {
+ fromArena->checkNoMarkedCells();
+ }
+#endif
+
+ // If the target zone is being collected then we need to add the
+ // arenas before the cursor because the collector assumes that the
+ // cursor is always at the end of the list. This has the side-effect
+ // of preventing allocation into any non-full arenas until the end
+ // of the next GC.
+ if (targetZoneIsCollecting) {
+ toList->insertBeforeCursor(fromArena);
+ } else {
+ toList->insertAtCursor(fromArena);
+ }
+ }
+ fromList->clear();
+ toList->check();
+ }
+}
+
+AutoSuppressGC::AutoSuppressGC(JSContext* cx)
+ : suppressGC_(cx->suppressGC.ref()) {
+ suppressGC_++;
+}
+
+#ifdef DEBUG
+AutoDisableProxyCheck::AutoDisableProxyCheck() {
+ TlsContext.get()->disableStrictProxyChecking();
+}
+
+AutoDisableProxyCheck::~AutoDisableProxyCheck() {
+ TlsContext.get()->enableStrictProxyChecking();
+}
+
+JS_FRIEND_API void JS::AssertGCThingMustBeTenured(JSObject* obj) {
+ MOZ_ASSERT(obj->isTenured() &&
+ (!IsNurseryAllocable(obj->asTenured().getAllocKind()) ||
+ obj->getClass()->hasFinalize()));
+}
+
+JS_FRIEND_API void JS::AssertGCThingIsNotNurseryAllocable(Cell* cell) {
+ MOZ_ASSERT(cell);
+ MOZ_ASSERT(!cell->is<JSObject>() && !cell->is<JSString>() &&
+ !cell->is<JS::BigInt>());
+}
+
+JS_FRIEND_API void js::gc::AssertGCThingHasType(js::gc::Cell* cell,
+ JS::TraceKind kind) {
+ if (!cell) {
+ MOZ_ASSERT(kind == JS::TraceKind::Null);
+ return;
+ }
+
+ MOZ_ASSERT(IsCellPointerValid(cell));
+ MOZ_ASSERT(cell->getTraceKind() == kind);
+}
+#endif
+
+#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+
+JS::AutoAssertNoGC::AutoAssertNoGC(JSContext* maybecx)
+ : cx_(maybecx ? maybecx : TlsContext.get()) {
+ if (cx_) {
+ cx_->inUnsafeRegion++;
+ }
+}
+
+JS::AutoAssertNoGC::~AutoAssertNoGC() {
+ if (cx_) {
+ MOZ_ASSERT(cx_->inUnsafeRegion > 0);
+ cx_->inUnsafeRegion--;
+ }
+}
+
+#endif // MOZ_DIAGNOSTIC_ASSERT_ENABLED
+
+#ifdef DEBUG
+
+AutoAssertNoNurseryAlloc::AutoAssertNoNurseryAlloc() {
+ TlsContext.get()->disallowNurseryAlloc();
+}
+
+AutoAssertNoNurseryAlloc::~AutoAssertNoNurseryAlloc() {
+ TlsContext.get()->allowNurseryAlloc();
+}
+
+JS::AutoEnterCycleCollection::AutoEnterCycleCollection(JSRuntime* rt)
+ : runtime_(rt) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
+ runtime_->gc.heapState_ = HeapState::CycleCollecting;
+}
+
+JS::AutoEnterCycleCollection::~AutoEnterCycleCollection() {
+ MOZ_ASSERT(JS::RuntimeHeapIsCycleCollecting());
+ runtime_->gc.heapState_ = HeapState::Idle;
+}
+
+JS::AutoAssertGCCallback::AutoAssertGCCallback() : AutoSuppressGCAnalysis() {
+ MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
+}
+
+#endif // DEBUG
+
+JS_FRIEND_API const char* JS::GCTraceKindToAscii(JS::TraceKind kind) {
+ switch (kind) {
+#define MAP_NAME(name, _0, _1, _2) \
+ case JS::TraceKind::name: \
+ return "JS " #name;
+ JS_FOR_EACH_TRACEKIND(MAP_NAME);
+#undef MAP_NAME
+ default:
+ return "Invalid";
+ }
+}
+
+JS_FRIEND_API size_t JS::GCTraceKindSize(JS::TraceKind kind) {
+ switch (kind) {
+#define MAP_SIZE(name, type, _0, _1) \
+ case JS::TraceKind::name: \
+ return sizeof(type);
+ JS_FOR_EACH_TRACEKIND(MAP_SIZE);
+#undef MAP_SIZE
+ default:
+ return 0;
+ }
+}
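+
+// For example (illustrative; assuming the Object entry of
+// JS_FOR_EACH_TRACEKIND maps to JSObject):
+//   GCTraceKindToAscii(JS::TraceKind::Object) returns "JS Object"
+//   GCTraceKindSize(JS::TraceKind::Object) returns sizeof(JSObject)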
+
+JS::GCCellPtr::GCCellPtr(const Value& v) : ptr(0) {
+ switch (v.type()) {
+ case ValueType::String:
+ ptr = checkedCast(v.toString(), JS::TraceKind::String);
+ return;
+ case ValueType::Object:
+ ptr = checkedCast(&v.toObject(), JS::TraceKind::Object);
+ return;
+ case ValueType::Symbol:
+ ptr = checkedCast(v.toSymbol(), JS::TraceKind::Symbol);
+ return;
+ case ValueType::BigInt:
+ ptr = checkedCast(v.toBigInt(), JS::TraceKind::BigInt);
+ return;
+ case ValueType::PrivateGCThing:
+ ptr = checkedCast(v.toGCThing(), v.toGCThing()->getTraceKind());
+ return;
+ case ValueType::Double:
+ case ValueType::Int32:
+ case ValueType::Boolean:
+ case ValueType::Undefined:
+ case ValueType::Null:
+ case ValueType::Magic: {
+ MOZ_ASSERT(!v.isGCThing());
+ ptr = checkedCast(nullptr, JS::TraceKind::Null);
+ return;
+ }
+ }
+
+ ReportBadValueTypeAndCrash(v);
+}
+
+JS::TraceKind JS::GCCellPtr::outOfLineKind() const {
+ MOZ_ASSERT((ptr & OutOfLineTraceKindMask) == OutOfLineTraceKindMask);
+ MOZ_ASSERT(asCell()->isTenured());
+ return MapAllocToTraceKind(asCell()->asTenured().getAllocKind());
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void GCRuntime::checkHashTablesAfterMovingGC() {
+ /*
+ * Check that internal hash tables no longer have any pointers to things
+ * that have been moved.
+ */
+ rt->geckoProfiler().checkStringsMapAfterMovingGC();
+ for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
+ zone->checkUniqueIdTableAfterMovingGC();
+ zone->checkInitialShapesTableAfterMovingGC();
+ zone->checkBaseShapeTableAfterMovingGC();
+ zone->checkAllCrossCompartmentWrappersAfterMovingGC();
+ zone->checkScriptMapsAfterMovingGC();
+
+ JS::AutoCheckCannotGC nogc;
+ for (auto baseShape = zone->cellIterUnsafe<BaseShape>(); !baseShape.done();
+ baseShape.next()) {
+ ShapeCachePtr p = baseShape->getCache(nogc);
+ p.checkAfterMovingGC();
+ }
+ }
+
+ for (CompartmentsIter c(this); !c.done(); c.next()) {
+ for (RealmsInCompartmentIter r(c); !r.done(); r.next()) {
+ r->checkObjectGroupTablesAfterMovingGC();
+ r->dtoaCache.checkCacheAfterMovingGC();
+ if (r->debugEnvs()) {
+ r->debugEnvs()->checkHashTablesAfterMovingGC();
+ }
+ }
+ }
+}
+#endif
+
+#ifdef DEBUG
+bool GCRuntime::hasZone(Zone* target) {
+ for (AllZonesIter zone(this); !zone.done(); zone.next()) {
+ if (zone == target) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
+JS_PUBLIC_API void JS::PrepareZoneForGC(JSContext* cx, Zone* zone) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_ASSERT(cx->runtime()->gc.hasZone(zone));
+
+ zone->scheduleGC();
+}
+
+JS_PUBLIC_API void JS::PrepareForFullGC(JSContext* cx) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
+ zone->scheduleGC();
+ }
+}
+
+JS_PUBLIC_API void JS::PrepareForIncrementalGC(JSContext* cx) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ if (!JS::IsIncrementalGCInProgress(cx)) {
+ return;
+ }
+
+ for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
+ if (zone->wasGCStarted()) {
+ zone->scheduleGC();
+ }
+ }
+}
+
+JS_PUBLIC_API bool JS::IsGCScheduled(JSContext* cx) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
+ if (zone->isGCScheduled()) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+JS_PUBLIC_API void JS::SkipZoneForGC(JSContext* cx, Zone* zone) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_ASSERT(cx->runtime()->gc.hasZone(zone));
+
+ zone->unscheduleGC();
+}
+
+JS_PUBLIC_API void JS::NonIncrementalGC(JSContext* cx,
+ JSGCInvocationKind gckind,
+ GCReason reason) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_ASSERT(gckind == GC_NORMAL || gckind == GC_SHRINK);
+
+ cx->runtime()->gc.gc(gckind, reason);
+
+ MOZ_ASSERT(!IsIncrementalGCInProgress(cx));
+}
+
+JS_PUBLIC_API void JS::StartIncrementalGC(JSContext* cx,
+ JSGCInvocationKind gckind,
+ GCReason reason, int64_t millis) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_ASSERT(gckind == GC_NORMAL || gckind == GC_SHRINK);
+
+ cx->runtime()->gc.startGC(gckind, reason, millis);
+}
+
+JS_PUBLIC_API void JS::IncrementalGCSlice(JSContext* cx, GCReason reason,
+ int64_t millis) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ cx->runtime()->gc.gcSlice(reason, millis);
+}
+
+JS_PUBLIC_API bool JS::IncrementalGCHasForegroundWork(JSContext* cx) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ return cx->runtime()->gc.hasForegroundWork();
+}
+
+JS_PUBLIC_API void JS::FinishIncrementalGC(JSContext* cx, GCReason reason) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ cx->runtime()->gc.finishGC(reason);
+}
+
+JS_PUBLIC_API void JS::AbortIncrementalGC(JSContext* cx) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ if (IsIncrementalGCInProgress(cx)) {
+ cx->runtime()->gc.abortGC();
+ }
+}
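+
+// Illustrative embedder driver loop for the incremental GC API above. This is
+// a sketch only, not part of the API; the 10 ms budget is an arbitrary
+// example value.
+//
+//   JS::PrepareForFullGC(cx);
+//   JS::StartIncrementalGC(cx, GC_NORMAL, JS::GCReason::API, 10);
+//   while (JS::IsIncrementalGCInProgress(cx)) {
+//     JS::PrepareForIncrementalGC(cx);
+//     JS::IncrementalGCSlice(cx, JS::GCReason::API, 10);
+//   }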
+
+char16_t* JS::GCDescription::formatSliceMessage(JSContext* cx) const {
+ UniqueChars cstr = cx->runtime()->gc.stats().formatCompactSliceMessage();
+
+ size_t nchars = strlen(cstr.get());
+ UniqueTwoByteChars out(js_pod_malloc<char16_t>(nchars + 1));
+ if (!out) {
+ return nullptr;
+ }
+ out.get()[nchars] = 0;
+
+ CopyAndInflateChars(out.get(), cstr.get(), nchars);
+ return out.release();
+}
+
+char16_t* JS::GCDescription::formatSummaryMessage(JSContext* cx) const {
+ UniqueChars cstr = cx->runtime()->gc.stats().formatCompactSummaryMessage();
+
+ size_t nchars = strlen(cstr.get());
+ UniqueTwoByteChars out(js_pod_malloc<char16_t>(nchars + 1));
+ if (!out) {
+ return nullptr;
+ }
+ out.get()[nchars] = 0;
+
+ CopyAndInflateChars(out.get(), cstr.get(), nchars);
+ return out.release();
+}
+
+JS::dbg::GarbageCollectionEvent::Ptr JS::GCDescription::toGCEvent(
+ JSContext* cx) const {
+ return JS::dbg::GarbageCollectionEvent::Create(
+ cx->runtime(), cx->runtime()->gc.stats(),
+ cx->runtime()->gc.majorGCCount());
+}
+
+TimeStamp JS::GCDescription::startTime(JSContext* cx) const {
+ return cx->runtime()->gc.stats().start();
+}
+
+TimeStamp JS::GCDescription::endTime(JSContext* cx) const {
+ return cx->runtime()->gc.stats().end();
+}
+
+TimeStamp JS::GCDescription::lastSliceStart(JSContext* cx) const {
+ return cx->runtime()->gc.stats().slices().back().start;
+}
+
+TimeStamp JS::GCDescription::lastSliceEnd(JSContext* cx) const {
+ return cx->runtime()->gc.stats().slices().back().end;
+}
+
+JS::UniqueChars JS::GCDescription::sliceToJSONProfiler(JSContext* cx) const {
+ size_t slices = cx->runtime()->gc.stats().slices().length();
+ MOZ_ASSERT(slices > 0);
+ return cx->runtime()->gc.stats().renderJsonSlice(slices - 1);
+}
+
+JS::UniqueChars JS::GCDescription::formatJSONProfiler(JSContext* cx) const {
+ return cx->runtime()->gc.stats().renderJsonMessage();
+}
+
+JS_PUBLIC_API JS::UniqueChars JS::MinorGcToJSON(JSContext* cx) {
+ JSRuntime* rt = cx->runtime();
+ return rt->gc.stats().renderNurseryJson();
+}
+
+JS_PUBLIC_API JS::GCSliceCallback JS::SetGCSliceCallback(
+ JSContext* cx, GCSliceCallback callback) {
+ return cx->runtime()->gc.setSliceCallback(callback);
+}
+
+JS_PUBLIC_API JS::DoCycleCollectionCallback JS::SetDoCycleCollectionCallback(
+ JSContext* cx, JS::DoCycleCollectionCallback callback) {
+ return cx->runtime()->gc.setDoCycleCollectionCallback(callback);
+}
+
+JS_PUBLIC_API JS::GCNurseryCollectionCallback
+JS::SetGCNurseryCollectionCallback(JSContext* cx,
+ GCNurseryCollectionCallback callback) {
+ return cx->runtime()->gc.setNurseryCollectionCallback(callback);
+}
+
+JS_PUBLIC_API void JS::SetLowMemoryState(JSContext* cx, bool newState) {
+ return cx->runtime()->gc.setLowMemoryState(newState);
+}
+
+JS_PUBLIC_API void JS::DisableIncrementalGC(JSContext* cx) {
+ cx->runtime()->gc.disallowIncrementalGC();
+}
+
+JS_PUBLIC_API bool JS::IsIncrementalGCEnabled(JSContext* cx) {
+ GCRuntime& gc = cx->runtime()->gc;
+ return gc.isIncrementalGCEnabled() && gc.isIncrementalGCAllowed();
+}
+
+JS_PUBLIC_API bool JS::IsIncrementalGCInProgress(JSContext* cx) {
+ return cx->runtime()->gc.isIncrementalGCInProgress();
+}
+
+JS_PUBLIC_API bool JS::IsIncrementalGCInProgress(JSRuntime* rt) {
+ return rt->gc.isIncrementalGCInProgress() &&
+ !rt->gc.isVerifyPreBarriersEnabled();
+}
+
+JS_PUBLIC_API bool JS::IsIncrementalBarrierNeeded(JSContext* cx) {
+ if (JS::RuntimeHeapIsBusy()) {
+ return false;
+ }
+
+ auto state = cx->runtime()->gc.state();
+ return state != gc::State::NotActive && state <= gc::State::Sweep;
+}
+
+JS_PUBLIC_API void JS::IncrementalPreWriteBarrier(JSObject* obj) {
+ if (!obj) {
+ return;
+ }
+
+ AutoGeckoProfilerEntry profilingStackFrame(
+ TlsContext.get(), "IncrementalPreWriteBarrier(JSObject*)",
+ JS::ProfilingCategoryPair::GCCC_Barrier);
+ PreWriteBarrier(obj);
+}
+
+JS_PUBLIC_API void JS::IncrementalPreWriteBarrier(GCCellPtr thing) {
+ if (!thing) {
+ return;
+ }
+
+ AutoGeckoProfilerEntry profilingStackFrame(
+ TlsContext.get(), "IncrementalPreWriteBarrier(GCCellPtr)",
+ JS::ProfilingCategoryPair::GCCC_Barrier);
+ CellPtrPreWriteBarrier(thing);
+}
+
+JS_PUBLIC_API bool JS::WasIncrementalGC(JSRuntime* rt) {
+ return rt->gc.isIncrementalGc();
+}
+
+uint64_t js::gc::NextCellUniqueId(JSRuntime* rt) {
+ return rt->gc.nextCellUniqueId();
+}
+
+namespace js {
+namespace gc {
+namespace MemInfo {
+
+static bool GCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->runtime()->gc.heapSize.bytes()));
+ return true;
+}
+
+static bool MallocBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ double bytes = 0;
+ for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
+ bytes += zone->mallocHeapSize.bytes();
+ }
+ args.rval().setNumber(bytes);
+ return true;
+}
+
+static bool GCMaxBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->runtime()->gc.tunables.gcMaxBytes()));
+ return true;
+}
+
+static bool GCHighFreqGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setBoolean(
+ cx->runtime()->gc.schedulingState.inHighFrequencyGCMode());
+ return true;
+}
+
+static bool GCNumberGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->runtime()->gc.gcNumber()));
+ return true;
+}
+
+static bool MajorGCCountGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->runtime()->gc.majorGCCount()));
+ return true;
+}
+
+static bool MinorGCCountGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->runtime()->gc.minorGCCount()));
+ return true;
+}
+
+static bool GCSliceCountGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->runtime()->gc.gcSliceCount()));
+ return true;
+}
+
+static bool ZoneGCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->zone()->gcHeapSize.bytes()));
+ return true;
+}
+
+static bool ZoneGCTriggerBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->zone()->gcHeapThreshold.startBytes()));
+ return true;
+}
+
+static bool ZoneGCAllocTriggerGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ bool highFrequency =
+ cx->runtime()->gc.schedulingState.inHighFrequencyGCMode();
+ args.rval().setNumber(
+ double(cx->zone()->gcHeapThreshold.eagerAllocTrigger(highFrequency)));
+ return true;
+}
+
+static bool ZoneMallocBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->zone()->mallocHeapSize.bytes()));
+ return true;
+}
+
+static bool ZoneMallocTriggerBytesGetter(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->zone()->mallocHeapThreshold.startBytes()));
+ return true;
+}
+
+static bool ZoneGCNumberGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->zone()->gcNumber()));
+ return true;
+}
+
+#ifdef DEBUG
+static bool DummyGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setUndefined();
+ return true;
+}
+#endif
+
+} /* namespace MemInfo */
+
+JSObject* NewMemoryInfoObject(JSContext* cx) {
+ RootedObject obj(cx, JS_NewObject(cx, nullptr));
+ if (!obj) {
+ return nullptr;
+ }
+
+ using namespace MemInfo;
+ struct NamedGetter {
+ const char* name;
+ JSNative getter;
+ } getters[] = {{"gcBytes", GCBytesGetter},
+ {"gcMaxBytes", GCMaxBytesGetter},
+ {"mallocBytes", MallocBytesGetter},
+ {"gcIsHighFrequencyMode", GCHighFreqGetter},
+ {"gcNumber", GCNumberGetter},
+ {"majorGCCount", MajorGCCountGetter},
+ {"minorGCCount", MinorGCCountGetter},
+ {"sliceCount", GCSliceCountGetter}};
+
+ for (auto pair : getters) {
+ JSNative getter = pair.getter;
+
+#ifdef DEBUG
+ if (js::SupportDifferentialTesting()) {
+ getter = DummyGetter;
+ }
+#endif
+
+ if (!JS_DefineProperty(cx, obj, pair.name, getter, nullptr,
+ JSPROP_ENUMERATE)) {
+ return nullptr;
+ }
+ }
+
+ RootedObject zoneObj(cx, JS_NewObject(cx, nullptr));
+ if (!zoneObj) {
+ return nullptr;
+ }
+
+ if (!JS_DefineProperty(cx, obj, "zone", zoneObj, JSPROP_ENUMERATE)) {
+ return nullptr;
+ }
+
+ struct NamedZoneGetter {
+ const char* name;
+ JSNative getter;
+ } zoneGetters[] = {{"gcBytes", ZoneGCBytesGetter},
+ {"gcTriggerBytes", ZoneGCTriggerBytesGetter},
+ {"gcAllocTrigger", ZoneGCAllocTriggerGetter},
+ {"mallocBytes", ZoneMallocBytesGetter},
+ {"mallocTriggerBytes", ZoneMallocTriggerBytesGetter},
+ {"gcNumber", ZoneGCNumberGetter}};
+
+ for (auto pair : zoneGetters) {
+ JSNative getter = pair.getter;
+
+#ifdef DEBUG
+ if (js::SupportDifferentialTesting()) {
+ getter = DummyGetter;
+ }
+#endif
+
+ if (!JS_DefineProperty(cx, zoneObj, pair.name, getter, nullptr,
+ JSPROP_ENUMERATE)) {
+ return nullptr;
+ }
+ }
+
+ return obj;
+}
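+
+// The returned object has roughly this shape (illustrative; derived from the
+// getter tables above):
+//
+//   { gcBytes, gcMaxBytes, mallocBytes, gcIsHighFrequencyMode, gcNumber,
+//     majorGCCount, minorGCCount, sliceCount,
+//     zone: { gcBytes, gcTriggerBytes, gcAllocTrigger, mallocBytes,
+//             mallocTriggerBytes, gcNumber } }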
+
+const char* StateName(State state) {
+ switch (state) {
+#define MAKE_CASE(name) \
+ case State::name: \
+ return #name;
+ GCSTATES(MAKE_CASE)
+#undef MAKE_CASE
+ }
+ MOZ_CRASH("Invalid gc::State enum value");
+}
+
+const char* StateName(JS::Zone::GCState state) {
+ switch (state) {
+ case JS::Zone::NoGC:
+ return "NoGC";
+ case JS::Zone::Prepare:
+ return "Prepare";
+ case JS::Zone::MarkBlackOnly:
+ return "MarkBlackOnly";
+ case JS::Zone::MarkBlackAndGray:
+ return "MarkBlackAndGray";
+ case JS::Zone::Sweep:
+ return "Sweep";
+ case JS::Zone::Finished:
+ return "Finished";
+ case JS::Zone::Compact:
+ return "Compact";
+ }
+ MOZ_CRASH("Invalid Zone::GCState enum value");
+}
+
+void AutoAssertEmptyNursery::checkCondition(JSContext* cx) {
+ if (!noAlloc) {
+ noAlloc.emplace();
+ }
+ this->cx = cx;
+ MOZ_ASSERT(cx->nursery().isEmpty());
+}
+
+AutoEmptyNursery::AutoEmptyNursery(JSContext* cx) : AutoAssertEmptyNursery() {
+ MOZ_ASSERT(!cx->suppressGC);
+ cx->runtime()->gc.stats().suspendPhases();
+ cx->runtime()->gc.evictNursery(JS::GCReason::EVICT_NURSERY);
+ cx->runtime()->gc.stats().resumePhases();
+ checkCondition(cx);
+}
+
+} /* namespace gc */
+} /* namespace js */
+
+#ifdef DEBUG
+
+namespace js {
+
+// We don't want jsfriendapi.h to depend on GenericPrinter,
+// so these functions are declared directly in the cpp.
+
+extern JS_FRIEND_API void DumpString(JSString* str, js::GenericPrinter& out);
+
+} // namespace js
+
+void js::gc::Cell::dump(js::GenericPrinter& out) const {
+ switch (getTraceKind()) {
+ case JS::TraceKind::Object:
+ reinterpret_cast<const JSObject*>(this)->dump(out);
+ break;
+
+ case JS::TraceKind::String:
+ js::DumpString(reinterpret_cast<JSString*>(const_cast<Cell*>(this)), out);
+ break;
+
+ case JS::TraceKind::Shape:
+ reinterpret_cast<const Shape*>(this)->dump(out);
+ break;
+
+ default:
+ out.printf("%s(%p)\n", JS::GCTraceKindToAscii(getTraceKind()),
+ (void*)this);
+ }
+}
+
+// For use in a debugger.
+void js::gc::Cell::dump() const {
+ js::Fprinter out(stderr);
+ dump(out);
+}
+#endif
+
+static inline bool CanCheckGrayBits(const Cell* cell) {
+ MOZ_ASSERT(cell);
+ if (!cell->isTenured()) {
+ return false;
+ }
+
+ auto tc = &cell->asTenured();
+ auto rt = tc->runtimeFromAnyThread();
+ if (!CurrentThreadCanAccessRuntime(rt) || !rt->gc.areGrayBitsValid()) {
+ return false;
+ }
+
+ // If the zone's mark bits are being cleared concurrently we can't depend on
+ // the contents.
+ return !tc->zone()->isGCPreparing();
+}
+
+JS_PUBLIC_API bool js::gc::detail::CellIsMarkedGrayIfKnown(const Cell* cell) {
+ // We ignore the gray marking state of cells and return false in the
+ // following cases:
+ //
+ // 1) When OOM has caused us to clear the gcGrayBitsValid_ flag.
+ //
+ // 2) When we are in an incremental GC and examine a cell that is in a zone
+ // that is not being collected. Gray targets of CCWs that are marked black
+ // by a barrier will eventually be marked black in the next GC slice.
+ //
+ // 3) When we are not on the runtime's main thread. Helper threads might
+ // call this while parsing, and they are not allowed to inspect the
+ // runtime's incremental state. The objects being operated on are not able
+ // to be collected and will not be marked any color.
+
+ if (!CanCheckGrayBits(cell)) {
+ return false;
+ }
+
+ auto tc = &cell->asTenured();
+ MOZ_ASSERT(!tc->zoneFromAnyThread()->usedByHelperThread());
+
+ auto rt = tc->runtimeFromMainThread();
+ if (rt->gc.isIncrementalGCInProgress() && !tc->zone()->wasGCStarted()) {
+ return false;
+ }
+
+ return detail::CellIsMarkedGray(tc);
+}
+
+#ifdef DEBUG
+
+JS_PUBLIC_API void js::gc::detail::AssertCellIsNotGray(const Cell* cell) {
+ // Check that a cell is not marked gray.
+ //
+ // Since this is a debug-only check, take account of the eventual mark state
+ // of cells that will be marked black by the next GC slice in an incremental
+ // GC. For performance reasons we don't do this in CellIsMarkedGrayIfKnown.
+
+ if (!CanCheckGrayBits(cell)) {
+ return;
+ }
+
+ // TODO: I'd like to AssertHeapIsIdle() here, but this ends up getting
+ // called during GC and while iterating the heap for memory reporting.
+ MOZ_ASSERT(!JS::RuntimeHeapIsCycleCollecting());
+
+ auto tc = &cell->asTenured();
+ if (tc->zone()->isGCMarkingBlackAndGray()) {
+ // We are doing gray marking in the cell's zone. Even if the cell is
+ // currently marked gray it may eventually be marked black. Delay checking
+ // non-black cells until we finish gray marking.
+
+ if (!tc->isMarkedBlack()) {
+ JSRuntime* rt = tc->zone()->runtimeFromMainThread();
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!rt->gc.cellsToAssertNotGray.ref().append(cell)) {
+ oomUnsafe.crash("Can't append to delayed gray checks list");
+ }
+ }
+ return;
+ }
+
+ MOZ_ASSERT(!tc->isMarkedGray());
+}
+
+extern JS_PUBLIC_API bool js::gc::detail::ObjectIsMarkedBlack(
+ const JSObject* obj) {
+ return obj->isMarkedBlack();
+}
+
+#endif
+
+js::gc::ClearEdgesTracer::ClearEdgesTracer(JSRuntime* rt)
+ : GenericTracer(rt, JS::TracerKind::ClearEdges,
+ JS::WeakMapTraceAction::TraceKeysAndValues) {}
+
+js::gc::ClearEdgesTracer::ClearEdgesTracer()
+ : ClearEdgesTracer(TlsContext.get()->runtime()) {}
+
+template <typename S>
+inline S* js::gc::ClearEdgesTracer::onEdge(S* thing) {
+ // We don't handle removing pointers to nursery edges from the store buffer
+ // with this tracer. Check that this doesn't happen.
+ MOZ_ASSERT(!IsInsideNursery(thing));
+
+ // Fire the pre-barrier since we're removing an edge from the graph.
+ InternalBarrierMethods<S*>::preBarrier(thing);
+
+ // Return nullptr to clear the edge.
+ return nullptr;
+}
+
+JSObject* js::gc::ClearEdgesTracer::onObjectEdge(JSObject* obj) {
+ return onEdge(obj);
+}
+JSString* js::gc::ClearEdgesTracer::onStringEdge(JSString* str) {
+ return onEdge(str);
+}
+JS::Symbol* js::gc::ClearEdgesTracer::onSymbolEdge(JS::Symbol* sym) {
+ return onEdge(sym);
+}
+JS::BigInt* js::gc::ClearEdgesTracer::onBigIntEdge(JS::BigInt* bi) {
+ return onEdge(bi);
+}
+js::BaseScript* js::gc::ClearEdgesTracer::onScriptEdge(js::BaseScript* script) {
+ return onEdge(script);
+}
+js::Shape* js::gc::ClearEdgesTracer::onShapeEdge(js::Shape* shape) {
+ return onEdge(shape);
+}
+js::ObjectGroup* js::gc::ClearEdgesTracer::onObjectGroupEdge(
+ js::ObjectGroup* group) {
+ return onEdge(group);
+}
+js::BaseShape* js::gc::ClearEdgesTracer::onBaseShapeEdge(js::BaseShape* base) {
+ return onEdge(base);
+}
+js::jit::JitCode* js::gc::ClearEdgesTracer::onJitCodeEdge(
+ js::jit::JitCode* code) {
+ return onEdge(code);
+}
+js::Scope* js::gc::ClearEdgesTracer::onScopeEdge(js::Scope* scope) {
+ return onEdge(scope);
+}
+js::RegExpShared* js::gc::ClearEdgesTracer::onRegExpSharedEdge(
+ js::RegExpShared* shared) {
+ return onEdge(shared);
+}
+
+JS_PUBLIC_API void js::gc::FinalizeDeadNurseryObject(JSContext* cx,
+ JSObject* obj) {
+ CHECK_THREAD(cx);
+ MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());
+
+ MOZ_ASSERT(obj);
+ MOZ_ASSERT(IsInsideNursery(obj));
+ mozilla::DebugOnly<JSObject*> prior(obj);
+ MOZ_ASSERT(IsAboutToBeFinalizedUnbarriered(&prior));
+ MOZ_ASSERT(obj == prior);
+
+ const JSClass* jsClass = JS::GetClass(obj);
+ jsClass->doFinalize(cx->defaultFreeOp(), obj);
+}
+
+JS_FRIEND_API void js::gc::SetPerformanceHint(JSContext* cx,
+ PerformanceHint hint) {
+ CHECK_THREAD(cx);
+ MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
+
+ cx->runtime()->gc.setPerformanceHint(hint);
+}
+
+void GCRuntime::setPerformanceHint(PerformanceHint hint) {
+ bool wasInPageLoad = inPageLoadCount != 0;
+
+ if (hint == PerformanceHint::InPageLoad) {
+ inPageLoadCount++;
+ } else {
+ MOZ_ASSERT(inPageLoadCount);
+ inPageLoadCount--;
+ }
+
+ bool inPageLoad = inPageLoadCount != 0;
+ if (inPageLoad == wasInPageLoad) {
+ return;
+ }
+
+ AutoLockGC lock(this);
+ schedulingState.inPageLoad = inPageLoad;
+ atomsZone->updateGCStartThresholds(*this, invocationKind, lock);
+ maybeTriggerGCAfterAlloc(atomsZone);
+}
diff --git a/js/src/gc/GC.h b/js/src/gc/GC.h
new file mode 100644
index 0000000000..8dabb9c447
--- /dev/null
+++ b/js/src/gc/GC.h
@@ -0,0 +1,215 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * JS engine garbage collector API.
+ */
+
+#ifndef gc_GC_h
+#define gc_GC_h
+
+#include "jsapi.h"
+
+#include "gc/AllocKind.h"
+#include "gc/GCEnum.h"
+#include "js/TraceKind.h"
+
+class JSExternalString;
+class JSFatInlineString;
+class JSTracer;
+
+namespace js {
+
+class AccessorShape;
+class FatInlineAtom;
+class NormalAtom;
+
+class Nursery;
+
+namespace gc {
+
+class Arena;
+class TenuredChunk;
+struct Cell;
+
+/*
+ * Map from C++ type to alloc kind for non-object types. JSObject does not have
+ * a 1:1 mapping, so must use Arena::thingSize.
+ *
+ * The AllocKind is available as MapTypeToFinalizeKind<SomeType>::kind.
+ */
+template <typename T>
+struct MapTypeToFinalizeKind {};
+#define EXPAND_MAPTYPETOFINALIZEKIND(allocKind, traceKind, type, sizedType, \
+ bgFinal, nursery, compact) \
+ template <> \
+ struct MapTypeToFinalizeKind<type> { \
+ static const AllocKind kind = AllocKind::allocKind; \
+ };
+FOR_EACH_NONOBJECT_ALLOCKIND(EXPAND_MAPTYPETOFINALIZEKIND)
+#undef EXPAND_MAPTYPETOFINALIZEKIND
+
+} /* namespace gc */
+
+extern void TraceRuntime(JSTracer* trc);
+
+// Trace roots but don't evict the nursery first; used from DumpHeap.
+extern void TraceRuntimeWithoutEviction(JSTracer* trc);
+
+extern void ReleaseAllJITCode(JSFreeOp* op);
+
+extern void PrepareForDebugGC(JSRuntime* rt);
+
+/* Functions for managing cross compartment gray pointers. */
+
+extern void NotifyGCNukeWrapper(JSObject* o);
+
+extern unsigned NotifyGCPreSwap(JSObject* a, JSObject* b);
+
+extern void NotifyGCPostSwap(JSObject* a, JSObject* b, unsigned preResult);
+
+using IterateChunkCallback = void (*)(JSRuntime*, void*, gc::TenuredChunk*,
+ const JS::AutoRequireNoGC&);
+using IterateZoneCallback = void (*)(JSRuntime*, void*, JS::Zone*,
+ const JS::AutoRequireNoGC&);
+using IterateArenaCallback = void (*)(JSRuntime*, void*, gc::Arena*,
+ JS::TraceKind, size_t,
+ const JS::AutoRequireNoGC&);
+using IterateCellCallback = void (*)(JSRuntime*, void*, JS::GCCellPtr, size_t,
+ const JS::AutoRequireNoGC&);
+
+/*
+ * This function calls |zoneCallback| on every zone, |realmCallback| on
+ * every realm, |arenaCallback| on every in-use arena, and |cellCallback|
+ * on every in-use cell in the GC heap.
+ *
+ * Note that no read barrier is triggered on the cells passed to cellCallback,
+ * so these pointers must not escape the callback.
+ */
+extern void IterateHeapUnbarriered(JSContext* cx, void* data,
+ IterateZoneCallback zoneCallback,
+ JS::IterateRealmCallback realmCallback,
+ IterateArenaCallback arenaCallback,
+ IterateCellCallback cellCallback);
+
+/*
+ * This function is like IterateHeapUnbarriered, but does it for a single zone.
+ */
+extern void IterateHeapUnbarrieredForZone(
+ JSContext* cx, JS::Zone* zone, void* data, IterateZoneCallback zoneCallback,
+ JS::IterateRealmCallback realmCallback, IterateArenaCallback arenaCallback,
+ IterateCellCallback cellCallback);
+
+/*
+ * Invoke chunkCallback on every in-use chunk.
+ */
+extern void IterateChunks(JSContext* cx, void* data,
+ IterateChunkCallback chunkCallback);
+
+using IterateScriptCallback = void (*)(JSRuntime*, void*, BaseScript*,
+ const JS::AutoRequireNoGC&);
+
+/*
+ * Invoke scriptCallback on every in-use script for the given realm or for all
+ * realms if it is null. The scripts may or may not have bytecode.
+ */
+extern void IterateScripts(JSContext* cx, JS::Realm* realm, void* data,
+ IterateScriptCallback scriptCallback);
+
+JS::Realm* NewRealm(JSContext* cx, JSPrincipals* principals,
+ const JS::RealmOptions& options);
+
+namespace gc {
+
+void FinishGC(JSContext* cx, JS::GCReason = JS::GCReason::FINISH_GC);
+
+void WaitForBackgroundTasks(JSContext* cx);
+
+/*
+ * Merge all contents of source into target. This can only be used if source is
+ * the only realm in its zone.
+ */
+void MergeRealms(JS::Realm* source, JS::Realm* target);
+
+void CollectSelfHostingZone(JSContext* cx);
+
+enum VerifierType { PreBarrierVerifier };
+
+#ifdef JS_GC_ZEAL
+
+extern const char ZealModeHelpText[];
+
+/* Check that write barriers have been used correctly. See gc/Verifier.cpp. */
+void VerifyBarriers(JSRuntime* rt, VerifierType type);
+
+void MaybeVerifyBarriers(JSContext* cx, bool always = false);
+
+void DumpArenaInfo();
+
+#else
+
+static inline void VerifyBarriers(JSRuntime* rt, VerifierType type) {}
+
+static inline void MaybeVerifyBarriers(JSContext* cx, bool always = false) {}
+
+#endif
+
+/*
+ * Instances of this class prevent GC from happening while they are live. If an
+ * allocation causes a heap threshold to be exceeded, no GC will be performed
+ * and the allocation will succeed. Allocation may still fail for other reasons.
+ *
+ * Use of this class is highly discouraged, since without GC, system memory can
+ * become exhausted and this can cause crashes at places where we can't handle
+ * allocation failure.
+ *
+ * Use of this is permissible in situations where it would be impossible (or at
+ * least very difficult) to tolerate GC and where only a fixed number of objects
+ * are allocated, such as:
+ *
+ * - error reporting
+ * - JIT bailout handling
+ * - brain transplants (JSObject::swap)
+ * - debugging utilities not exposed to the browser
+ *
+ * This works by updating the |JSContext::suppressGC| counter which is checked
+ * at the start of GC.
+ */
+class MOZ_RAII JS_HAZ_GC_SUPPRESSED AutoSuppressGC {
+ int32_t& suppressGC_;
+
+ public:
+ explicit AutoSuppressGC(JSContext* cx);
+
+ ~AutoSuppressGC() { suppressGC_--; }
+};
+
+const char* StateName(State state);
+
+} /* namespace gc */
+
+/* Use this to avoid assertions when manipulating the wrapper map. */
+class MOZ_RAII AutoDisableProxyCheck {
+ public:
+#ifdef DEBUG
+ AutoDisableProxyCheck();
+ ~AutoDisableProxyCheck();
+#else
+ AutoDisableProxyCheck() {}
+#endif
+};
+
+struct MOZ_RAII AutoDisableCompactingGC {
+ explicit AutoDisableCompactingGC(JSContext* cx);
+ ~AutoDisableCompactingGC();
+
+ private:
+ JSContext* cx;
+};
+
+} /* namespace js */
+
+#endif /* gc_GC_h */
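
The iteration entry points declared in GC.h take plain function pointers plus an opaque data pointer. As a quick illustration of that calling convention, here is a hedged, in-tree-only sketch (the helper name CountScriptsInRealm is hypothetical, and it assumes SpiderMonkey-internal headers and a main-thread caller):

#include "gc/GC.h"  // js::IterateScripts, IterateScriptCallback

namespace example {

static void CountScript(JSRuntime*, void* data, js::BaseScript*,
                        const JS::AutoRequireNoGC&) {
  // Runs during heap iteration, so it must not allocate or trigger a GC,
  // and the script pointer must not escape the callback.
  (*static_cast<size_t*>(data))++;
}

// Counts the in-use scripts of |realm| (pass nullptr for all realms).
size_t CountScriptsInRealm(JSContext* cx, JS::Realm* realm) {
  size_t count = 0;
  js::IterateScripts(cx, realm, &count, CountScript);
  return count;
}

}  // namespace example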
diff --git a/js/src/gc/GCEnum.h b/js/src/gc/GCEnum.h
new file mode 100644
index 0000000000..e6adedd005
--- /dev/null
+++ b/js/src/gc/GCEnum.h
@@ -0,0 +1,159 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal enum definitions.
+ */
+
+#ifndef gc_GCEnum_h
+#define gc_GCEnum_h
+
+#include <stdint.h>
+
+#include "js/MemoryFunctions.h" // JS_FOR_EACH_PUBLIC_MEMORY_USE
+
+namespace js {
+namespace gc {
+
+// The phases of an incremental GC.
+#define GCSTATES(D) \
+ D(NotActive) \
+ D(Prepare) \
+ D(MarkRoots) \
+ D(Mark) \
+ D(Sweep) \
+ D(Finalize) \
+ D(Compact) \
+ D(Decommit) \
+ D(Finish)
+enum class State {
+#define MAKE_STATE(name) name,
+ GCSTATES(MAKE_STATE)
+#undef MAKE_STATE
+};
+
+#define JS_FOR_EACH_ZEAL_MODE(D) \
+ D(RootsChange, 1) \
+ D(Alloc, 2) \
+ D(VerifierPre, 4) \
+ D(YieldBeforeRootMarking, 6) \
+ D(GenerationalGC, 7) \
+ D(YieldBeforeMarking, 8) \
+ D(YieldBeforeSweeping, 9) \
+ D(IncrementalMultipleSlices, 10) \
+ D(IncrementalMarkingValidator, 11) \
+ D(ElementsBarrier, 12) \
+ D(CheckHashTablesOnMinorGC, 13) \
+ D(Compact, 14) \
+ D(CheckHeapAfterGC, 15) \
+ D(CheckNursery, 16) \
+ D(YieldBeforeSweepingAtoms, 17) \
+ D(CheckGrayMarking, 18) \
+ D(YieldBeforeSweepingCaches, 19) \
+ D(YieldBeforeSweepingObjects, 21) \
+ D(YieldBeforeSweepingNonObjects, 22) \
+ D(YieldBeforeSweepingShapeTrees, 23) \
+ D(CheckWeakMapMarking, 24) \
+ D(YieldWhileGrayMarking, 25)
+
+enum class ZealMode {
+#define ZEAL_MODE(name, value) name = value,
+ JS_FOR_EACH_ZEAL_MODE(ZEAL_MODE)
+#undef ZEAL_MODE
+ Count,
+ Limit = Count - 1
+};
+
+} /* namespace gc */
+
+// Reasons we reset an ongoing incremental GC or perform a non-incremental GC.
+#define GC_ABORT_REASONS(D) \
+ D(None, 0) \
+ D(NonIncrementalRequested, 1) \
+ D(AbortRequested, 2) \
+ D(Unused1, 3) \
+ D(IncrementalDisabled, 4) \
+ D(ModeChange, 5) \
+ D(MallocBytesTrigger, 6) \
+ D(GCBytesTrigger, 7) \
+ D(ZoneChange, 8) \
+ D(CompartmentRevived, 9) \
+ D(GrayRootBufferingFailed, 10) \
+ D(JitCodeBytesTrigger, 11)
+enum class GCAbortReason {
+#define MAKE_REASON(name, num) name = num,
+ GC_ABORT_REASONS(MAKE_REASON)
+#undef MAKE_REASON
+};
+
+#define JS_FOR_EACH_INTERNAL_MEMORY_USE(_) \
+ _(ArrayBufferContents) \
+ _(StringContents) \
+ _(ObjectElements) \
+ _(ObjectSlots) \
+ _(ScriptPrivateData) \
+ _(MapObjectTable) \
+ _(BigIntDigits) \
+ _(ScopeData) \
+ _(WeakMapObject) \
+ _(ShapeChildren) \
+ _(ShapeCache) \
+ _(ModuleBindingMap) \
+ _(BaselineScript) \
+ _(IonScript) \
+ _(ArgumentsData) \
+ _(RareArgumentsData) \
+ _(RegExpStatics) \
+ _(RegExpSharedBytecode) \
+ _(RegExpSharedNamedCaptureData) \
+ _(TypedArrayElements) \
+ _(TypeDescrTraceList) \
+ _(NativeIterator) \
+ _(JitScript) \
+ _(ScriptDebugScript) \
+ _(BreakpointSite) \
+ _(Breakpoint) \
+ _(ForOfPIC) \
+ _(ForOfPICStub) \
+ _(WasmInstanceExports) \
+ _(WasmInstanceScopes) \
+ _(WasmInstanceGlobals) \
+ _(WasmInstanceInstance) \
+ _(WasmMemoryObservers) \
+ _(WasmGlobalCell) \
+ _(WasmResolveResponseClosure) \
+ _(WasmModule) \
+ _(WasmTableTable) \
+ _(WasmExceptionTag) \
+ _(WasmExceptionType) \
+ _(FileObjectFile) \
+ _(Debugger) \
+ _(DebuggerFrameGeneratorInfo) \
+ _(DebuggerFrameIterData) \
+ _(DebuggerOnStepHandler) \
+ _(DebuggerOnPopHandler) \
+ _(RealmInstrumentation) \
+ _(ICUObject) \
+ _(FinalizationRegistryRecordVector) \
+ _(FinalizationRegistryRegistrations) \
+ _(FinalizationRecordVector) \
+ _(ZoneAllocPolicy) \
+ _(SharedArrayRawBuffer) \
+ _(XDRBufferElements)
+
+#define JS_FOR_EACH_MEMORY_USE(_) \
+ JS_FOR_EACH_PUBLIC_MEMORY_USE(_) \
+ JS_FOR_EACH_INTERNAL_MEMORY_USE(_)
+
+enum class MemoryUse : uint8_t {
+#define DEFINE_MEMORY_USE(Name) Name,
+ JS_FOR_EACH_MEMORY_USE(DEFINE_MEMORY_USE)
+#undef DEFINE_MEMORY_USE
+};
+
+} /* namespace js */
+
+#endif /* gc_GCEnum_h */
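
GCEnum.h relies heavily on the X-macro pattern: a single list macro (GCSTATES, JS_FOR_EACH_ZEAL_MODE, JS_FOR_EACH_MEMORY_USE) is expanded several times to generate enums and, elsewhere, matching tables. The standalone sketch below shows the idea with a made-up three-state list; it mirrors how a name table for something like StateName() can be generated from the same list, but it is not the actual implementation:

#include <cstddef>
#include <cstdio>

#define DEMO_STATES(D) \
  D(NotActive)         \
  D(Mark)              \
  D(Sweep)

enum class DemoState {
#define MAKE_STATE(name) name,
  DEMO_STATES(MAKE_STATE)
#undef MAKE_STATE
};

// Expanding the same list with a different macro yields a parallel name table.
static const char* DemoStateName(DemoState state) {
  static const char* const names[] = {
#define MAKE_NAME(name) #name,
      DEMO_STATES(MAKE_NAME)
#undef MAKE_NAME
  };
  return names[static_cast<std::size_t>(state)];
}

int main() {
  std::printf("%s\n", DemoStateName(DemoState::Sweep));  // prints: Sweep
}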
diff --git a/js/src/gc/GCInternals.h b/js/src/gc/GCInternals.h
new file mode 100644
index 0000000000..007ba7d70a
--- /dev/null
+++ b/js/src/gc/GCInternals.h
@@ -0,0 +1,295 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal definitions.
+ */
+
+#ifndef gc_GCInternals_h
+#define gc_GCInternals_h
+
+#include "mozilla/Maybe.h"
+
+#include "gc/GC.h"
+#include "vm/JSContext.h"
+
+namespace js {
+namespace gc {
+
+/*
+ * There are a couple of classes here that serve mostly as "tokens" indicating
+ * that a precondition holds. Some functions force the caller to possess such a
+ * token because they require the precondition to hold, and it is better to make
+ * the precondition explicit at the API entry point than to crash in an
+ * assertion later on when it is relied upon.
+ */
+
+struct MOZ_RAII AutoAssertNoNurseryAlloc {
+#ifdef DEBUG
+ AutoAssertNoNurseryAlloc();
+ ~AutoAssertNoNurseryAlloc();
+#else
+ AutoAssertNoNurseryAlloc() {}
+#endif
+};
+
+/*
+ * A class that serves as a token that the nursery in the current thread's zone
+ * group is empty.
+ */
+class MOZ_RAII AutoAssertEmptyNursery {
+ protected:
+ JSContext* cx;
+
+ mozilla::Maybe<AutoAssertNoNurseryAlloc> noAlloc;
+
+ // Check that the nursery is empty.
+ void checkCondition(JSContext* cx);
+
+ // For subclasses that need to empty the nursery in their constructors.
+ AutoAssertEmptyNursery() : cx(nullptr) {}
+
+ public:
+ explicit AutoAssertEmptyNursery(JSContext* cx) : cx(nullptr) {
+ checkCondition(cx);
+ }
+
+ AutoAssertEmptyNursery(const AutoAssertEmptyNursery& other)
+ : AutoAssertEmptyNursery(other.cx) {}
+};
+
+/*
+ * Evict the nursery upon construction. Serves as a token indicating that the
+ * nursery is empty. (See AutoAssertEmptyNursery, above.)
+ */
+class MOZ_RAII AutoEmptyNursery : public AutoAssertEmptyNursery {
+ public:
+ explicit AutoEmptyNursery(JSContext* cx);
+};
+
+class MOZ_RAII AutoCheckCanAccessAtomsDuringGC {
+#ifdef DEBUG
+ JSRuntime* runtime;
+
+ public:
+ explicit AutoCheckCanAccessAtomsDuringGC(JSRuntime* rt) : runtime(rt) {
+ // Ensure we're only used from within the GC.
+ MOZ_ASSERT(JS::RuntimeHeapIsMajorCollecting());
+
+ // Ensure there is no off-thread parsing running.
+ MOZ_ASSERT(!rt->hasHelperThreadZones());
+
+ // Set up a check to assert if we try to start an off-thread parse.
+ runtime->setOffThreadParsingBlocked(true);
+ }
+ ~AutoCheckCanAccessAtomsDuringGC() {
+ runtime->setOffThreadParsingBlocked(false);
+ }
+#else
+ public:
+ explicit AutoCheckCanAccessAtomsDuringGC(JSRuntime* rt) {}
+#endif
+};
+
+// Abstract base class for exclusive heap access for tracing or GC.
+class MOZ_RAII AutoHeapSession {
+ public:
+ ~AutoHeapSession();
+
+ protected:
+ AutoHeapSession(GCRuntime* gc, JS::HeapState state);
+
+ private:
+ AutoHeapSession(const AutoHeapSession&) = delete;
+ void operator=(const AutoHeapSession&) = delete;
+
+ GCRuntime* gc;
+ JS::HeapState prevState;
+ mozilla::Maybe<AutoGeckoProfilerEntry> profilingStackFrame;
+};
+
+class MOZ_RAII AutoGCSession : public AutoHeapSession {
+ public:
+ explicit AutoGCSession(GCRuntime* gc, JS::HeapState state)
+ : AutoHeapSession(gc, state) {}
+
+ AutoCheckCanAccessAtomsDuringGC& checkAtomsAccess() {
+ return maybeCheckAtomsAccess.ref();
+ }
+
+ // During a GC we can check that it's not possible for anything else to be
+ // using the atoms zone.
+ mozilla::Maybe<AutoCheckCanAccessAtomsDuringGC> maybeCheckAtomsAccess;
+};
+
+class MOZ_RAII AutoMajorGCProfilerEntry : public AutoGeckoProfilerEntry {
+ public:
+ explicit AutoMajorGCProfilerEntry(GCRuntime* gc);
+};
+
+class MOZ_RAII AutoTraceSession : public AutoLockAllAtoms,
+ public AutoHeapSession {
+ public:
+ explicit AutoTraceSession(JSRuntime* rt)
+ : AutoLockAllAtoms(rt),
+ AutoHeapSession(&rt->gc, JS::HeapState::Tracing) {}
+};
+
+struct MOZ_RAII AutoFinishGC {
+ explicit AutoFinishGC(JSContext* cx, JS::GCReason reason) {
+ FinishGC(cx, reason);
+ }
+};
+
+// This class should be used by any code that needs exclusive access to the heap
+// in order to trace through it.
+class MOZ_RAII AutoPrepareForTracing : private AutoFinishGC,
+ public AutoTraceSession {
+ public:
+ explicit AutoPrepareForTracing(JSContext* cx)
+ : AutoFinishGC(cx, JS::GCReason::PREPARE_FOR_TRACING),
+ AutoTraceSession(cx->runtime()) {}
+};
+
+// This class should be used by any code that needs exclusive access to the heap
+// in order to trace through it.
+//
+// This version also empties the nursery after finishing any ongoing GC.
+class MOZ_RAII AutoEmptyNurseryAndPrepareForTracing : private AutoFinishGC,
+ public AutoEmptyNursery,
+ public AutoTraceSession {
+ public:
+ explicit AutoEmptyNurseryAndPrepareForTracing(JSContext* cx)
+ : AutoFinishGC(cx, JS::GCReason::PREPARE_FOR_TRACING),
+ AutoEmptyNursery(cx),
+ AutoTraceSession(cx->runtime()) {}
+};
+
+/*
+ * Temporarily disable incremental barriers.
+ */
+class AutoDisableBarriers {
+ public:
+ explicit AutoDisableBarriers(GCRuntime* gc);
+ ~AutoDisableBarriers();
+
+ private:
+ GCRuntime* gc;
+};
+
+GCAbortReason IsIncrementalGCUnsafe(JSRuntime* rt);
+
+#ifdef JS_GC_ZEAL
+
+class MOZ_RAII AutoStopVerifyingBarriers {
+ GCRuntime* gc;
+ bool restartPreVerifier;
+
+ public:
+ AutoStopVerifyingBarriers(JSRuntime* rt, bool isShutdown) : gc(&rt->gc) {
+ if (gc->isVerifyPreBarriersEnabled()) {
+ gc->endVerifyPreBarriers();
+ restartPreVerifier = !isShutdown;
+ } else {
+ restartPreVerifier = false;
+ }
+ }
+
+ ~AutoStopVerifyingBarriers() {
+ // Nasty special case: verification runs a minor GC, which *may* nest
+ // inside of an outer minor GC. This is not allowed by the
+ // gc::Statistics phase tree. So we pause the "real" GC, if in fact one
+ // is in progress.
+ gcstats::PhaseKind outer = gc->stats().currentPhaseKind();
+ if (outer != gcstats::PhaseKind::NONE) {
+ gc->stats().endPhase(outer);
+ }
+ MOZ_ASSERT(gc->stats().currentPhaseKind() == gcstats::PhaseKind::NONE);
+
+ if (restartPreVerifier) {
+ gc->startVerifyPreBarriers();
+ }
+
+ if (outer != gcstats::PhaseKind::NONE) {
+ gc->stats().beginPhase(outer);
+ }
+ }
+};
+#else
+struct MOZ_RAII AutoStopVerifyingBarriers {
+ AutoStopVerifyingBarriers(JSRuntime*, bool) {}
+};
+#endif /* JS_GC_ZEAL */
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void CheckHashTablesAfterMovingGC(JSRuntime* rt);
+void CheckHeapAfterGC(JSRuntime* rt);
+#endif
+
+struct MovingTracer final : public GenericTracer {
+ explicit MovingTracer(JSRuntime* rt)
+ : GenericTracer(rt, JS::TracerKind::Moving,
+ JS::WeakMapTraceAction::TraceKeysAndValues) {}
+
+ JSObject* onObjectEdge(JSObject* obj) override;
+ Shape* onShapeEdge(Shape* shape) override;
+ JSString* onStringEdge(JSString* string) override;
+ js::BaseScript* onScriptEdge(js::BaseScript* script) override;
+ BaseShape* onBaseShapeEdge(BaseShape* base) override;
+ Scope* onScopeEdge(Scope* scope) override;
+ RegExpShared* onRegExpSharedEdge(RegExpShared* shared) override;
+ BigInt* onBigIntEdge(BigInt* bi) override;
+ ObjectGroup* onObjectGroupEdge(ObjectGroup* group) override;
+ JS::Symbol* onSymbolEdge(JS::Symbol* sym) override;
+ jit::JitCode* onJitCodeEdge(jit::JitCode* jit) override;
+
+ private:
+ template <typename T>
+ T* onEdge(T* thingp);
+};
+
+struct SweepingTracer final : public GenericTracer {
+ explicit SweepingTracer(JSRuntime* rt)
+ : GenericTracer(rt, JS::TracerKind::Sweeping,
+ JS::WeakMapTraceAction::TraceKeysAndValues) {}
+
+ JSObject* onObjectEdge(JSObject* obj) override;
+ Shape* onShapeEdge(Shape* shape) override;
+ JSString* onStringEdge(JSString* string) override;
+ js::BaseScript* onScriptEdge(js::BaseScript* script) override;
+ BaseShape* onBaseShapeEdge(BaseShape* base) override;
+ jit::JitCode* onJitCodeEdge(jit::JitCode* jit) override;
+ Scope* onScopeEdge(Scope* scope) override;
+ RegExpShared* onRegExpSharedEdge(RegExpShared* shared) override;
+ BigInt* onBigIntEdge(BigInt* bi) override;
+ js::ObjectGroup* onObjectGroupEdge(js::ObjectGroup* group) override;
+ JS::Symbol* onSymbolEdge(JS::Symbol* sym) override;
+
+ private:
+ template <typename T>
+ T* onEdge(T* thingp);
+};
+
+extern void DelayCrossCompartmentGrayMarking(JSObject* src);
+
+inline bool IsOOMReason(JS::GCReason reason) {
+ return reason == JS::GCReason::LAST_DITCH ||
+ reason == JS::GCReason::MEM_PRESSURE;
+}
+
+// TODO: Bug 1650075. Adding XPCONNECT_SHUTDOWN seems to cause crash.
+inline bool IsShutdownReason(JS::GCReason reason) {
+ return reason == JS::GCReason::WORKER_SHUTDOWN ||
+ reason == JS::GCReason::SHUTDOWN_CC ||
+ reason == JS::GCReason::DESTROY_RUNTIME;
+}
+
+TenuredCell* AllocateCellInGC(JS::Zone* zone, AllocKind thingKind);
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_GCInternals_h */
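
The comment at the top of GCInternals.h describes classes that act as "tokens" proving a precondition holds. The pattern is simply that functions relying on the precondition take the token by reference, so the requirement shows up in their signature. A minimal standalone sketch, with made-up names:

#include <cassert>

// RAII token asserting (in debug-style builds) that some precondition holds.
class AutoAssertTableLocked {
 public:
  explicit AutoAssertTableLocked(bool locked) { assert(locked); }
};

// Callers must present a token, so the precondition is explicit at the API
// entry point rather than being checked deep inside.
void MutateTable(const AutoAssertTableLocked& /* proof */) {
  // ... mutate shared state ...
}

int main() {
  bool locked = true;  // pretend the lock was taken
  AutoAssertTableLocked token(locked);
  MutateTable(token);
}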
diff --git a/js/src/gc/GCLock.h b/js/src/gc/GCLock.h
new file mode 100644
index 0000000000..4c0243d47a
--- /dev/null
+++ b/js/src/gc/GCLock.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal classes for acquiring and releasing the GC lock.
+ */
+
+#ifndef gc_GCLock_h
+#define gc_GCLock_h
+
+#include "vm/Runtime.h"
+
+namespace js {
+
+class AutoUnlockGC;
+
+/*
+ * RAII class that takes the GC lock while it is live.
+ *
+ * Usually functions will pass const references of this class. However
+ * non-const references can be used to either temporarily release the lock by
+ * use of AutoUnlockGC or to start background allocation when the lock is
+ * released.
+ */
+class MOZ_RAII AutoLockGC {
+ public:
+ explicit AutoLockGC(gc::GCRuntime* gc) : gc(gc) { lock(); }
+ explicit AutoLockGC(JSRuntime* rt) : AutoLockGC(&rt->gc) {}
+
+ ~AutoLockGC() { lockGuard_.reset(); }
+
+ protected:
+ void lock() {
+ MOZ_ASSERT(lockGuard_.isNothing());
+ lockGuard_.emplace(gc->lock);
+ }
+
+ void unlock() {
+ MOZ_ASSERT(lockGuard_.isSome());
+ lockGuard_.reset();
+ }
+
+ js::LockGuard<js::Mutex>& guard() { return lockGuard_.ref(); }
+
+ gc::GCRuntime* const gc;
+
+ private:
+ mozilla::Maybe<js::LockGuard<js::Mutex>> lockGuard_;
+
+ AutoLockGC(const AutoLockGC&) = delete;
+ AutoLockGC& operator=(const AutoLockGC&) = delete;
+
+ friend class AutoUnlockGC; // For lock/unlock.
+};
+
+/*
+ * Same as AutoLockGC except it can optionally start a background chunk
+ * allocation task when the lock is released.
+ */
+class MOZ_RAII AutoLockGCBgAlloc : public AutoLockGC {
+ public:
+ explicit AutoLockGCBgAlloc(gc::GCRuntime* gc) : AutoLockGC(gc) {}
+ explicit AutoLockGCBgAlloc(JSRuntime* rt) : AutoLockGCBgAlloc(&rt->gc) {}
+
+ ~AutoLockGCBgAlloc() {
+ unlock();
+
+ /*
+ * We have to do this after releasing the lock because it may acquire
+ * the helper lock which could cause lock inversion if we still held
+ * the GC lock.
+ */
+ if (startBgAlloc) {
+ gc->startBackgroundAllocTaskIfIdle();
+ }
+ }
+
+ /*
+ * This can be used to start a background allocation task (if one isn't
+ * already running) that allocates chunks and makes them available in the
+ * free chunks list. This happens after the lock is released in order to
+ * avoid lock inversion.
+ */
+ void tryToStartBackgroundAllocation() { startBgAlloc = true; }
+
+ private:
+ // true if we should start a background chunk allocation task after the
+ // lock is released.
+ bool startBgAlloc = false;
+};
+
+class MOZ_RAII AutoUnlockGC {
+ public:
+ explicit AutoUnlockGC(AutoLockGC& lock) : lock(lock) { lock.unlock(); }
+
+ ~AutoUnlockGC() { lock.lock(); }
+
+ private:
+ AutoLockGC& lock;
+
+ AutoUnlockGC(const AutoUnlockGC&) = delete;
+ AutoUnlockGC& operator=(const AutoUnlockGC&) = delete;
+};
+
+} // namespace js
+
+#endif /* gc_GCLock_h */
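
AutoLockGC and AutoUnlockGC form the usual RAII pair: the lock is held for the lifetime of the AutoLockGC, and an AutoUnlockGC scope can temporarily release it (for example to avoid lock-order inversion). A standalone sketch of the same pairing using std::mutex, with simplified names and none of the background-allocation machinery:

#include <mutex>
#include <optional>

class AutoLock {
  std::mutex& mutex_;
  std::optional<std::unique_lock<std::mutex>> guard_;

  void lock() { guard_.emplace(mutex_); }
  void unlock() { guard_.reset(); }

  friend class AutoUnlock;  // for lock/unlock

 public:
  explicit AutoLock(std::mutex& mutex) : mutex_(mutex) { lock(); }
};

class AutoUnlock {
  AutoLock& lock_;

 public:
  explicit AutoUnlock(AutoLock& lock) : lock_(lock) { lock_.unlock(); }
  ~AutoUnlock() { lock_.lock(); }
};

int main() {
  std::mutex m;
  AutoLock lock(m);  // lock acquired here
  {
    AutoUnlock unlock(lock);  // temporarily released in this scope
  }  // re-acquired here
}  // released when |lock| goes out of scope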
diff --git a/js/src/gc/GCMarker.h b/js/src/gc/GCMarker.h
new file mode 100644
index 0000000000..068f27e36d
--- /dev/null
+++ b/js/src/gc/GCMarker.h
@@ -0,0 +1,584 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCMarker_h
+#define gc_GCMarker_h
+
+#include "mozilla/Maybe.h"
+#include "mozilla/Unused.h"
+
+#include "ds/OrderedHashTable.h"
+#include "gc/Barrier.h"
+#include "js/SliceBudget.h"
+#include "js/TracingAPI.h"
+#include "js/TypeDecls.h"
+
+class JSRope;
+
+namespace js {
+
+class AutoAccessAtomsZone;
+class WeakMapBase;
+
+static const size_t NON_INCREMENTAL_MARK_STACK_BASE_CAPACITY = 4096;
+static const size_t INCREMENTAL_MARK_STACK_BASE_CAPACITY = 32768;
+static const size_t SMALL_MARK_STACK_BASE_CAPACITY = 256;
+
+enum class SlotsOrElementsKind { Elements, FixedSlots, DynamicSlots };
+
+namespace gc {
+
+enum IncrementalProgress { NotFinished = 0, Finished };
+
+struct Cell;
+
+struct WeakKeyTableHashPolicy {
+ using Lookup = Cell*;
+ static HashNumber hash(const Lookup& v,
+ const mozilla::HashCodeScrambler& hcs) {
+ return hcs.scramble(mozilla::HashGeneric(v));
+ }
+ static bool match(Cell* const& k, const Lookup& l) { return k == l; }
+ static bool isEmpty(Cell* const& v) { return !v; }
+ static void makeEmpty(Cell** vp) { *vp = nullptr; }
+};
+
+struct WeakMarkable {
+ WeakMapBase* weakmap;
+ Cell* key;
+
+ WeakMarkable(WeakMapBase* weakmapArg, Cell* keyArg)
+ : weakmap(weakmapArg), key(keyArg) {}
+
+ bool operator==(const WeakMarkable& other) const {
+ return weakmap == other.weakmap && key == other.key;
+ }
+};
+
+using WeakEntryVector = Vector<WeakMarkable, 2, js::SystemAllocPolicy>;
+
+using WeakKeyTable =
+ OrderedHashMap<Cell*, WeakEntryVector, WeakKeyTableHashPolicy,
+ js::SystemAllocPolicy>;
+
+/*
+ * When the mark stack is full, the GC does not call js::TraceChildren to mark
+ * the reachable "children" of the thing. Rather the thing is put aside and
+ * js::TraceChildren is called later when the mark stack is empty.
+ *
+ * To implement such delayed marking of the children with minimal overhead for
+ * the normal case of sufficient stack, we link arenas into a list using
+ * Arena::setNextDelayedMarkingArena(). The head of the list is stored in
+ * GCMarker::delayedMarkingList. GCMarker::delayMarkingChildren() adds arenas
+ * to the list as necessary while markAllDelayedChildren() pops the arenas from
+ * the stack until it is empty.
+ */
+class MarkStack {
+ public:
+ /*
+ * We use a common mark stack to mark GC things of different types and use
+   * the explicit tags to distinguish them when it cannot be deduced from
+   * the context of the push or pop operation.
+ */
+ enum Tag {
+ SlotsOrElementsRangeTag,
+ ObjectTag,
+ GroupTag,
+ JitCodeTag,
+ ScriptTag,
+ TempRopeTag,
+
+ LastTag = TempRopeTag
+ };
+
+ static const uintptr_t TagMask = 7;
+ static_assert(TagMask >= uintptr_t(LastTag),
+ "The tag mask must subsume the tags.");
+ static_assert(TagMask <= gc::CellAlignMask,
+ "The tag mask must be embeddable in a Cell*.");
+
+ class TaggedPtr {
+ uintptr_t bits;
+
+ Cell* ptr() const;
+
+ public:
+ TaggedPtr() = default;
+ TaggedPtr(Tag tag, Cell* ptr);
+ Tag tag() const;
+ template <typename T>
+ T* as() const;
+
+ JSObject* asRangeObject() const;
+ JSRope* asTempRope() const;
+
+ void assertValid() const;
+ };
+
+ struct SlotsOrElementsRange {
+ SlotsOrElementsRange(SlotsOrElementsKind kind, JSObject* obj, size_t start);
+ void assertValid() const;
+
+ SlotsOrElementsKind kind() const;
+ size_t start() const;
+ TaggedPtr ptr() const;
+
+ static constexpr size_t StartShift = 2;
+ static constexpr size_t KindMask = (1 << StartShift) - 1;
+
+ private:
+ uintptr_t startAndKind_;
+ TaggedPtr ptr_;
+ };
+
+ explicit MarkStack(size_t maxCapacity = DefaultCapacity);
+ ~MarkStack();
+
+ static const size_t DefaultCapacity = SIZE_MAX;
+
+ // The unit for MarkStack::capacity() is mark stack entries.
+ size_t capacity() { return stack().length(); }
+
+ size_t position() const { return topIndex_; }
+
+ enum StackType { MainStack, AuxiliaryStack };
+ MOZ_MUST_USE bool init(StackType which, bool incrementalGCEnabled);
+
+ MOZ_MUST_USE bool setStackCapacity(StackType which,
+ bool incrementalGCEnabled);
+
+ size_t maxCapacity() const { return maxCapacity_; }
+ void setMaxCapacity(size_t maxCapacity);
+
+ template <typename T>
+ MOZ_MUST_USE bool push(T* ptr);
+
+ MOZ_MUST_USE bool push(JSObject* obj, SlotsOrElementsKind kind, size_t start);
+ MOZ_MUST_USE bool push(const SlotsOrElementsRange& array);
+
+ // GCMarker::eagerlyMarkChildren uses unused marking stack as temporary
+ // storage to hold rope pointers.
+ MOZ_MUST_USE bool pushTempRope(JSRope* ptr);
+
+ bool isEmpty() const { return topIndex_ == 0; }
+
+ Tag peekTag() const;
+ TaggedPtr popPtr();
+ SlotsOrElementsRange popSlotsOrElementsRange();
+
+ void clear() {
+ // Fall back to the smaller initial capacity so we don't hold on to excess
+ // memory between GCs.
+ stack().clearAndFree();
+ mozilla::Unused << stack().resize(NON_INCREMENTAL_MARK_STACK_BASE_CAPACITY);
+ topIndex_ = 0;
+ }
+
+ void poisonUnused();
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ private:
+ using StackVector = Vector<TaggedPtr, 0, SystemAllocPolicy>;
+ const StackVector& stack() const { return stack_.ref(); }
+ StackVector& stack() { return stack_.ref(); }
+
+ MOZ_MUST_USE bool ensureSpace(size_t count);
+
+ /* Grow the stack, ensuring there is space for at least count elements. */
+ MOZ_MUST_USE bool enlarge(size_t count);
+
+ MOZ_MUST_USE bool resize(size_t newCapacity);
+
+ TaggedPtr* topPtr();
+
+ const TaggedPtr& peekPtr() const;
+ MOZ_MUST_USE bool pushTaggedPtr(Tag tag, Cell* ptr);
+
+ // Index of the top of the stack.
+ MainThreadOrGCTaskData<size_t> topIndex_;
+
+ // The maximum stack capacity to grow to.
+ MainThreadOrGCTaskData<size_t> maxCapacity_;
+
+ // Vector containing allocated stack memory. Unused beyond topIndex_.
+ MainThreadOrGCTaskData<StackVector> stack_;
+
+#ifdef DEBUG
+ mutable size_t iteratorCount_;
+#endif
+
+ friend class MarkStackIter;
+};
+
+class MarkStackIter {
+ MarkStack& stack_;
+ size_t pos_;
+
+ public:
+ explicit MarkStackIter(MarkStack& stack);
+ ~MarkStackIter();
+
+ bool done() const;
+ MarkStack::Tag peekTag() const;
+ MarkStack::TaggedPtr peekPtr() const;
+ MarkStack::SlotsOrElementsRange peekSlotsOrElementsRange() const;
+ void next();
+ void nextPtr();
+ void nextArray();
+
+ private:
+ size_t position() const;
+};
+
+} /* namespace gc */
+
+enum MarkingState : uint8_t {
+ // Have not yet started marking.
+ NotActive,
+
+ // Main marking mode. Weakmap marking will be populating the weakKeys tables
+ // but not consulting them. The state will transition to WeakMarking until it
+ // is done, then back to RegularMarking.
+ RegularMarking,
+
+ // Same as RegularMarking except now every marked obj/script is immediately
+ // looked up in the weakKeys table to see if it is a weakmap key, and
+ // therefore might require marking its value. Transitions back to
+ // RegularMarking when done.
+ WeakMarking,
+
+ // Same as RegularMarking, but we OOMed (or obeyed a directive in the test
+ // marking queue) and fell back to iterating until the next GC.
+ IterativeMarking
+};
+
+class GCMarker final : public JSTracer {
+ public:
+ explicit GCMarker(JSRuntime* rt);
+ MOZ_MUST_USE bool init();
+
+ void setMaxCapacity(size_t maxCap) { stack.setMaxCapacity(maxCap); }
+ size_t maxCapacity() const { return stack.maxCapacity(); }
+
+ bool isActive() const { return state != MarkingState::NotActive; }
+
+ void start();
+ void stop();
+ void reset();
+
+ // Mark the given GC thing and traverse its children at some point.
+ template <typename T>
+ void traverse(T thing);
+
+ // Calls traverse on target after making additional assertions.
+ template <typename S, typename T>
+ void traverseEdge(S source, T* target);
+ template <typename S, typename T>
+ void traverseEdge(S source, const T& target);
+
+ // Helper methods that coerce their second argument to the base pointer
+ // type.
+ template <typename S>
+ void traverseObjectEdge(S source, JSObject* target) {
+ traverseEdge(source, target);
+ }
+ template <typename S>
+ void traverseStringEdge(S source, JSString* target) {
+ traverseEdge(source, target);
+ }
+
+ template <typename S, typename T>
+ void checkTraversedEdge(S source, T* target);
+
+#ifdef DEBUG
+ // We can't check atom marking if the helper thread lock is already held by
+ // the current thread. This allows us to disable the check.
+ void setCheckAtomMarking(bool check);
+#endif
+
+ /*
+ * Care must be taken changing the mark color from gray to black. The cycle
+ * collector depends on the invariant that there are no black to gray edges
+ * in the GC heap. This invariant lets the CC not trace through black
+ * objects. If this invariant is violated, the cycle collector may free
+ * objects that are still reachable.
+ */
+ void setMarkColor(gc::MarkColor newColor);
+ void setMarkColorUnchecked(gc::MarkColor newColor);
+ gc::MarkColor markColor() const { return color; }
+
+ // Declare which color the main mark stack will be used for. The whole stack
+ // must be empty when this is called.
+ void setMainStackColor(gc::MarkColor newColor);
+
+ bool enterWeakMarkingMode();
+ void leaveWeakMarkingMode();
+
+ // Do not use linear-time weak marking for the rest of this collection.
+ // Currently, this will only be triggered by an OOM when updating needed data
+ // structures.
+ void abortLinearWeakMarking() {
+ if (state == MarkingState::WeakMarking) {
+ leaveWeakMarkingMode();
+ }
+ state = MarkingState::IterativeMarking;
+ }
+
+ void delayMarkingChildren(gc::Cell* cell);
+
+ // Remove <map,toRemove> from the weak keys table indexed by 'key'.
+ void forgetWeakKey(js::gc::WeakKeyTable& weakKeys, WeakMapBase* map,
+ gc::Cell* keyOrDelegate, gc::Cell* keyToRemove);
+
+ // Purge all mention of 'map' from the weak keys table.
+ void forgetWeakMap(WeakMapBase* map, Zone* zone);
+
+ // 'delegate' is no longer the delegate of 'key'.
+ void severWeakDelegate(JSObject* key, JSObject* delegate);
+
+ // 'delegate' is now the delegate of 'key'. Update weakmap marking state.
+ void restoreWeakDelegate(JSObject* key, JSObject* delegate);
+
+ bool isDrained() { return isMarkStackEmpty() && !delayedMarkingList; }
+
+ // The mark queue is a testing-only feature for controlling mark ordering and
+ // yield timing.
+ enum MarkQueueProgress {
+ QueueYielded, // End this incremental GC slice, if possible
+ QueueComplete, // Done with the queue
+ QueueSuspended // Continue the GC without ending the slice
+ };
+ MarkQueueProgress processMarkQueue();
+
+ enum ShouldReportMarkTime : bool {
+ ReportMarkTime = true,
+ DontReportMarkTime = false
+ };
+ MOZ_MUST_USE bool markUntilBudgetExhausted(
+ SliceBudget& budget, ShouldReportMarkTime reportTime = ReportMarkTime);
+
+ void setIncrementalGCEnabled(bool enabled) {
+ // Ignore failure to resize the stack and keep using the existing stack.
+ mozilla::Unused << stack.setStackCapacity(gc::MarkStack::MainStack,
+ enabled);
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+#ifdef DEBUG
+ bool shouldCheckCompartments() { return strictCompartmentChecking; }
+#endif
+
+ void markEphemeronValues(gc::Cell* markedCell, gc::WeakEntryVector& entry);
+
+ size_t getMarkCount() const { return markCount; }
+ void clearMarkCount() { markCount = 0; }
+
+ static GCMarker* fromTracer(JSTracer* trc) {
+ MOZ_ASSERT(trc->isMarkingTracer());
+ return static_cast<GCMarker*>(trc);
+ }
+
+ template <typename T>
+ void markImplicitEdges(T* oldThing);
+
+ bool isWeakMarking() const { return state == MarkingState::WeakMarking; }
+
+ private:
+#ifdef DEBUG
+ void checkZone(void* p);
+#else
+ void checkZone(void* p) {}
+#endif
+
+ // Push an object onto the stack for later tracing and assert that it has
+ // already been marked.
+ inline void repush(JSObject* obj);
+
+ template <typename T>
+ void markAndTraceChildren(T* thing);
+ template <typename T>
+ void markAndPush(T* thing);
+ template <typename T>
+ void markAndScan(T* thing);
+ template <typename T>
+ void markImplicitEdgesHelper(T oldThing);
+ void eagerlyMarkChildren(JSLinearString* str);
+ void eagerlyMarkChildren(JSRope* rope);
+ void eagerlyMarkChildren(JSString* str);
+ void eagerlyMarkChildren(Shape* shape);
+ void eagerlyMarkChildren(Scope* scope);
+ void lazilyMarkChildren(ObjectGroup* group);
+
+  // We may not have concrete types yet, so it is defined outside the header.
+ template <typename T>
+ void dispatchToTraceChildren(T* thing);
+
+ // Mark the given GC thing, but do not trace its children. Return true
+ // if the thing became marked.
+ template <typename T>
+ MOZ_MUST_USE bool mark(T* thing);
+
+ template <typename T>
+ inline void pushTaggedPtr(T* ptr);
+
+ inline void pushValueRange(JSObject* obj, SlotsOrElementsKind kind,
+ size_t start, size_t end);
+
+ bool isMarkStackEmpty() { return stack.isEmpty() && auxStack.isEmpty(); }
+
+ bool hasBlackEntries() const {
+ return !getStack(gc::MarkColor::Black).isEmpty();
+ }
+
+ bool hasGrayEntries() const {
+ return !getStack(gc::MarkColor::Gray).isEmpty();
+ }
+
+ inline void processMarkStackTop(SliceBudget& budget);
+
+ void markDelayedChildren(gc::Arena* arena, gc::MarkColor color);
+ MOZ_MUST_USE bool markAllDelayedChildren(SliceBudget& budget);
+ bool processDelayedMarkingList(gc::MarkColor color, SliceBudget& budget);
+ bool hasDelayedChildren() const { return !!delayedMarkingList; }
+ void rebuildDelayedMarkingList();
+ void appendToDelayedMarkingList(gc::Arena** listTail, gc::Arena* arena);
+
+ template <typename F>
+ void forEachDelayedMarkingArena(F&& f);
+
+ /*
+ * The mark stack. Pointers in this stack are "gray" in the GC sense, but may
+ * mark the contained items either black or gray (in the CC sense) depending
+ * on mainStackColor.
+ */
+ gc::MarkStack stack;
+
+ /*
+ * A smaller, auxiliary stack, currently only used to accumulate the rare
+ * objects that need to be marked black during gray marking.
+ */
+ gc::MarkStack auxStack;
+
+ /* The color is only applied to objects and functions. */
+ MainThreadOrGCTaskData<gc::MarkColor> color;
+
+ MainThreadOrGCTaskData<gc::MarkColor> mainStackColor;
+
+ MainThreadOrGCTaskData<gc::MarkStack*> currentStackPtr;
+
+ gc::MarkStack& getStack(gc::MarkColor which) {
+ return which == mainStackColor ? stack : auxStack;
+ }
+ const gc::MarkStack& getStack(gc::MarkColor which) const {
+ return which == mainStackColor ? stack : auxStack;
+ }
+
+ gc::MarkStack& currentStack() {
+ MOZ_ASSERT(currentStackPtr);
+ return *currentStackPtr;
+ }
+
+ /* Pointer to the top of the stack of arenas we are delaying marking on. */
+ MainThreadOrGCTaskData<js::gc::Arena*> delayedMarkingList;
+
+ /* Whether more work has been added to the delayed marking list. */
+ MainThreadOrGCTaskData<bool> delayedMarkingWorkAdded;
+
+ /* The count of marked objects during GC. */
+ size_t markCount;
+
+ /* Track the state of marking. */
+ MainThreadOrGCTaskData<MarkingState> state;
+
+ public:
+ /*
+ * Whether weakmaps can be marked incrementally.
+ *
+ * JSGC_INCREMENTAL_WEAKMAP_ENABLED
+ * pref: javascript.options.mem.incremental_weakmap
+ */
+ MainThreadOrGCTaskData<bool> incrementalWeakMapMarkingEnabled;
+
+#ifdef DEBUG
+ private:
+ /* Count of arenas that are currently in the stack. */
+ MainThreadOrGCTaskData<size_t> markLaterArenas;
+
+ /* Assert that start and stop are called with correct ordering. */
+ MainThreadOrGCTaskData<bool> started;
+
+ /*
+   * Whether to check that atoms traversed are present in the atom marking
+ * bitmap.
+ */
+ MainThreadOrGCTaskData<bool> checkAtomMarking;
+
+ /* The test marking queue might want to be marking a particular color. */
+ mozilla::Maybe<js::gc::MarkColor> queueMarkColor;
+
+ /*
+ * If this is true, all marked objects must belong to a compartment being
+ * GCed. This is used to look for compartment bugs.
+ */
+ MainThreadOrGCTaskData<bool> strictCompartmentChecking;
+
+ public:
+ /*
+ * The compartment and zone of the object whose trace hook is currently being
+ * called, if any. Used to catch cross-compartment edges traced without use of
+ * TraceCrossCompartmentEdge.
+ */
+ MainThreadOrGCTaskData<Compartment*> tracingCompartment;
+ MainThreadOrGCTaskData<Zone*> tracingZone;
+
+ /*
+   * List of objects to mark at the beginning of a GC. May also contain string
+ * directives to change mark color or wait until different phases of the GC.
+ *
+ * This is a WeakCache because not everything in this list is guaranteed to
+ * end up marked (eg if you insert an object from an already-processed sweep
+ * group in the middle of an incremental GC). Also, the mark queue is not
+ * used during shutdown GCs. In either case, unmarked objects may need to be
+ * discarded.
+ */
+ JS::WeakCache<GCVector<JS::Heap<JS::Value>, 0, SystemAllocPolicy>> markQueue;
+
+ /* Position within the test mark queue. */
+ size_t queuePos;
+#endif // DEBUG
+};
+
+namespace gc {
+
+/*
+ * Temporarily change the mark color while this class is on the stack.
+ *
+ * During incremental sweeping this also transitions zones in the
+ * current sweep group into the Mark or MarkGray state as appropriate.
+ */
+class MOZ_RAII AutoSetMarkColor {
+ GCMarker& marker_;
+ MarkColor initialColor_;
+
+ public:
+ AutoSetMarkColor(GCMarker& marker, MarkColor newColor)
+ : marker_(marker), initialColor_(marker.markColor()) {
+ marker_.setMarkColor(newColor);
+ }
+
+ AutoSetMarkColor(GCMarker& marker, CellColor newColor)
+ : AutoSetMarkColor(marker, newColor.asMarkColor()) {}
+
+ ~AutoSetMarkColor() { marker_.setMarkColor(initialColor_); }
+};
+
+} /* namespace gc */
+
+} /* namespace js */
+
+#endif /* gc_GCMarker_h */
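
MarkStack::TaggedPtr packs a small tag into the low bits of a Cell pointer: cells are aligned, so the low three bits (TagMask == 7) are always zero and can carry the tag. The standalone sketch below shows that packing scheme with a fake cell type and made-up tags; the real TaggedPtr additionally validates the pointer and supports the SlotsOrElementsRange encoding:

#include <cassert>
#include <cstdint>

enum Tag : uintptr_t { ObjectTag = 1, ScriptTag = 2 };
constexpr uintptr_t TagMask = 7;  // three low bits available for the tag

struct alignas(8) FakeCell {
  int data;
};

uintptr_t MakeTaggedPtr(Tag tag, FakeCell* ptr) {
  auto bits = reinterpret_cast<uintptr_t>(ptr);
  assert((bits & TagMask) == 0);  // alignment leaves the low bits clear
  assert(uintptr_t(tag) <= TagMask);
  return bits | uintptr_t(tag);
}

Tag GetTag(uintptr_t bits) { return Tag(bits & TagMask); }

FakeCell* GetPtr(uintptr_t bits) {
  return reinterpret_cast<FakeCell*>(bits & ~TagMask);
}

int main() {
  FakeCell cell{42};
  uintptr_t tagged = MakeTaggedPtr(ObjectTag, &cell);
  assert(GetTag(tagged) == ObjectTag);
  assert(GetPtr(tagged)->data == 42);
}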
diff --git a/js/src/gc/GCParallelTask.cpp b/js/src/gc/GCParallelTask.cpp
new file mode 100644
index 0000000000..8162e33000
--- /dev/null
+++ b/js/src/gc/GCParallelTask.cpp
@@ -0,0 +1,173 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/GCParallelTask.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "gc/ParallelWork.h"
+#include "vm/HelperThreadState.h"
+#include "vm/Runtime.h"
+#include "vm/TraceLogging.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
+js::GCParallelTask::~GCParallelTask() {
+ // Only most-derived classes' destructors may do the join: base class
+ // destructors run after those for derived classes' members, so a join in a
+ // base class can't ensure that the task is done using the members. All we
+ // can do now is check that someone has previously stopped the task.
+ assertIdle();
+}
+
+void js::GCParallelTask::startWithLockHeld(AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(CanUseExtraThreads());
+ MOZ_ASSERT(!HelperThreadState().threads(lock).empty());
+ assertIdle();
+
+ setDispatched(lock);
+ HelperThreadState().submitTask(this, lock);
+}
+
+void js::GCParallelTask::start() {
+ if (!CanUseExtraThreads()) {
+ runFromMainThread();
+ return;
+ }
+
+ AutoLockHelperThreadState lock;
+ startWithLockHeld(lock);
+}
+
+void js::GCParallelTask::startOrRunIfIdle(AutoLockHelperThreadState& lock) {
+ if (wasStarted(lock)) {
+ return;
+ }
+
+ // Join the previous invocation of the task. This will return immediately
+ // if the thread has never been started.
+ joinWithLockHeld(lock);
+
+ if (!CanUseExtraThreads()) {
+ AutoUnlockHelperThreadState unlock(lock);
+ runFromMainThread();
+ return;
+ }
+
+ startWithLockHeld(lock);
+}
+
+void js::GCParallelTask::cancelAndWait() {
+ MOZ_ASSERT(!isCancelled());
+ cancel_ = true;
+ join();
+ cancel_ = false;
+}
+
+void js::GCParallelTask::join() {
+ AutoLockHelperThreadState lock;
+ joinWithLockHeld(lock);
+}
+
+void js::GCParallelTask::joinWithLockHeld(AutoLockHelperThreadState& lock) {
+ // Task has not been started; there's nothing to do.
+ if (isIdle(lock)) {
+ return;
+ }
+
+ // If the task was dispatched but has not yet started then cancel the task and
+ // run it from the main thread. This stops us from blocking here when the
+ // helper threads are busy with other tasks.
+ if (isDispatched(lock)) {
+ cancelDispatchedTask(lock);
+ AutoUnlockHelperThreadState unlock(lock);
+ runFromMainThread();
+ return;
+ }
+
+ joinRunningOrFinishedTask(lock);
+}
+
+void js::GCParallelTask::joinRunningOrFinishedTask(
+ AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(isRunning(lock) || isFinished(lock));
+
+ // Wait for the task to run to completion.
+ while (!isFinished(lock)) {
+ HelperThreadState().wait(lock, GlobalHelperThreadState::CONSUMER);
+ }
+
+ setIdle(lock);
+}
+
+void js::GCParallelTask::cancelDispatchedTask(AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(isDispatched(lock));
+ MOZ_ASSERT(isInList());
+ remove();
+ setIdle(lock);
+}
+
+static inline TimeDuration TimeSince(TimeStamp prev) {
+ TimeStamp now = ReallyNow();
+  // Sadly, 'now' can be earlier than 'prev' in practice, so clamp the result.
+ MOZ_ASSERT(now >= prev);
+ if (now < prev) {
+ now = prev;
+ }
+ return now - prev;
+}
+
+void js::GCParallelTask::runFromMainThread() {
+ assertIdle();
+ MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(gc->rt));
+ AutoLockHelperThreadState lock;
+ runTask(lock);
+}
+
+void js::GCParallelTask::runHelperThreadTask(AutoLockHelperThreadState& lock) {
+ TraceLoggerThread* logger = TraceLoggerForCurrentThread();
+ AutoTraceLog logCompile(logger, TraceLogger_GC);
+
+ setRunning(lock);
+
+ AutoSetHelperThreadContext usesContext(lock);
+ AutoSetContextRuntime ascr(gc->rt);
+ gc::AutoSetThreadIsPerformingGC performingGC;
+ runTask(lock);
+
+ setFinished(lock);
+}
+
+void GCParallelTask::runTask(AutoLockHelperThreadState& lock) {
+ // Run the task from either the main thread or a helper thread.
+
+ // The hazard analysis can't tell what the call to func_ will do but it's not
+ // allowed to GC.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ TimeStamp timeStart = ReallyNow();
+ run(lock);
+ duration_ = TimeSince(timeStart);
+}
+
+bool js::GCParallelTask::isIdle() const {
+ AutoLockHelperThreadState lock;
+ return isIdle(lock);
+}
+
+bool js::GCParallelTask::wasStarted() const {
+ AutoLockHelperThreadState lock;
+ return wasStarted(lock);
+}
+
+size_t js::gc::GCRuntime::parallelWorkerCount() const {
+ return std::min(helperThreadCount.ref(), MaxParallelWorkers);
+}
diff --git a/js/src/gc/GCParallelTask.h b/js/src/gc/GCParallelTask.h
new file mode 100644
index 0000000000..96da6ffcdd
--- /dev/null
+++ b/js/src/gc/GCParallelTask.h
@@ -0,0 +1,176 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCParallelTask_h
+#define gc_GCParallelTask_h
+
+#include "mozilla/LinkedList.h"
+#include "mozilla/TimeStamp.h"
+
+#include <utility>
+
+#include "js/TypeDecls.h"
+#include "js/Utility.h"
+#include "threading/ProtectedData.h"
+#include "vm/HelperThreadTask.h"
+
+#define JS_MEMBER_FN_PTR_TYPE(ClassT, ReturnT, /* ArgTs */...) \
+ ReturnT (ClassT::*)(__VA_ARGS__)
+
+#define JS_CALL_MEMBER_FN_PTR(Receiver, Ptr, /* Args */...) \
+ ((Receiver)->*(Ptr))(__VA_ARGS__)
+
+namespace js {
+
+namespace gc {
+class GCRuntime;
+}
+
+class AutoLockHelperThreadState;
+class HelperThread;
+
+// A generic task used to dispatch work to the helper thread system.
+// Users override the pure-virtual run() method.
+class GCParallelTask : public mozilla::LinkedListElement<GCParallelTask>,
+ public HelperThreadTask {
+ public:
+ gc::GCRuntime* const gc;
+
+ private:
+ // The state of the parallel computation.
+ enum class State {
+ // The task is idle. Either start() has not been called or join() has
+ // returned.
+ Idle,
+
+ // The task has been started but has not yet begun running on a helper
+ // thread.
+ Dispatched,
+
+ // The task is currently running on a helper thread.
+ Running,
+
+ // The task has finished running but has not yet been joined by the main
+ // thread.
+ Finished
+ };
+
+ UnprotectedData<State> state_;
+
+ // Amount of time this task took to execute.
+ MainThreadOrGCTaskData<mozilla::TimeDuration> duration_;
+
+ explicit GCParallelTask(const GCParallelTask&) = delete;
+
+ protected:
+ // A flag to signal a request for early completion of the off-thread task.
+ mozilla::Atomic<bool, mozilla::MemoryOrdering::ReleaseAcquire> cancel_;
+
+ public:
+ explicit GCParallelTask(gc::GCRuntime* gc)
+ : gc(gc), state_(State::Idle), duration_(nullptr), cancel_(false) {}
+ GCParallelTask(GCParallelTask&& other)
+ : gc(other.gc),
+ state_(other.state_),
+ duration_(nullptr),
+ cancel_(false) {}
+
+ // Derived classes must override this to ensure that join() gets called
+ // before members get destructed.
+ virtual ~GCParallelTask();
+
+ // Time spent in the most recent invocation of this task.
+ mozilla::TimeDuration duration() const { return duration_; }
+
+ // The simple interface to a parallel task works exactly like pthreads.
+ void start();
+ void join();
+
+ // If multiple tasks are to be started or joined at once, it is more
+ // efficient to take the helper thread lock once and use these methods.
+ void startWithLockHeld(AutoLockHelperThreadState& lock);
+ void joinWithLockHeld(AutoLockHelperThreadState& lock);
+ void joinRunningOrFinishedTask(AutoLockHelperThreadState& lock);
+
+ // Instead of dispatching to a helper, run the task on the current thread.
+ void runFromMainThread();
+
+ // If the task is not already running, either start it or run it on the main
+ // thread if that fails.
+ void startOrRunIfIdle(AutoLockHelperThreadState& lock);
+
+ // Cancel a dispatched task before it started executing.
+ void cancelDispatchedTask(AutoLockHelperThreadState& lock);
+
+ // Set the cancel flag and wait for the task to finish.
+ void cancelAndWait();
+
+ // Report whether the task is idle. This means either before start() has been
+ // called or after join() has been called.
+ bool isIdle() const;
+ bool isIdle(const AutoLockHelperThreadState& lock) const {
+ return state_ == State::Idle;
+ }
+
+ // Report whether the task has been started. This means after start() has been
+ // called but before the task has run to completion. The task may not yet have
+ // started running.
+ bool wasStarted() const;
+ bool wasStarted(const AutoLockHelperThreadState& lock) const {
+ return isDispatched(lock) || isRunning(lock);
+ }
+
+ bool isDispatched(const AutoLockHelperThreadState& lock) const {
+ return state_ == State::Dispatched;
+ }
+
+ protected:
+ // Override this method to provide the task's functionality.
+ virtual void run(AutoLockHelperThreadState& lock) = 0;
+
+ bool isCancelled() const { return cancel_; }
+
+ private:
+ void assertIdle() const {
+ // Don't lock here because that adds extra synchronization in debug
+ // builds that may hide bugs. There's no race if the assertion passes.
+ MOZ_ASSERT(state_ == State::Idle);
+ }
+ bool isRunning(const AutoLockHelperThreadState& lock) const {
+ return state_ == State::Running;
+ }
+ bool isFinished(const AutoLockHelperThreadState& lock) const {
+ return state_ == State::Finished;
+ }
+
+ void setDispatched(const AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(isIdle(lock));
+ state_ = State::Dispatched;
+ }
+ void setRunning(const AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(isDispatched(lock));
+ state_ = State::Running;
+ }
+ void setFinished(const AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(isRunning(lock));
+ state_ = State::Finished;
+ }
+ void setIdle(const AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(isDispatched(lock) || isFinished(lock));
+ state_ = State::Idle;
+ }
+
+ void runTask(AutoLockHelperThreadState& lock);
+
+ // Implement the HelperThreadTask interface.
+ ThreadType threadType() override {
+ return ThreadType::THREAD_TYPE_GCPARALLEL;
+ }
+ void runHelperThreadTask(AutoLockHelperThreadState& locked) override;
+};
+
+} /* namespace js */
+#endif /* gc_GCParallelTask_h */
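
A concrete task derives from GCParallelTask, overrides run(), and is driven with start()/join() (or the *WithLockHeld variants when batching). The hedged in-tree sketch below is based only on the interface above; ExampleSweepTask and ExampleUsage are hypothetical, and real tasks live as members of GCRuntime and are started and joined at specific points in a collection:

#include "gc/GCParallelTask.h"

namespace js {
namespace gc {

class ExampleSweepTask : public GCParallelTask {
 public:
  explicit ExampleSweepTask(GCRuntime* gc) : GCParallelTask(gc) {}

  void run(AutoLockHelperThreadState& lock) override {
    // Off-thread work goes here; long-running loops should check
    // isCancelled() periodically so cancelAndWait() can interrupt them.
  }
};

void ExampleUsage(GCRuntime* gc) {
  ExampleSweepTask task(gc);
  task.start();  // dispatched to a helper thread, or run on this thread if
                 // extra threads are unavailable
  // ... main-thread work proceeds in parallel ...
  task.join();   // must complete before |task| is destroyed (see the
                 // destructor comment above)
}

}  // namespace gc
}  // namespace js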
diff --git a/js/src/gc/GCProbes.h b/js/src/gc/GCProbes.h
new file mode 100644
index 0000000000..19acfe76e3
--- /dev/null
+++ b/js/src/gc/GCProbes.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCProbes_h
+#define gc_GCProbes_h
+
+/*
+ * This interface can be used to insert probes for GC related events.
+ *
+ * The code must be built with JS_GC_PROBES for these probes to be called
+ * from JIT code.
+ */
+
+#include "gc/Heap.h"
+
+namespace js {
+
+class ObjectGroup;
+
+namespace gc {
+namespace gcprobes {
+
+inline void Init(gc::GCRuntime* gc) {}
+inline void Finish(gc::GCRuntime* gc) {}
+inline void NurseryAlloc(gc::Cell* thing, size_t size) {}
+inline void NurseryAlloc(gc::Cell* thing, JS::TraceKind kind) {}
+inline void TenuredAlloc(gc::Cell* thing, gc::AllocKind kind) {}
+inline void CreateObject(JSObject* object) {}
+inline void MinorGCStart() {}
+inline void PromoteToTenured(gc::Cell* src, gc::Cell* dst) {}
+inline void MinorGCEnd() {}
+inline void MajorGCStart() {}
+inline void TenuredFinalize(gc::Cell* thing) {
+} // May be called off main thread.
+inline void MajorGCEnd() {}
+
+} // namespace gcprobes
+} // namespace gc
+} // namespace js
+
+#endif // gc_GCProbes_h
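
The probes above are deliberately empty so they compile away in normal builds. Purely as a hypothetical illustration (not part of the patch, and not how upstream instruments them), one of them could be filled in locally like this, assuming <cstdio> is available and the definition replaces the no-op inside the gcprobes namespace:

inline void NurseryAlloc(gc::Cell* thing, size_t size) {
  // Local debugging variant of the no-op probe above.
  std::fprintf(stderr, "nursery alloc %p (%zu bytes)\n",
               static_cast<void*>(thing), size);
}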
diff --git a/js/src/gc/GCRuntime.h b/js/src/gc/GCRuntime.h
new file mode 100644
index 0000000000..a19c9e25b2
--- /dev/null
+++ b/js/src/gc/GCRuntime.h
@@ -0,0 +1,1287 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCRuntime_h
+#define gc_GCRuntime_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/EnumSet.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/TimeStamp.h"
+
+#include "jsfriendapi.h" // For PerformanceHint
+
+#include "gc/ArenaList.h"
+#include "gc/AtomMarking.h"
+#include "gc/GCMarker.h"
+#include "gc/IteratorUtils.h"
+#include "gc/Nursery.h"
+#include "gc/Scheduling.h"
+#include "gc/Statistics.h"
+#include "gc/StoreBuffer.h"
+#include "js/GCAnnotations.h"
+#include "js/UniquePtr.h"
+#include "vm/AtomsTable.h"
+
+namespace js {
+
+class AutoAccessAtomsZone;
+class AutoLockGC;
+class AutoLockGCBgAlloc;
+class AutoLockHelperThreadState;
+class FinalizationRegistryObject;
+class FinalizationQueueObject;
+class VerifyPreTracer;
+class WeakRefObject;
+class ZoneAllocator;
+
+namespace gc {
+
+using BlackGrayEdgeVector = Vector<TenuredCell*, 0, SystemAllocPolicy>;
+using ZoneVector = Vector<JS::Zone*, 4, SystemAllocPolicy>;
+
+class AutoCallGCCallbacks;
+class AutoGCSession;
+class AutoHeapSession;
+class AutoTraceSession;
+class MarkingValidator;
+struct MovingTracer;
+enum class ShouldCheckThresholds;
+class SweepGroupsIter;
+
+// Interface to a sweep action.
+struct SweepAction {
+ // The arguments passed to each action.
+ struct Args {
+ GCRuntime* gc;
+ JSFreeOp* fop;
+ SliceBudget& budget;
+ };
+
+ virtual ~SweepAction() = default;
+ virtual IncrementalProgress run(Args& state) = 0;
+ virtual void assertFinished() const = 0;
+ virtual bool shouldSkip() { return false; }
+};
+
+class ChunkPool {
+ TenuredChunk* head_;
+ size_t count_;
+
+ public:
+ ChunkPool() : head_(nullptr), count_(0) {}
+ ChunkPool(const ChunkPool& other) = delete;
+ ChunkPool(ChunkPool&& other) { *this = std::move(other); }
+
+ ~ChunkPool() {
+ MOZ_ASSERT(!head_);
+ MOZ_ASSERT(count_ == 0);
+ }
+
+ ChunkPool& operator=(const ChunkPool& other) = delete;
+ ChunkPool& operator=(ChunkPool&& other) {
+ head_ = other.head_;
+ other.head_ = nullptr;
+ count_ = other.count_;
+ other.count_ = 0;
+ return *this;
+ }
+
+ bool empty() const { return !head_; }
+ size_t count() const { return count_; }
+
+ TenuredChunk* head() {
+ MOZ_ASSERT(head_);
+ return head_;
+ }
+ TenuredChunk* pop();
+ void push(TenuredChunk* chunk);
+ TenuredChunk* remove(TenuredChunk* chunk);
+
+ void sort();
+
+ private:
+ TenuredChunk* mergeSort(TenuredChunk* list, size_t count);
+ bool isSorted() const;
+
+#ifdef DEBUG
+ public:
+ bool contains(TenuredChunk* chunk) const;
+ bool verify() const;
+#endif
+
+ public:
+ // Pool mutation does not invalidate an Iter unless the mutation
+ // is of the TenuredChunk currently being visited by the Iter.
+ class Iter {
+ public:
+ explicit Iter(ChunkPool& pool) : current_(pool.head_) {}
+ bool done() const { return !current_; }
+ void next();
+ TenuredChunk* get() const { return current_; }
+ operator TenuredChunk*() const { return get(); }
+ TenuredChunk* operator->() const { return get(); }
+
+ private:
+ TenuredChunk* current_;
+ };
+};
+
+class BackgroundMarkTask : public GCParallelTask {
+ public:
+ explicit BackgroundMarkTask(GCRuntime* gc)
+ : GCParallelTask(gc), budget(SliceBudget::unlimited()) {}
+ void setBudget(const SliceBudget& budget) { this->budget = budget; }
+ void run(AutoLockHelperThreadState& lock) override;
+
+ private:
+ SliceBudget budget;
+};
+
+class BackgroundUnmarkTask : public GCParallelTask {
+ public:
+ explicit BackgroundUnmarkTask(GCRuntime* gc) : GCParallelTask(gc) {}
+ void initZones();
+ void run(AutoLockHelperThreadState& lock) override;
+
+ private:
+ void unmarkZones(AutoLockGC& lock);
+
+ ZoneVector zones;
+};
+
+class BackgroundSweepTask : public GCParallelTask {
+ public:
+ explicit BackgroundSweepTask(GCRuntime* gc) : GCParallelTask(gc) {}
+ void run(AutoLockHelperThreadState& lock) override;
+};
+
+class BackgroundFreeTask : public GCParallelTask {
+ public:
+ explicit BackgroundFreeTask(GCRuntime* gc) : GCParallelTask(gc) {}
+ void run(AutoLockHelperThreadState& lock) override;
+};
+
+// Performs extra allocation off thread so that when memory is required on the
+// main thread it will already be available and waiting.
+class BackgroundAllocTask : public GCParallelTask {
+ // Guarded by the GC lock.
+ GCLockData<ChunkPool&> chunkPool_;
+
+ const bool enabled_;
+
+ public:
+ BackgroundAllocTask(GCRuntime* gc, ChunkPool& pool);
+ bool enabled() const { return enabled_; }
+
+ void run(AutoLockHelperThreadState& lock) override;
+};
+
+// Search the provided chunks for free arenas and decommit them.
+class BackgroundDecommitTask : public GCParallelTask {
+ public:
+ explicit BackgroundDecommitTask(GCRuntime* gc) : GCParallelTask(gc) {}
+
+ void run(AutoLockHelperThreadState& lock) override;
+};
+
+template <typename F>
+struct Callback {
+ F op;
+ void* data;
+
+ Callback() : op(nullptr), data(nullptr) {}
+ Callback(F op, void* data) : op(op), data(data) {}
+};
+
+template <typename F>
+using CallbackVector = Vector<Callback<F>, 4, SystemAllocPolicy>;
+
+typedef HashMap<Value*, const char*, DefaultHasher<Value*>, SystemAllocPolicy>
+ RootedValueMap;
+
+using AllocKinds = mozilla::EnumSet<AllocKind, uint64_t>;
+
+// A singly linked list of zones.
+class ZoneList {
+ static Zone* const End;
+
+ Zone* head;
+ Zone* tail;
+
+ public:
+ ZoneList();
+ ~ZoneList();
+
+ bool isEmpty() const;
+ Zone* front() const;
+
+ void append(Zone* zone);
+ void transferFrom(ZoneList& other);
+ Zone* removeFront();
+ void clear();
+
+ private:
+ explicit ZoneList(Zone* singleZone);
+ void check() const;
+
+ ZoneList(const ZoneList& other) = delete;
+ ZoneList& operator=(const ZoneList& other) = delete;
+};
+
+struct WeakCacheToSweep {
+ JS::detail::WeakCacheBase* cache;
+ JS::Zone* zone;
+};
+
+class WeakCacheSweepIterator {
+ using WeakCacheBase = JS::detail::WeakCacheBase;
+
+ JS::Zone* sweepZone;
+ WeakCacheBase* sweepCache;
+
+ public:
+ explicit WeakCacheSweepIterator(JS::Zone* sweepGroup);
+
+ bool done() const;
+ WeakCacheToSweep get() const;
+ void next();
+
+ private:
+ void settle();
+};
+
+class GCRuntime {
+ friend GCMarker::MarkQueueProgress GCMarker::processMarkQueue();
+
+ public:
+ explicit GCRuntime(JSRuntime* rt);
+ MOZ_MUST_USE bool init(uint32_t maxbytes);
+ void finishRoots();
+ void finish();
+
+ JS::HeapState heapState() const { return heapState_; }
+
+ void freezeSelfHostingZone();
+ bool isSelfHostingZoneFrozen() const { return selfHostingZoneFrozen; }
+
+ inline bool hasZealMode(ZealMode mode);
+ inline void clearZealMode(ZealMode mode);
+ inline bool upcomingZealousGC();
+ inline bool needZealousGC();
+ inline bool hasIncrementalTwoSliceZealMode();
+
+ MOZ_MUST_USE bool addRoot(Value* vp, const char* name);
+ void removeRoot(Value* vp);
+ void setMarkStackLimit(size_t limit, AutoLockGC& lock);
+
+ MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value);
+ MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value,
+ AutoLockGC& lock);
+ void resetParameter(JSGCParamKey key);
+ void resetParameter(JSGCParamKey key, AutoLockGC& lock);
+ uint32_t getParameter(JSGCParamKey key);
+ uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);
+
+ void setPerformanceHint(PerformanceHint hint);
+
+ MOZ_MUST_USE bool triggerGC(JS::GCReason reason);
+ // Check whether to trigger a zone GC after allocating GC cells.
+ void maybeTriggerGCAfterAlloc(Zone* zone);
+ // Check whether to trigger a zone GC after malloc memory.
+ void maybeTriggerGCAfterMalloc(Zone* zone);
+ bool maybeTriggerGCAfterMalloc(Zone* zone, const HeapSize& heap,
+ const HeapThreshold& threshold,
+ JS::GCReason reason);
+ // The return value indicates if we were able to do the GC.
+ bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes,
+ size_t thresholdBytes);
+ void maybeGC();
+ bool checkEagerAllocTrigger(const HeapSize& size,
+ const HeapThreshold& threshold);
+ // The return value indicates whether a major GC was performed.
+ bool gcIfRequested();
+ void gc(JSGCInvocationKind gckind, JS::GCReason reason);
+ void startGC(JSGCInvocationKind gckind, JS::GCReason reason,
+ int64_t millis = 0);
+ void gcSlice(JS::GCReason reason, int64_t millis = 0);
+ void finishGC(JS::GCReason reason);
+ void abortGC();
+ void startDebugGC(JSGCInvocationKind gckind, SliceBudget& budget);
+ void debugGCSlice(SliceBudget& budget);
+
+ void triggerFullGCForAtoms(JSContext* cx);
+
+ void runDebugGC();
+ void notifyRootsRemoved();
+
+ enum TraceOrMarkRuntime { TraceRuntime, MarkRuntime };
+ void traceRuntime(JSTracer* trc, AutoTraceSession& session);
+ void traceRuntimeForMinorGC(JSTracer* trc, AutoGCSession& session);
+
+ void purgeRuntimeForMinorGC();
+
+ void shrinkBuffers();
+ void onOutOfMallocMemory();
+ void onOutOfMallocMemory(const AutoLockGC& lock);
+
+ Nursery& nursery() { return nursery_.ref(); }
+ gc::StoreBuffer& storeBuffer() { return storeBuffer_.ref(); }
+
+ void minorGC(JS::GCReason reason,
+ gcstats::PhaseKind phase = gcstats::PhaseKind::MINOR_GC)
+ JS_HAZ_GC_CALL;
+ void evictNursery(JS::GCReason reason = JS::GCReason::EVICT_NURSERY) {
+ minorGC(reason, gcstats::PhaseKind::EVICT_NURSERY);
+ }
+
+ void* addressOfNurseryPosition() {
+ return nursery_.refNoCheck().addressOfPosition();
+ }
+ const void* addressOfNurseryCurrentEnd() {
+ return nursery_.refNoCheck().addressOfCurrentEnd();
+ }
+ const void* addressOfStringNurseryCurrentEnd() {
+ return nursery_.refNoCheck().addressOfCurrentStringEnd();
+ }
+ const void* addressOfBigIntNurseryCurrentEnd() {
+ return nursery_.refNoCheck().addressOfCurrentBigIntEnd();
+ }
+ uint32_t* addressOfNurseryAllocCount() {
+ return stats().addressOfAllocsSinceMinorGCNursery();
+ }
+
+#ifdef JS_GC_ZEAL
+ const uint32_t* addressOfZealModeBits() { return &zealModeBits.refNoCheck(); }
+ void getZealBits(uint32_t* zealBits, uint32_t* frequency,
+ uint32_t* nextScheduled);
+ void setZeal(uint8_t zeal, uint32_t frequency);
+ void unsetZeal(uint8_t zeal);
+ bool parseAndSetZeal(const char* str);
+ void setNextScheduled(uint32_t count);
+ void verifyPreBarriers();
+ void maybeVerifyPreBarriers(bool always);
+ bool selectForMarking(JSObject* object);
+ void clearSelectedForMarking();
+ void setDeterministic(bool enable);
+#endif
+
+ uint64_t nextCellUniqueId() {
+ MOZ_ASSERT(nextCellUniqueId_ > 0);
+ uint64_t uid = ++nextCellUniqueId_;
+ return uid;
+ }
+
+ void setLowMemoryState(bool newState) { lowMemoryState = newState; }
+ bool systemHasLowMemory() const { return lowMemoryState; }
+
+ public:
+ // Internal public interface
+ State state() const { return incrementalState; }
+ bool isHeapCompacting() const { return state() == State::Compact; }
+ bool isForegroundSweeping() const { return state() == State::Sweep; }
+ bool isBackgroundSweeping() const { return sweepTask.wasStarted(); }
+ void waitBackgroundSweepEnd();
+ void waitBackgroundAllocEnd() { allocTask.cancelAndWait(); }
+ void waitBackgroundFreeEnd();
+ void waitForBackgroundTasks();
+
+ void lockGC() { lock.lock(); }
+
+ void unlockGC() { lock.unlock(); }
+
+#ifdef DEBUG
+ void assertCurrentThreadHasLockedGC() const {
+ lock.assertOwnedByCurrentThread();
+ }
+#endif // DEBUG
+
+ void setAlwaysPreserveCode() { alwaysPreserveCode = true; }
+
+ bool isIncrementalGCAllowed() const { return incrementalAllowed; }
+ void disallowIncrementalGC() { incrementalAllowed = false; }
+
+ void setIncrementalGCEnabled(bool enabled);
+ bool isIncrementalGCEnabled() const { return incrementalGCEnabled; }
+ bool isIncrementalGCInProgress() const {
+ return state() != State::NotActive && !isVerifyPreBarriersEnabled();
+ }
+
+ bool isPerZoneGCEnabled() const { return perZoneGCEnabled; }
+
+ bool hasForegroundWork() const;
+
+ bool isCompactingGCEnabled() const;
+
+ bool isShrinkingGC() const { return invocationKind == GC_SHRINK; }
+
+ bool initSweepActions();
+
+ void setGrayRootsTracer(JSTraceDataOp traceOp, void* data);
+ MOZ_MUST_USE bool addBlackRootsTracer(JSTraceDataOp traceOp, void* data);
+ void removeBlackRootsTracer(JSTraceDataOp traceOp, void* data);
+ void clearBlackAndGrayRootTracers();
+
+ void updateMemoryCountersOnGCStart();
+
+ void setGCCallback(JSGCCallback callback, void* data);
+ void callGCCallback(JSGCStatus status, JS::GCReason reason) const;
+ void setObjectsTenuredCallback(JSObjectsTenuredCallback callback, void* data);
+ void callObjectsTenuredCallback();
+ MOZ_MUST_USE bool addFinalizeCallback(JSFinalizeCallback callback,
+ void* data);
+ void removeFinalizeCallback(JSFinalizeCallback func);
+ void setHostCleanupFinalizationRegistryCallback(
+ JSHostCleanupFinalizationRegistryCallback callback, void* data);
+ void callHostCleanupFinalizationRegistryCallback(
+ JSFunction* doCleanup, GlobalObject* incumbentGlobal);
+ MOZ_MUST_USE bool addWeakPointerZonesCallback(
+ JSWeakPointerZonesCallback callback, void* data);
+ void removeWeakPointerZonesCallback(JSWeakPointerZonesCallback callback);
+ MOZ_MUST_USE bool addWeakPointerCompartmentCallback(
+ JSWeakPointerCompartmentCallback callback, void* data);
+ void removeWeakPointerCompartmentCallback(
+ JSWeakPointerCompartmentCallback callback);
+ JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback);
+ JS::GCNurseryCollectionCallback setNurseryCollectionCallback(
+ JS::GCNurseryCollectionCallback callback);
+ JS::DoCycleCollectionCallback setDoCycleCollectionCallback(
+ JS::DoCycleCollectionCallback callback);
+
+ bool addFinalizationRegistry(JSContext* cx,
+ FinalizationRegistryObject* registry);
+ bool registerWithFinalizationRegistry(JSContext* cx, HandleObject target,
+ HandleObject record);
+
+ void setFullCompartmentChecks(bool enable);
+
+ JS::Zone* getCurrentSweepGroup() { return currentSweepGroup; }
+ unsigned getCurrentSweepGroupIndex() {
+ return state() == State::Sweep ? sweepGroupIndex : 0;
+ }
+
+ uint64_t gcNumber() const { return number; }
+ void incGcNumber() { ++number; }
+
+ uint64_t minorGCCount() const { return minorGCNumber; }
+ void incMinorGcNumber() { ++minorGCNumber; }
+
+ uint64_t majorGCCount() const { return majorGCNumber; }
+ void incMajorGcNumber() { ++majorGCNumber; }
+
+ uint64_t gcSliceCount() const { return sliceNumber; }
+ void incGcSliceNumber() { ++sliceNumber; }
+
+ int64_t defaultSliceBudgetMS() const { return defaultTimeBudgetMS_; }
+
+ bool isIncrementalGc() const { return isIncremental; }
+ bool isFullGc() const { return isFull; }
+ bool isCompactingGc() const { return isCompacting; }
+ bool didCompactZones() const { return isCompacting && zonesCompacted; }
+
+ bool areGrayBitsValid() const { return grayBitsValid; }
+ void setGrayBitsInvalid() { grayBitsValid = false; }
+
+ mozilla::TimeStamp lastGCStartTime() const { return lastGCStartTime_; }
+ mozilla::TimeStamp lastGCEndTime() const { return lastGCEndTime_; }
+
+ bool majorGCRequested() const {
+ return majorGCTriggerReason != JS::GCReason::NO_REASON;
+ }
+
+ bool fullGCForAtomsRequested() const { return fullGCForAtomsRequested_; }
+
+ double computeHeapGrowthFactor(size_t lastBytes);
+ size_t computeTriggerBytes(double growthFactor, size_t lastBytes);
+
+ inline void updateOnFreeArenaAlloc(const TenuredChunkInfo& info);
+ inline void updateOnArenaFree();
+
+ ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_.ref(); }
+ ChunkPool& availableChunks(const AutoLockGC& lock) {
+ return availableChunks_.ref();
+ }
+ ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_.ref(); }
+ const ChunkPool& fullChunks(const AutoLockGC& lock) const {
+ return fullChunks_.ref();
+ }
+ const ChunkPool& availableChunks(const AutoLockGC& lock) const {
+ return availableChunks_.ref();
+ }
+ const ChunkPool& emptyChunks(const AutoLockGC& lock) const {
+ return emptyChunks_.ref();
+ }
+ using NonEmptyChunksIter = ChainedIterator<ChunkPool::Iter, 2>;
+ NonEmptyChunksIter allNonEmptyChunks(const AutoLockGC& lock) {
+ return NonEmptyChunksIter(availableChunks(lock), fullChunks(lock));
+ }
+
+ TenuredChunk* getOrAllocChunk(AutoLockGCBgAlloc& lock);
+ void recycleChunk(TenuredChunk* chunk, const AutoLockGC& lock);
+
+#ifdef JS_GC_ZEAL
+ void startVerifyPreBarriers();
+ void endVerifyPreBarriers();
+ void finishVerifier();
+ bool isVerifyPreBarriersEnabled() const { return verifyPreData.refNoCheck(); }
+ bool shouldYieldForZeal(ZealMode mode);
+#else
+ bool isVerifyPreBarriersEnabled() const { return false; }
+#endif
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkHashTablesAfterMovingGC();
+#endif
+
+#ifdef DEBUG
+  // Crawl the heap to check whether an arbitrary pointer is within a cell of
+ // the given kind.
+ bool isPointerWithinTenuredCell(void* ptr, JS::TraceKind traceKind);
+
+ bool hasZone(Zone* target);
+#endif
+
+  // Queue memory to be freed on a background thread if possible.
+ void queueUnusedLifoBlocksForFree(LifoAlloc* lifo);
+ void queueAllLifoBlocksForFree(LifoAlloc* lifo);
+ void queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc* lifo);
+ void queueBuffersForFreeAfterMinorGC(Nursery::BufferSet& buffers);
+
+ // Public here for ReleaseArenaLists and FinalizeTypedArenas.
+ void releaseArena(Arena* arena, const AutoLockGC& lock);
+
+ void releaseHeldRelocatedArenas();
+ void releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock);
+
+ // Allocator
+ template <AllowGC allowGC>
+ MOZ_MUST_USE bool checkAllocatorState(JSContext* cx, AllocKind kind);
+ template <AllowGC allowGC>
+ JSObject* tryNewNurseryObject(JSContext* cx, size_t thingSize,
+ size_t nDynamicSlots, const JSClass* clasp);
+ template <AllowGC allowGC>
+ static JSObject* tryNewTenuredObject(JSContext* cx, AllocKind kind,
+ size_t thingSize, size_t nDynamicSlots);
+ template <typename T, AllowGC allowGC>
+ static T* tryNewTenuredThing(JSContext* cx, AllocKind kind, size_t thingSize);
+ template <AllowGC allowGC>
+ JSString* tryNewNurseryString(JSContext* cx, size_t thingSize,
+ AllocKind kind);
+ template <AllowGC allowGC>
+ JS::BigInt* tryNewNurseryBigInt(JSContext* cx, size_t thingSize,
+ AllocKind kind);
+ static TenuredCell* refillFreeListInGC(Zone* zone, AllocKind thingKind);
+
+ void setParallelAtomsAllocEnabled(bool enabled);
+ void setParallelUnmarkEnabled(bool enabled);
+
+ /*
+ * Concurrent sweep infrastructure.
+ */
+ void startTask(GCParallelTask& task, gcstats::PhaseKind phase,
+ AutoLockHelperThreadState& locked);
+ void joinTask(GCParallelTask& task, gcstats::PhaseKind phase,
+ AutoLockHelperThreadState& locked);
+ void joinTask(GCParallelTask& task, gcstats::PhaseKind phase);
+ void updateHelperThreadCount();
+ size_t parallelWorkerCount() const;
+
+ void mergeRealms(JS::Realm* source, JS::Realm* target);
+
+ // WeakRefs
+ bool registerWeakRef(HandleObject target, HandleObject weakRef);
+ bool unregisterWeakRefWrapper(JSObject* wrapper);
+ void traceKeptObjects(JSTracer* trc);
+
+ private:
+ enum IncrementalResult { ResetIncremental = 0, Ok };
+
+ TriggerResult checkHeapThreshold(Zone* zone, const HeapSize& heapSize,
+ const HeapThreshold& heapThreshold);
+
+ void updateGCThresholdsAfterCollection(const AutoLockGC& lock);
+ void updateAllGCStartThresholds(const AutoLockGC& lock);
+
+ // Delete an empty zone after its contents have been merged.
+ void deleteEmptyZone(Zone* zone);
+
+ // For ArenaLists::allocateFromArena()
+ friend class ArenaLists;
+ TenuredChunk* pickChunk(AutoLockGCBgAlloc& lock);
+ Arena* allocateArena(TenuredChunk* chunk, Zone* zone, AllocKind kind,
+ ShouldCheckThresholds checkThresholds,
+ const AutoLockGC& lock);
+
+ // Allocator internals
+ MOZ_MUST_USE bool gcIfNeededAtAllocation(JSContext* cx);
+ template <typename T>
+ static void checkIncrementalZoneState(JSContext* cx, T* t);
+ static TenuredCell* refillFreeListFromAnyThread(JSContext* cx,
+ AllocKind thingKind);
+ static TenuredCell* refillFreeListFromMainThread(JSContext* cx,
+ AllocKind thingKind);
+ static TenuredCell* refillFreeListFromHelperThread(JSContext* cx,
+ AllocKind thingKind);
+ void attemptLastDitchGC(JSContext* cx);
+
+ /*
+ * Return the list of chunks that can be released outside the GC lock.
+ * Must be called either during the GC or with the GC lock taken.
+ */
+ friend class BackgroundDecommitTask;
+ bool tooManyEmptyChunks(const AutoLockGC& lock);
+ ChunkPool expireEmptyChunkPool(const AutoLockGC& lock);
+ void freeEmptyChunks(const AutoLockGC& lock);
+ void prepareToFreeChunk(TenuredChunkInfo& info);
+
+ friend class BackgroundAllocTask;
+ bool wantBackgroundAllocation(const AutoLockGC& lock) const;
+ void startBackgroundAllocTaskIfIdle();
+
+ void requestMajorGC(JS::GCReason reason);
+ SliceBudget defaultBudget(JS::GCReason reason, int64_t millis);
+ void maybeIncreaseSliceBudget(SliceBudget& budget);
+ IncrementalResult budgetIncrementalGC(bool nonincrementalByAPI,
+ JS::GCReason reason,
+ SliceBudget& budget);
+ void checkZoneIsScheduled(Zone* zone, JS::GCReason reason,
+ const char* trigger);
+ IncrementalResult resetIncrementalGC(GCAbortReason reason);
+
+ // Assert if the system state is such that we should never
+ // receive a request to do GC work.
+ void checkCanCallAPI();
+
+  // Check if the system state is such that GC has been suppressed
+ // or otherwise delayed.
+ MOZ_MUST_USE bool checkIfGCAllowedInCurrentState(JS::GCReason reason);
+
+ gcstats::ZoneGCStats scanZonesBeforeGC();
+
+ using MaybeInvocationKind = mozilla::Maybe<JSGCInvocationKind>;
+
+ void collect(bool nonincrementalByAPI, SliceBudget budget,
+ const MaybeInvocationKind& gckind,
+ JS::GCReason reason) JS_HAZ_GC_CALL;
+
+ /*
+ * Run one GC "cycle" (either a slice of incremental GC or an entire
+ * non-incremental GC).
+ *
+ * Returns:
+ * * ResetIncremental if we "reset" an existing incremental GC, which would
+ * force us to run another cycle or
+ * * Ok otherwise.
+ */
+ MOZ_MUST_USE IncrementalResult gcCycle(bool nonincrementalByAPI,
+ SliceBudget budget,
+ const MaybeInvocationKind& gckind,
+ JS::GCReason reason);
+ bool shouldRepeatForDeadZone(JS::GCReason reason);
+
+ void incrementalSlice(SliceBudget& budget, const MaybeInvocationKind& gckind,
+ JS::GCReason reason);
+
+ void waitForBackgroundTasksBeforeSlice();
+ bool mightSweepInThisSlice(bool nonIncremental);
+ void collectNurseryFromMajorGC(const MaybeInvocationKind& gckind,
+ JS::GCReason reason);
+ void collectNursery(JSGCInvocationKind kind, JS::GCReason reason,
+ gcstats::PhaseKind phase);
+
+ friend class AutoCallGCCallbacks;
+ void maybeCallGCCallback(JSGCStatus status, JS::GCReason reason);
+
+ void purgeRuntime();
+ MOZ_MUST_USE bool beginPreparePhase(JS::GCReason reason,
+ AutoGCSession& session);
+ bool prepareZonesForCollection(JS::GCReason reason, bool* isFullOut);
+ void bufferGrayRoots();
+ void unmarkWeakMaps();
+ void endPreparePhase(JS::GCReason reason);
+ void beginMarkPhase(AutoGCSession& session);
+ bool shouldPreserveJITCode(JS::Realm* realm,
+ const mozilla::TimeStamp& currentTime,
+ JS::GCReason reason, bool canAllocateMoreCode);
+ void discardJITCodeForGC();
+ void startBackgroundFreeAfterMinorGC();
+ void relazifyFunctionsForShrinkingGC();
+ void purgeShapeCachesForShrinkingGC();
+ void purgeSourceURLsForShrinkingGC();
+ void traceRuntimeForMajorGC(JSTracer* trc, AutoGCSession& session);
+ void traceRuntimeAtoms(JSTracer* trc, const AutoAccessAtomsZone& atomsAccess);
+ void traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark);
+ void traceEmbeddingBlackRoots(JSTracer* trc);
+ void traceEmbeddingGrayRoots(JSTracer* trc);
+ void markFinalizationRegistryRoots(JSTracer* trc);
+ void checkNoRuntimeRoots(AutoGCSession& session);
+ void maybeDoCycleCollection();
+ void findDeadCompartments();
+
+ friend class BackgroundMarkTask;
+ IncrementalProgress markUntilBudgetExhausted(
+ SliceBudget& sliceBudget,
+ GCMarker::ShouldReportMarkTime reportTime = GCMarker::ReportMarkTime);
+ void drainMarkStack();
+ template <class ZoneIterT>
+ IncrementalProgress markWeakReferences(SliceBudget& budget);
+ IncrementalProgress markWeakReferencesInCurrentGroup(SliceBudget& budget);
+ template <class ZoneIterT>
+ void markGrayRoots(gcstats::PhaseKind phase);
+ void markBufferedGrayRoots(JS::Zone* zone);
+ IncrementalProgress markAllWeakReferences();
+ void markAllGrayReferences(gcstats::PhaseKind phase);
+
+ void beginSweepPhase(JS::GCReason reason, AutoGCSession& session);
+ void dropStringWrappers();
+ void groupZonesForSweeping(JS::GCReason reason);
+ MOZ_MUST_USE bool findSweepGroupEdges();
+ void getNextSweepGroup();
+ IncrementalProgress markGrayReferencesInCurrentGroup(JSFreeOp* fop,
+ SliceBudget& budget);
+ IncrementalProgress endMarkingSweepGroup(JSFreeOp* fop, SliceBudget& budget);
+ void markIncomingCrossCompartmentPointers(MarkColor color);
+ IncrementalProgress beginSweepingSweepGroup(JSFreeOp* fop,
+ SliceBudget& budget);
+ IncrementalProgress markDuringSweeping(JSFreeOp* fop, SliceBudget& budget);
+ void updateAtomsBitmap();
+ void sweepCCWrappers();
+ void sweepMisc();
+ void sweepCompressionTasks();
+ void sweepWeakMaps();
+ void sweepUniqueIds();
+ void sweepDebuggerOnMainThread(JSFreeOp* fop);
+ void sweepJitDataOnMainThread(JSFreeOp* fop);
+ void sweepFinalizationRegistriesOnMainThread();
+ void sweepFinalizationRegistries(Zone* zone);
+ void queueFinalizationRegistryForCleanup(FinalizationQueueObject* queue);
+ void sweepWeakRefs();
+ IncrementalProgress endSweepingSweepGroup(JSFreeOp* fop, SliceBudget& budget);
+ IncrementalProgress performSweepActions(SliceBudget& sliceBudget);
+ void startSweepingAtomsTable();
+ IncrementalProgress sweepAtomsTable(JSFreeOp* fop, SliceBudget& budget);
+ IncrementalProgress sweepWeakCaches(JSFreeOp* fop, SliceBudget& budget);
+ IncrementalProgress finalizeAllocKind(JSFreeOp* fop, SliceBudget& budget);
+ IncrementalProgress sweepShapeTree(JSFreeOp* fop, SliceBudget& budget);
+ void endSweepPhase(bool lastGC);
+ bool allCCVisibleZonesWereCollected();
+ void sweepZones(JSFreeOp* fop, bool destroyingRuntime);
+ void startDecommit();
+  void decommitFreeArenas(const bool& cancel, AutoLockGC& lock);
+ void decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock);
+ void queueZonesAndStartBackgroundSweep(ZoneList& zones);
+ void sweepFromBackgroundThread(AutoLockHelperThreadState& lock);
+ void startBackgroundFree();
+ void freeFromBackgroundThread(AutoLockHelperThreadState& lock);
+ void sweepBackgroundThings(ZoneList& zones);
+ void assertBackgroundSweepingFinished();
+ bool shouldCompact();
+ void beginCompactPhase();
+ IncrementalProgress compactPhase(JS::GCReason reason,
+ SliceBudget& sliceBudget,
+ AutoGCSession& session);
+ void endCompactPhase();
+ void sweepZoneAfterCompacting(MovingTracer* trc, Zone* zone);
+ bool canRelocateZone(Zone* zone) const;
+ MOZ_MUST_USE bool relocateArenas(Zone* zone, JS::GCReason reason,
+ Arena*& relocatedListOut,
+ SliceBudget& sliceBudget);
+ void updateTypeDescrObjects(MovingTracer* trc, Zone* zone);
+ void updateCellPointers(Zone* zone, AllocKinds kinds);
+ void updateAllCellPointers(MovingTracer* trc, Zone* zone);
+ void updateZonePointersToRelocatedCells(Zone* zone);
+ void updateRuntimePointersToRelocatedCells(AutoGCSession& session);
+ void protectAndHoldArenas(Arena* arenaList);
+ void unprotectHeldRelocatedArenas();
+ void clearRelocatedArenas(Arena* arenaList, JS::GCReason reason);
+ void clearRelocatedArenasWithoutUnlocking(Arena* arenaList,
+ JS::GCReason reason,
+ const AutoLockGC& lock);
+ void releaseRelocatedArenas(Arena* arenaList);
+ void releaseRelocatedArenasWithoutUnlocking(Arena* arenaList,
+ const AutoLockGC& lock);
+
+ /*
+ * Whether to immediately trigger a slice after a background task
+ * finishes. This may not happen at a convenient time, so the consideration is
+ * whether the slice will run quickly or may take a long time.
+ */
+ enum ShouldTriggerSliceWhenFinished : bool {
+ DontTriggerSliceWhenFinished = false,
+ TriggerSliceWhenFinished = true
+ };
+
+ IncrementalProgress waitForBackgroundTask(
+ GCParallelTask& task, const SliceBudget& budget,
+ ShouldTriggerSliceWhenFinished triggerSlice);
+
+ void maybeRequestGCAfterBackgroundTask(const AutoLockHelperThreadState& lock);
+ void cancelRequestedGCAfterBackgroundTask();
+ void finishCollection();
+ void maybeStopStringPretenuring();
+ void checkGCStateNotInUse();
+ IncrementalProgress joinBackgroundMarkTask();
+
+#ifdef JS_GC_ZEAL
+ void computeNonIncrementalMarkingForValidation(AutoGCSession& session);
+ void validateIncrementalMarking();
+ void finishMarkingValidation();
+#endif
+
+#ifdef DEBUG
+ void checkForCompartmentMismatches();
+#endif
+
+ void callFinalizeCallbacks(JSFreeOp* fop, JSFinalizeStatus status) const;
+ void callWeakPointerZonesCallbacks() const;
+ void callWeakPointerCompartmentCallbacks(JS::Compartment* comp) const;
+ void callDoCycleCollectionCallback(JSContext* cx);
+
+ public:
+ JSRuntime* const rt;
+
+ /* Embedders can use this zone and group however they wish. */
+ UnprotectedData<JS::Zone*> systemZone;
+
+ // All zones in the runtime, except the atoms zone.
+ private:
+ MainThreadOrGCTaskData<ZoneVector> zones_;
+
+ public:
+ ZoneVector& zones() { return zones_.ref(); }
+
+ // The unique atoms zone.
+ WriteOnceData<Zone*> atomsZone;
+
+ private:
+ // Any activity affecting the heap.
+ mozilla::Atomic<JS::HeapState, mozilla::SequentiallyConsistent> heapState_;
+ friend class AutoHeapSession;
+ friend class JS::AutoEnterCycleCollection;
+
+ UnprotectedData<gcstats::Statistics> stats_;
+
+ public:
+ gcstats::Statistics& stats() { return stats_.ref(); }
+
+ js::StringStats stringStats;
+
+ GCMarker marker;
+
+ Vector<JS::GCCellPtr, 0, SystemAllocPolicy> unmarkGrayStack;
+
+ /* Track total GC heap size for this runtime. */
+ HeapSize heapSize;
+
+ /* GC scheduling state and parameters. */
+ GCSchedulingTunables tunables;
+ GCSchedulingState schedulingState;
+
+ // Helper thread configuration.
+ MainThreadData<double> helperThreadRatio;
+ MainThreadData<size_t> maxHelperThreads;
+ MainThreadData<size_t> helperThreadCount;
+
+ // State used for managing atom mark bitmaps in each zone.
+ AtomMarkingRuntime atomMarking;
+
+ private:
+ // When chunks are empty, they reside in the emptyChunks pool and are
+ // re-used as needed or eventually expired if not re-used. The emptyChunks
+ // pool gets refilled from the background allocation task heuristically so
+ // that empty chunks should always be available for immediate allocation
+ // without syscalls.
+ GCLockData<ChunkPool> emptyChunks_;
+
+ // Chunks which have had some, but not all, of their arenas allocated live
+ // in the available chunk lists. When all available arenas in a chunk have
+ // been allocated, the chunk is removed from the available list and moved
+ // to the fullChunks pool. During a GC, if all arenas are free, the chunk
+ // is moved back to the emptyChunks pool and scheduled for eventual
+ // release.
+ GCLockData<ChunkPool> availableChunks_;
+
+ // When all arenas in a chunk are used, it is moved to the fullChunks pool
+ // so as to reduce the cost of operations on the available lists.
+ GCLockData<ChunkPool> fullChunks_;
+
+ MainThreadData<RootedValueMap> rootsHash;
+
+ // An incrementing id used to assign unique ids to cells that require one.
+ mozilla::Atomic<uint64_t, mozilla::ReleaseAcquire> nextCellUniqueId_;
+
+ /*
+   * Number of committed arenas in all GC chunks, including empty chunks.
+ */
+ mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> numArenasFreeCommitted;
+ MainThreadData<VerifyPreTracer*> verifyPreData;
+
+ private:
+ MainThreadData<mozilla::TimeStamp> lastGCStartTime_;
+ MainThreadData<mozilla::TimeStamp> lastGCEndTime_;
+
+ MainThreadData<bool> incrementalGCEnabled;
+ MainThreadData<bool> perZoneGCEnabled;
+
+ mozilla::Atomic<size_t, mozilla::ReleaseAcquire> numActiveZoneIters;
+
+ /*
+   * The self-hosting zone is collected once after initialization. We don't
+ * allow allocation after this point and we don't collect it again.
+ */
+ WriteOnceData<bool> selfHostingZoneFrozen;
+
+ /* During shutdown, the GC needs to clean up every possible object. */
+ MainThreadData<bool> cleanUpEverything;
+
+ // Gray marking must be done after all black marking is complete. However,
+ // we do not have write barriers on XPConnect roots. Therefore, XPConnect
+ // roots must be accumulated in the first slice of incremental GC. We
+ // accumulate these roots in each zone's gcGrayRoots vector and then mark
+ // them later, after black marking is complete for each compartment. This
+ // accumulation can fail, but in that case we switch to non-incremental GC.
+ enum class GrayBufferState { Unused, Okay, Failed };
+ MainThreadOrGCTaskData<GrayBufferState> grayBufferState;
+ bool hasValidGrayRootsBuffer() const {
+ return grayBufferState == GrayBufferState::Okay;
+ }
+
+ // Clear each zone's gray buffers, but do not change the current state.
+ void resetBufferedGrayRoots();
+
+ // Reset the gray buffering state to Unused.
+ void clearBufferedGrayRoots() {
+ grayBufferState = GrayBufferState::Unused;
+ resetBufferedGrayRoots();
+ }
+
+ /*
+ * The gray bits can become invalid if UnmarkGray overflows the stack. A
+ * full GC will reset this bit, since it fills in all the gray bits.
+ */
+ UnprotectedData<bool> grayBitsValid;
+
+ mozilla::Atomic<JS::GCReason, mozilla::ReleaseAcquire> majorGCTriggerReason;
+
+ private:
+ /* Perform full GC when we are able to collect the atoms zone. */
+ MainThreadData<bool> fullGCForAtomsRequested_;
+
+ /* Incremented at the start of every minor GC. */
+ MainThreadData<uint64_t> minorGCNumber;
+
+ /* Incremented at the start of every major GC. */
+ MainThreadData<uint64_t> majorGCNumber;
+
+ /* Incremented on every GC slice or minor collection. */
+ MainThreadData<uint64_t> number;
+
+ /* Incremented on every GC slice. */
+ MainThreadData<uint64_t> sliceNumber;
+
+ /* Whether the currently running GC can finish in multiple slices. */
+ MainThreadOrGCTaskData<bool> isIncremental;
+
+  /* Whether all zones are being collected in the first GC slice. */
+ MainThreadData<bool> isFull;
+
+ /* Whether the heap will be compacted at the end of GC. */
+ MainThreadData<bool> isCompacting;
+
+ /* The invocation kind of the current GC, taken from the first slice. */
+ MainThreadOrGCTaskData<JSGCInvocationKind> invocationKind;
+
+ /* The initial GC reason, taken from the first slice. */
+ MainThreadData<JS::GCReason> initialReason;
+
+ /*
+ * The current incremental GC phase. This is also used internally in
+ * non-incremental GC.
+ */
+ MainThreadOrGCTaskData<State> incrementalState;
+
+ /* The incremental state at the start of this slice. */
+ MainThreadOrGCTaskData<State> initialState;
+
+  /* Whether to pay attention to the zeal settings in this incremental slice. */
+#ifdef JS_GC_ZEAL
+ MainThreadData<bool> useZeal;
+#else
+ const bool useZeal;
+#endif
+
+ /* Indicates that the last incremental slice exhausted the mark stack. */
+ MainThreadData<bool> lastMarkSlice;
+
+ // Whether it's currently safe to yield to the mutator in an incremental GC.
+ MainThreadData<bool> safeToYield;
+
+ // Whether to do any marking caused by barriers on a background thread during
+ // incremental sweeping, while also sweeping zones which have finished
+ // marking.
+ MainThreadData<bool> markOnBackgroundThreadDuringSweeping;
+
+ /* Whether any sweeping will take place in the separate GC helper thread. */
+ MainThreadData<bool> sweepOnBackgroundThread;
+
+ /* Singly linked list of zones to be swept in the background. */
+ HelperThreadLockData<ZoneList> backgroundSweepZones;
+
+ /*
+ * Whether to trigger a GC slice after a background task is complete, so that
+   * the collector can continue or finish collecting. This is only used for the
+ * tasks that run concurrently with the mutator, which are background
+ * finalization and background decommit.
+ */
+ HelperThreadLockData<bool> requestSliceAfterBackgroundTask;
+
+ /*
+ * Free LIFO blocks are transferred to these allocators before being freed on
+ * a background thread.
+ */
+ HelperThreadLockData<LifoAlloc> lifoBlocksToFree;
+ MainThreadData<LifoAlloc> lifoBlocksToFreeAfterMinorGC;
+ HelperThreadLockData<Nursery::BufferSet> buffersToFreeAfterMinorGC;
+
+ /* Index of current sweep group (for stats). */
+ MainThreadData<unsigned> sweepGroupIndex;
+
+ /*
+ * Incremental sweep state.
+ */
+ MainThreadData<JS::Zone*> sweepGroups;
+ MainThreadOrGCTaskData<JS::Zone*> currentSweepGroup;
+ MainThreadData<UniquePtr<SweepAction>> sweepActions;
+ MainThreadOrGCTaskData<JS::Zone*> sweepZone;
+ MainThreadOrGCTaskData<AllocKind> sweepAllocKind;
+ MainThreadData<mozilla::Maybe<AtomsTable::SweepIterator>> maybeAtomsToSweep;
+ MainThreadOrGCTaskData<mozilla::Maybe<WeakCacheSweepIterator>>
+ weakCachesToSweep;
+ MainThreadData<bool> hasMarkedGrayRoots;
+ MainThreadData<bool> abortSweepAfterCurrentGroup;
+ MainThreadOrGCTaskData<IncrementalProgress> sweepMarkResult;
+
+#ifdef DEBUG
+ // During gray marking, delay AssertCellIsNotGray checks by
+ // recording the cell pointers here and checking after marking has
+ // finished.
+ MainThreadData<Vector<const Cell*, 0, SystemAllocPolicy>>
+ cellsToAssertNotGray;
+ friend void js::gc::detail::AssertCellIsNotGray(const Cell*);
+#endif
+
+ friend class SweepGroupsIter;
+
+ /*
+ * Incremental compacting state.
+ */
+ MainThreadData<bool> startedCompacting;
+ MainThreadData<ZoneList> zonesToMaybeCompact;
+ MainThreadData<Arena*> relocatedArenasToRelease;
+ MainThreadData<size_t> zonesCompacted;
+
+#ifdef JS_GC_ZEAL
+ MainThreadData<MarkingValidator*> markingValidator;
+#endif
+
+ /*
+ * Default budget for incremental GC slice. See js/SliceBudget.h.
+ *
+ * JSGC_SLICE_TIME_BUDGET_MS
+ * pref: javascript.options.mem.gc_incremental_slice_ms,
+ */
+ MainThreadData<int64_t> defaultTimeBudgetMS_;
+
+ /*
+ * We disable incremental GC if we encounter a Class with a trace hook
+ * that does not implement write barriers.
+ */
+ MainThreadData<bool> incrementalAllowed;
+
+ /*
+   * Whether compacting GC is enabled globally.
+ *
+ * JSGC_COMPACTING_ENABLED
+ * pref: javascript.options.mem.gc_compacting
+ */
+ MainThreadData<bool> compactingEnabled;
+
+ MainThreadData<bool> rootsRemoved;
+
+ /*
+ * These options control the zealousness of the GC. At every allocation,
+ * nextScheduled is decremented. When it reaches zero we do a full GC.
+ *
+ * At this point, if zeal_ is one of the types that trigger periodic
+ * collection, then nextScheduled is reset to the value of zealFrequency.
+ * Otherwise, no additional GCs take place.
+ *
+ * You can control these values in several ways:
+ * - Set the JS_GC_ZEAL environment variable
+ * - Call gczeal() or schedulegc() from inside shell-executed JS code
+ * (see the help for details)
+ *
+ * If gcZeal_ == 1 then we perform GCs in select places (during MaybeGC and
+ * whenever we are notified that GC roots have been removed). This option is
+ * mainly useful to embedders.
+ *
+ * We use zeal_ == 4 to enable write barrier verification. See the comment
+ * in gc/Verifier.cpp for more information about this.
+ *
+ * zeal_ values from 8 to 10 periodically run different types of
+ * incremental GC.
+ *
+ * zeal_ value 14 performs periodic shrinking collections.
+ */
+#ifdef JS_GC_ZEAL
+ static_assert(size_t(ZealMode::Count) <= 32,
+ "Too many zeal modes to store in a uint32_t");
+ MainThreadData<uint32_t> zealModeBits;
+ MainThreadData<int> zealFrequency;
+ MainThreadData<int> nextScheduled;
+ MainThreadData<bool> deterministicOnly;
+ MainThreadData<int> zealSliceBudget;
+
+ MainThreadData<PersistentRooted<GCVector<JSObject*, 0, SystemAllocPolicy>>>
+ selectedForMarking;
+#endif
+
+ MainThreadData<bool> fullCompartmentChecks;
+
+ MainThreadData<uint32_t> gcCallbackDepth;
+
+ MainThreadData<Callback<JSGCCallback>> gcCallback;
+ MainThreadData<Callback<JS::DoCycleCollectionCallback>>
+ gcDoCycleCollectionCallback;
+ MainThreadData<Callback<JSObjectsTenuredCallback>> tenuredCallback;
+ MainThreadData<CallbackVector<JSFinalizeCallback>> finalizeCallbacks;
+ MainThreadOrGCTaskData<Callback<JSHostCleanupFinalizationRegistryCallback>>
+ hostCleanupFinalizationRegistryCallback;
+ MainThreadData<CallbackVector<JSWeakPointerZonesCallback>>
+ updateWeakPointerZonesCallbacks;
+ MainThreadData<CallbackVector<JSWeakPointerCompartmentCallback>>
+ updateWeakPointerCompartmentCallbacks;
+
+ /*
+ * The trace operations to trace embedding-specific GC roots. One is for
+ * tracing through black roots and the other is for tracing through gray
+ * roots. The black/gray distinction is only relevant to the cycle
+ * collector.
+ */
+ MainThreadData<CallbackVector<JSTraceDataOp>> blackRootTracers;
+ MainThreadOrGCTaskData<Callback<JSTraceDataOp>> grayRootTracer;
+
+ /* Always preserve JIT code during GCs, for testing. */
+ MainThreadData<bool> alwaysPreserveCode;
+
+  /* Number of zones that are currently in page load. */
+ MainThreadData<size_t> inPageLoadCount;
+
+ MainThreadData<bool> lowMemoryState;
+
+ /* Synchronize GC heap access among GC helper threads and the main thread. */
+ friend class js::AutoLockGC;
+ friend class js::AutoLockGCBgAlloc;
+ js::Mutex lock;
+
+ friend class BackgroundSweepTask;
+ friend class BackgroundFreeTask;
+
+ BackgroundAllocTask allocTask;
+ BackgroundUnmarkTask unmarkTask;
+ BackgroundMarkTask markTask;
+ BackgroundSweepTask sweepTask;
+ BackgroundFreeTask freeTask;
+ BackgroundDecommitTask decommitTask;
+
+ /*
+ * During incremental sweeping, this field temporarily holds the arenas of
+ * the current AllocKind being swept in order of increasing free space.
+ */
+ MainThreadData<SortedArenaList> incrementalSweepList;
+
+ private:
+ MainThreadData<Nursery> nursery_;
+
+ // The store buffer used to track tenured to nursery edges for generational
+ // GC. This is accessed off main thread when sweeping WeakCaches.
+ MainThreadOrGCTaskData<gc::StoreBuffer> storeBuffer_;
+
+ mozilla::TimeStamp lastLastDitchTime;
+
+ friend class MarkingValidator;
+ friend class AutoEnterIteration;
+};
+
+/* Prevent compartments and zones from being collected during iteration. */
+class MOZ_RAII AutoEnterIteration {
+ GCRuntime* gc;
+
+ public:
+ explicit AutoEnterIteration(GCRuntime* gc_) : gc(gc_) {
+ ++gc->numActiveZoneIters;
+ }
+
+ ~AutoEnterIteration() {
+ MOZ_ASSERT(gc->numActiveZoneIters);
+ --gc->numActiveZoneIters;
+ }
+};
+
+#ifdef JS_GC_ZEAL
+
+inline bool GCRuntime::hasZealMode(ZealMode mode) {
+ static_assert(size_t(ZealMode::Limit) < sizeof(zealModeBits) * 8,
+ "Zeal modes must fit in zealModeBits");
+ return zealModeBits & (1 << uint32_t(mode));
+}
+
+inline void GCRuntime::clearZealMode(ZealMode mode) {
+ zealModeBits &= ~(1 << uint32_t(mode));
+ MOZ_ASSERT(!hasZealMode(mode));
+}
+
+inline bool GCRuntime::upcomingZealousGC() { return nextScheduled == 1; }
+
+inline bool GCRuntime::needZealousGC() {
+ if (nextScheduled > 0 && --nextScheduled == 0) {
+ if (hasZealMode(ZealMode::Alloc) || hasZealMode(ZealMode::GenerationalGC) ||
+ hasZealMode(ZealMode::IncrementalMultipleSlices) ||
+ hasZealMode(ZealMode::Compact) || hasIncrementalTwoSliceZealMode()) {
+ nextScheduled = zealFrequency;
+ }
+ return true;
+ }
+ return false;
+}
+
+inline bool GCRuntime::hasIncrementalTwoSliceZealMode() {
+ return hasZealMode(ZealMode::YieldBeforeRootMarking) ||
+ hasZealMode(ZealMode::YieldBeforeMarking) ||
+ hasZealMode(ZealMode::YieldBeforeSweeping) ||
+ hasZealMode(ZealMode::YieldBeforeSweepingAtoms) ||
+ hasZealMode(ZealMode::YieldBeforeSweepingCaches) ||
+ hasZealMode(ZealMode::YieldBeforeSweepingObjects) ||
+ hasZealMode(ZealMode::YieldBeforeSweepingNonObjects) ||
+ hasZealMode(ZealMode::YieldBeforeSweepingShapeTrees) ||
+ hasZealMode(ZealMode::YieldWhileGrayMarking);
+}
+
+#else
+inline bool GCRuntime::hasZealMode(ZealMode mode) { return false; }
+inline void GCRuntime::clearZealMode(ZealMode mode) {}
+inline bool GCRuntime::upcomingZealousGC() { return false; }
+inline bool GCRuntime::needZealousGC() { return false; }
+inline bool GCRuntime::hasIncrementalTwoSliceZealMode() { return false; }
+#endif
+
+bool IsCurrentlyAnimating(const mozilla::TimeStamp& lastAnimationTime,
+ const mozilla::TimeStamp& currentTime);
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif
diff --git a/js/src/gc/GenerateStatsPhases.py b/js/src/gc/GenerateStatsPhases.py
new file mode 100644
index 0000000000..4cc53235a7
--- /dev/null
+++ b/js/src/gc/GenerateStatsPhases.py
@@ -0,0 +1,416 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# flake8: noqa: F821
+
+# Generate graph structures for GC statistics recording.
+#
+# Stats phases are nested and form a directed acyclic graph starting
+# from a set of root phases. Importantly, a phase may appear under more
+# than one parent phase.
+#
+# For example, the following arrangement is possible:
+#
+# +---+
+# | A |
+# +---+
+# |
+# +-------+-------+
+# | | |
+# v v v
+# +---+ +---+ +---+
+# | B | | C | | D |
+# +---+ +---+ +---+
+# | |
+# +---+---+
+# |
+# v
+# +---+
+# | E |
+# +---+
+#
+# This graph is expanded into a tree (or really a forest) and phases
+# with multiple parents are duplicated.
+#
+# For example, the input example above would be expanded to:
+#
+# +---+
+# | A |
+# +---+
+# |
+# +-------+-------+
+# | | |
+# v v v
+# +---+ +---+ +---+
+# | B | | C | | D |
+# +---+ +---+ +---+
+# | |
+# v v
+# +---+ +---+
+# | E | | E'|
+# +---+ +---+
+
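+# Expressed with the PhaseKind class defined below, the shared phase E in the
+# example above is created once and listed as a child of both B and C (the
+# names and bucket numbers here are placeholders, not real phases):
+#
+#   PhaseE = PhaseKind("E", "Phase E", 104)
+#   PhaseKindGraphRoots = [
+#       PhaseKind("A", "Phase A", 100, [
+#           PhaseKind("B", "Phase B", 101, [PhaseE]),
+#           PhaseKind("C", "Phase C", 102, [PhaseE]),
+#           PhaseKind("D", "Phase D", 103),
+#       ]),
+#   ]
+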
+# NOTE: If you add new phases here the current next phase kind number can be
+# found at the end of js/src/gc/StatsPhasesGenerated.inc
+
+import re
+import collections
+
+
+class PhaseKind:
+ def __init__(self, name, descr, bucket, children=[]):
+ self.name = name
+ self.descr = descr
+ # For telemetry
+ self.bucket = bucket
+ self.children = children
+
+
+# The root marking phase appears in several places in the graph.
+MarkRootsPhaseKind = PhaseKind(
+ "MARK_ROOTS",
+ "Mark Roots",
+ 48,
+ [
+ PhaseKind("MARK_CCWS", "Mark Cross Compartment Wrappers", 50),
+ PhaseKind("MARK_STACK", "Mark C and JS stacks", 51),
+ PhaseKind("MARK_RUNTIME_DATA", "Mark Runtime-wide Data", 52),
+ PhaseKind("MARK_EMBEDDING", "Mark Embedding", 53),
+ PhaseKind("MARK_COMPARTMENTS", "Mark Compartments", 54),
+ ],
+)
+
+JoinParallelTasksPhaseKind = PhaseKind("JOIN_PARALLEL_TASKS", "Join Parallel Tasks", 67)
+
+PhaseKindGraphRoots = [
+ PhaseKind("MUTATOR", "Mutator Running", 0),
+ PhaseKind("GC_BEGIN", "Begin Callback", 1),
+ PhaseKind(
+ "EVICT_NURSERY_FOR_MAJOR_GC",
+ "Evict Nursery For Major GC",
+ 70,
+ [
+ MarkRootsPhaseKind,
+ ],
+ ),
+ PhaseKind("WAIT_BACKGROUND_THREAD", "Wait Background Thread", 2),
+ PhaseKind(
+ "PREPARE",
+ "Prepare For Collection",
+ 69,
+ [
+ PhaseKind("UNMARK", "Unmark", 7),
+ PhaseKind("UNMARK_WEAKMAPS", "Unmark WeakMaps", 76),
+ PhaseKind("BUFFER_GRAY_ROOTS", "Buffer Gray Roots", 49),
+ PhaseKind("MARK_DISCARD_CODE", "Mark Discard Code", 3),
+ PhaseKind("RELAZIFY_FUNCTIONS", "Relazify Functions", 4),
+ PhaseKind("PURGE", "Purge", 5),
+ PhaseKind("PURGE_SHAPE_CACHES", "Purge ShapeCaches", 60),
+ PhaseKind("PURGE_SOURCE_URLS", "Purge Source URLs", 73),
+ JoinParallelTasksPhaseKind,
+ ],
+ ),
+ PhaseKind(
+ "MARK",
+ "Mark",
+ 6,
+ [MarkRootsPhaseKind, PhaseKind("MARK_DELAYED", "Mark Delayed", 8)],
+ ),
+ PhaseKind(
+ "SWEEP",
+ "Sweep",
+ 9,
+ [
+ PhaseKind(
+ "SWEEP_MARK",
+ "Mark During Sweeping",
+ 10,
+ [
+ PhaseKind(
+ "SWEEP_MARK_INCOMING_BLACK", "Mark Incoming Black Pointers", 12
+ ),
+ PhaseKind(
+ "SWEEP_MARK_WEAK",
+ "Mark Weak",
+ 13,
+ [PhaseKind("SWEEP_MARK_GRAY_WEAK", "Mark Gray and Weak", 16)],
+ ),
+ PhaseKind(
+ "SWEEP_MARK_INCOMING_GRAY", "Mark Incoming Gray Pointers", 14
+ ),
+ PhaseKind("SWEEP_MARK_GRAY", "Mark Gray", 15),
+ ],
+ ),
+ PhaseKind(
+ "FINALIZE_START",
+ "Finalize Start Callbacks",
+ 17,
+ [
+ PhaseKind("WEAK_ZONES_CALLBACK", "Per-Slice Weak Callback", 57),
+ PhaseKind(
+ "WEAK_COMPARTMENT_CALLBACK", "Per-Compartment Weak Callback", 58
+ ),
+ ],
+ ),
+ PhaseKind("UPDATE_ATOMS_BITMAP", "Sweep Atoms Bitmap", 68),
+ PhaseKind("SWEEP_ATOMS_TABLE", "Sweep Atoms Table", 18),
+ PhaseKind(
+ "SWEEP_COMPARTMENTS",
+ "Sweep Compartments",
+ 20,
+ [
+ PhaseKind("SWEEP_DISCARD_CODE", "Sweep Discard Code", 21),
+ PhaseKind("SWEEP_INNER_VIEWS", "Sweep Inner Views", 22),
+ PhaseKind(
+ "SWEEP_CC_WRAPPER", "Sweep Cross Compartment Wrappers", 23
+ ),
+ PhaseKind("SWEEP_BASE_SHAPE", "Sweep Base Shapes", 24),
+ PhaseKind("SWEEP_INITIAL_SHAPE", "Sweep Initial Shapes", 25),
+ PhaseKind("SWEEP_REGEXP", "Sweep Regexps", 28),
+ PhaseKind("SWEEP_COMPRESSION", "Sweep Compression Tasks", 62),
+ PhaseKind("SWEEP_WEAKMAPS", "Sweep WeakMaps", 63),
+ PhaseKind("SWEEP_UNIQUEIDS", "Sweep Unique IDs", 64),
+ PhaseKind(
+ "SWEEP_FINALIZATION_REGISTRIES",
+ "Sweep FinalizationRegistries",
+ 74,
+ ),
+ PhaseKind("SWEEP_WEAKREFS", "Sweep WeakRefs", 75),
+ PhaseKind("SWEEP_JIT_DATA", "Sweep JIT Data", 65),
+ PhaseKind("SWEEP_WEAK_CACHES", "Sweep Weak Caches", 66),
+ PhaseKind("SWEEP_MISC", "Sweep Miscellaneous", 29),
+ JoinParallelTasksPhaseKind,
+ ],
+ ),
+ PhaseKind("SWEEP_OBJECT", "Sweep Object", 33),
+ PhaseKind("SWEEP_STRING", "Sweep String", 34),
+ PhaseKind("SWEEP_SCRIPT", "Sweep Script", 35),
+ PhaseKind("SWEEP_SCOPE", "Sweep Scope", 59),
+ PhaseKind("SWEEP_REGEXP_SHARED", "Sweep RegExpShared", 61),
+ PhaseKind("SWEEP_SHAPE", "Sweep Shape", 36),
+ PhaseKind("FINALIZE_END", "Finalize End Callback", 38),
+ PhaseKind("DESTROY", "Deallocate", 39),
+ JoinParallelTasksPhaseKind,
+ ],
+ ),
+ PhaseKind(
+ "COMPACT",
+ "Compact",
+ 40,
+ [
+ PhaseKind("COMPACT_MOVE", "Compact Move", 41),
+ PhaseKind(
+ "COMPACT_UPDATE",
+ "Compact Update",
+ 42,
+ [
+ MarkRootsPhaseKind,
+ PhaseKind("COMPACT_UPDATE_CELLS", "Compact Update Cells", 43),
+ JoinParallelTasksPhaseKind,
+ ],
+ ),
+ ],
+ ),
+ PhaseKind("DECOMMIT", "Decommit", 72),
+ PhaseKind("GC_END", "End Callback", 44),
+ PhaseKind(
+ "MINOR_GC",
+ "All Minor GCs",
+ 45,
+ [
+ MarkRootsPhaseKind,
+ ],
+ ),
+ PhaseKind(
+ "EVICT_NURSERY",
+ "Minor GCs to Evict Nursery",
+ 46,
+ [
+ MarkRootsPhaseKind,
+ ],
+ ),
+ PhaseKind(
+ "TRACE_HEAP",
+ "Trace Heap",
+ 47,
+ [
+ MarkRootsPhaseKind,
+ ],
+ ),
+ PhaseKind("BARRIER", "Barriers", 55, [PhaseKind("UNMARK_GRAY", "Unmark gray", 56)]),
+]
+
+
+def findAllPhaseKinds():
+ # Make a linear list of all unique phases by performing a depth first
+ # search on the phase graph starting at the roots. This will be used to
+ # generate the PhaseKind enum.
+ phases = []
+ seen = set()
+
+ def dfs(phase):
+ if phase in seen:
+ return
+ phases.append(phase)
+ seen.add(phase)
+ for child in phase.children:
+ dfs(child)
+
+ for phase in PhaseKindGraphRoots:
+ dfs(phase)
+ return phases
+
+
+AllPhaseKinds = findAllPhaseKinds()
+
+
+class Phase:
+ # Expand the DAG into a tree, duplicating phases which have more than
+ # one parent.
+ def __init__(self, phaseKind, parent):
+ self.phaseKind = phaseKind
+ self.parent = parent
+ self.depth = parent.depth + 1 if parent else 0
+ self.children = []
+ self.nextSibling = None
+ self.nextInPhaseKind = None
+
+ self.path = re.sub(r"\W+", "_", phaseKind.name.lower())
+ if parent is not None:
+ self.path = parent.path + "." + self.path
+
+
+def expandPhases():
+ phases = []
+ phasesForKind = collections.defaultdict(list)
+
+ def traverse(phaseKind, parent):
+ ep = Phase(phaseKind, parent)
+ phases.append(ep)
+
+ # Update list of expanded phases for this phase kind.
+ if phasesForKind[phaseKind]:
+ phasesForKind[phaseKind][-1].nextInPhaseKind = ep
+ phasesForKind[phaseKind].append(ep)
+
+ # Recurse over children.
+ for child in phaseKind.children:
+ child_ep = traverse(child, ep)
+ if ep.children:
+ ep.children[-1].nextSibling = child_ep
+ ep.children.append(child_ep)
+ return ep
+
+ for phaseKind in PhaseKindGraphRoots:
+ traverse(phaseKind, None)
+
+ return phases, phasesForKind
+
+
+AllPhases, PhasesForPhaseKind = expandPhases()
+
+# Name phases based on phase kind name and index if there are multiple phases
+# corresponding to a single phase kind.
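+# For example, MARK_ROOTS appears under several parents in the graph above, so
+# its expanded phases are named MARK_ROOTS_1, MARK_ROOTS_2, and so on, while a
+# phase kind with a single parent keeps its plain name (e.g. MARK_DELAYED).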
+
+for phaseKind in AllPhaseKinds:
+ phases = PhasesForPhaseKind[phaseKind]
+ if len(phases) == 1:
+ phases[0].name = "%s" % phaseKind.name
+ else:
+ for index, phase in enumerate(phases):
+ phase.name = "%s_%d" % (phaseKind.name, index + 1)
+
+# Find the maximum phase nesting.
+MaxPhaseNesting = max(phase.depth for phase in AllPhases) + 1
+
+# And the maximum bucket number.
+MaxBucket = max(kind.bucket for kind in AllPhaseKinds)
+
+# Generate code.
+
+
+def writeList(out, items):
+ if items:
+ out.write(",\n".join(" " + item for item in items) + "\n")
+
+
+def writeEnumClass(out, name, type, items, extraItems):
+ items = ["FIRST"] + list(items) + ["LIMIT"] + list(extraItems)
+ items[1] += " = " + items[0]
+ out.write("enum class %s : %s {\n" % (name, type))
+ writeList(out, items)
+ out.write("};\n")
+
+
+def generateHeader(out):
+ #
+ # Generate PhaseKind enum.
+ #
+ phaseKindNames = map(lambda phaseKind: phaseKind.name, AllPhaseKinds)
+ extraPhaseKinds = [
+ "NONE = LIMIT",
+ "EXPLICIT_SUSPENSION = LIMIT",
+ "IMPLICIT_SUSPENSION",
+ ]
+ writeEnumClass(out, "PhaseKind", "uint8_t", phaseKindNames, extraPhaseKinds)
+ out.write("\n")
+
+ #
+ # Generate Phase enum.
+ #
+ phaseNames = map(lambda phase: phase.name, AllPhases)
+ extraPhases = ["NONE = LIMIT", "EXPLICIT_SUSPENSION = LIMIT", "IMPLICIT_SUSPENSION"]
+ writeEnumClass(out, "Phase", "uint8_t", phaseNames, extraPhases)
+ out.write("\n")
+
+ #
+ # Generate MAX_PHASE_NESTING constant.
+ #
+ out.write("static const size_t MAX_PHASE_NESTING = %d;\n" % MaxPhaseNesting)
+
+
+def generateCpp(out):
+ #
+ # Generate the PhaseKindInfo table.
+ #
+ out.write("static constexpr PhaseKindTable phaseKinds = {\n")
+ for phaseKind in AllPhaseKinds:
+ phase = PhasesForPhaseKind[phaseKind][0]
+ out.write(
+ " /* PhaseKind::%s */ PhaseKindInfo { Phase::%s, %d },\n"
+ % (phaseKind.name, phase.name, phaseKind.bucket)
+ )
+ out.write("};\n")
+ out.write("\n")
+
+ #
+ # Generate the PhaseInfo tree.
+ #
+ def name(phase):
+ return "Phase::" + phase.name if phase else "Phase::NONE"
+
+ out.write("static constexpr PhaseTable phases = {\n")
+ for phase in AllPhases:
+ firstChild = phase.children[0] if phase.children else None
+ phaseKind = phase.phaseKind
+ out.write(
+ ' /* %s */ PhaseInfo { %s, %s, %s, %s, PhaseKind::%s, %d, "%s", "%s" },\n'
+ % ( # NOQA: E501
+ name(phase),
+ name(phase.parent),
+ name(firstChild),
+ name(phase.nextSibling),
+ name(phase.nextInPhaseKind),
+ phaseKind.name,
+ phase.depth,
+ phaseKind.descr,
+ phase.path,
+ )
+ )
+ out.write("};\n")
+
+ #
+ # Print in a comment the next available phase kind number.
+ #
+ out.write("// The next available phase kind number is: %d\n" % (MaxBucket + 1))
diff --git a/js/src/gc/HashUtil.h b/js/src/gc/HashUtil.h
new file mode 100644
index 0000000000..44bf8bbc5e
--- /dev/null
+++ b/js/src/gc/HashUtil.h
@@ -0,0 +1,85 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_HashUtil_h
+#define gc_HashUtil_h
+
+#include <type_traits>
+
+#include "gc/Zone.h"
+#include "vm/JSContext.h"
+
+namespace js {
+
+/*
+ * Used to add entries to a js::HashMap or HashSet where the key depends on a GC
+ * thing that may be moved by generational or compacting GC between the call to
+ * lookupForAdd() and relookupOrAdd().
+ */
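+/*
+ * Illustrative usage sketch (the map, key and value names are hypothetical):
+ *
+ *   DependentAddPtr<MyMap> p(cx, map, key);
+ *   if (!p) {
+ *     // add() redoes the lookup if a GC has moved things since construction.
+ *     if (!p.add(cx, map, key, value)) {
+ *       return false;  // Out of memory has already been reported.
+ *     }
+ *   }
+ */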
+template <class T>
+struct DependentAddPtr {
+ typedef typename T::AddPtr AddPtr;
+ typedef typename T::Entry Entry;
+
+ template <class Lookup>
+ DependentAddPtr(const JSContext* cx, T& table, const Lookup& lookup)
+ : addPtr(table.lookupForAdd(lookup)),
+ originalGcNumber(cx->zone()->gcNumber()) {}
+
+ DependentAddPtr(DependentAddPtr&& other)
+ : addPtr(other.addPtr), originalGcNumber(other.originalGcNumber) {}
+
+ template <class KeyInput, class ValueInput>
+ bool add(JSContext* cx, T& table, const KeyInput& key,
+ const ValueInput& value) {
+ refreshAddPtr(cx, table, key);
+ if (!table.relookupOrAdd(addPtr, key, value)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ return true;
+ }
+
+ template <class KeyInput>
+ void remove(JSContext* cx, T& table, const KeyInput& key) {
+ refreshAddPtr(cx, table, key);
+ if (addPtr) {
+ table.remove(addPtr);
+ }
+ }
+
+ bool found() const { return addPtr.found(); }
+ explicit operator bool() const { return found(); }
+ const Entry& operator*() const { return *addPtr; }
+ const Entry* operator->() const { return &*addPtr; }
+
+ private:
+ AddPtr addPtr;
+ const uint64_t originalGcNumber;
+
+ template <class KeyInput>
+ void refreshAddPtr(JSContext* cx, T& table, const KeyInput& key) {
+ bool gcHappened = originalGcNumber != cx->zone()->gcNumber();
+ if (gcHappened) {
+ addPtr = table.lookupForAdd(key);
+ }
+ }
+
+ DependentAddPtr() = delete;
+ DependentAddPtr(const DependentAddPtr&) = delete;
+ DependentAddPtr& operator=(const DependentAddPtr&) = delete;
+};
+
+template <typename T, typename Lookup>
+inline auto MakeDependentAddPtr(const JSContext* cx, T& table,
+ const Lookup& lookup) {
+ using Ptr = DependentAddPtr<std::remove_reference_t<decltype(table)>>;
+ return Ptr(cx, table, lookup);
+}
+
+} // namespace js
+
+#endif
diff --git a/js/src/gc/Heap-inl.h b/js/src/gc/Heap-inl.h
new file mode 100644
index 0000000000..9c09360fdd
--- /dev/null
+++ b/js/src/gc/Heap-inl.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Heap_inl_h
+#define gc_Heap_inl_h
+
+#include "gc/Heap.h"
+
+#include "gc/StoreBuffer.h"
+#include "gc/Zone.h"
+#include "util/Poison.h"
+#include "vm/Runtime.h"
+
+inline void js::gc::Arena::init(JS::Zone* zoneArg, AllocKind kind,
+ const AutoLockGC& lock) {
+#ifdef DEBUG
+ MOZ_MAKE_MEM_DEFINED(&zone, sizeof(zone));
+ MOZ_ASSERT((uintptr_t(zone) & 0xff) == JS_FREED_ARENA_PATTERN);
+#endif
+
+ MOZ_ASSERT(firstFreeSpan.isEmpty());
+ MOZ_ASSERT(!allocated());
+ MOZ_ASSERT(!onDelayedMarkingList_);
+ MOZ_ASSERT(!hasDelayedBlackMarking_);
+ MOZ_ASSERT(!hasDelayedGrayMarking_);
+ MOZ_ASSERT(!nextDelayedMarkingArena_);
+
+ MOZ_MAKE_MEM_UNDEFINED(this, ArenaSize);
+
+ zone = zoneArg;
+ allocKind = size_t(kind);
+ onDelayedMarkingList_ = 0;
+ hasDelayedBlackMarking_ = 0;
+ hasDelayedGrayMarking_ = 0;
+ nextDelayedMarkingArena_ = 0;
+ if (zone->isAtomsZone()) {
+ zone->runtimeFromAnyThread()->gc.atomMarking.registerArena(this, lock);
+ } else {
+ bufferedCells() = &ArenaCellSet::Empty;
+ }
+
+ setAsFullyUnused();
+}
+
+inline void js::gc::Arena::release(const AutoLockGC& lock) {
+ if (zone->isAtomsZone()) {
+ zone->runtimeFromAnyThread()->gc.atomMarking.unregisterArena(this, lock);
+ }
+ setAsNotAllocated();
+}
+
+inline js::gc::ArenaCellSet*& js::gc::Arena::bufferedCells() {
+ MOZ_ASSERT(zone && !zone->isAtomsZone());
+ return bufferedCells_;
+}
+
+inline size_t& js::gc::Arena::atomBitmapStart() {
+ MOZ_ASSERT(zone && zone->isAtomsZone());
+ return atomBitmapStart_;
+}
+
+#endif
diff --git a/js/src/gc/Heap.h b/js/src/gc/Heap.h
new file mode 100644
index 0000000000..daa20cbf0f
--- /dev/null
+++ b/js/src/gc/Heap.h
@@ -0,0 +1,774 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Heap_h
+#define gc_Heap_h
+
+#include "mozilla/DebugOnly.h"
+
+#include "ds/BitArray.h"
+#include "gc/AllocKind.h"
+#include "gc/GCEnum.h"
+#include "js/HeapAPI.h"
+#include "js/TypeDecls.h"
+#include "util/Poison.h"
+
+namespace js {
+
+class AutoLockGC;
+class AutoLockGCBgAlloc;
+class NurseryDecommitTask;
+
+namespace gc {
+
+class Arena;
+class ArenaCellSet;
+class ArenaList;
+class GCRuntime;
+class MarkingValidator;
+class SortedArenaList;
+class StoreBuffer;
+class TenuredCell;
+
+// Cells are aligned to CellAlignShift, so the largest tagged null pointer is:
+const uintptr_t LargestTaggedNullCellPointer = (1 << CellAlignShift) - 1;
+
+/*
+ * The minimum cell size ends up as twice the cell alignment because the mark
+ * bitmap contains one bit per CellBytesPerMarkBit bytes (which is equal to
+ * CellAlignBytes) and we need two mark bits per cell.
+ */
+const size_t MinCellSize = CellBytesPerMarkBit * MarkBitsPerCell;
+
+static_assert(ArenaSize % CellAlignBytes == 0,
+ "Arena size must be a multiple of cell alignment");
+
+/*
+ * A FreeSpan represents a contiguous sequence of free cells in an Arena. It
+ * can take two forms.
+ *
+ * - In an empty span, |first| and |last| are both zero.
+ *
+ * - In a non-empty span, |first| is the address of the first free thing in the
+ * span, and |last| is the address of the last free thing in the span.
+ * Furthermore, the memory pointed to by |last| holds a FreeSpan structure
+ * that points to the next span (which may be empty); this works because
+ * sizeof(FreeSpan) is less than the smallest thingSize.
+ */
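+/*
+ * A hypothetical sketch with 64-byte things: a span with |first| = 1024 and
+ * |last| = 2048 covers the free things at arena offsets 1024, 1088, ...,
+ * 2048, and the 64 bytes at offset 2048 begin with the FreeSpan that
+ * describes the next free region (or an empty span if this is the last one).
+ */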
+class FreeSpan {
+ friend class Arena;
+ friend class ArenaCellIter;
+ friend class ArenaFreeCellIter;
+
+ uint16_t first;
+ uint16_t last;
+
+ public:
+ // This inits just |first| and |last|; if the span is non-empty it doesn't
+ // do anything with the next span stored at |last|.
+ void initBounds(uintptr_t firstArg, uintptr_t lastArg, const Arena* arena) {
+ checkRange(firstArg, lastArg, arena);
+ first = firstArg;
+ last = lastArg;
+ }
+
+ void initAsEmpty() {
+ first = 0;
+ last = 0;
+ }
+
+ // This sets |first| and |last|, and also sets the next span stored at
+ // |last| as empty. (As a result, |firstArg| and |lastArg| cannot represent
+ // an empty span.)
+ void initFinal(uintptr_t firstArg, uintptr_t lastArg, const Arena* arena) {
+ initBounds(firstArg, lastArg, arena);
+ FreeSpan* last = nextSpanUnchecked(arena);
+ last->initAsEmpty();
+ checkSpan(arena);
+ }
+
+ bool isEmpty() const { return !first; }
+
+ Arena* getArenaUnchecked() { return reinterpret_cast<Arena*>(this); }
+ inline Arena* getArena();
+
+ static size_t offsetOfFirst() { return offsetof(FreeSpan, first); }
+
+ static size_t offsetOfLast() { return offsetof(FreeSpan, last); }
+
+ // Like nextSpan(), but no checking of the following span is done.
+ FreeSpan* nextSpanUnchecked(const Arena* arena) const {
+ MOZ_ASSERT(arena && !isEmpty());
+ return reinterpret_cast<FreeSpan*>(uintptr_t(arena) + last);
+ }
+
+ const FreeSpan* nextSpan(const Arena* arena) const {
+ checkSpan(arena);
+ return nextSpanUnchecked(arena);
+ }
+
+ MOZ_ALWAYS_INLINE TenuredCell* allocate(size_t thingSize) {
+ // Eschew the usual checks, because this might be the placeholder span.
+ // If this is somehow an invalid, non-empty span, checkSpan() will catch it.
+ Arena* arena = getArenaUnchecked();
+ checkSpan(arena);
+ uintptr_t thing = uintptr_t(arena) + first;
+ if (first < last) {
+ // We have space for at least two more things, so do a simple
+ // bump-allocate.
+ first += thingSize;
+ } else if (MOZ_LIKELY(first)) {
+ // The last space points to the next free span (which may be empty).
+ const FreeSpan* next = nextSpan(arena);
+ first = next->first;
+ last = next->last;
+ } else {
+ return nullptr; // The span is empty.
+ }
+ checkSpan(arena);
+ DebugOnlyPoison(reinterpret_cast<void*>(thing),
+ JS_ALLOCATED_TENURED_PATTERN, thingSize,
+ MemCheckKind::MakeUndefined);
+ return reinterpret_cast<TenuredCell*>(thing);
+ }
+
+ inline void checkSpan(const Arena* arena) const;
+ inline void checkRange(uintptr_t first, uintptr_t last,
+ const Arena* arena) const;
+};
+
+/*
+ * Arenas are the allocation units of the tenured heap in the GC. An arena
+ * is 4kiB in size and 4kiB-aligned. It starts with several header fields
+ * followed by some bytes of padding. The remainder of the arena is filled
+ * with GC things of a particular AllocKind. The padding ensures that the
+ * GC thing array ends exactly at the end of the arena:
+ *
+ * <----------------------------------------------> = ArenaSize bytes
+ * +---------------+---------+----+----+-----+----+
+ * | header fields | padding | T0 | T1 | ... | Tn |
+ * +---------------+---------+----+----+-----+----+
+ * <-------------------------> = first thing offset
+ */
+class alignas(ArenaSize) Arena {
+ static JS_FRIEND_DATA const uint8_t ThingSizes[];
+ static JS_FRIEND_DATA const uint8_t FirstThingOffsets[];
+ static JS_FRIEND_DATA const uint8_t ThingsPerArena[];
+ /*
+ * The first span of free things in the arena. Most of these spans are
+ * stored as offsets in free regions of the data array, and most operations
+ * on FreeSpans take an Arena pointer for safety. However, the FreeSpans
+ * used for allocation are stored here, at the start of an Arena, and use
+ * their own address to grab the next span within the same Arena.
+ */
+ FreeSpan firstFreeSpan;
+
+ public:
+ /*
+ * The zone that this Arena is contained within, when allocated. The offset
+ * of this field must match the ArenaZoneOffset stored in js/HeapAPI.h,
+ * as is statically asserted below.
+ */
+ JS::Zone* zone;
+
+ /*
+ * Arena::next has two purposes: when unallocated, it points to the next
+ * available Arena. When allocated, it points to the next Arena in the same
+ * zone and with the same alloc kind.
+ */
+ Arena* next;
+
+ private:
+ /*
+ * One of the AllocKind constants or AllocKind::LIMIT when the arena does
+ * not contain any GC things and is on the list of empty arenas in the GC
+ * chunk.
+ *
+ * We use 8 bits for the alloc kind so the compiler can use byte-level
+ * memory instructions to access it.
+ */
+ size_t allocKind : 8;
+
+ private:
+ /*
+ * When recursive marking uses too much stack we delay marking of
+ * arenas and link them into a list for later processing. This
+ * uses the following fields.
+ */
+ static const size_t DELAYED_MARKING_FLAG_BITS = 3;
+ static const size_t DELAYED_MARKING_ARENA_BITS =
+ JS_BITS_PER_WORD - 8 - DELAYED_MARKING_FLAG_BITS;
+ size_t onDelayedMarkingList_ : 1;
+ size_t hasDelayedBlackMarking_ : 1;
+ size_t hasDelayedGrayMarking_ : 1;
+ size_t nextDelayedMarkingArena_ : DELAYED_MARKING_ARENA_BITS;
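+  // The next arena in the delayed marking list is stored as its address
+  // shifted right by ArenaShift; arenas are arena-size-aligned, so the
+  // discarded low bits are always zero (see setNextDelayedMarkingArena and
+  // getNextDelayedMarking below).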
+ static_assert(
+ DELAYED_MARKING_ARENA_BITS >= JS_BITS_PER_WORD - ArenaShift,
+ "Arena::nextDelayedMarkingArena_ packing assumes that ArenaShift has "
+ "enough bits to cover allocKind and delayed marking state.");
+
+ union {
+ /*
+ * For arenas in zones other than the atoms zone, if non-null, points
+ * to an ArenaCellSet that represents the set of cells in this arena
+ * that are in the nursery's store buffer.
+ */
+ ArenaCellSet* bufferedCells_;
+
+ /*
+ * For arenas in the atoms zone, the starting index into zone atom
+ * marking bitmaps (see AtomMarking.h) of the things in this zone.
+ * Atoms never refer to nursery things, so no store buffer index is
+ * needed.
+ */
+ size_t atomBitmapStart_;
+ };
+
+ public:
+ /*
+ * The size of data should be |ArenaSize - offsetof(data)|, but the offset
+ * is not yet known to the compiler, so we do it by hand. |firstFreeSpan|
+ * takes up 8 bytes on 64-bit due to alignment requirements; the rest are
+ * obvious. This constant is stored in js/HeapAPI.h.
+ */
+ uint8_t data[ArenaSize - ArenaHeaderSize];
+
+ void init(JS::Zone* zoneArg, AllocKind kind, const AutoLockGC& lock);
+
+ // Sets |firstFreeSpan| to the Arena's entire valid range, and
+ // also sets the next span stored at |firstFreeSpan.last| as empty.
+ void setAsFullyUnused() {
+ AllocKind kind = getAllocKind();
+ firstFreeSpan.first = firstThingOffset(kind);
+ firstFreeSpan.last = lastThingOffset(kind);
+ FreeSpan* last = firstFreeSpan.nextSpanUnchecked(this);
+ last->initAsEmpty();
+ }
+
+ // Initialize an arena to its unallocated state. For arenas that were
+ // previously allocated for some zone, use release() instead.
+ void setAsNotAllocated() {
+ firstFreeSpan.initAsEmpty();
+
+ // Poison zone pointer to highlight UAF on released arenas in crash data.
+ AlwaysPoison(&zone, JS_FREED_ARENA_PATTERN, sizeof(zone),
+ MemCheckKind::MakeNoAccess);
+
+ allocKind = size_t(AllocKind::LIMIT);
+ onDelayedMarkingList_ = 0;
+ hasDelayedBlackMarking_ = 0;
+ hasDelayedGrayMarking_ = 0;
+ nextDelayedMarkingArena_ = 0;
+ bufferedCells_ = nullptr;
+ }
+
+ // Return an allocated arena to its unallocated state.
+ inline void release(const AutoLockGC& lock);
+
+ uintptr_t address() const {
+ checkAddress();
+ return uintptr_t(this);
+ }
+
+ inline void checkAddress() const;
+
+ inline TenuredChunk* chunk() const;
+
+ bool allocated() const {
+ MOZ_ASSERT(IsAllocKind(AllocKind(allocKind)));
+ return IsValidAllocKind(AllocKind(allocKind));
+ }
+
+ AllocKind getAllocKind() const {
+ MOZ_ASSERT(allocated());
+ return AllocKind(allocKind);
+ }
+
+ FreeSpan* getFirstFreeSpan() { return &firstFreeSpan; }
+
+ static size_t thingSize(AllocKind kind) { return ThingSizes[size_t(kind)]; }
+ static size_t thingsPerArena(AllocKind kind) {
+ return ThingsPerArena[size_t(kind)];
+ }
+ static size_t thingsSpan(AllocKind kind) {
+ return thingsPerArena(kind) * thingSize(kind);
+ }
+
+ static size_t firstThingOffset(AllocKind kind) {
+ return FirstThingOffsets[size_t(kind)];
+ }
+ static size_t lastThingOffset(AllocKind kind) {
+ return ArenaSize - thingSize(kind);
+ }
+
+ size_t getThingSize() const { return thingSize(getAllocKind()); }
+ size_t getThingsPerArena() const { return thingsPerArena(getAllocKind()); }
+ size_t getThingsSpan() const { return getThingsPerArena() * getThingSize(); }
+ size_t getFirstThingOffset() const {
+ return firstThingOffset(getAllocKind());
+ }
+
+ uintptr_t thingsStart() const { return address() + getFirstThingOffset(); }
+ uintptr_t thingsEnd() const { return address() + ArenaSize; }
+
+ bool isEmpty() const {
+ // Arena is empty if its first span covers the whole arena.
+ firstFreeSpan.checkSpan(this);
+ AllocKind kind = getAllocKind();
+ return firstFreeSpan.first == firstThingOffset(kind) &&
+ firstFreeSpan.last == lastThingOffset(kind);
+ }
+
+ bool hasFreeThings() const { return !firstFreeSpan.isEmpty(); }
+
+ size_t numFreeThings(size_t thingSize) const {
+ firstFreeSpan.checkSpan(this);
+ size_t numFree = 0;
+ const FreeSpan* span = &firstFreeSpan;
+ for (; !span->isEmpty(); span = span->nextSpan(this)) {
+ numFree += (span->last - span->first) / thingSize + 1;
+ }
+ return numFree;
+ }
+
+ size_t countFreeCells() { return numFreeThings(getThingSize()); }
+ size_t countUsedCells() { return getThingsPerArena() - countFreeCells(); }
+
+ bool inFreeList(uintptr_t thing) {
+ uintptr_t base = address();
+ const FreeSpan* span = &firstFreeSpan;
+ for (; !span->isEmpty(); span = span->nextSpan(this)) {
+ /* If the thing comes before the current span, it's not free. */
+ if (thing < base + span->first) {
+ return false;
+ }
+
+ /* If we find it before the end of the span, it's free. */
+ if (thing <= base + span->last) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static bool isAligned(uintptr_t thing, size_t thingSize) {
+    /* Things end at the arena end. */
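+    /* For example, with hypothetical 32-byte things in a 4 KiB arena, a thing
+     * at arena offset 4064 gives tailOffset = 32, a multiple of the thing
+     * size, so it is aligned; offset 4060 gives tailOffset = 36 and fails. */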
+ uintptr_t tailOffset = ArenaSize - (thing & ArenaMask);
+ return tailOffset % thingSize == 0;
+ }
+
+ bool onDelayedMarkingList() const { return onDelayedMarkingList_; }
+
+ Arena* getNextDelayedMarking() const {
+ MOZ_ASSERT(onDelayedMarkingList_);
+ return reinterpret_cast<Arena*>(nextDelayedMarkingArena_ << ArenaShift);
+ }
+
+ void setNextDelayedMarkingArena(Arena* arena) {
+ MOZ_ASSERT(!(uintptr_t(arena) & ArenaMask));
+ MOZ_ASSERT(!onDelayedMarkingList_);
+ MOZ_ASSERT(!hasDelayedBlackMarking_);
+ MOZ_ASSERT(!hasDelayedGrayMarking_);
+ MOZ_ASSERT(!nextDelayedMarkingArena_);
+ onDelayedMarkingList_ = 1;
+ if (arena) {
+ nextDelayedMarkingArena_ = arena->address() >> ArenaShift;
+ }
+ }
+
+ void updateNextDelayedMarkingArena(Arena* arena) {
+ MOZ_ASSERT(!(uintptr_t(arena) & ArenaMask));
+ MOZ_ASSERT(onDelayedMarkingList_);
+ nextDelayedMarkingArena_ = arena ? arena->address() >> ArenaShift : 0;
+ }
+
+ bool hasDelayedMarking(MarkColor color) const {
+ MOZ_ASSERT(onDelayedMarkingList_);
+ return color == MarkColor::Black ? hasDelayedBlackMarking_
+ : hasDelayedGrayMarking_;
+ }
+
+ bool hasAnyDelayedMarking() const {
+ MOZ_ASSERT(onDelayedMarkingList_);
+ return hasDelayedBlackMarking_ || hasDelayedGrayMarking_;
+ }
+
+ void setHasDelayedMarking(MarkColor color, bool value) {
+ MOZ_ASSERT(onDelayedMarkingList_);
+ if (color == MarkColor::Black) {
+ hasDelayedBlackMarking_ = value;
+ } else {
+ hasDelayedGrayMarking_ = value;
+ }
+ }
+
+ void clearDelayedMarkingState() {
+ MOZ_ASSERT(onDelayedMarkingList_);
+ onDelayedMarkingList_ = 0;
+ hasDelayedBlackMarking_ = 0;
+ hasDelayedGrayMarking_ = 0;
+ nextDelayedMarkingArena_ = 0;
+ }
+
+ inline ArenaCellSet*& bufferedCells();
+ inline size_t& atomBitmapStart();
+
+ template <typename T>
+ size_t finalize(JSFreeOp* fop, AllocKind thingKind, size_t thingSize);
+
+ static void staticAsserts();
+ static void checkLookupTables();
+
+ void unmarkAll();
+ void unmarkPreMarkedFreeCells();
+
+ void arenaAllocatedDuringGC();
+
+#ifdef DEBUG
+ void checkNoMarkedFreeCells();
+ void checkAllCellsMarkedBlack();
+#endif
+
+#if defined(DEBUG) || defined(JS_GC_ZEAL)
+ void checkNoMarkedCells();
+#endif
+};
+
+static_assert(ArenaZoneOffset == offsetof(Arena, zone),
+ "The hardcoded API zone offset must match the actual offset.");
+
+static_assert(sizeof(Arena) == ArenaSize,
+ "ArenaSize must match the actual size of the Arena structure.");
+
+static_assert(
+ offsetof(Arena, data) == ArenaHeaderSize,
+ "ArenaHeaderSize must match the actual size of the header fields.");
+
+inline Arena* FreeSpan::getArena() {
+ Arena* arena = getArenaUnchecked();
+ arena->checkAddress();
+ return arena;
+}
+
+inline void FreeSpan::checkSpan(const Arena* arena) const {
+#ifdef DEBUG
+ if (!first) {
+ MOZ_ASSERT(!first && !last);
+ return;
+ }
+
+ arena->checkAddress();
+ checkRange(first, last, arena);
+
+ // If there's a following span, it must have a higher address,
+ // and the gap must be at least 2 * thingSize.
+ const FreeSpan* next = nextSpanUnchecked(arena);
+ if (next->first) {
+ checkRange(next->first, next->last, arena);
+ size_t thingSize = arena->getThingSize();
+ MOZ_ASSERT(last + 2 * thingSize <= next->first);
+ }
+#endif
+}
+
+inline void FreeSpan::checkRange(uintptr_t first, uintptr_t last,
+ const Arena* arena) const {
+#ifdef DEBUG
+ MOZ_ASSERT(arena);
+ MOZ_ASSERT(first <= last);
+ AllocKind thingKind = arena->getAllocKind();
+ MOZ_ASSERT(first >= Arena::firstThingOffset(thingKind));
+ MOZ_ASSERT(last <= Arena::lastThingOffset(thingKind));
+ MOZ_ASSERT((last - first) % Arena::thingSize(thingKind) == 0);
+#endif
+}
+
+// Mark bitmap API:
+
+MOZ_ALWAYS_INLINE bool MarkBitmap::markBit(const TenuredCell* cell,
+ ColorBit colorBit) {
+ MarkBitmapWord* word;
+ uintptr_t mask;
+ getMarkWordAndMask(cell, colorBit, &word, &mask);
+ return *word & mask;
+}
+
+MOZ_ALWAYS_INLINE bool MarkBitmap::isMarkedAny(const TenuredCell* cell) {
+ return markBit(cell, ColorBit::BlackBit) ||
+ markBit(cell, ColorBit::GrayOrBlackBit);
+}
+
+MOZ_ALWAYS_INLINE bool MarkBitmap::isMarkedBlack(const TenuredCell* cell) {
+ return markBit(cell, ColorBit::BlackBit);
+}
+
+MOZ_ALWAYS_INLINE bool MarkBitmap::isMarkedGray(const TenuredCell* cell) {
+ return !markBit(cell, ColorBit::BlackBit) &&
+ markBit(cell, ColorBit::GrayOrBlackBit);
+}
+
+// The return value indicates if the cell went from unmarked to marked.
+MOZ_ALWAYS_INLINE bool MarkBitmap::markIfUnmarked(const TenuredCell* cell,
+ MarkColor color) {
+ MarkBitmapWord* word;
+ uintptr_t mask;
+ getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
+ if (*word & mask) {
+ return false;
+ }
+ if (color == MarkColor::Black) {
+ *word |= mask;
+ } else {
+ /*
+ * We use getMarkWordAndMask to recalculate both mask and word as
+ * doing just mask << color may overflow the mask.
+ */
+ getMarkWordAndMask(cell, ColorBit::GrayOrBlackBit, &word, &mask);
+ if (*word & mask) {
+ return false;
+ }
+ *word |= mask;
+ }
+ return true;
+}
+
+MOZ_ALWAYS_INLINE void MarkBitmap::markBlack(const TenuredCell* cell) {
+ MarkBitmapWord* word;
+ uintptr_t mask;
+ getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
+ *word |= mask;
+}
+
+MOZ_ALWAYS_INLINE void MarkBitmap::copyMarkBit(TenuredCell* dst,
+ const TenuredCell* src,
+ ColorBit colorBit) {
+ TenuredChunkBase* srcChunk = detail::GetCellChunkBase(src);
+ MarkBitmapWord* srcWord;
+ uintptr_t srcMask;
+ srcChunk->markBits.getMarkWordAndMask(src, colorBit, &srcWord, &srcMask);
+
+ MarkBitmapWord* dstWord;
+ uintptr_t dstMask;
+ getMarkWordAndMask(dst, colorBit, &dstWord, &dstMask);
+
+ *dstWord &= ~dstMask;
+ if (*srcWord & srcMask) {
+ *dstWord |= dstMask;
+ }
+}
+
+MOZ_ALWAYS_INLINE void MarkBitmap::unmark(const TenuredCell* cell) {
+ MarkBitmapWord* word;
+ uintptr_t mask;
+ getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
+ *word &= ~mask;
+ getMarkWordAndMask(cell, ColorBit::GrayOrBlackBit, &word, &mask);
+ *word &= ~mask;
+}
+
+inline void MarkBitmap::clear() {
+ for (size_t i = 0; i < MarkBitmap::WordCount; i++) {
+ bitmap[i] = 0;
+ }
+}
+
+inline MarkBitmapWord* MarkBitmap::arenaBits(Arena* arena) {
+ static_assert(
+ ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
+ "We assume that the part of the bitmap corresponding to the arena "
+ "has the exact number of words so we do not need to deal with a word "
+ "that covers bits from two arenas.");
+
+ MarkBitmapWord* word;
+ uintptr_t unused;
+ getMarkWordAndMask(reinterpret_cast<TenuredCell*>(arena->address()),
+ ColorBit::BlackBit, &word, &unused);
+ return word;
+}
+
+/*
+ * A chunk in the tenured heap. TenuredChunks contain arenas and associated data
+ * structures (mark bitmap, delayed marking state).
+ */
+class TenuredChunk : public TenuredChunkBase {
+ Arena arenas[ArenasPerChunk];
+
+ friend class GCRuntime;
+ friend class MarkingValidator;
+
+ public:
+ static TenuredChunk* fromAddress(uintptr_t addr) {
+ addr &= ~ChunkMask;
+ return reinterpret_cast<TenuredChunk*>(addr);
+ }
+
+ static bool withinValidRange(uintptr_t addr) {
+ uintptr_t offset = addr & ChunkMask;
+ if (TenuredChunk::fromAddress(addr)->isNurseryChunk()) {
+ return offset >= sizeof(ChunkBase) && offset < ChunkSize;
+ }
+ return offset >= offsetof(TenuredChunk, arenas) && offset < ChunkSize;
+ }
+
+ static size_t arenaIndex(uintptr_t addr) {
+ MOZ_ASSERT(!TenuredChunk::fromAddress(addr)->isNurseryChunk());
+ MOZ_ASSERT(withinValidRange(addr));
+ uintptr_t offset = addr & ChunkMask;
+ return (offset - offsetof(TenuredChunk, arenas)) >> ArenaShift;
+ }
+
+ explicit TenuredChunk(JSRuntime* runtime) : TenuredChunkBase(runtime) {}
+
+ uintptr_t address() const {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(this);
+ MOZ_ASSERT(!(addr & ChunkMask));
+ return addr;
+ }
+
+ bool unused() const { return info.numArenasFree == ArenasPerChunk; }
+
+ bool hasAvailableArenas() const { return info.numArenasFree != 0; }
+
+ bool isNurseryChunk() const { return storeBuffer; }
+
+ Arena* allocateArena(GCRuntime* gc, JS::Zone* zone, AllocKind kind,
+ const AutoLockGC& lock);
+
+ void releaseArena(GCRuntime* gc, Arena* arena, const AutoLockGC& lock);
+ void recycleArena(Arena* arena, SortedArenaList& dest, size_t thingsPerArena);
+
+ MOZ_MUST_USE bool decommitOneFreeArena(GCRuntime* gc, AutoLockGC& lock);
+ void decommitAllArenas();
+
+  // This will decommit each unused, not-already-decommitted arena. It performs
+  // a system call for each arena but is only used during OOM.
+ void decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock);
+
+ static TenuredChunk* allocate(GCRuntime* gc);
+ void init(GCRuntime* gc);
+
+ /* Unlink and return the freeArenasHead. */
+ Arena* fetchNextFreeArena(GCRuntime* gc);
+
+ private:
+ /* Search for a decommitted arena to allocate. */
+ unsigned findDecommittedArenaOffset();
+ Arena* fetchNextDecommittedArena();
+
+ void addArenaToFreeList(GCRuntime* gc, Arena* arena);
+ void addArenaToDecommittedList(const Arena* arena);
+
+ void updateChunkListAfterAlloc(GCRuntime* gc, const AutoLockGC& lock);
+ void updateChunkListAfterFree(GCRuntime* gc, const AutoLockGC& lock);
+};
+
+inline void Arena::checkAddress() const {
+ mozilla::DebugOnly<uintptr_t> addr = uintptr_t(this);
+ MOZ_ASSERT(addr);
+ MOZ_ASSERT(!(addr & ArenaMask));
+ MOZ_ASSERT(TenuredChunk::withinValidRange(addr));
+}
+
+inline TenuredChunk* Arena::chunk() const {
+ return TenuredChunk::fromAddress(address());
+}
+
+inline bool InFreeList(Arena* arena, void* thing) {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(thing);
+ MOZ_ASSERT(Arena::isAligned(addr, arena->getThingSize()));
+ return arena->inFreeList(addr);
+}
+
+static const int32_t ChunkStoreBufferOffsetFromLastByte =
+ int32_t(gc::ChunkStoreBufferOffset) - int32_t(gc::ChunkMask);
+
+// Cell header stored before all nursery cells.
+struct alignas(gc::CellAlignBytes) NurseryCellHeader {
+ // Store zone pointer with the trace kind in the lowest three bits.
+ const uintptr_t zoneAndTraceKind;
+
+ // We only need to store a subset of trace kinds so this doesn't cover the
+ // full range.
+ static const uintptr_t TraceKindMask = 3;
+
+ static uintptr_t MakeValue(JS::Zone* const zone, JS::TraceKind kind) {
+ MOZ_ASSERT(uintptr_t(kind) < TraceKindMask);
+ MOZ_ASSERT((uintptr_t(zone) & TraceKindMask) == 0);
+ return uintptr_t(zone) | uintptr_t(kind);
+ }
+
+ NurseryCellHeader(JS::Zone* const zone, JS::TraceKind kind)
+ : zoneAndTraceKind(MakeValue(zone, kind)) {}
+
+ JS::Zone* zone() const {
+ return reinterpret_cast<JS::Zone*>(zoneAndTraceKind & ~TraceKindMask);
+ }
+
+ JS::TraceKind traceKind() const {
+ return JS::TraceKind(zoneAndTraceKind & TraceKindMask);
+ }
+
+ static const NurseryCellHeader* from(const Cell* cell) {
+ MOZ_ASSERT(IsInsideNursery(cell));
+ return reinterpret_cast<const NurseryCellHeader*>(
+ uintptr_t(cell) - sizeof(NurseryCellHeader));
+ }
+};
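+// The packing above relies on JS::Zone pointers having their low two bits
+// clear (asserted in MakeValue), so those bits are free to hold the trace
+// kind; the static_asserts below check that the stored kinds fit in
+// TraceKindMask.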
+
+static_assert(uintptr_t(JS::TraceKind::Object) <=
+ NurseryCellHeader::TraceKindMask);
+static_assert(uintptr_t(JS::TraceKind::String) <=
+ NurseryCellHeader::TraceKindMask);
+static_assert(uintptr_t(JS::TraceKind::BigInt) <=
+ NurseryCellHeader::TraceKindMask);
+
+} /* namespace gc */
+
+namespace debug {
+
+// Utility functions meant to be called from an interactive debugger.
+enum class MarkInfo : int {
+ BLACK = 0,
+ GRAY = 1,
+ UNMARKED = -1,
+ NURSERY = -2,
+};
+
+// Get the mark color for a cell, in a way easily usable from a debugger.
+MOZ_NEVER_INLINE MarkInfo GetMarkInfo(js::gc::Cell* cell);
+
+// Sample usage from gdb:
+//
+// (gdb) p $word = js::debug::GetMarkWordAddress(obj)
+// $1 = (uintptr_t *) 0x7fa56d5fe360
+// (gdb) p/x $mask = js::debug::GetMarkMask(obj, js::gc::GRAY)
+// $2 = 0x200000000
+// (gdb) watch *$word
+// Hardware watchpoint 7: *$word
+// (gdb) cond 7 *$word & $mask
+// (gdb) cont
+//
+// Note that this is *not* a watchpoint on a single bit. It is a watchpoint on
+// the whole word, which will trigger whenever the word changes and the
+// selected bit is set after the change.
+//
+// So if the bit changing is the desired one, this is exactly what you want.
+// But if a different bit changes (either set or cleared), you may still stop
+// execution if the $mask bit happened to already be set. gdb does not expose
+// enough information to restrict the watchpoint to just a single bit.
+
+// Return the address of the word containing the mark bits for the given cell,
+// or nullptr if the cell is in the nursery.
+MOZ_NEVER_INLINE uintptr_t* GetMarkWordAddress(js::gc::Cell* cell);
+
+// Return the mask for the given cell and color bit, or 0 if the cell is in the
+// nursery.
+MOZ_NEVER_INLINE uintptr_t GetMarkMask(js::gc::Cell* cell, uint32_t colorBit);
+
+} /* namespace debug */
+} /* namespace js */
+
+#endif /* gc_Heap_h */
diff --git a/js/src/gc/IteratorUtils.h b/js/src/gc/IteratorUtils.h
new file mode 100644
index 0000000000..614fd12100
--- /dev/null
+++ b/js/src/gc/IteratorUtils.h
@@ -0,0 +1,121 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_IteratorUtils_h
+#define gc_IteratorUtils_h
+
+#include "mozilla/Array.h"
+#include "mozilla/Maybe.h"
+
+#include <initializer_list>
+
+namespace js {
+
+/*
+ * Create an iterator that yields the values from IteratorB(a) for all a in
+ * IteratorA(). Equivalent to nested for loops over IteratorA and IteratorB
+ * where IteratorB is constructed with a value from IteratorA.
+ */
+template <typename IteratorA, typename IteratorB>
+class NestedIterator {
+ using T = decltype(std::declval<IteratorB>().get());
+
+ IteratorA a;
+ mozilla::Maybe<IteratorB> b;
+
+ public:
+ template <typename... Args>
+ explicit NestedIterator(Args&&... args) : a(std::forward<Args>(args)...) {
+ settle();
+ }
+
+ bool done() const { return b.isNothing(); }
+
+ T get() const {
+ MOZ_ASSERT(!done());
+ return b.ref().get();
+ }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ b->next();
+ if (b->done()) {
+ b.reset();
+ a.next();
+ settle();
+ }
+ }
+
+ const IteratorB& ref() const { return *b; }
+
+ operator T() const { return get(); }
+
+ T operator->() const { return get(); }
+
+ private:
+ void settle() {
+ MOZ_ASSERT(b.isNothing());
+ while (!a.done()) {
+ b.emplace(a.get());
+ if (!b->done()) {
+ break;
+ }
+ b.reset();
+ a.next();
+ }
+ }
+};
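+/*
+ * A rough usage sketch (ZonesIter and ArenasInZoneIter are hypothetical
+ * iterator types providing done(), get() and next()):
+ *
+ *   for (NestedIterator<ZonesIter, ArenasInZoneIter> iter(rt); !iter.done();
+ *        iter.next()) {
+ *     process(iter.get());
+ *   }
+ *
+ * This visits every arena of every zone, skipping zones with no arenas.
+ */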
+
+/*
+ * An iterator that yields values from each of N instances of Iterator in
+ * sequence.
+ */
+template <typename Iterator, size_t N>
+class ChainedIterator {
+ using T = decltype(std::declval<Iterator>().get());
+
+ mozilla::Array<Iterator, N> iterators;
+ size_t index = 0;
+
+ public:
+ template <typename... Args>
+ MOZ_IMPLICIT ChainedIterator(Args&&... args)
+ : iterators(Iterator(std::forward<Args>(args))...) {
+ static_assert(N > 1);
+ settle();
+ }
+
+ bool done() const { return index == N; }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ iterators[index].next();
+ settle();
+ }
+
+ T get() const {
+ MOZ_ASSERT(!done());
+ return iterators[index].get();
+ }
+
+ operator T() const { return get(); }
+ T operator->() const { return get(); }
+
+ private:
+ void settle() {
+ MOZ_ASSERT(!done());
+ while (iterators[index].done()) {
+ index++;
+ if (done()) {
+ break;
+ }
+ }
+ }
+};
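+/*
+ * A rough usage sketch (ArenaIter is a hypothetical iterator type): with
+ * N = 2 the constructor takes one argument per underlying iterator, e.g.
+ * ChainedIterator<ArenaIter, 2> iter(listA, listB), and get()/next() yield
+ * every value of the first iterator followed by every value of the second.
+ */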
+
+} /* namespace js */
+
+#endif // gc_IteratorUtils_h
diff --git a/js/src/gc/Marking-inl.h b/js/src/gc/Marking-inl.h
new file mode 100644
index 0000000000..e8ba1d13b3
--- /dev/null
+++ b/js/src/gc/Marking-inl.h
@@ -0,0 +1,162 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Marking_inl_h
+#define gc_Marking_inl_h
+
+#include "gc/Marking.h"
+
+#include "mozilla/Maybe.h"
+
+#include <type_traits>
+
+#include "gc/RelocationOverlay.h"
+#include "js/Id.h"
+#include "js/Value.h"
+#include "vm/TaggedProto.h"
+
+#include "gc/Nursery-inl.h"
+
+namespace js {
+namespace gc {
+
+// An abstraction to re-wrap any kind of typed pointer back to the tagged
+// pointer it came from with |TaggedPtr<TargetType>::wrap(sourcePtr)|.
+template <typename T>
+struct TaggedPtr {};
+
+template <>
+struct TaggedPtr<JS::Value> {
+ static JS::Value wrap(JSObject* obj) { return JS::ObjectOrNullValue(obj); }
+ static JS::Value wrap(JSString* str) { return JS::StringValue(str); }
+ static JS::Value wrap(JS::Symbol* sym) { return JS::SymbolValue(sym); }
+ static JS::Value wrap(JS::BigInt* bi) { return JS::BigIntValue(bi); }
+ template <typename T>
+ static JS::Value wrap(T* priv) {
+ static_assert(std::is_base_of_v<Cell, T>,
+ "Type must be a GC thing derived from js::gc::Cell");
+ return JS::PrivateGCThingValue(priv);
+ }
+ static JS::Value empty() { return JS::UndefinedValue(); }
+};
+
+template <>
+struct TaggedPtr<jsid> {
+ static jsid wrap(JSString* str) {
+ return JS::PropertyKey::fromNonIntAtom(str);
+ }
+ static jsid wrap(JS::Symbol* sym) { return SYMBOL_TO_JSID(sym); }
+ static jsid empty() { return JSID_VOID; }
+};
+
+template <>
+struct TaggedPtr<TaggedProto> {
+ static TaggedProto wrap(JSObject* obj) { return TaggedProto(obj); }
+ static TaggedProto empty() { return TaggedProto(); }
+};
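+// Usage sketch: given a JSObject* |obj| extracted from a JS::Value edge,
+// TaggedPtr<JS::Value>::wrap(obj) rebuilds the Value form (here an object or
+// null value), and TaggedPtr<JS::Value>::empty() gives the tagged equivalent
+// of a cleared edge.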
+
+template <typename T>
+struct MightBeForwarded {
+ static_assert(std::is_base_of_v<Cell, T>);
+ static_assert(!std::is_same_v<Cell, T> && !std::is_same_v<TenuredCell, T>);
+
+#define CAN_FORWARD_KIND_OR(_1, _2, Type, _3, _4, _5, canCompact) \
+ (std::is_base_of_v<Type, T> && canCompact) ||
+
+ static constexpr bool value = FOR_EACH_ALLOCKIND(CAN_FORWARD_KIND_OR) true;
+#undef CAN_FORWARD_KIND_OR
+};
+
+template <typename T>
+inline bool IsForwarded(const T* t) {
+ if (!MightBeForwarded<T>::value) {
+ MOZ_ASSERT(!t->isForwarded());
+ return false;
+ }
+
+ return t->isForwarded();
+}
+
+template <typename T>
+inline T* Forwarded(const T* t) {
+ const RelocationOverlay* overlay = RelocationOverlay::fromCell(t);
+ MOZ_ASSERT(overlay->isForwarded());
+ return reinterpret_cast<T*>(overlay->forwardingAddress());
+}
+
+template <typename T>
+inline T MaybeForwarded(T t) {
+ if (IsForwarded(t)) {
+ t = Forwarded(t);
+ }
+ return t;
+}
+
+inline const JSClass* MaybeForwardedObjectClass(const JSObject* obj) {
+ return MaybeForwarded(obj->group())->clasp();
+}
+
+template <typename T>
+inline bool MaybeForwardedObjectIs(JSObject* obj) {
+ MOZ_ASSERT(!obj->isForwarded());
+ return MaybeForwardedObjectClass(obj) == &T::class_;
+}
+
+template <typename T>
+inline T& MaybeForwardedObjectAs(JSObject* obj) {
+ MOZ_ASSERT(MaybeForwardedObjectIs<T>(obj));
+ return *static_cast<T*>(obj);
+}
+
+inline RelocationOverlay::RelocationOverlay(Cell* dst) {
+ MOZ_ASSERT(dst->flags() == 0);
+ uintptr_t ptr = uintptr_t(dst);
+ MOZ_ASSERT((ptr & RESERVED_MASK) == 0);
+ header_ = ptr | FORWARD_BIT;
+}
+
+/* static */
+inline RelocationOverlay* RelocationOverlay::forwardCell(Cell* src, Cell* dst) {
+ MOZ_ASSERT(!src->isForwarded());
+ MOZ_ASSERT(!dst->isForwarded());
+ return new (src) RelocationOverlay(dst);
+}
+
+inline bool IsAboutToBeFinalizedDuringMinorSweep(Cell** cellp) {
+ MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());
+
+ if ((*cellp)->isTenured()) {
+ return false;
+ }
+
+ return !Nursery::getForwardedPointer(cellp);
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+
+template <typename T>
+inline bool IsGCThingValidAfterMovingGC(T* t) {
+ return !IsInsideNursery(t) && !t->isForwarded();
+}
+
+template <typename T>
+inline void CheckGCThingAfterMovingGC(T* t) {
+ if (t) {
+ MOZ_RELEASE_ASSERT(IsGCThingValidAfterMovingGC(t));
+ }
+}
+
+template <typename T>
+inline void CheckGCThingAfterMovingGC(const WeakHeapPtr<T*>& t) {
+ CheckGCThingAfterMovingGC(t.unbarrieredGet());
+}
+
+#endif // JSGC_HASH_TABLE_CHECKS
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif // gc_Marking_inl_h
diff --git a/js/src/gc/Marking.cpp b/js/src/gc/Marking.cpp
new file mode 100644
index 0000000000..4d7ff8e218
--- /dev/null
+++ b/js/src/gc/Marking.cpp
@@ -0,0 +1,4116 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Marking-inl.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/IntegerRange.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/ReentrancyGuard.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/Unused.h"
+
+#include <algorithm>
+#include <initializer_list>
+#include <type_traits>
+
+#include "jsfriendapi.h"
+
+#include "builtin/ModuleObject.h"
+#include "debugger/DebugAPI.h"
+#include "gc/GCInternals.h"
+#include "gc/GCProbes.h"
+#include "gc/Policy.h"
+#include "jit/JitCode.h"
+#include "js/friend/DumpFunctions.h" // js::DumpObject
+#include "js/GCTypeMacros.h" // JS_FOR_EACH_PUBLIC_{,TAGGED_}GC_POINTER_TYPE
+#include "js/SliceBudget.h"
+#include "util/DiagnosticAssertions.h"
+#include "util/Memory.h"
+#include "util/Poison.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/ArrayObject.h"
+#include "vm/BigIntType.h"
+#include "vm/GeneratorObject.h"
+#include "vm/RegExpShared.h"
+#include "vm/Scope.h"
+#include "vm/Shape.h"
+#include "vm/SymbolType.h"
+#include "vm/TypedArrayObject.h"
+#include "wasm/WasmJS.h"
+
+#include "gc/GC-inl.h"
+#include "gc/Nursery-inl.h"
+#include "gc/PrivateIterators-inl.h"
+#include "gc/WeakMap-inl.h"
+#include "gc/Zone-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+#include "vm/JSScript-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/PlainObject-inl.h" // js::PlainObject
+#include "vm/Realm-inl.h"
+#include "vm/StringType-inl.h"
+
+#define MAX_DEDUPLICATABLE_STRING_LENGTH 500
+
+using namespace js;
+using namespace js::gc;
+
+using JS::MapTypeToTraceKind;
+
+using mozilla::DebugOnly;
+using mozilla::IntegerRange;
+using mozilla::PodCopy;
+
+// [SMDOC] GC Tracing
+//
+// Tracing Overview
+// ================
+//
+// Tracing, in this context, refers to an abstract visitation of some or all of
+// the GC-controlled heap. The effect of tracing an edge of the graph depends
+// on the subclass of the JSTracer on whose behalf we are tracing.
+//
+// Marking
+// -------
+//
+// The primary JSTracer is the GCMarker. The marking tracer causes the target
+// of each traversed edge to be marked black and the target edge's children to
+// be marked either gray (in the gc algorithm sense) or immediately black.
+//
+// Callback
+// --------
+//
+// The secondary JSTracer is the CallbackTracer. This simply invokes a callback
+// on each edge in a child.
+//
+// The following is a rough outline of the general structure of the tracing
+// internals.
+//
+/* clang-format off */
+//
+// +----------------------+ ...................
+// | | : :
+// | v v :
+// | TraceRoot TraceEdge TraceRange GCMarker:: :
+// | | | | processMarkStackTop +---+---+
+// | +-----------+-----------+ | | |
+// | | | | Mark |
+// | v | | Stack |
+// | TraceEdgeInternal | | |
+// | | | +---+---+
+// | | | ^
+// | +------------+---------------+ +<----------+ :
+// | | | | | :
+// | v v v | :
+// | DoCallback DoMarking traverseEdge | :
+// | | | | | :
+// | | +------+------+ | :
+// | | | | :
+// | v v | :
+// | CallbackTracer:: GCMarker::traverse | :
+// | onSomeEdge | | :
+// | | | :
+// | +-------------------+-----------+------+ | :
+// | | | | | :
+// | v v v | :
+// | markAndTraceChildren markAndPush eagerlyMarkChildren | :
+// | | : | | :
+// | v : +-----------+ :
+// | T::traceChildren : :
+// | | : :
+// +-------------+ ......................................
+//
+// Legend:
+// ------- Direct calls
+// ....... Data flow
+//
+/* clang-format on */
+
+/*** Tracing Invariants *****************************************************/
+
+#if defined(DEBUG)
+template <typename T>
+static inline bool IsThingPoisoned(T* thing) {
+ const uint8_t poisonBytes[] = {
+ JS_FRESH_NURSERY_PATTERN, JS_SWEPT_NURSERY_PATTERN,
+ JS_ALLOCATED_NURSERY_PATTERN, JS_FRESH_TENURED_PATTERN,
+ JS_MOVED_TENURED_PATTERN, JS_SWEPT_TENURED_PATTERN,
+ JS_ALLOCATED_TENURED_PATTERN, JS_FREED_HEAP_PTR_PATTERN,
+ JS_FREED_CHUNK_PATTERN, JS_FREED_ARENA_PATTERN,
+ JS_SWEPT_TI_PATTERN, JS_SWEPT_CODE_PATTERN,
+ JS_RESET_VALUE_PATTERN, JS_POISONED_JSSCRIPT_DATA_PATTERN,
+ JS_OOB_PARSE_NODE_PATTERN, JS_LIFO_UNDEFINED_PATTERN,
+ JS_LIFO_UNINITIALIZED_PATTERN,
+ };
+ const int numPoisonBytes = sizeof(poisonBytes) / sizeof(poisonBytes[0]);
+ uint32_t* p =
+ reinterpret_cast<uint32_t*>(reinterpret_cast<FreeSpan*>(thing) + 1);
+ // Note: all free patterns are odd to make the common, not-poisoned case a
+ // single test.
+ if ((*p & 1) == 0) {
+ return false;
+ }
+ for (int i = 0; i < numPoisonBytes; ++i) {
+ const uint8_t pb = poisonBytes[i];
+ const uint32_t pw = pb | (pb << 8) | (pb << 16) | (pb << 24);
+ if (*p == pw) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
+template <typename T>
+static inline bool IsOwnedByOtherRuntime(JSRuntime* rt, T thing) {
+ bool other = thing->runtimeFromAnyThread() != rt;
+ MOZ_ASSERT_IF(other, thing->isPermanentAndMayBeShared() ||
+ thing->zoneFromAnyThread()->isSelfHostingZone());
+ return other;
+}
+
+#ifdef DEBUG
+
+template <typename T>
+void js::CheckTracedThing(JSTracer* trc, T* thing) {
+ MOZ_ASSERT(trc);
+ MOZ_ASSERT(thing);
+
+ if (IsForwarded(thing)) {
+ MOZ_ASSERT(IsTracerKind(trc, JS::TracerKind::Moving) ||
+ trc->isTenuringTracer());
+ thing = Forwarded(thing);
+ }
+
+ /* This function uses data that's not available in the nursery. */
+ if (IsInsideNursery(thing)) {
+ return;
+ }
+
+ /*
+ * Permanent atoms and things in the self-hosting zone are not associated
+ * with this runtime, but will be ignored during marking.
+ */
+ if (IsOwnedByOtherRuntime(trc->runtime(), thing)) {
+ return;
+ }
+
+ Zone* zone = thing->zoneFromAnyThread();
+ JSRuntime* rt = trc->runtime();
+ MOZ_ASSERT(zone->runtimeFromAnyThread() == rt);
+
+ bool isGcMarkingTracer = trc->isMarkingTracer();
+ bool isUnmarkGrayTracer = IsTracerKind(trc, JS::TracerKind::UnmarkGray);
+ bool isClearEdgesTracer = IsTracerKind(trc, JS::TracerKind::ClearEdges);
+
+ if (TlsContext.get()->isMainThreadContext()) {
+ // If we're on the main thread we must have access to the runtime and zone.
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ MOZ_ASSERT(CurrentThreadCanAccessZone(zone));
+ } else {
+ MOZ_ASSERT(isGcMarkingTracer || isUnmarkGrayTracer || isClearEdgesTracer ||
+ IsTracerKind(trc, JS::TracerKind::Moving) ||
+ IsTracerKind(trc, JS::TracerKind::GrayBuffering) ||
+ IsTracerKind(trc, JS::TracerKind::Sweeping));
+ MOZ_ASSERT_IF(!isClearEdgesTracer, CurrentThreadIsPerformingGC());
+ }
+
+ MOZ_ASSERT(thing->isAligned());
+ MOZ_ASSERT(MapTypeToTraceKind<std::remove_pointer_t<T>>::kind ==
+ thing->getTraceKind());
+
+ if (isGcMarkingTracer) {
+ GCMarker* gcMarker = GCMarker::fromTracer(trc);
+ MOZ_ASSERT(zone->shouldMarkInZone());
+
+ MOZ_ASSERT_IF(gcMarker->shouldCheckCompartments(),
+ zone->isCollectingFromAnyThread() || zone->isAtomsZone());
+
+ MOZ_ASSERT_IF(gcMarker->markColor() == MarkColor::Gray,
+ !zone->isGCMarkingBlackOnly() || zone->isAtomsZone());
+
+ MOZ_ASSERT(!(zone->isGCSweeping() || zone->isGCFinished() ||
+ zone->isGCCompacting()));
+
+ // Check that we don't stray from the current compartment and zone without
+ // using TraceCrossCompartmentEdge.
+ Compartment* comp = thing->maybeCompartment();
+ MOZ_ASSERT_IF(gcMarker->tracingCompartment && comp,
+ gcMarker->tracingCompartment == comp);
+ MOZ_ASSERT_IF(gcMarker->tracingZone,
+ gcMarker->tracingZone == zone || zone->isAtomsZone());
+ }
+
+ /*
+ * Try to assert that the thing is allocated.
+ *
+ * We would like to assert that the thing is not in the free list, but this
+ * check is very slow. Instead we check whether the thing has been poisoned:
+ * if it has not then we assume it is allocated, but if it has then it is
+ * either free or uninitialized in which case we check the free list.
+ *
+   * A further complication is that background sweeping may be running and
+   * concurrently modifying the free list. Also, tracing is done off thread
+   * during compacting GC, so reading the contents of the thing in
+   * IsThingPoisoned would be racy in that case.
+ */
+ MOZ_ASSERT_IF(JS::RuntimeHeapIsBusy() && !zone->isGCSweeping() &&
+ !zone->isGCFinished() && !zone->isGCCompacting(),
+ !IsThingPoisoned(thing) ||
+ !InFreeList(thing->asTenured().arena(), thing));
+}
+
+template <typename T>
+void js::CheckTracedThing(JSTracer* trc, const T& thing) {
+ ApplyGCThingTyped(thing, [trc](auto t) { CheckTracedThing(trc, t); });
+}
+
+namespace js {
+# define IMPL_CHECK_TRACED_THING(_, type, _1, _2) \
+ template void CheckTracedThing<type>(JSTracer*, type*);
+JS_FOR_EACH_TRACEKIND(IMPL_CHECK_TRACED_THING);
+# undef IMPL_CHECK_TRACED_THING
+} // namespace js
+
+#endif
+
+static inline bool ShouldMarkCrossCompartment(GCMarker* marker, JSObject* src,
+ Cell* dstCell) {
+ MarkColor color = marker->markColor();
+
+ if (!dstCell->isTenured()) {
+ MOZ_ASSERT(color == MarkColor::Black);
+ return false;
+ }
+ TenuredCell& dst = dstCell->asTenured();
+
+ JS::Zone* dstZone = dst.zone();
+ if (!src->zone()->isGCMarking() && !dstZone->isGCMarking()) {
+ return false;
+ }
+
+ if (color == MarkColor::Black) {
+ // Check our sweep groups are correct: we should never have to
+ // mark something in a zone that we have started sweeping.
+ MOZ_ASSERT_IF(!dst.isMarkedBlack(), !dstZone->isGCSweeping());
+
+ /*
+ * Having black->gray edges violates our promise to the cycle collector so
+ * we ensure that gray things we encounter when marking black end up getting
+ * marked black.
+ *
+ * This can happen for two reasons:
+ *
+ * 1) If we're collecting a compartment and it has an edge to an uncollected
+ * compartment it's possible that the source and destination of the
+ * cross-compartment edge should be gray, but the source was marked black by
+ * the write barrier.
+ *
+ * 2) If we yield during gray marking and the write barrier marks a gray
+ * thing black.
+ *
+ * We handle the first case before returning whereas the second case happens
+ * as part of normal marking.
+ */
+ if (dst.isMarkedGray() && !dstZone->isGCMarking()) {
+ UnmarkGrayGCThingUnchecked(marker->runtime(),
+ JS::GCCellPtr(&dst, dst.getTraceKind()));
+ return false;
+ }
+
+ return dstZone->isGCMarking();
+ } else {
+ // Check our sweep groups are correct as above.
+ MOZ_ASSERT_IF(!dst.isMarkedAny(), !dstZone->isGCSweeping());
+
+ if (dstZone->isGCMarkingBlackOnly()) {
+ /*
+       * The destination compartment is not being marked gray now, but it
+       * will be later, so record the cell so it can be marked gray at the
+       * appropriate time.
+ */
+ if (!dst.isMarkedAny()) {
+ DelayCrossCompartmentGrayMarking(src);
+ }
+ return false;
+ }
+
+ return dstZone->isGCMarkingBlackAndGray();
+ }
+}
+
+static bool ShouldTraceCrossCompartment(JSTracer* trc, JSObject* src,
+ Cell* dstCell) {
+ if (!trc->isMarkingTracer()) {
+ return true;
+ }
+
+ return ShouldMarkCrossCompartment(GCMarker::fromTracer(trc), src, dstCell);
+}
+
+static bool ShouldTraceCrossCompartment(JSTracer* trc, JSObject* src,
+ const Value& val) {
+ return val.isGCThing() &&
+ ShouldTraceCrossCompartment(trc, src, val.toGCThing());
+}
+
+static void AssertShouldMarkInZone(Cell* thing) {
+ MOZ_ASSERT(thing->asTenured().zone()->shouldMarkInZone());
+}
+
+static void AssertShouldMarkInZone(JSString* str) {
+#ifdef DEBUG
+ Zone* zone = str->zone();
+ MOZ_ASSERT(zone->shouldMarkInZone() || zone->isAtomsZone());
+#endif
+}
+
+static void AssertShouldMarkInZone(JS::Symbol* sym) {
+#ifdef DEBUG
+ Zone* zone = sym->asTenured().zone();
+ MOZ_ASSERT(zone->shouldMarkInZone() || zone->isAtomsZone());
+#endif
+}
+
+#ifdef DEBUG
+void js::gc::AssertRootMarkingPhase(JSTracer* trc) {
+ MOZ_ASSERT_IF(trc->isMarkingTracer(),
+ trc->runtime()->gc.state() == State::NotActive ||
+ trc->runtime()->gc.state() == State::MarkRoots);
+}
+#endif
+
+/*** Tracing Interface ******************************************************/
+
+template <typename T>
+bool DoCallback(GenericTracer* trc, T** thingp, const char* name);
+template <typename T>
+bool DoCallback(GenericTracer* trc, T* thingp, const char* name);
+template <typename T>
+void DoMarking(GCMarker* gcmarker, T* thing);
+template <typename T>
+void DoMarking(GCMarker* gcmarker, const T& thing);
+
+template <typename T>
+static void TraceExternalEdgeHelper(JSTracer* trc, T* thingp,
+ const char* name) {
+ MOZ_ASSERT(InternalBarrierMethods<T>::isMarkable(*thingp));
+ TraceEdgeInternal(trc, ConvertToBase(thingp), name);
+}
+
+JS_PUBLIC_API void js::UnsafeTraceManuallyBarrieredEdge(JSTracer* trc,
+ JSObject** thingp,
+ const char* name) {
+ TraceEdgeInternal(trc, ConvertToBase(thingp), name);
+}
+
+template <typename T>
+static void UnsafeTraceRootHelper(JSTracer* trc, T* thingp, const char* name) {
+ MOZ_ASSERT(thingp);
+ js::TraceNullableRoot(trc, thingp, name);
+}
+
+namespace js {
+class AbstractGeneratorObject;
+class SavedFrame;
+} // namespace js
+
+#define DEFINE_TRACE_EXTERNAL_EDGE_FUNCTION(type) \
+ JS_PUBLIC_API void js::gc::TraceExternalEdge(JSTracer* trc, type* thingp, \
+ const char* name) { \
+ TraceExternalEdgeHelper(trc, thingp, name); \
+ }
+
+// Define TraceExternalEdge for each public GC pointer type.
+JS_FOR_EACH_PUBLIC_GC_POINTER_TYPE(DEFINE_TRACE_EXTERNAL_EDGE_FUNCTION)
+JS_FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(DEFINE_TRACE_EXTERNAL_EDGE_FUNCTION)
+
+#undef DEFINE_TRACE_EXTERNAL_EDGE_FUNCTION
+
+#define DEFINE_UNSAFE_TRACE_ROOT_FUNCTION(type) \
+ JS_PUBLIC_API void JS::UnsafeTraceRoot(JSTracer* trc, type* thingp, \
+ const char* name) { \
+ UnsafeTraceRootHelper(trc, thingp, name); \
+ }
+
+// Define UnsafeTraceRoot for each public GC pointer type.
+JS_FOR_EACH_PUBLIC_GC_POINTER_TYPE(DEFINE_UNSAFE_TRACE_ROOT_FUNCTION)
+JS_FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(DEFINE_UNSAFE_TRACE_ROOT_FUNCTION)
+
+// Also, for the moment, define UnsafeTraceRoot for internal GC pointer types.
+DEFINE_UNSAFE_TRACE_ROOT_FUNCTION(AbstractGeneratorObject*)
+DEFINE_UNSAFE_TRACE_ROOT_FUNCTION(SavedFrame*)
+
+#undef DEFINE_UNSAFE_TRACE_ROOT_FUNCTION
+
+namespace js {
+namespace gc {
+
+#define INSTANTIATE_INTERNAL_TRACE_FUNCTIONS(type) \
+ template bool TraceEdgeInternal<type>(JSTracer*, type*, const char*); \
+ template void TraceRangeInternal<type>(JSTracer*, size_t len, type*, \
+ const char*);
+
+#define INSTANTIATE_INTERNAL_TRACE_FUNCTIONS_FROM_TRACEKIND(_1, type, _2, _3) \
+ INSTANTIATE_INTERNAL_TRACE_FUNCTIONS(type*)
+
+JS_FOR_EACH_TRACEKIND(INSTANTIATE_INTERNAL_TRACE_FUNCTIONS_FROM_TRACEKIND)
+JS_FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(INSTANTIATE_INTERNAL_TRACE_FUNCTIONS)
+
+#undef INSTANTIATE_INTERNAL_TRACE_FUNCTIONS_FROM_TRACEKIND
+#undef INSTANTIATE_INTERNAL_TRACE_FUNCTIONS
+
+} // namespace gc
+} // namespace js
+
+// In debug builds, makes a note of the current compartment before calling a
+// trace hook or traceChildren() method on a GC thing.
+class MOZ_RAII AutoSetTracingSource {
+#ifdef DEBUG
+ GCMarker* marker = nullptr;
+#endif
+
+ public:
+ template <typename T>
+ AutoSetTracingSource(JSTracer* trc, T* thing) {
+#ifdef DEBUG
+ if (trc->isMarkingTracer() && thing) {
+ marker = GCMarker::fromTracer(trc);
+ MOZ_ASSERT(!marker->tracingZone);
+ marker->tracingZone = thing->asTenured().zone();
+ MOZ_ASSERT(!marker->tracingCompartment);
+ marker->tracingCompartment = thing->maybeCompartment();
+ }
+#endif
+ }
+
+ ~AutoSetTracingSource() {
+#ifdef DEBUG
+ if (marker) {
+ marker->tracingZone = nullptr;
+ marker->tracingCompartment = nullptr;
+ }
+#endif
+ }
+};
+
+// In debug builds, clear the trace hook compartment. This happens
+// after the trace hook has called back into one of our trace APIs and we've
+// checked the traced thing.
+class MOZ_RAII AutoClearTracingSource {
+#ifdef DEBUG
+ GCMarker* marker = nullptr;
+ JS::Zone* prevZone = nullptr;
+ Compartment* prevCompartment = nullptr;
+#endif
+
+ public:
+ explicit AutoClearTracingSource(JSTracer* trc) {
+#ifdef DEBUG
+ if (trc->isMarkingTracer()) {
+ marker = GCMarker::fromTracer(trc);
+ prevZone = marker->tracingZone;
+ marker->tracingZone = nullptr;
+ prevCompartment = marker->tracingCompartment;
+ marker->tracingCompartment = nullptr;
+ }
+#endif
+ }
+
+ ~AutoClearTracingSource() {
+#ifdef DEBUG
+ if (marker) {
+ marker->tracingZone = prevZone;
+ marker->tracingCompartment = prevCompartment;
+ }
+#endif
+ }
+};
+
+template <typename T>
+void js::TraceManuallyBarrieredCrossCompartmentEdge(JSTracer* trc,
+ JSObject* src, T* dst,
+ const char* name) {
+ // Clear expected compartment for cross-compartment edge.
+ AutoClearTracingSource acts(trc);
+
+ if (ShouldTraceCrossCompartment(trc, src, *dst)) {
+ TraceEdgeInternal(trc, dst, name);
+ }
+}
+template void js::TraceManuallyBarrieredCrossCompartmentEdge<Value>(
+ JSTracer*, JSObject*, Value*, const char*);
+template void js::TraceManuallyBarrieredCrossCompartmentEdge<JSObject*>(
+ JSTracer*, JSObject*, JSObject**, const char*);
+template void js::TraceManuallyBarrieredCrossCompartmentEdge<BaseScript*>(
+ JSTracer*, JSObject*, BaseScript**, const char*);
+
+template <typename T>
+void js::TraceWeakMapKeyEdgeInternal(JSTracer* trc, Zone* weakMapZone,
+ T** thingp, const char* name) {
+ // We can't use ShouldTraceCrossCompartment here because that assumes the
+ // source of the edge is a CCW object which could be used to delay gray
+ // marking. Instead, assert that the weak map zone is in the same marking
+ // state as the target thing's zone and therefore we can go ahead and mark it.
+#ifdef DEBUG
+ auto thing = *thingp;
+ if (trc->isMarkingTracer()) {
+ MOZ_ASSERT(weakMapZone->isGCMarking());
+ MOZ_ASSERT(weakMapZone->gcState() == thing->zone()->gcState());
+ }
+#endif
+
+ // Clear expected compartment for cross-compartment edge.
+ AutoClearTracingSource acts(trc);
+
+ TraceEdgeInternal(trc, thingp, name);
+}
+
+template void js::TraceWeakMapKeyEdgeInternal<JSObject>(JSTracer*, Zone*,
+ JSObject**,
+ const char*);
+template void js::TraceWeakMapKeyEdgeInternal<BaseScript>(JSTracer*, Zone*,
+ BaseScript**,
+ const char*);
+
+template <typename T>
+void js::TraceProcessGlobalRoot(JSTracer* trc, T* thing, const char* name) {
+ AssertRootMarkingPhase(trc);
+ MOZ_ASSERT(thing->isPermanentAndMayBeShared());
+
+ // We have to mark permanent atoms and well-known symbols through a special
+ // method because the default DoMarking implementation automatically skips
+ // them. Fortunately, atoms (permanent and non) cannot refer to other GC
+ // things so they do not need to go through the mark stack and may simply
+ // be marked directly. Moreover, well-known symbols can refer only to
+  // permanent atoms, so likewise require no subsequent marking.
+ CheckTracedThing(trc, *ConvertToBase(&thing));
+ AutoClearTracingSource acts(trc);
+ if (trc->isMarkingTracer()) {
+ thing->asTenured().markIfUnmarked(gc::MarkColor::Black);
+ } else {
+ DoCallback(trc->asCallbackTracer(), ConvertToBase(&thing), name);
+ }
+}
+template void js::TraceProcessGlobalRoot<JSAtom>(JSTracer*, JSAtom*,
+ const char*);
+template void js::TraceProcessGlobalRoot<JS::Symbol>(JSTracer*, JS::Symbol*,
+ const char*);
+
+static Cell* TraceGenericPointerRootAndType(JSTracer* trc, Cell* thing,
+ JS::TraceKind kind,
+ const char* name) {
+ return MapGCThingTyped(thing, kind, [trc, name](auto t) -> Cell* {
+ TraceRoot(trc, &t, name);
+ return t;
+ });
+}
+
+void js::TraceGenericPointerRoot(JSTracer* trc, Cell** thingp,
+ const char* name) {
+ MOZ_ASSERT(thingp);
+ Cell* thing = *thingp;
+ if (!thing) {
+ return;
+ }
+
+ Cell* traced =
+ TraceGenericPointerRootAndType(trc, thing, thing->getTraceKind(), name);
+ if (traced != thing) {
+ *thingp = traced;
+ }
+}
+
+void js::TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, Cell** thingp,
+ const char* name) {
+ MOZ_ASSERT(thingp);
+ Cell* thing = *thingp;
+ if (!*thingp) {
+ return;
+ }
+
+ auto traced = MapGCThingTyped(thing, thing->getTraceKind(),
+ [trc, name](auto t) -> Cell* {
+ TraceManuallyBarrieredEdge(trc, &t, name);
+ return t;
+ });
+ if (traced != thing) {
+ *thingp = traced;
+ }
+}
+
+void js::TraceGCCellPtrRoot(JSTracer* trc, JS::GCCellPtr* thingp,
+ const char* name) {
+ Cell* thing = thingp->asCell();
+ if (!thing) {
+ return;
+ }
+
+ Cell* traced =
+ TraceGenericPointerRootAndType(trc, thing, thingp->kind(), name);
+
+ if (!traced) {
+ *thingp = JS::GCCellPtr();
+ } else if (traced != thingp->asCell()) {
+ *thingp = JS::GCCellPtr(traced, thingp->kind());
+ }
+}
+
+// This method is responsible for dynamic dispatch to the real tracer
+// implementation. Consider replacing this choke point with virtual dispatch:
+// a sufficiently smart C++ compiler may be able to devirtualize some paths.
+template <typename T>
+bool js::gc::TraceEdgeInternal(JSTracer* trc, T* thingp, const char* name) {
+#define IS_SAME_TYPE_OR(name, type, _, _1) std::is_same_v<type*, T> ||
+ static_assert(JS_FOR_EACH_TRACEKIND(IS_SAME_TYPE_OR)
+ std::is_same_v<T, JS::Value> ||
+ std::is_same_v<T, jsid> || std::is_same_v<T, TaggedProto>,
+ "Only the base cell layout types are allowed into "
+ "marking/tracing internals");
+#undef IS_SAME_TYPE_OR
+
+ if (trc->isMarkingTracer()) {
+ DoMarking(GCMarker::fromTracer(trc), *thingp);
+ return true;
+ }
+
+ MOZ_ASSERT(trc->isGenericTracer());
+ return DoCallback(trc->asGenericTracer(), thingp, name);
+}
+
+template <typename T>
+void js::gc::TraceRangeInternal(JSTracer* trc, size_t len, T* vec,
+ const char* name) {
+ JS::AutoTracingIndex index(trc);
+ for (auto i : IntegerRange(len)) {
+ if (InternalBarrierMethods<T>::isMarkable(vec[i])) {
+ TraceEdgeInternal(trc, &vec[i], name);
+ }
+ ++index;
+ }
+}
+
+/*** GC Marking Interface ***************************************************/
+
+namespace js {
+
+using HasNoImplicitEdgesType = bool;
+
+template <typename T>
+struct ImplicitEdgeHolderType {
+ using Type = HasNoImplicitEdgesType;
+};
+
+// For now, we only handle JSObject* and BaseScript* keys, but the linear time
+// algorithm can be easily extended by adding in more types here, then making
+// GCMarker::traverse<T> call markImplicitEdges.
+template <>
+struct ImplicitEdgeHolderType<JSObject*> {
+ using Type = JSObject*;
+};
+
+template <>
+struct ImplicitEdgeHolderType<BaseScript*> {
+ using Type = BaseScript*;
+};
+
+void GCMarker::markEphemeronValues(gc::Cell* markedCell,
+ WeakEntryVector& values) {
+ DebugOnly<size_t> initialLen = values.length();
+
+ for (const auto& markable : values) {
+ markable.weakmap->markKey(this, markedCell, markable.key);
+ }
+
+ // The vector should not be appended to during iteration because the key is
+ // already marked, and even in cases where we have a multipart key, we
+ // should only be inserting entries for the unmarked portions.
+ MOZ_ASSERT(values.length() == initialLen);
+}
+
+void GCMarker::forgetWeakKey(js::gc::WeakKeyTable& weakKeys, WeakMapBase* map,
+ gc::Cell* keyOrDelegate, gc::Cell* keyToRemove) {
+ // Find and remove the exact pair <map,keyToRemove> from the values of the
+ // weak keys table.
+ //
+ // This function is called when 'keyToRemove' is removed from a weakmap
+ // 'map'. If 'keyToRemove' has a delegate, then the delegate will be used as
+ // the lookup key in gcWeakKeys; otherwise, 'keyToRemove' itself will be. In
+ // either case, 'keyToRemove' is what we will be filtering out of the
+ // Markable values in the weakKey table.
+ auto p = weakKeys.get(keyOrDelegate);
+
+ // Note that this is not guaranteed to find anything. The key will have
+ // only been inserted into the weakKeys table if it was unmarked when the
+ // map was traced.
+ if (p) {
+ // Entries should only have been added to weakKeys if the map was marked.
+ for (auto r = p->value.all(); !r.empty(); r.popFront()) {
+ MOZ_ASSERT(r.front().weakmap->mapColor);
+ }
+
+ p->value.eraseIfEqual(WeakMarkable(map, keyToRemove));
+ }
+}
+
+void GCMarker::forgetWeakMap(WeakMapBase* map, Zone* zone) {
+ for (auto table : {&zone->gcNurseryWeakKeys(), &zone->gcWeakKeys()}) {
+ for (auto p = table->all(); !p.empty(); p.popFront()) {
+ p.front().value.eraseIf([map](const WeakMarkable& markable) -> bool {
+ return markable.weakmap == map;
+ });
+ }
+ }
+}
+
+// 'delegate' is no longer the delegate of 'key'.
+void GCMarker::severWeakDelegate(JSObject* key, JSObject* delegate) {
+ JS::Zone* zone = delegate->zone();
+ if (!zone->needsIncrementalBarrier()) {
+ MOZ_ASSERT(!zone->gcWeakKeys(delegate).get(delegate),
+ "non-collecting zone should not have populated gcWeakKeys");
+ return;
+ }
+ auto p = zone->gcWeakKeys(delegate).get(delegate);
+ if (!p) {
+ return;
+ }
+
+ // Remove all <weakmap, key> pairs associated with this delegate and key, and
+ // call postSeverDelegate on each of the maps found to record the key
+ // instead.
+ //
+ // But be careful: if key and delegate are in different compartments but the
+ // same zone, then the same gcWeakKeys table will be mutated by both the
+ // eraseIf and the postSeverDelegate, so we cannot nest them.
+ js::Vector<WeakMapBase*, 10, SystemAllocPolicy> severedKeyMaps;
+ p->value.eraseIf(
+ [key, &severedKeyMaps](const WeakMarkable& markable) -> bool {
+ if (markable.key != key) {
+ return false;
+ }
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!severedKeyMaps.append(markable.weakmap)) {
+ oomUnsafe.crash("OOM while recording all weakmaps with severed key");
+ }
+ return true;
+ });
+
+ for (WeakMapBase* weakmap : severedKeyMaps) {
+ if (weakmap->zone()->needsIncrementalBarrier()) {
+ weakmap->postSeverDelegate(this, key);
+ }
+ }
+}
+
+// 'delegate' is now the delegate of 'key'. Update weakmap marking state.
+void GCMarker::restoreWeakDelegate(JSObject* key, JSObject* delegate) {
+ if (!key->zone()->needsIncrementalBarrier() ||
+ !delegate->zone()->needsIncrementalBarrier()) {
+ MOZ_ASSERT(!key->zone()->gcWeakKeys(key).get(key),
+ "non-collecting zone should not have populated gcWeakKeys");
+ return;
+ }
+ auto p = key->zone()->gcWeakKeys(key).get(key);
+ if (!p) {
+ return;
+ }
+
+ js::Vector<WeakMapBase*, 10, SystemAllocPolicy> maps;
+ p->value.eraseIf([key, &maps](const WeakMarkable& markable) -> bool {
+ if (markable.key != key) {
+ return false;
+ }
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!maps.append(markable.weakmap)) {
+ oomUnsafe.crash("OOM while recording all weakmaps with severed key");
+ }
+ return true;
+ });
+
+ for (WeakMapBase* weakmap : maps) {
+ if (weakmap->zone()->needsIncrementalBarrier()) {
+ weakmap->postRestoreDelegate(this, key, delegate);
+ }
+ }
+}
+
+template <typename T>
+void GCMarker::markImplicitEdgesHelper(T markedThing) {
+ if (!isWeakMarking()) {
+ return;
+ }
+
+ Zone* zone = markedThing->asTenured().zone();
+ MOZ_ASSERT(zone->isGCMarking());
+ MOZ_ASSERT(!zone->isGCSweeping());
+
+ auto p = zone->gcWeakKeys().get(markedThing);
+ if (!p) {
+ return;
+ }
+ WeakEntryVector& markables = p->value;
+
+ // markedThing might be a key in a debugger weakmap, which can end up marking
+ // values that are in a different compartment.
+ AutoClearTracingSource acts(this);
+
+ markEphemeronValues(markedThing, markables);
+ markables.clear(); // If key address is reused, it should do nothing
+}
+
+template <>
+void GCMarker::markImplicitEdgesHelper(HasNoImplicitEdgesType) {}
+
+template <typename T>
+void GCMarker::markImplicitEdges(T* thing) {
+ markImplicitEdgesHelper<typename ImplicitEdgeHolderType<T*>::Type>(thing);
+}
+
+template void GCMarker::markImplicitEdges(JSObject*);
+template void GCMarker::markImplicitEdges(BaseScript*);
+
+} // namespace js
+
+template <typename T>
+static inline bool ShouldMark(GCMarker* gcmarker, T* thing) {
+ // Don't trace things that are owned by another runtime.
+ if (IsOwnedByOtherRuntime(gcmarker->runtime(), thing)) {
+ return false;
+ }
+
+ // We may encounter nursery things during normal marking since we don't
+ // collect the nursery at the start of every GC slice.
+ if (!thing->isTenured()) {
+ return false;
+ }
+
+ // Don't mark things outside a zone if we are in a per-zone GC.
+ return thing->asTenured().zone()->shouldMarkInZone();
+}
+
+template <typename T>
+void DoMarking(GCMarker* gcmarker, T* thing) {
+ // Do per-type marking precondition checks.
+ if (!ShouldMark(gcmarker, thing)) {
+ MOZ_ASSERT(gc::detail::GetEffectiveColor(gcmarker->runtime(), thing) ==
+ js::gc::CellColor::Black);
+ return;
+ }
+
+ CheckTracedThing(gcmarker, thing);
+ AutoClearTracingSource acts(gcmarker);
+ gcmarker->traverse(thing);
+
+ // Mark the compartment as live.
+ SetMaybeAliveFlag(thing);
+}
+
+template <typename T>
+void DoMarking(GCMarker* gcmarker, const T& thing) {
+ ApplyGCThingTyped(thing, [gcmarker](auto t) { DoMarking(gcmarker, t); });
+}
+
+JS_PUBLIC_API void js::gc::PerformIncrementalReadBarrier(JS::GCCellPtr thing) {
+ // Optimized marking for read barriers. This is called from
+ // ExposeGCThingToActiveJS which has already checked the prerequisites for
+ // performing a read barrier. This means we can skip a bunch of checks and
+  // call into the tracer directly.
+
+ AutoGeckoProfilerEntry profilingStackFrame(
+ TlsContext.get(), "PerformIncrementalReadBarrier",
+ JS::ProfilingCategoryPair::GCCC_Barrier);
+
+ MOZ_ASSERT(thing);
+ MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
+
+ TenuredCell* cell = &thing.asCell()->asTenured();
+ Zone* zone = cell->zone();
+ MOZ_ASSERT(zone->needsIncrementalBarrier());
+
+  // Skip dispatching on known tracer type.
+ GCMarker* gcmarker = GCMarker::fromTracer(zone->barrierTracer());
+
+ // Mark the argument, as DoMarking above.
+ ApplyGCThingTyped(thing, [gcmarker](auto thing) {
+ MOZ_ASSERT(ShouldMark(gcmarker, thing));
+ CheckTracedThing(gcmarker, thing);
+ AutoClearTracingSource acts(gcmarker);
+ gcmarker->traverse(thing);
+ });
+}
+
+// The simplest traversal calls out to the fully generic traceChildren function
+// to visit the child edges. In the absence of other traversal mechanisms, this
+// function will rapidly grow the stack past its bounds and crash the process.
+// Thus, this generic tracing should only be used in cases where subsequent
+// tracing will not recurse.
+template <typename T>
+void js::GCMarker::markAndTraceChildren(T* thing) {
+ if (thing->isPermanentAndMayBeShared()) {
+ return;
+ }
+ if (mark(thing)) {
+ AutoSetTracingSource asts(this, thing);
+ thing->traceChildren(this);
+ }
+}
+namespace js {
+template <>
+void GCMarker::traverse(BaseShape* thing) {
+ markAndTraceChildren(thing);
+}
+template <>
+void GCMarker::traverse(JS::Symbol* thing) {
+ markAndTraceChildren(thing);
+}
+template <>
+void GCMarker::traverse(JS::BigInt* thing) {
+ markAndTraceChildren(thing);
+}
+template <>
+void GCMarker::traverse(RegExpShared* thing) {
+ markAndTraceChildren(thing);
+}
+} // namespace js
+
+// Strings, Shapes, and Scopes are extremely common, but have simple patterns of
+// recursion. We traverse trees of these edges immediately, with aggressive,
+// manual inlining, implemented by eagerlyMarkChildren.
+template <typename T>
+void js::GCMarker::markAndScan(T* thing) {
+ if (thing->isPermanentAndMayBeShared()) {
+ return;
+ }
+ if (mark(thing)) {
+ eagerlyMarkChildren(thing);
+ }
+}
+namespace js {
+template <>
+void GCMarker::traverse(JSString* thing) {
+ markAndScan(thing);
+}
+template <>
+void GCMarker::traverse(Shape* thing) {
+ markAndScan(thing);
+}
+template <>
+void GCMarker::traverse(js::Scope* thing) {
+ markAndScan(thing);
+}
+} // namespace js
+
+// Object and ObjectGroup are extremely common and can contain arbitrarily
+// nested graphs, so are not trivially inlined. In this case we use a mark
+// stack to control recursion. JitCode shares none of these properties, but is
+// included for historical reasons. JSScript normally cannot recurse, but may
+// be used as a weakmap key and thereby recurse into weakmapped values.
+template <typename T>
+void js::GCMarker::markAndPush(T* thing) {
+ if (!mark(thing)) {
+ return;
+ }
+ pushTaggedPtr(thing);
+}
+namespace js {
+template <>
+void GCMarker::traverse(JSObject* thing) {
+ markAndPush(thing);
+}
+template <>
+void GCMarker::traverse(ObjectGroup* thing) {
+ markAndPush(thing);
+}
+template <>
+void GCMarker::traverse(jit::JitCode* thing) {
+ markAndPush(thing);
+}
+template <>
+void GCMarker::traverse(BaseScript* thing) {
+ markAndPush(thing);
+}
+} // namespace js
+
+namespace js {
+template <>
+void GCMarker::traverse(AccessorShape* thing) {
+ MOZ_CRASH("AccessorShape must be marked as a Shape");
+}
+} // namespace js
+
+#ifdef DEBUG
+void GCMarker::setCheckAtomMarking(bool check) {
+ MOZ_ASSERT(check != checkAtomMarking);
+ checkAtomMarking = check;
+}
+#endif
+
+template <typename S, typename T>
+inline void GCMarker::checkTraversedEdge(S source, T* target) {
+#ifdef DEBUG
+ // Atoms and Symbols do not have or mark their internal pointers,
+ // respectively.
+ MOZ_ASSERT(!source->isPermanentAndMayBeShared());
+
+ if (target->isPermanentAndMayBeShared()) {
+ MOZ_ASSERT(!target->maybeCompartment());
+
+    // No further checks for permanent/shared things.
+ return;
+ }
+
+ Zone* sourceZone = source->zone();
+ Zone* targetZone = target->zone();
+
+ // Atoms and Symbols do not have access to a compartment pointer, or we'd need
+ // to adjust the subsequent check to catch that case.
+ MOZ_ASSERT_IF(targetZone->isAtomsZone(), !target->maybeCompartment());
+
+ // The Zones must match, unless the target is an atom.
+ MOZ_ASSERT(targetZone == sourceZone || targetZone->isAtomsZone());
+
+ // If we are marking an atom, that atom must be marked in the source zone's
+ // atom bitmap.
+ if (checkAtomMarking && !sourceZone->isAtomsZone() &&
+ targetZone->isAtomsZone()) {
+ MOZ_ASSERT(target->runtimeFromAnyThread()->gc.atomMarking.atomIsMarked(
+ sourceZone, reinterpret_cast<TenuredCell*>(target)));
+ }
+
+ // If we have access to a compartment pointer for both things, they must
+ // match.
+ MOZ_ASSERT_IF(source->maybeCompartment() && target->maybeCompartment(),
+ source->maybeCompartment() == target->maybeCompartment());
+#endif
+}
+
+template <typename S, typename T>
+void js::GCMarker::traverseEdge(S source, T* target) {
+ checkTraversedEdge(source, target);
+ traverse(target);
+}
+
+template <typename S, typename T>
+void js::GCMarker::traverseEdge(S source, const T& thing) {
+ ApplyGCThingTyped(thing,
+ [this, source](auto t) { this->traverseEdge(source, t); });
+}
+
+namespace {
+
+template <typename T>
+struct TraceKindCanBeGray {};
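+// Expand JS_FOR_EACH_TRACEKIND into one specialization per cell type,
+// recording at compile time whether that trace kind can ever be marked gray.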
+#define EXPAND_TRACEKIND_DEF(_, type, canBeGray, _1) \
+ template <> \
+ struct TraceKindCanBeGray<type> { \
+ static const bool value = canBeGray; \
+ };
+JS_FOR_EACH_TRACEKIND(EXPAND_TRACEKIND_DEF)
+#undef EXPAND_TRACEKIND_DEF
+
+} // namespace
+
+struct TraceKindCanBeGrayFunctor {
+ template <typename T>
+ bool operator()() {
+ return TraceKindCanBeGray<T>::value;
+ }
+};
+
+static bool TraceKindCanBeMarkedGray(JS::TraceKind kind) {
+ return DispatchTraceKindTyped(TraceKindCanBeGrayFunctor(), kind);
+}
+
+template <typename T>
+bool js::GCMarker::mark(T* thing) {
+ if (!thing->isTenured()) {
+ return false;
+ }
+
+ AssertShouldMarkInZone(thing);
+ TenuredCell* cell = &thing->asTenured();
+
+ MarkColor color =
+ TraceKindCanBeGray<T>::value ? markColor() : MarkColor::Black;
+ bool marked = cell->markIfUnmarked(color);
+ if (marked) {
+ markCount++;
+ }
+
+ return marked;
+}
+
+/*** Inline, Eager GC Marking ***********************************************/
+
+// Each of the eager, inline marking paths is directly preceded by the
+// out-of-line, generic tracing code for comparison. Both paths must end up
+// traversing equivalent subgraphs.
+
+void BaseScript::traceChildren(JSTracer* trc) {
+ TraceEdge(trc, &functionOrGlobal_, "function");
+ TraceEdge(trc, &sourceObject_, "sourceObject");
+
+ warmUpData_.trace(trc);
+
+ if (data_) {
+ data_->trace(trc);
+ }
+
+ // Scripts with bytecode may have optional data stored in per-runtime or
+ // per-zone maps. Note that a failed compilation must not have entries since
+ // the script itself will not be marked as having bytecode.
+ if (hasBytecode()) {
+ JSScript* script = this->asJSScript();
+
+ if (hasDebugScript()) {
+ DebugAPI::traceDebugScript(trc, script);
+ }
+ }
+
+ if (trc->isMarkingTracer()) {
+ GCMarker::fromTracer(trc)->markImplicitEdges(this);
+ }
+}
+
+void Shape::traceChildren(JSTracer* trc) {
+ TraceCellHeaderEdge(trc, this, "base");
+ TraceEdge(trc, &propidRef(), "propid");
+ if (parent) {
+ TraceEdge(trc, &parent, "parent");
+ }
+ if (dictNext.isObject()) {
+ JSObject* obj = dictNext.toObject();
+ TraceManuallyBarrieredEdge(trc, &obj, "dictNext object");
+ if (obj != dictNext.toObject()) {
+ dictNext.setObject(obj);
+ }
+ }
+
+ if (hasGetterObject()) {
+ TraceManuallyBarrieredEdge(trc, &asAccessorShape().getterObj, "getter");
+ }
+ if (hasSetterObject()) {
+ TraceManuallyBarrieredEdge(trc, &asAccessorShape().setterObj, "setter");
+ }
+}
+inline void js::GCMarker::eagerlyMarkChildren(Shape* shape) {
+ MOZ_ASSERT(shape->isMarked(markColor()));
+
+ do {
+ // Special case: if a base shape has a shape table then all its pointers
+    // must point to this shape or an ancestor. Since these pointers will
+ // be traced by this loop they do not need to be traced here as well.
+ BaseShape* base = shape->base();
+ checkTraversedEdge(shape, base);
+ if (mark(base)) {
+ MOZ_ASSERT(base->canSkipMarkingShapeCache(shape));
+ base->traceChildrenSkipShapeCache(this);
+ }
+
+ traverseEdge(shape, shape->propidRef().get());
+
+ // Normally only the last shape in a dictionary list can have a pointer to
+ // an object here, but it's possible that we can see this if we trace
+ // barriers while removing a shape from a dictionary list.
+ if (shape->dictNext.isObject()) {
+ traverseEdge(shape, shape->dictNext.toObject());
+ }
+
+ // When triggered between slices on behalf of a barrier, these
+ // objects may reside in the nursery, so require an extra check.
+ // FIXME: Bug 1157967 - remove the isTenured checks.
+ if (shape->hasGetterObject() && shape->getterObject()->isTenured()) {
+ traverseEdge(shape, shape->getterObject());
+ }
+ if (shape->hasSetterObject() && shape->setterObject()->isTenured()) {
+ traverseEdge(shape, shape->setterObject());
+ }
+
+ shape = shape->previous();
+ } while (shape && mark(shape));
+}
+
+void JSString::traceChildren(JSTracer* trc) {
+ if (hasBase()) {
+ traceBase(trc);
+ } else if (isRope()) {
+ asRope().traceChildren(trc);
+ }
+}
+inline void GCMarker::eagerlyMarkChildren(JSString* str) {
+ if (str->isLinear()) {
+ eagerlyMarkChildren(&str->asLinear());
+ } else {
+ eagerlyMarkChildren(&str->asRope());
+ }
+}
+
+void JSString::traceBase(JSTracer* trc) {
+ MOZ_ASSERT(hasBase());
+ TraceManuallyBarrieredEdge(trc, &d.s.u3.base, "base");
+}
+inline void js::GCMarker::eagerlyMarkChildren(JSLinearString* linearStr) {
+ AssertShouldMarkInZone(linearStr);
+ MOZ_ASSERT(linearStr->isMarkedAny());
+ MOZ_ASSERT(linearStr->JSString::isLinear());
+
+ // Use iterative marking to avoid blowing out the stack.
+ while (linearStr->hasBase()) {
+ linearStr = linearStr->base();
+ MOZ_ASSERT(linearStr->JSString::isLinear());
+ if (linearStr->isPermanentAtom()) {
+ break;
+ }
+ AssertShouldMarkInZone(linearStr);
+ if (!mark(static_cast<JSString*>(linearStr))) {
+ break;
+ }
+ }
+}
+
+void JSRope::traceChildren(JSTracer* trc) {
+ js::TraceManuallyBarrieredEdge(trc, &d.s.u2.left, "left child");
+ js::TraceManuallyBarrieredEdge(trc, &d.s.u3.right, "right child");
+}
+inline void js::GCMarker::eagerlyMarkChildren(JSRope* rope) {
+ // This function tries to scan the whole rope tree using the marking stack
+ // as temporary storage. If that becomes full, the unscanned ropes are
+ // added to the delayed marking list. When the function returns, the
+ // marking stack is at the same depth as it was on entry. This way we avoid
+ // using tags when pushing ropes to the stack as ropes never leak to other
+ // users of the stack. This also assumes that a rope can only point to
+  // other ropes or linear strings; it cannot refer to GC things of other
+ // types.
+ gc::MarkStack& stack = currentStack();
+ size_t savedPos = stack.position();
+ MOZ_DIAGNOSTIC_ASSERT(rope->getTraceKind() == JS::TraceKind::String);
+ while (true) {
+ MOZ_DIAGNOSTIC_ASSERT(rope->getTraceKind() == JS::TraceKind::String);
+ MOZ_DIAGNOSTIC_ASSERT(rope->JSString::isRope());
+ AssertShouldMarkInZone(rope);
+ MOZ_ASSERT(rope->isMarkedAny());
+ JSRope* next = nullptr;
+
+ JSString* right = rope->rightChild();
+ if (!right->isPermanentAtom() && mark(right)) {
+ if (right->isLinear()) {
+ eagerlyMarkChildren(&right->asLinear());
+ } else {
+ next = &right->asRope();
+ }
+ }
+
+ JSString* left = rope->leftChild();
+ if (!left->isPermanentAtom() && mark(left)) {
+ if (left->isLinear()) {
+ eagerlyMarkChildren(&left->asLinear());
+ } else {
+ // When both children are ropes, set aside the right one to
+ // scan it later.
+ if (next && !stack.pushTempRope(next)) {
+ delayMarkingChildren(next);
+ }
+ next = &left->asRope();
+ }
+ }
+ if (next) {
+ rope = next;
+ } else if (savedPos != stack.position()) {
+ MOZ_ASSERT(savedPos < stack.position());
+ rope = stack.popPtr().asTempRope();
+ } else {
+ break;
+ }
+ }
+ MOZ_ASSERT(savedPos == stack.position());
+}
+
+static inline void TraceBindingNames(JSTracer* trc, BindingName* names,
+ uint32_t length) {
+ for (uint32_t i = 0; i < length; i++) {
+ JSAtom* name = names[i].name();
+ MOZ_ASSERT(name);
+ TraceManuallyBarrieredEdge(trc, &name, "scope name");
+ }
+};
+static inline void TraceNullableBindingNames(JSTracer* trc, BindingName* names,
+ uint32_t length) {
+ for (uint32_t i = 0; i < length; i++) {
+ if (JSAtom* name = names[i].name()) {
+ TraceManuallyBarrieredEdge(trc, &name, "scope name");
+ }
+ }
+};
+void AbstractBindingName<JSAtom>::trace(JSTracer* trc) {
+ if (JSAtom* atom = name()) {
+ TraceManuallyBarrieredEdge(trc, &atom, "binding name");
+ }
+}
+void BindingIter::trace(JSTracer* trc) {
+ TraceNullableBindingNames(trc, names_, length_);
+}
+void LexicalScope::RuntimeData::trace(JSTracer* trc) {
+ TraceBindingNames(trc, trailingNames.start(), slotInfo.length);
+}
+void FunctionScope::RuntimeData::trace(JSTracer* trc) {
+ TraceNullableEdge(trc, &canonicalFunction, "scope canonical function");
+ TraceNullableBindingNames(trc, trailingNames.start(), slotInfo.length);
+}
+void VarScope::RuntimeData::trace(JSTracer* trc) {
+ TraceBindingNames(trc, trailingNames.start(), slotInfo.length);
+}
+void GlobalScope::RuntimeData::trace(JSTracer* trc) {
+ TraceBindingNames(trc, trailingNames.start(), slotInfo.length);
+}
+void EvalScope::RuntimeData::trace(JSTracer* trc) {
+ TraceBindingNames(trc, trailingNames.start(), slotInfo.length);
+}
+void ModuleScope::RuntimeData::trace(JSTracer* trc) {
+ TraceNullableEdge(trc, &module, "scope module");
+ TraceBindingNames(trc, trailingNames.start(), slotInfo.length);
+}
+void WasmInstanceScope::RuntimeData::trace(JSTracer* trc) {
+ TraceNullableEdge(trc, &instance, "wasm instance");
+ TraceBindingNames(trc, trailingNames.start(), slotInfo.length);
+}
+void WasmFunctionScope::RuntimeData::trace(JSTracer* trc) {
+ TraceBindingNames(trc, trailingNames.start(), slotInfo.length);
+}
+void Scope::traceChildren(JSTracer* trc) {
+ TraceNullableEdge(trc, &environmentShape_, "scope env shape");
+ TraceNullableEdge(trc, &enclosingScope_, "scope enclosing");
+ applyScopeDataTyped([trc](auto data) { data->trace(trc); });
+}
+inline void js::GCMarker::eagerlyMarkChildren(Scope* scope) {
+ do {
+ if (scope->environmentShape()) {
+ traverseEdge(scope, scope->environmentShape());
+ }
+ AbstractTrailingNamesArray<JSAtom>* names = nullptr;
+ uint32_t length = 0;
+ switch (scope->kind()) {
+ case ScopeKind::Function: {
+ FunctionScope::RuntimeData& data = scope->as<FunctionScope>().data();
+ if (data.canonicalFunction) {
+ traverseObjectEdge(scope, data.canonicalFunction);
+ }
+ names = &data.trailingNames;
+ length = data.slotInfo.length;
+ break;
+ }
+
+ case ScopeKind::FunctionBodyVar: {
+ VarScope::RuntimeData& data = scope->as<VarScope>().data();
+ names = &data.trailingNames;
+ length = data.slotInfo.length;
+ break;
+ }
+
+ case ScopeKind::Lexical:
+ case ScopeKind::SimpleCatch:
+ case ScopeKind::Catch:
+ case ScopeKind::NamedLambda:
+ case ScopeKind::StrictNamedLambda:
+ case ScopeKind::FunctionLexical:
+ case ScopeKind::ClassBody: {
+ LexicalScope::RuntimeData& data = scope->as<LexicalScope>().data();
+ names = &data.trailingNames;
+ length = data.slotInfo.length;
+ break;
+ }
+
+ case ScopeKind::Global:
+ case ScopeKind::NonSyntactic: {
+ GlobalScope::RuntimeData& data = scope->as<GlobalScope>().data();
+ names = &data.trailingNames;
+ length = data.slotInfo.length;
+ break;
+ }
+
+ case ScopeKind::Eval:
+ case ScopeKind::StrictEval: {
+ EvalScope::RuntimeData& data = scope->as<EvalScope>().data();
+ names = &data.trailingNames;
+ length = data.slotInfo.length;
+ break;
+ }
+
+ case ScopeKind::Module: {
+ ModuleScope::RuntimeData& data = scope->as<ModuleScope>().data();
+ if (data.module) {
+ traverseObjectEdge(scope, data.module);
+ }
+ names = &data.trailingNames;
+ length = data.slotInfo.length;
+ break;
+ }
+
+ case ScopeKind::With:
+ break;
+
+ case ScopeKind::WasmInstance: {
+ WasmInstanceScope::RuntimeData& data =
+ scope->as<WasmInstanceScope>().data();
+ traverseObjectEdge(scope, data.instance);
+ names = &data.trailingNames;
+ length = data.slotInfo.length;
+ break;
+ }
+
+ case ScopeKind::WasmFunction: {
+ WasmFunctionScope::RuntimeData& data =
+ scope->as<WasmFunctionScope>().data();
+ names = &data.trailingNames;
+ length = data.slotInfo.length;
+ break;
+ }
+ }
+ if (scope->kind_ == ScopeKind::Function) {
+ for (uint32_t i = 0; i < length; i++) {
+ if (JSAtom* name = names->get(i).name()) {
+ traverseStringEdge(scope, name);
+ }
+ }
+ } else {
+ for (uint32_t i = 0; i < length; i++) {
+ traverseStringEdge(scope, names->get(i).name());
+ }
+ }
+ scope = scope->enclosing();
+ } while (scope && mark(scope));
+}
+
+void js::ObjectGroup::traceChildren(JSTracer* trc) {
+ if (proto().isObject()) {
+ TraceEdge(trc, &proto(), "group_proto");
+ }
+
+ // Note: the realm's global can be nullptr if we GC while creating the global.
+ if (JSObject* global = realm()->unsafeUnbarrieredMaybeGlobal()) {
+ TraceManuallyBarrieredEdge(trc, &global, "group_global");
+ }
+
+ TraceNullableEdge(trc, &typeDescr_, "group_typedescr");
+}
+
+void js::GCMarker::lazilyMarkChildren(ObjectGroup* group) {
+ if (group->proto().isObject()) {
+ traverseEdge(group, group->proto().toObject());
+ }
+
+ // Note: the realm's global can be nullptr if we GC while creating the global.
+ if (GlobalObject* global = group->realm()->unsafeUnbarrieredMaybeGlobal()) {
+ traverseEdge(group, static_cast<JSObject*>(global));
+ }
+
+ if (TypeDescr* descr = group->maybeTypeDescr()) {
+ traverseEdge(group, static_cast<JSObject*>(descr));
+ }
+}
+
+void JS::BigInt::traceChildren(JSTracer* trc) {}
+
+// Call the trace hook set on the object, if present.
+static inline void CallTraceHook(JSTracer* trc, JSObject* obj) {
+ const JSClass* clasp = obj->getClass();
+ MOZ_ASSERT(clasp);
+ MOZ_ASSERT(obj->isNative() == clasp->isNative());
+
+ if (clasp->hasTrace()) {
+ AutoSetTracingSource asts(trc, obj);
+ clasp->doTrace(trc, obj);
+ }
+}
+
+template <typename Functor>
+static void VisitTraceListWithFunctor(const Functor& f,
+ const uint32_t* traceList,
+ uint8_t* memory) {
+ size_t stringCount = *traceList++;
+ size_t objectCount = *traceList++;
+ size_t valueCount = *traceList++;
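+  // The trace list begins with three counts, followed by that many byte
+  // offsets into |memory|: first the string fields, then the object fields,
+  // then the value fields.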
+ for (size_t i = 0; i < stringCount; i++) {
+ f(reinterpret_cast<JSString**>(memory + *traceList));
+ traceList++;
+ }
+ for (size_t i = 0; i < objectCount; i++) {
+ auto** objp = reinterpret_cast<JSObject**>(memory + *traceList);
+ if (*objp) {
+ f(objp);
+ }
+ traceList++;
+ }
+ for (size_t i = 0; i < valueCount; i++) {
+ f(reinterpret_cast<Value*>(memory + *traceList));
+ traceList++;
+ }
+}
+
+/*
+ * Trace TypedObject memory according to the layout specified by |traceList|
+ * with optimized paths for GC tracers.
+ *
+ * I'm not sure how much difference this makes versus calling TraceEdge for each
+ * edge; that at least has to dispatch on the tracer kind each time.
+ */
+void js::gc::VisitTraceList(JSTracer* trc, JSObject* obj,
+ const uint32_t* traceList, uint8_t* memory) {
+ if (trc->isMarkingTracer()) {
+ auto* marker = GCMarker::fromTracer(trc);
+ VisitTraceListWithFunctor([=](auto thingp) { DoMarking(marker, *thingp); },
+ traceList, memory);
+ return;
+ }
+
+ if (trc->isTenuringTracer()) {
+ auto* ttrc = static_cast<TenuringTracer*>(trc);
+ VisitTraceListWithFunctor([=](auto thingp) { ttrc->traverse(thingp); },
+ traceList, memory);
+ return;
+ }
+
+ VisitTraceListWithFunctor(
+ [=](auto thingp) { TraceEdgeInternal(trc, thingp, "TypedObject edge"); },
+ traceList, memory);
+ return;
+}
+
+/*** Mark-stack Marking *****************************************************/
+
+GCMarker::MarkQueueProgress GCMarker::processMarkQueue() {
+#ifdef DEBUG
+ if (markQueue.empty()) {
+ return QueueComplete;
+ }
+
+ GCRuntime& gcrt = runtime()->gc;
+ if (queueMarkColor == mozilla::Some(MarkColor::Gray) &&
+ gcrt.state() != State::Sweep) {
+ return QueueSuspended;
+ }
+
+ // If the queue wants to be gray marking, but we've pushed a black object
+ // since set-color-gray was processed, then we can't switch to gray and must
+ // again wait until gray marking is possible.
+ //
+ // Remove this code if the restriction against marking gray during black is
+ // relaxed.
+ if (queueMarkColor == mozilla::Some(MarkColor::Gray) && hasBlackEntries()) {
+ return QueueSuspended;
+ }
+
+ // If the queue wants to be marking a particular color, switch to that color.
+ // In any case, restore the mark color to whatever it was when we entered
+ // this function.
+ AutoSetMarkColor autoRevertColor(*this, queueMarkColor.valueOr(markColor()));
+
+ // Process the mark queue by taking each object in turn, pushing it onto the
+ // mark stack, and processing just the top element with processMarkStackTop
+ // without recursing into reachable objects.
+ while (queuePos < markQueue.length()) {
+ Value val = markQueue[queuePos++].get().unbarrieredGet();
+ if (val.isObject()) {
+ JSObject* obj = &val.toObject();
+ JS::Zone* zone = obj->zone();
+ if (!zone->isGCMarking() || obj->isMarkedAtLeast(markColor())) {
+ continue;
+ }
+
+ // If we have started sweeping, obey sweep group ordering. But note that
+ // we will first be called during the initial sweep slice, when the sweep
+ // group indexes have not yet been computed. In that case, we can mark
+ // freely.
+ if (gcrt.state() == State::Sweep && gcrt.initialState != State::Sweep) {
+ if (zone->gcSweepGroupIndex < gcrt.getCurrentSweepGroupIndex()) {
+ // Too late. This must have been added after we started collecting,
+ // and we've already processed its sweep group. Skip it.
+ continue;
+ }
+ if (zone->gcSweepGroupIndex > gcrt.getCurrentSweepGroupIndex()) {
+ // Not ready yet. Wait until we reach the object's sweep group.
+ queuePos--;
+ return QueueSuspended;
+ }
+ }
+
+ if (markColor() == MarkColor::Gray && zone->isGCMarkingBlackOnly()) {
+ // Have not yet reached the point where we can mark this object, so
+ // continue with the GC.
+ queuePos--;
+ return QueueSuspended;
+ }
+
+ // Mark the object and push it onto the stack.
+ traverse(obj);
+
+ if (isMarkStackEmpty()) {
+ if (obj->asTenured().arena()->onDelayedMarkingList()) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ oomUnsafe.crash("mark queue OOM");
+ }
+ }
+
+ // Process just the one object that is now on top of the mark stack,
+ // possibly pushing more stuff onto the stack.
+ if (isMarkStackEmpty()) {
+ MOZ_ASSERT(obj->asTenured().arena()->onDelayedMarkingList());
+ // If we overflow the stack here and delay marking, then we won't be
+ // testing what we think we're testing.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ oomUnsafe.crash("Overflowed stack while marking test queue");
+ }
+
+ SliceBudget unlimited = SliceBudget::unlimited();
+ processMarkStackTop(unlimited);
+ } else if (val.isString()) {
+ JSLinearString* str = &val.toString()->asLinear();
+ if (js::StringEqualsLiteral(str, "yield") && gcrt.isIncrementalGc()) {
+ return QueueYielded;
+ } else if (js::StringEqualsLiteral(str, "enter-weak-marking-mode") ||
+ js::StringEqualsLiteral(str, "abort-weak-marking-mode")) {
+ if (state == MarkingState::RegularMarking) {
+ // We can't enter weak marking mode at just any time, so instead
+ // we'll stop processing the queue and continue on with the GC. Once
+ // we enter weak marking mode, we can continue to the rest of the
+ // queue. Note that we will also suspend for aborting, and then abort
+ // the earliest following weak marking mode.
+ queuePos--;
+ return QueueSuspended;
+ }
+ if (js::StringEqualsLiteral(str, "abort-weak-marking-mode")) {
+ abortLinearWeakMarking();
+ }
+ } else if (js::StringEqualsLiteral(str, "drain")) {
+ auto unlimited = SliceBudget::unlimited();
+ MOZ_RELEASE_ASSERT(
+ markUntilBudgetExhausted(unlimited, DontReportMarkTime));
+ } else if (js::StringEqualsLiteral(str, "set-color-gray")) {
+ queueMarkColor = mozilla::Some(MarkColor::Gray);
+ if (gcrt.state() != State::Sweep) {
+ // Cannot mark gray yet, so continue with the GC.
+ queuePos--;
+ return QueueSuspended;
+ }
+ setMarkColor(MarkColor::Gray);
+ } else if (js::StringEqualsLiteral(str, "set-color-black")) {
+ queueMarkColor = mozilla::Some(MarkColor::Black);
+ setMarkColor(MarkColor::Black);
+ } else if (js::StringEqualsLiteral(str, "unset-color")) {
+ queueMarkColor.reset();
+ }
+ }
+ }
+#endif
+
+ return QueueComplete;
+}
+
+static gcstats::PhaseKind GrayMarkingPhaseForCurrentPhase(
+ const gcstats::Statistics& stats) {
+ using namespace gcstats;
+ switch (stats.currentPhaseKind()) {
+ case PhaseKind::SWEEP_MARK:
+ return PhaseKind::SWEEP_MARK_GRAY;
+ case PhaseKind::SWEEP_MARK_WEAK:
+ return PhaseKind::SWEEP_MARK_GRAY_WEAK;
+ default:
+ MOZ_CRASH("Unexpected current phase");
+ }
+}
+
+bool GCMarker::markUntilBudgetExhausted(SliceBudget& budget,
+ ShouldReportMarkTime reportTime) {
+#ifdef DEBUG
+ MOZ_ASSERT(!strictCompartmentChecking);
+ strictCompartmentChecking = true;
+ auto acc = mozilla::MakeScopeExit([&] { strictCompartmentChecking = false; });
+#endif
+
+ if (budget.isOverBudget()) {
+ return false;
+ }
+
+ // This method leaves the mark color as it found it.
+ AutoSetMarkColor autoSetBlack(*this, MarkColor::Black);
+
+ for (;;) {
+ while (hasBlackEntries()) {
+ MOZ_ASSERT(markColor() == MarkColor::Black);
+ processMarkStackTop(budget);
+ if (budget.isOverBudget()) {
+ return false;
+ }
+ }
+
+ if (hasGrayEntries()) {
+ mozilla::Maybe<gcstats::AutoPhase> ap;
+ if (reportTime) {
+ auto& stats = runtime()->gc.stats();
+ ap.emplace(stats, GrayMarkingPhaseForCurrentPhase(stats));
+ }
+
+ AutoSetMarkColor autoSetGray(*this, MarkColor::Gray);
+ do {
+ processMarkStackTop(budget);
+ if (budget.isOverBudget()) {
+ return false;
+ }
+ } while (hasGrayEntries());
+ }
+
+ if (hasBlackEntries()) {
+ // We can end up marking black during gray marking in the following case:
+ // a WeakMap has a CCW key whose delegate (target) is black, and during
+ // gray marking we mark the map (gray). The delegate's color will be
+ // propagated to the key. (And we can't avoid this by marking the key
+ // gray, because even though the value will end up gray in either case,
+ // the WeakMap entry must be preserved because the CCW could get
+ // collected and then we could re-wrap the delegate and look it up in the
+ // map again, and need to get back the original value.)
+ continue;
+ }
+
+ if (!hasDelayedChildren()) {
+ break;
+ }
+
+ /*
+     * Mark children of things that caused too-deep recursion during the
+ * above tracing. Don't do this until we're done with everything
+ * else.
+ */
+ if (!markAllDelayedChildren(budget)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static inline void CheckForCompartmentMismatch(JSObject* obj, JSObject* obj2) {
+#ifdef DEBUG
+ if (MOZ_UNLIKELY(obj->compartment() != obj2->compartment())) {
+ fprintf(
+ stderr,
+ "Compartment mismatch in pointer from %s object slot to %s object\n",
+ obj->getClass()->name, obj2->getClass()->name);
+ MOZ_CRASH("Compartment mismatch");
+ }
+#endif
+}
+
+static inline size_t NumUsedFixedSlots(NativeObject* obj) {
+ return std::min(obj->numFixedSlots(), obj->slotSpan());
+}
+
+static inline size_t NumUsedDynamicSlots(NativeObject* obj) {
+ size_t nfixed = obj->numFixedSlots();
+ size_t nslots = obj->slotSpan();
+ if (nslots < nfixed) {
+ return 0;
+ }
+
+ return nslots - nfixed;
+}
+
+inline void GCMarker::processMarkStackTop(SliceBudget& budget) {
+ /*
+ * This function uses explicit goto and scans objects directly. This allows us
+ * to eliminate tail recursion and significantly improve the marking
+ * performance, see bug 641025.
+ *
+ * Note that the mutator can change the size and layout of objects between
+ * marking slices, so we must check slots and element ranges read from the
+ * stack.
+ */
+
+ JSObject* obj; // The object being scanned.
+ SlotsOrElementsKind kind; // The kind of slot range being scanned, if any.
+ HeapSlot* base; // Slot range base pointer.
+ size_t index; // Index of the next slot to mark.
+ size_t end; // End of slot range to mark.
+
+ gc::MarkStack& stack = currentStack();
+
+ switch (stack.peekTag()) {
+ case MarkStack::SlotsOrElementsRangeTag: {
+ auto range = stack.popSlotsOrElementsRange();
+ obj = range.ptr().asRangeObject();
+ NativeObject* nobj = &obj->as<NativeObject>();
+ kind = range.kind();
+ index = range.start();
+
+ switch (kind) {
+ case SlotsOrElementsKind::FixedSlots: {
+ base = nobj->fixedSlots();
+ end = NumUsedFixedSlots(nobj);
+ break;
+ }
+
+ case SlotsOrElementsKind::DynamicSlots: {
+ base = nobj->slots_;
+ end = NumUsedDynamicSlots(nobj);
+ break;
+ }
+
+ case SlotsOrElementsKind::Elements: {
+ base = nobj->getDenseElements();
+
+ // Account for shifted elements.
+ size_t numShifted = nobj->getElementsHeader()->numShiftedElements();
+ size_t initlen = nobj->getDenseInitializedLength();
+ index = std::max(index, numShifted) - numShifted;
+ end = initlen;
+ break;
+ }
+ }
+
+ goto scan_value_range;
+ }
+
+ case MarkStack::ObjectTag: {
+ obj = stack.popPtr().as<JSObject>();
+ AssertShouldMarkInZone(obj);
+ goto scan_obj;
+ }
+
+ case MarkStack::GroupTag: {
+ auto group = stack.popPtr().as<ObjectGroup>();
+ return lazilyMarkChildren(group);
+ }
+
+ case MarkStack::JitCodeTag: {
+ auto code = stack.popPtr().as<jit::JitCode>();
+ AutoSetTracingSource asts(this, code);
+ return code->traceChildren(this);
+ }
+
+ case MarkStack::ScriptTag: {
+ auto script = stack.popPtr().as<BaseScript>();
+ AutoSetTracingSource asts(this, script);
+ return script->traceChildren(this);
+ }
+
+ default:
+ MOZ_CRASH("Invalid tag in mark stack");
+ }
+ return;
+
+scan_value_range:
+ while (index < end) {
+ budget.step();
+ if (budget.isOverBudget()) {
+ pushValueRange(obj, kind, index, end);
+ return;
+ }
+
+ const Value& v = base[index];
+ index++;
+
+ if (v.isString()) {
+ traverseEdge(obj, v.toString());
+ } else if (v.isObject()) {
+ JSObject* obj2 = &v.toObject();
+#ifdef DEBUG
+ if (!obj2) {
+ fprintf(stderr,
+ "processMarkStackTop found ObjectValue(nullptr) "
+ "at %zu Values from end of range in object:\n",
+ size_t(end - (index - 1)));
+ DumpObject(obj);
+ }
+#endif
+ CheckForCompartmentMismatch(obj, obj2);
+ if (mark(obj2)) {
+ // Save the rest of this value range for later and start scanning obj2's
+ // children.
+ pushValueRange(obj, kind, index, end);
+ obj = obj2;
+ goto scan_obj;
+ }
+ } else if (v.isSymbol()) {
+ traverseEdge(obj, v.toSymbol());
+ } else if (v.isBigInt()) {
+ traverseEdge(obj, v.toBigInt());
+ } else if (v.isPrivateGCThing()) {
+ // v.toGCCellPtr cannot be inlined, so construct one manually.
+ Cell* cell = v.toGCThing();
+ traverseEdge(obj, JS::GCCellPtr(cell, cell->getTraceKind()));
+ }
+ }
+ return;
+
+scan_obj : {
+ AssertShouldMarkInZone(obj);
+
+ budget.step();
+ if (budget.isOverBudget()) {
+ repush(obj);
+ return;
+ }
+
+ markImplicitEdges(obj);
+ traverseEdge(obj, obj->group());
+
+ CallTraceHook(this, obj);
+
+ if (!obj->isNative()) {
+ return;
+ }
+
+ NativeObject* nobj = &obj->as<NativeObject>();
+ Shape* shape = nobj->lastProperty();
+ traverseEdge(obj, shape);
+
+ unsigned nslots = nobj->slotSpan();
+
+ do {
+ if (nobj->hasEmptyElements()) {
+ break;
+ }
+
+ base = nobj->getDenseElements();
+ kind = SlotsOrElementsKind::Elements;
+ index = 0;
+ end = nobj->getDenseInitializedLength();
+
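+    // If there are no slots to scan, scan the elements range immediately;
+    // otherwise push the elements range for later and scan the slots first.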
+ if (!nslots) {
+ goto scan_value_range;
+ }
+ pushValueRange(nobj, kind, index, end);
+ } while (false);
+
+ unsigned nfixed = nobj->numFixedSlots();
+
+ base = nobj->fixedSlots();
+ kind = SlotsOrElementsKind::FixedSlots;
+ index = 0;
+
+ if (nslots > nfixed) {
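+    // Push the fixed slot range for later and scan the dynamic slots now.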
+ pushValueRange(nobj, kind, index, nfixed);
+ kind = SlotsOrElementsKind::DynamicSlots;
+ base = nobj->slots_;
+ end = nslots - nfixed;
+ goto scan_value_range;
+ }
+
+ MOZ_ASSERT(nslots <= nobj->numFixedSlots());
+ end = nslots;
+ goto scan_value_range;
+}
+}
+
+/*** Mark Stack *************************************************************/
+
+static_assert(sizeof(MarkStack::TaggedPtr) == sizeof(uintptr_t),
+ "A TaggedPtr should be the same size as a pointer");
+static_assert((sizeof(MarkStack::SlotsOrElementsRange) % sizeof(uintptr_t)) ==
+ 0,
+ "SlotsOrElementsRange size should be a multiple of "
+ "the pointer size");
+
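+// The number of mark stack words occupied by a single SlotsOrElementsRange;
+// used when pushing and popping whole ranges.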
+static const size_t ValueRangeWords =
+ sizeof(MarkStack::SlotsOrElementsRange) / sizeof(uintptr_t);
+
+template <typename T>
+struct MapTypeToMarkStackTag {};
+template <>
+struct MapTypeToMarkStackTag<JSObject*> {
+ static const auto value = MarkStack::ObjectTag;
+};
+template <>
+struct MapTypeToMarkStackTag<ObjectGroup*> {
+ static const auto value = MarkStack::GroupTag;
+};
+template <>
+struct MapTypeToMarkStackTag<jit::JitCode*> {
+ static const auto value = MarkStack::JitCodeTag;
+};
+template <>
+struct MapTypeToMarkStackTag<BaseScript*> {
+ static const auto value = MarkStack::ScriptTag;
+};
+
+static inline bool TagIsRangeTag(MarkStack::Tag tag) {
+ return tag == MarkStack::SlotsOrElementsRangeTag;
+}
+
+inline MarkStack::TaggedPtr::TaggedPtr(Tag tag, Cell* ptr)
+ : bits(tag | uintptr_t(ptr)) {
+ assertValid();
+}
+
+inline MarkStack::Tag MarkStack::TaggedPtr::tag() const {
+ auto tag = Tag(bits & TagMask);
+ MOZ_ASSERT(tag <= LastTag);
+ return tag;
+}
+
+inline Cell* MarkStack::TaggedPtr::ptr() const {
+ return reinterpret_cast<Cell*>(bits & ~TagMask);
+}
+
+inline void MarkStack::TaggedPtr::assertValid() const {
+ mozilla::Unused << tag();
+ MOZ_ASSERT(IsCellPointerValid(ptr()));
+}
+
+template <typename T>
+inline T* MarkStack::TaggedPtr::as() const {
+ MOZ_ASSERT(tag() == MapTypeToMarkStackTag<T*>::value);
+ MOZ_ASSERT(ptr()->isTenured());
+ MOZ_ASSERT(ptr()->is<T>());
+ return static_cast<T*>(ptr());
+}
+
+inline JSObject* MarkStack::TaggedPtr::asRangeObject() const {
+ MOZ_ASSERT(TagIsRangeTag(tag()));
+ MOZ_ASSERT(ptr()->isTenured());
+ return ptr()->as<JSObject>();
+}
+
+inline JSRope* MarkStack::TaggedPtr::asTempRope() const {
+ MOZ_ASSERT(tag() == TempRopeTag);
+ return &ptr()->as<JSString>()->asRope();
+}
+
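+// A SlotsOrElementsRange stores a tagged pointer to the object being scanned
+// together with the start index and kind packed into a single word: the kind
+// occupies the low bits and the start index is shifted above it.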
+inline MarkStack::SlotsOrElementsRange::SlotsOrElementsRange(
+ SlotsOrElementsKind kindArg, JSObject* obj, size_t startArg)
+ : startAndKind_((startArg << StartShift) | size_t(kindArg)),
+ ptr_(SlotsOrElementsRangeTag, obj) {
+ assertValid();
+ MOZ_ASSERT(kind() == kindArg);
+ MOZ_ASSERT(start() == startArg);
+}
+
+inline void MarkStack::SlotsOrElementsRange::assertValid() const {
+ ptr_.assertValid();
+ MOZ_ASSERT(TagIsRangeTag(ptr_.tag()));
+}
+
+inline SlotsOrElementsKind MarkStack::SlotsOrElementsRange::kind() const {
+ return SlotsOrElementsKind(startAndKind_ & KindMask);
+}
+
+inline size_t MarkStack::SlotsOrElementsRange::start() const {
+ return startAndKind_ >> StartShift;
+}
+
+inline MarkStack::TaggedPtr MarkStack::SlotsOrElementsRange::ptr() const {
+ return ptr_;
+}
+
+MarkStack::MarkStack(size_t maxCapacity)
+ : topIndex_(0),
+ maxCapacity_(maxCapacity)
+#ifdef DEBUG
+ ,
+ iteratorCount_(0)
+#endif
+{
+}
+
+MarkStack::~MarkStack() {
+ MOZ_ASSERT(isEmpty());
+ MOZ_ASSERT(iteratorCount_ == 0);
+}
+
+bool MarkStack::init(StackType which, bool incrementalGCEnabled) {
+ MOZ_ASSERT(isEmpty());
+ return setStackCapacity(which, incrementalGCEnabled);
+}
+
+bool MarkStack::setStackCapacity(StackType which, bool incrementalGCEnabled) {
+ size_t capacity;
+
+ if (which == AuxiliaryStack) {
+ capacity = SMALL_MARK_STACK_BASE_CAPACITY;
+ } else if (incrementalGCEnabled) {
+ capacity = INCREMENTAL_MARK_STACK_BASE_CAPACITY;
+ } else {
+ capacity = NON_INCREMENTAL_MARK_STACK_BASE_CAPACITY;
+ }
+
+ if (capacity > maxCapacity_) {
+ capacity = maxCapacity_;
+ }
+
+ return resize(capacity);
+}
+
+void MarkStack::setMaxCapacity(size_t maxCapacity) {
+ MOZ_ASSERT(maxCapacity != 0);
+ MOZ_ASSERT(isEmpty());
+
+ maxCapacity_ = maxCapacity;
+ if (capacity() > maxCapacity_) {
+ // If the realloc fails, just keep using the existing stack; it's
+ // not ideal but better than failing.
+ mozilla::Unused << resize(maxCapacity_);
+ }
+}
+
+inline MarkStack::TaggedPtr* MarkStack::topPtr() { return &stack()[topIndex_]; }
+
+inline bool MarkStack::pushTaggedPtr(Tag tag, Cell* ptr) {
+ if (!ensureSpace(1)) {
+ return false;
+ }
+
+ *topPtr() = TaggedPtr(tag, ptr);
+ topIndex_++;
+ return true;
+}
+
+template <typename T>
+inline bool MarkStack::push(T* ptr) {
+ return pushTaggedPtr(MapTypeToMarkStackTag<T*>::value, ptr);
+}
+
+inline bool MarkStack::pushTempRope(JSRope* rope) {
+ return pushTaggedPtr(TempRopeTag, rope);
+}
+
+inline bool MarkStack::push(JSObject* obj, SlotsOrElementsKind kind,
+ size_t start) {
+ return push(SlotsOrElementsRange(kind, obj, start));
+}
+
+inline bool MarkStack::push(const SlotsOrElementsRange& array) {
+ array.assertValid();
+
+ if (!ensureSpace(ValueRangeWords)) {
+ return false;
+ }
+
+ *reinterpret_cast<SlotsOrElementsRange*>(topPtr()) = array;
+ topIndex_ += ValueRangeWords;
+ MOZ_ASSERT(position() <= capacity());
+ MOZ_ASSERT(TagIsRangeTag(peekTag()));
+ return true;
+}
+
+inline const MarkStack::TaggedPtr& MarkStack::peekPtr() const {
+ return stack()[topIndex_ - 1];
+}
+
+inline MarkStack::Tag MarkStack::peekTag() const { return peekPtr().tag(); }
+
+inline MarkStack::TaggedPtr MarkStack::popPtr() {
+ MOZ_ASSERT(!isEmpty());
+ MOZ_ASSERT(!TagIsRangeTag(peekTag()));
+ peekPtr().assertValid();
+ topIndex_--;
+ return *topPtr();
+}
+
+inline MarkStack::SlotsOrElementsRange MarkStack::popSlotsOrElementsRange() {
+ MOZ_ASSERT(TagIsRangeTag(peekTag()));
+ MOZ_ASSERT(position() >= ValueRangeWords);
+
+ topIndex_ -= ValueRangeWords;
+ const auto& array = *reinterpret_cast<SlotsOrElementsRange*>(topPtr());
+ array.assertValid();
+ return array;
+}
+
+inline bool MarkStack::ensureSpace(size_t count) {
+ if ((topIndex_ + count) <= capacity()) {
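+    // Even when there is space, the simulated-OOM testing hook may force a
+    // failure here so that callers exercise the delayed marking path.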
+ return !js::oom::ShouldFailWithOOM();
+ }
+
+ return enlarge(count);
+}
+
+bool MarkStack::enlarge(size_t count) {
+ size_t newCapacity = std::min(maxCapacity_.ref(), capacity() * 2);
+ if (newCapacity < capacity() + count) {
+ return false;
+ }
+
+ return resize(newCapacity);
+}
+
+bool MarkStack::resize(size_t newCapacity) {
+ MOZ_ASSERT(newCapacity != 0);
+ if (!stack().resize(newCapacity)) {
+ return false;
+ }
+
+ poisonUnused();
+ return true;
+}
+
+inline void MarkStack::poisonUnused() {
+ static_assert((JS_FRESH_MARK_STACK_PATTERN & TagMask) > LastTag,
+ "The mark stack poison pattern must not look like a valid "
+ "tagged pointer");
+
+ AlwaysPoison(stack().begin() + topIndex_, JS_FRESH_MARK_STACK_PATTERN,
+ stack().capacity() - topIndex_, MemCheckKind::MakeUndefined);
+}
+
+size_t MarkStack::sizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return stack().sizeOfExcludingThis(mallocSizeOf);
+}
+
+MarkStackIter::MarkStackIter(MarkStack& stack)
+ : stack_(stack), pos_(stack.position()) {
+#ifdef DEBUG
+ stack.iteratorCount_++;
+#endif
+}
+
+MarkStackIter::~MarkStackIter() {
+#ifdef DEBUG
+ MOZ_ASSERT(stack_.iteratorCount_);
+ stack_.iteratorCount_--;
+#endif
+}
+
+inline size_t MarkStackIter::position() const { return pos_; }
+
+inline bool MarkStackIter::done() const { return position() == 0; }
+
+inline MarkStack::TaggedPtr MarkStackIter::peekPtr() const {
+ MOZ_ASSERT(!done());
+ return stack_.stack()[pos_ - 1];
+}
+
+inline MarkStack::Tag MarkStackIter::peekTag() const { return peekPtr().tag(); }
+
+inline void MarkStackIter::nextPtr() {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(!TagIsRangeTag(peekTag()));
+ pos_--;
+}
+
+inline void MarkStackIter::next() {
+ if (TagIsRangeTag(peekTag())) {
+ nextArray();
+ } else {
+ nextPtr();
+ }
+}
+
+inline void MarkStackIter::nextArray() {
+ MOZ_ASSERT(TagIsRangeTag(peekTag()));
+ MOZ_ASSERT(position() >= ValueRangeWords);
+ pos_ -= ValueRangeWords;
+}
+
+/*** GCMarker ***************************************************************/
+
+/*
+ * WeakMapTraceAction::Expand: the GC is recomputing the liveness of WeakMap
+ * entries by expanding each live WeakMap into its constituent key->value edges,
+ * a table of which will be consulted in a later phase whenever marking a
+ * potential key.
+ */
+GCMarker::GCMarker(JSRuntime* rt)
+ : JSTracer(rt, JS::TracerKind::Marking,
+ JS::TraceOptions(JS::WeakMapTraceAction::Expand,
+ JS::WeakEdgeTraceAction::Skip)),
+ stack(),
+ auxStack(),
+ mainStackColor(MarkColor::Black),
+ delayedMarkingList(nullptr),
+ delayedMarkingWorkAdded(false),
+ state(MarkingState::NotActive),
+ incrementalWeakMapMarkingEnabled(
+ TuningDefaults::IncrementalWeakMapMarkingEnabled)
+#ifdef DEBUG
+ ,
+ markLaterArenas(0),
+ checkAtomMarking(true),
+ strictCompartmentChecking(false),
+ markQueue(rt),
+ queuePos(0)
+#endif
+{
+ setMarkColorUnchecked(MarkColor::Black);
+}
+
+bool GCMarker::init() {
+ bool incrementalGCEnabled = runtime()->gc.isIncrementalGCEnabled();
+ return stack.init(gc::MarkStack::MainStack, incrementalGCEnabled) &&
+ auxStack.init(gc::MarkStack::AuxiliaryStack, incrementalGCEnabled);
+}
+
+void GCMarker::start() {
+ MOZ_ASSERT(state == MarkingState::NotActive);
+ state = MarkingState::RegularMarking;
+ color = MarkColor::Black;
+
+#ifdef DEBUG
+ queuePos = 0;
+ queueMarkColor.reset();
+#endif
+
+ MOZ_ASSERT(!delayedMarkingList);
+ MOZ_ASSERT(markLaterArenas == 0);
+}
+
+void GCMarker::stop() {
+ MOZ_ASSERT(isDrained());
+ MOZ_ASSERT(!delayedMarkingList);
+ MOZ_ASSERT(markLaterArenas == 0);
+
+ if (state == MarkingState::NotActive) {
+ return;
+ }
+ state = MarkingState::NotActive;
+
+ stack.clear();
+ auxStack.clear();
+ setMainStackColor(MarkColor::Black);
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ for (GCZonesIter zone(runtime()); !zone.done(); zone.next()) {
+ if (!zone->gcWeakKeys().clear()) {
+ oomUnsafe.crash("clearing weak keys in GCMarker::stop()");
+ }
+ if (!zone->gcNurseryWeakKeys().clear()) {
+ oomUnsafe.crash("clearing (nursery) weak keys in GCMarker::stop()");
+ }
+ }
+}
+
+template <typename F>
+inline void GCMarker::forEachDelayedMarkingArena(F&& f) {
+ Arena* arena = delayedMarkingList;
+ Arena* next;
+ while (arena) {
+ next = arena->getNextDelayedMarking();
+ f(arena);
+ arena = next;
+ }
+}
+
+void GCMarker::reset() {
+ color = MarkColor::Black;
+
+ stack.clear();
+ auxStack.clear();
+ setMainStackColor(MarkColor::Black);
+ MOZ_ASSERT(isMarkStackEmpty());
+
+ forEachDelayedMarkingArena([&](Arena* arena) {
+ MOZ_ASSERT(arena->onDelayedMarkingList());
+ arena->clearDelayedMarkingState();
+#ifdef DEBUG
+ MOZ_ASSERT(markLaterArenas);
+ markLaterArenas--;
+#endif
+ });
+ delayedMarkingList = nullptr;
+
+ MOZ_ASSERT(isDrained());
+ MOZ_ASSERT(!markLaterArenas);
+}
+
+void GCMarker::setMarkColor(gc::MarkColor newColor) {
+ if (color != newColor) {
+ MOZ_ASSERT(runtime()->gc.state() == State::Sweep);
+ setMarkColorUnchecked(newColor);
+ }
+}
+
+void GCMarker::setMarkColorUnchecked(gc::MarkColor newColor) {
+ color = newColor;
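+  // Select the stack used for marking in this color.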
+ currentStackPtr = &getStack(color);
+}
+
+void GCMarker::setMainStackColor(gc::MarkColor newColor) {
+ if (newColor != mainStackColor) {
+ MOZ_ASSERT(isMarkStackEmpty());
+ mainStackColor = newColor;
+ setMarkColorUnchecked(color);
+ }
+}
+
+template <typename T>
+void GCMarker::pushTaggedPtr(T* ptr) {
+ checkZone(ptr);
+ if (!currentStack().push(ptr)) {
+ delayMarkingChildren(ptr);
+ }
+}
+
+void GCMarker::pushValueRange(JSObject* obj, SlotsOrElementsKind kind,
+ size_t start, size_t end) {
+ checkZone(obj);
+ MOZ_ASSERT(obj->is<NativeObject>());
+ MOZ_ASSERT(start <= end);
+
+ if (start == end) {
+ return;
+ }
+
+ if (!currentStack().push(obj, kind, start)) {
+ delayMarkingChildren(obj);
+ }
+}
+
+void GCMarker::repush(JSObject* obj) {
+ MOZ_ASSERT(obj->asTenured().isMarkedAtLeast(markColor()));
+ pushTaggedPtr(obj);
+}
+
+bool GCMarker::enterWeakMarkingMode() {
+ MOZ_ASSERT(weakMapAction() == JS::WeakMapTraceAction::Expand);
+ MOZ_ASSERT(state != MarkingState::WeakMarking);
+ if (state == MarkingState::IterativeMarking) {
+ return false;
+ }
+
+ // During weak marking mode, we maintain a table mapping weak keys to
+ // entries in known-live weakmaps. Initialize it with the keys of marked
+ // weakmaps -- or more precisely, the keys of marked weakmaps that are
+ // mapped to not yet live values. (Once bug 1167452 implements incremental
+ // weakmap marking, this initialization step will become unnecessary, as
+ // the table will already hold all such keys.)
+
+ // Set state before doing anything else, so any new key that is marked
+ // during the following gcWeakKeys scan will itself be looked up in
+ // gcWeakKeys and marked according to ephemeron rules.
+ state = MarkingState::WeakMarking;
+
+ // If there was an 'enter-weak-marking-mode' token in the queue, then it
+ // and everything after it will still be in the queue so we can process
+ // them now.
+ while (processMarkQueue() == QueueYielded) {
+ };
+
+ return true;
+}
+
+IncrementalProgress JS::Zone::enterWeakMarkingMode(GCMarker* marker,
+ SliceBudget& budget) {
+ MOZ_ASSERT(marker->isWeakMarking());
+
+ if (!marker->incrementalWeakMapMarkingEnabled) {
+ for (WeakMapBase* m : gcWeakMapList()) {
+ if (m->mapColor) {
+ mozilla::Unused << m->markEntries(marker);
+ }
+ }
+ return IncrementalProgress::Finished;
+ }
+
+ // gcWeakKeys contains the keys from all weakmaps marked so far, or at least
+ // the keys that might still need to be marked through. Scan through
+ // gcWeakKeys and mark all values whose keys are marked. This marking may
+ // recursively mark through other weakmap entries (immediately since we are
+ // now in WeakMarking mode). The end result is a consistent state where all
+ // values are marked if both their map and key are marked -- though note that
+ // we may later leave weak marking mode, do some more marking, and then enter
+ // back in.
+ if (!isGCMarking()) {
+ return IncrementalProgress::Finished;
+ }
+
+ MOZ_ASSERT(gcNurseryWeakKeys().count() == 0);
+
+ // An OrderedHashMap::Range stays valid even when the underlying table
+ // (zone->gcWeakKeys) is mutated, which is useful here since we may add
+ // additional entries while iterating over the Range.
+ gc::WeakKeyTable::Range r = gcWeakKeys().all();
+ while (!r.empty()) {
+ gc::Cell* key = r.front().key;
+ gc::CellColor keyColor =
+ gc::detail::GetEffectiveColor(marker->runtime(), key);
+ if (keyColor) {
+ MOZ_ASSERT(key == r.front().key);
+ auto& markables = r.front().value;
+ r.popFront(); // Pop before any mutations happen.
+ size_t end = markables.length();
+ for (size_t i = 0; i < end; i++) {
+ WeakMarkable& v = markables[i];
+ // Note: if the key is marked gray but not black, then the markables
+ // vector may be appended to within this loop body. So iterate just
+ // over the ones from before weak marking mode was switched on.
+ v.weakmap->markKey(marker, key, v.key);
+ budget.step();
+ if (budget.isOverBudget()) {
+ return NotFinished;
+ }
+ }
+
+ if (keyColor == gc::CellColor::Black) {
+        // We can't mark the key any more than it already is, so it no longer
+ // needs to be in the weak keys table.
+ if (end == markables.length()) {
+ bool found;
+ gcWeakKeys().remove(key, &found);
+ } else {
+ markables.erase(markables.begin(), &markables[end]);
+ }
+ }
+ } else {
+ r.popFront();
+ }
+ }
+
+ return IncrementalProgress::Finished;
+}
+
+#ifdef DEBUG
+void JS::Zone::checkWeakMarkingMode() {
+ for (auto r = gcWeakKeys().all(); !r.empty(); r.popFront()) {
+ for (auto markable : r.front().value) {
+ MOZ_ASSERT(markable.weakmap->mapColor,
+ "unmarked weakmaps in weak keys table");
+ }
+ }
+}
+#endif
+
+void GCMarker::leaveWeakMarkingMode() {
+ MOZ_ASSERT(state == MarkingState::WeakMarking ||
+ state == MarkingState::IterativeMarking);
+
+ if (state != MarkingState::IterativeMarking) {
+ state = MarkingState::RegularMarking;
+ }
+
+ // The gcWeakKeys table is still populated and may be used during a future
+ // weak marking mode within this GC.
+}
+
+void GCMarker::delayMarkingChildren(Cell* cell) {
+ Arena* arena = cell->asTenured().arena();
+ if (!arena->onDelayedMarkingList()) {
+ arena->setNextDelayedMarkingArena(delayedMarkingList);
+ delayedMarkingList = arena;
+#ifdef DEBUG
+ markLaterArenas++;
+#endif
+ }
+ JS::TraceKind kind = MapAllocToTraceKind(arena->getAllocKind());
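+  // Kinds that can never be marked gray are always delayed as black, even
+  // when we are currently marking gray.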
+ MarkColor colorToMark =
+ TraceKindCanBeMarkedGray(kind) ? color : MarkColor::Black;
+ if (!arena->hasDelayedMarking(colorToMark)) {
+ arena->setHasDelayedMarking(colorToMark, true);
+ delayedMarkingWorkAdded = true;
+ }
+}
+
+void GCMarker::markDelayedChildren(Arena* arena, MarkColor color) {
+ JS::TraceKind kind = MapAllocToTraceKind(arena->getAllocKind());
+ MOZ_ASSERT_IF(color == MarkColor::Gray, TraceKindCanBeMarkedGray(kind));
+
+ AutoSetMarkColor setColor(*this, color);
+ for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
+ if (cell->isMarked(color)) {
+ JS::TraceChildren(this, JS::GCCellPtr(cell, kind));
+ }
+ }
+}
+
+/*
+ * Process arenas from |delayedMarkingList| by marking the unmarked children of
+ * marked cells of color |color|. Return early if the |budget| is exceeded.
+ *
+ * This is called twice, first to mark gray children and then to mark black
+ * children.
+ */
+bool GCMarker::processDelayedMarkingList(MarkColor color, SliceBudget& budget) {
+ // Marking delayed children may add more arenas to the list, including arenas
+ // we are currently processing or have previously processed. Handle this by
+ // clearing a flag on each arena before marking its children. This flag will
+ // be set again if the arena is re-added. Iterate the list until no new arenas
+ // were added.
+
+ do {
+ delayedMarkingWorkAdded = false;
+ for (Arena* arena = delayedMarkingList; arena;
+ arena = arena->getNextDelayedMarking()) {
+ if (!arena->hasDelayedMarking(color)) {
+ continue;
+ }
+ arena->setHasDelayedMarking(color, false);
+ markDelayedChildren(arena, color);
+ budget.step(150);
+ if (budget.isOverBudget()) {
+ return false;
+ }
+ }
+ } while (delayedMarkingWorkAdded);
+
+ return true;
+}
+
+bool GCMarker::markAllDelayedChildren(SliceBudget& budget) {
+ MOZ_ASSERT(!hasBlackEntries());
+ MOZ_ASSERT(markColor() == MarkColor::Black);
+
+ GCRuntime& gc = runtime()->gc;
+ mozilla::Maybe<gcstats::AutoPhase> ap;
+ if (gc.state() == State::Mark) {
+ ap.emplace(gc.stats(), gcstats::PhaseKind::MARK_DELAYED);
+ }
+
+ // We have a list of arenas containing marked cells with unmarked children
+ // where we ran out of stack space during marking.
+ //
+ // Both black and gray cells in these arenas may have unmarked children, and
+ // we must mark gray children first as gray entries always sit before black
+ // entries on the mark stack. Therefore the list is processed in two stages.
+
+ MOZ_ASSERT(delayedMarkingList);
+
+ bool finished;
+ finished = processDelayedMarkingList(MarkColor::Gray, budget);
+ rebuildDelayedMarkingList();
+ if (!finished) {
+ return false;
+ }
+
+ finished = processDelayedMarkingList(MarkColor::Black, budget);
+ rebuildDelayedMarkingList();
+
+ MOZ_ASSERT_IF(finished, !delayedMarkingList);
+ MOZ_ASSERT_IF(finished, !markLaterArenas);
+
+ return finished;
+}
+
+void GCMarker::rebuildDelayedMarkingList() {
+ // Rebuild the delayed marking list, removing arenas which do not need further
+ // marking.
+
+ Arena* listTail = nullptr;
+ forEachDelayedMarkingArena([&](Arena* arena) {
+ if (!arena->hasAnyDelayedMarking()) {
+ arena->clearDelayedMarkingState();
+#ifdef DEBUG
+ MOZ_ASSERT(markLaterArenas);
+ markLaterArenas--;
+#endif
+ return;
+ }
+
+ appendToDelayedMarkingList(&listTail, arena);
+ });
+ appendToDelayedMarkingList(&listTail, nullptr);
+}
+
+inline void GCMarker::appendToDelayedMarkingList(Arena** listTail,
+ Arena* arena) {
+ if (*listTail) {
+ (*listTail)->updateNextDelayedMarkingArena(arena);
+ } else {
+ delayedMarkingList = arena;
+ }
+ *listTail = arena;
+}
+
+#ifdef DEBUG
+void GCMarker::checkZone(void* p) {
+ MOZ_ASSERT(state != MarkingState::NotActive);
+ DebugOnly<Cell*> cell = static_cast<Cell*>(p);
+ MOZ_ASSERT_IF(cell->isTenured(),
+ cell->asTenured().zone()->isCollectingFromAnyThread());
+}
+#endif
+
+size_t GCMarker::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t size = stack.sizeOfExcludingThis(mallocSizeOf);
+ size += auxStack.sizeOfExcludingThis(mallocSizeOf);
+ for (ZonesIter zone(runtime(), WithAtoms); !zone.done(); zone.next()) {
+ size += zone->gcGrayRoots().SizeOfExcludingThis(mallocSizeOf);
+ }
+ return size;
+}
+
+/*** Tenuring Tracer ********************************************************/
+
+JSObject* TenuringTracer::onObjectEdge(JSObject* obj) {
+ if (!IsInsideNursery(obj)) {
+ return obj;
+ }
+
+ if (obj->isForwarded()) {
+ const gc::RelocationOverlay* overlay = gc::RelocationOverlay::fromCell(obj);
+ return static_cast<JSObject*>(overlay->forwardingAddress());
+ }
+
+ // Take a fast path for tenuring a plain object which is by far the most
+ // common case.
+ if (obj->is<PlainObject>()) {
+ return movePlainObjectToTenured(&obj->as<PlainObject>());
+ }
+
+ return moveToTenuredSlow(obj);
+}
+
+JSString* TenuringTracer::onStringEdge(JSString* str) {
+ if (!IsInsideNursery(str)) {
+ return str;
+ }
+
+ if (str->isForwarded()) {
+ const gc::RelocationOverlay* overlay = gc::RelocationOverlay::fromCell(str);
+ return static_cast<JSString*>(overlay->forwardingAddress());
+ }
+
+ return moveToTenured(str);
+}
+
+JS::BigInt* TenuringTracer::onBigIntEdge(JS::BigInt* bi) {
+ if (!IsInsideNursery(bi)) {
+ return bi;
+ }
+
+ if (bi->isForwarded()) {
+ const gc::RelocationOverlay* overlay = gc::RelocationOverlay::fromCell(bi);
+ return static_cast<JS::BigInt*>(overlay->forwardingAddress());
+ }
+
+ return moveToTenured(bi);
+}
+
+JS::Symbol* TenuringTracer::onSymbolEdge(JS::Symbol* sym) { return sym; }
+js::BaseScript* TenuringTracer::onScriptEdge(BaseScript* script) {
+ return script;
+}
+js::Shape* TenuringTracer::onShapeEdge(Shape* shape) { return shape; }
+js::RegExpShared* TenuringTracer::onRegExpSharedEdge(RegExpShared* shared) {
+ return shared;
+}
+js::ObjectGroup* TenuringTracer::onObjectGroupEdge(ObjectGroup* group) {
+ return group;
+}
+js::BaseShape* TenuringTracer::onBaseShapeEdge(BaseShape* base) { return base; }
+js::jit::JitCode* TenuringTracer::onJitCodeEdge(jit::JitCode* code) {
+ return code;
+}
+js::Scope* TenuringTracer::onScopeEdge(Scope* scope) { return scope; }
+
+template <typename T>
+inline void TenuringTracer::traverse(T** thingp) {
+ // This is only used by VisitTraceList.
+ MOZ_ASSERT(!nursery().isInside(thingp));
+ CheckTracedThing(this, *thingp);
+ T* thing = *thingp;
+ T* post = DispatchToOnEdge(this, thing);
+ if (post != thing) {
+ *thingp = post;
+ }
+}
+
+void TenuringTracer::traverse(JS::Value* thingp) {
+ MOZ_ASSERT(!nursery().isInside(thingp));
+
+ Value value = *thingp;
+ CheckTracedThing(this, value);
+
+ // We only care about a few kinds of GC thing here and this generates much
+ // tighter code than using MapGCThingTyped.
+ Value post;
+ if (value.isObject()) {
+ post = JS::ObjectValue(*onObjectEdge(&value.toObject()));
+ } else if (value.isString()) {
+ post = JS::StringValue(onStringEdge(value.toString()));
+ } else if (value.isBigInt()) {
+ post = JS::BigIntValue(onBigIntEdge(value.toBigInt()));
+ } else {
+ return;
+ }
+
+ if (post != value) {
+ *thingp = post;
+ }
+}
+
+template <typename T>
+void js::gc::StoreBuffer::MonoTypeBuffer<T>::trace(TenuringTracer& mover) {
+ mozilla::ReentrancyGuard g(*owner_);
+ MOZ_ASSERT(owner_->isEnabled());
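+  // Trace the most recently recorded edge (cached in last_) as well as all
+  // edges in the store set.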
+ if (last_) {
+ last_.trace(mover);
+ }
+ for (typename StoreSet::Range r = stores_.all(); !r.empty(); r.popFront()) {
+ r.front().trace(mover);
+ }
+}
+
+namespace js {
+namespace gc {
+template void StoreBuffer::MonoTypeBuffer<StoreBuffer::ValueEdge>::trace(
+ TenuringTracer&);
+template void StoreBuffer::MonoTypeBuffer<StoreBuffer::SlotsEdge>::trace(
+ TenuringTracer&);
+template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::StringPtrEdge>;
+template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::BigIntPtrEdge>;
+template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::ObjectPtrEdge>;
+} // namespace gc
+} // namespace js
+
+void js::gc::StoreBuffer::SlotsEdge::trace(TenuringTracer& mover) const {
+ NativeObject* obj = object();
+ MOZ_ASSERT(IsCellPointerValid(obj));
+
+ // Beware JSObject::swap exchanging a native object for a non-native one.
+ if (!obj->isNative()) {
+ return;
+ }
+
+ MOZ_ASSERT(!IsInsideNursery(obj), "obj shouldn't live in nursery.");
+
+ if (kind() == ElementKind) {
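+    // The recorded element range may be stale: elements can have been
+    // shifted or the initialized length reduced since the edge was recorded,
+    // so adjust and clamp the range before tracing.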
+ uint32_t initLen = obj->getDenseInitializedLength();
+ uint32_t numShifted = obj->getElementsHeader()->numShiftedElements();
+ uint32_t clampedStart = start_;
+ clampedStart = numShifted < clampedStart ? clampedStart - numShifted : 0;
+ clampedStart = std::min(clampedStart, initLen);
+ uint32_t clampedEnd = start_ + count_;
+ clampedEnd = numShifted < clampedEnd ? clampedEnd - numShifted : 0;
+ clampedEnd = std::min(clampedEnd, initLen);
+ MOZ_ASSERT(clampedStart <= clampedEnd);
+ mover.traceSlots(
+ static_cast<HeapSlot*>(obj->getDenseElements() + clampedStart)
+ ->unbarrieredAddress(),
+ clampedEnd - clampedStart);
+ } else {
+ uint32_t start = std::min(start_, obj->slotSpan());
+ uint32_t end = std::min(start_ + count_, obj->slotSpan());
+ MOZ_ASSERT(start <= end);
+ mover.traceObjectSlots(obj, start, end);
+ }
+}
+
+static inline void TraceWholeCell(TenuringTracer& mover, JSObject* object) {
+ MOZ_ASSERT_IF(object->storeBuffer(),
+ !object->storeBuffer()->markingNondeduplicatable);
+ mover.traceObject(object);
+}
+
+// Non-deduplicatable marking is necessary for the following two reasons:
+//
+// 1. Tenured string chars cannot be updated:
+//
+// If any of the tenured string's bases were deduplicated during tenuring,
+// the tenured string's chars pointer would need to be adjusted. This would
+// then require updating any other tenured strings that are dependent on the
+// first tenured string, and we have no way to find them without scanning
+// the entire tenured heap.
+//
+// 2. A tenured string cannot store its nursery base or its base's chars:
+//
+// Tenured strings have no place to stash a pointer to their nursery base or
+// its chars. You need to be able to traverse any dependent string's chain
+// of bases up to a nursery "root base" that points to the malloced chars
+// that the dependent strings started out pointing to, so that you can
+// calculate the offset of any dependent string and update the ptr+offset if
+// the root base gets deduplicated to a different allocation. Tenured
+// strings in this base chain will stop you from reaching the nursery
+// version of the root base; you can only get to the tenured version, and it
+// has no place to store the original chars pointer.
+static inline void PreventDeduplicationOfReachableStrings(JSString* str) {
+ MOZ_ASSERT(str->isTenured());
+ MOZ_ASSERT(!str->isForwarded());
+
+ JSLinearString* baseOrRelocOverlay = str->nurseryBaseOrRelocOverlay();
+
+ // Walk along the chain of dependent strings' base string pointers
+ // to mark them all non-deduplicatable.
+ while (true) {
+    // baseOrRelocOverlay can be one of three things:
+ // 1. forwarded nursery string:
+ // The forwarded string still retains the flag that can tell whether
+ // this string is a dependent string with a base. Its
+ // StringRelocationOverlay holds a saved pointer to its base in the
+ // nursery.
+ // 2. not yet forwarded nursery string:
+ // Retrieve the base field directly from the string.
+ // 3. tenured string:
+ // The nursery base chain ends here, so stop traversing.
+ if (baseOrRelocOverlay->isForwarded()) {
+ JSLinearString* tenuredBase = Forwarded(baseOrRelocOverlay);
+ if (!tenuredBase->hasBase()) {
+ break;
+ }
+ baseOrRelocOverlay = StringRelocationOverlay::fromCell(baseOrRelocOverlay)
+ ->savedNurseryBaseOrRelocOverlay();
+ } else {
+ JSLinearString* base = baseOrRelocOverlay;
+ if (base->isTenured()) {
+ break;
+ }
+ if (base->isDeduplicatable()) {
+ base->setNonDeduplicatable();
+ }
+ if (!base->hasBase()) {
+ break;
+ }
+ baseOrRelocOverlay = base->nurseryBaseOrRelocOverlay();
+ }
+ }
+}
+
+static inline void TraceWholeCell(TenuringTracer& mover, JSString* str) {
+ MOZ_ASSERT_IF(str->storeBuffer(),
+ str->storeBuffer()->markingNondeduplicatable);
+
+ // Mark all strings reachable from the tenured string `str` as
+ // non-deduplicatable. These strings are the bases of the tenured dependent
+ // string.
+ if (str->hasBase()) {
+ PreventDeduplicationOfReachableStrings(str);
+ }
+
+ str->traceChildren(&mover);
+}
+
+static inline void TraceWholeCell(TenuringTracer& mover, BaseScript* script) {
+ script->traceChildren(&mover);
+}
+
+static inline void TraceWholeCell(TenuringTracer& mover,
+ jit::JitCode* jitcode) {
+ jitcode->traceChildren(&mover);
+}
+
+template <typename T>
+static void TraceBufferedCells(TenuringTracer& mover, Arena* arena,
+ ArenaCellSet* cells) {
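+  // Walk the arena's cell bitmap word by word; each set bit gives the offset
+  // of a buffered cell within the arena.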
+ for (size_t i = 0; i < MaxArenaCellIndex; i += cells->BitsPerWord) {
+ ArenaCellSet::WordT bitset = cells->getWord(i / cells->BitsPerWord);
+ while (bitset) {
+ size_t bit = i + js::detail::CountTrailingZeroes(bitset);
+ auto cell =
+ reinterpret_cast<T*>(uintptr_t(arena) + ArenaCellIndexBytes * bit);
+ TraceWholeCell(mover, cell);
+ bitset &= bitset - 1; // Clear the low bit.
+ }
+ }
+}
+
+void ArenaCellSet::trace(TenuringTracer& mover) {
+ for (ArenaCellSet* cells = this; cells; cells = cells->next) {
+ cells->check();
+
+ Arena* arena = cells->arena;
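+    // Reset the arena's buffered cell set; the recorded cells are traced
+    // below.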
+ arena->bufferedCells() = &ArenaCellSet::Empty;
+
+ JS::TraceKind kind = MapAllocToTraceKind(arena->getAllocKind());
+ switch (kind) {
+ case JS::TraceKind::Object:
+ TraceBufferedCells<JSObject>(mover, arena, cells);
+ break;
+ case JS::TraceKind::String:
+ TraceBufferedCells<JSString>(mover, arena, cells);
+ break;
+ case JS::TraceKind::Script:
+ TraceBufferedCells<BaseScript>(mover, arena, cells);
+ break;
+ case JS::TraceKind::JitCode:
+ TraceBufferedCells<jit::JitCode>(mover, arena, cells);
+ break;
+ default:
+ MOZ_CRASH("Unexpected trace kind");
+ }
+ }
+}
+
+void js::gc::StoreBuffer::WholeCellBuffer::trace(TenuringTracer& mover) {
+ MOZ_ASSERT(owner_->isEnabled());
+
+#ifdef DEBUG
+ // Verify that all string whole cells are traced first before any other
+ // strings are visited for any reason.
+ MOZ_ASSERT(!owner_->markingNondeduplicatable);
+ owner_->markingNondeduplicatable = true;
+#endif
+ // Trace all of the strings to mark the non-deduplicatable bits, then trace
+ // all other whole cells.
+ if (stringHead_) {
+ stringHead_->trace(mover);
+ }
+#ifdef DEBUG
+ owner_->markingNondeduplicatable = false;
+#endif
+ if (nonStringHead_) {
+ nonStringHead_->trace(mover);
+ }
+
+ stringHead_ = nonStringHead_ = nullptr;
+}
+
+template <typename T>
+void js::gc::StoreBuffer::CellPtrEdge<T>::trace(TenuringTracer& mover) const {
+ static_assert(std::is_base_of_v<Cell, T>, "T must be a Cell type");
+ static_assert(!std::is_base_of_v<TenuredCell, T>,
+ "T must not be a tenured Cell type");
+
+ T* thing = *edge;
+ if (!thing) {
+ return;
+ }
+
+ MOZ_ASSERT(IsCellPointerValid(thing));
+ MOZ_ASSERT(thing->getTraceKind() == JS::MapTypeToTraceKind<T>::kind);
+
+ if (std::is_same_v<JSString, T>) {
+ // Nursery string deduplication requires all tenured string -> nursery
+ // string edges to be registered with the whole cell buffer in order to
+ // correctly set the non-deduplicatable bit.
+ MOZ_ASSERT(!mover.runtime()->gc.isPointerWithinTenuredCell(
+ edge, JS::TraceKind::String));
+ }
+
+ *edge = DispatchToOnEdge(&mover, thing);
+}
+
+void js::gc::StoreBuffer::ValueEdge::trace(TenuringTracer& mover) const {
+ if (deref()) {
+ mover.traverse(edge);
+ }
+}
+
+// Visit all object children of the object and trace them.
+void js::TenuringTracer::traceObject(JSObject* obj) {
+ CallTraceHook(this, obj);
+
+ if (!obj->isNative()) {
+ return;
+ }
+
+ NativeObject* nobj = &obj->as<NativeObject>();
+ if (!nobj->hasEmptyElements()) {
+ HeapSlotArray elements = nobj->getDenseElements();
+ Value* elems = elements.begin()->unbarrieredAddress();
+ traceSlots(elems, elems + nobj->getDenseInitializedLength());
+ }
+
+ traceObjectSlots(nobj, 0, nobj->slotSpan());
+}
+
+void js::TenuringTracer::traceObjectSlots(NativeObject* nobj, uint32_t start,
+ uint32_t end) {
+ HeapSlot* fixedStart;
+ HeapSlot* fixedEnd;
+ HeapSlot* dynStart;
+ HeapSlot* dynEnd;
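+  // Split [start, end) into its fixed-slot and dynamic-slot parts and trace
+  // whichever parts are non-empty.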
+ nobj->getSlotRange(start, end, &fixedStart, &fixedEnd, &dynStart, &dynEnd);
+ if (fixedStart) {
+ traceSlots(fixedStart->unbarrieredAddress(),
+ fixedEnd->unbarrieredAddress());
+ }
+ if (dynStart) {
+ traceSlots(dynStart->unbarrieredAddress(), dynEnd->unbarrieredAddress());
+ }
+}
+
+void js::TenuringTracer::traceSlots(Value* vp, Value* end) {
+ for (; vp != end; ++vp) {
+ traverse(vp);
+ }
+}
+
+inline void js::TenuringTracer::traceSlots(JS::Value* vp, uint32_t nslots) {
+ traceSlots(vp, vp + nslots);
+}
+
+void js::TenuringTracer::traceString(JSString* str) {
+ str->traceChildren(this);
+}
+
+void js::TenuringTracer::traceBigInt(JS::BigInt* bi) {
+ bi->traceChildren(this);
+}
+
+#ifdef DEBUG
+static inline uintptr_t OffsetFromChunkStart(void* p) {
+ return uintptr_t(p) & gc::ChunkMask;
+}
+static inline ptrdiff_t OffsetToChunkEnd(void* p) {
+ return ChunkSize - (uintptr_t(p) & gc::ChunkMask);
+}
+#endif
+
+/* Insert the given relocation entry into the list of things to visit. */
+inline void js::TenuringTracer::insertIntoObjectFixupList(
+ RelocationOverlay* entry) {
+ *objTail = entry;
+ objTail = &entry->nextRef();
+ *objTail = nullptr;
+}
+
+template <typename T>
+inline T* js::TenuringTracer::allocTenured(Zone* zone, AllocKind kind) {
+ return static_cast<T*>(static_cast<Cell*>(AllocateCellInGC(zone, kind)));
+}
+
+JSString* js::TenuringTracer::allocTenuredString(JSString* src, Zone* zone,
+ AllocKind dstKind) {
+ JSString* dst = allocTenured<JSString>(zone, dstKind);
+ tenuredSize += moveStringToTenured(dst, src, dstKind);
+ tenuredCells++;
+
+ return dst;
+}
+
+JSObject* js::TenuringTracer::moveToTenuredSlow(JSObject* src) {
+ MOZ_ASSERT(IsInsideNursery(src));
+ MOZ_ASSERT(!src->nurseryZone()->usedByHelperThread());
+ MOZ_ASSERT(!src->is<PlainObject>());
+
+ AllocKind dstKind = src->allocKindForTenure(nursery());
+ auto dst = allocTenured<JSObject>(src->nurseryZone(), dstKind);
+
+ size_t srcSize = Arena::thingSize(dstKind);
+ size_t dstSize = srcSize;
+
+ /*
+ * Arrays do not necessarily have the same AllocKind between src and dst.
+ * We deal with this by copying elements manually, possibly re-inlining
+ * them if there is adequate room inline in dst.
+ *
+ * For Arrays we're reducing tenuredSize to the smaller srcSize
+ * because moveElementsToTenured() accounts for all Array elements,
+ * even if they are inlined.
+ */
+ if (src->is<ArrayObject>()) {
+ dstSize = srcSize = sizeof(NativeObject);
+ } else if (src->is<TypedArrayObject>()) {
+ TypedArrayObject* tarray = &src->as<TypedArrayObject>();
+    // Typed arrays with inline data do not necessarily have the same
+    // AllocKind between src and dst. The nursery does not allocate an inline
+    // data buffer of the same size as the slow (tenured) path does: in the
+    // slow path, the typed array object stores its inline data in the
+    // allocated space that fits the AllocKind, whereas in the nursery the
+    // data lives in a separate buffer placed directly behind the minimal
+    // JSObject. That buffer size plus the JSObject size is not necessarily as
+    // large as the slow path's AllocKind size.
+ if (tarray->hasInlineElements()) {
+ AllocKind srcKind = GetGCObjectKind(TypedArrayObject::FIXED_DATA_START);
+ size_t headerSize = Arena::thingSize(srcKind);
+ srcSize = headerSize + tarray->byteLength().get();
+ }
+ }
+
+ tenuredSize += dstSize;
+ tenuredCells++;
+
+ // Copy the Cell contents.
+ MOZ_ASSERT(OffsetFromChunkStart(src) >= sizeof(ChunkBase));
+ MOZ_ASSERT(OffsetToChunkEnd(src) >= ptrdiff_t(srcSize));
+ js_memcpy(dst, src, srcSize);
+
+ // Move the slots and elements, if we need to.
+ if (src->isNative()) {
+ NativeObject* ndst = &dst->as<NativeObject>();
+ NativeObject* nsrc = &src->as<NativeObject>();
+ tenuredSize += moveSlotsToTenured(ndst, nsrc);
+ tenuredSize += moveElementsToTenured(ndst, nsrc, dstKind);
+
+ // There is a pointer into a dictionary mode object from the head of its
+ // shape list. This is updated in Nursery::sweepDictionaryModeObjects().
+ }
+
+ JSObjectMovedOp op = dst->getClass()->extObjectMovedOp();
+ MOZ_ASSERT_IF(src->is<ProxyObject>(), op == proxy_ObjectMoved);
+ if (op) {
+ // Tell the hazard analysis that the object moved hook can't GC.
+ JS::AutoSuppressGCAnalysis nogc;
+ tenuredSize += op(dst, src);
+ } else {
+ MOZ_ASSERT_IF(src->getClass()->hasFinalize(),
+ CanNurseryAllocateFinalizedClass(src->getClass()));
+ }
+
+ RelocationOverlay* overlay = RelocationOverlay::forwardCell(src, dst);
+ insertIntoObjectFixupList(overlay);
+
+ gcprobes::PromoteToTenured(src, dst);
+ return dst;
+}
+
+inline JSObject* js::TenuringTracer::movePlainObjectToTenured(
+ PlainObject* src) {
+  // Fast path version of moveToTenuredSlow() specialized for PlainObject.
+
+ MOZ_ASSERT(IsInsideNursery(src));
+ MOZ_ASSERT(!src->nurseryZone()->usedByHelperThread());
+
+ AllocKind dstKind = src->allocKindForTenure();
+ auto dst = allocTenured<PlainObject>(src->nurseryZone(), dstKind);
+
+ size_t srcSize = Arena::thingSize(dstKind);
+ tenuredSize += srcSize;
+ tenuredCells++;
+
+ // Copy the Cell contents.
+ MOZ_ASSERT(OffsetFromChunkStart(src) >= sizeof(ChunkBase));
+ MOZ_ASSERT(OffsetToChunkEnd(src) >= ptrdiff_t(srcSize));
+ js_memcpy(dst, src, srcSize);
+
+ // Move the slots and elements.
+ tenuredSize += moveSlotsToTenured(dst, src);
+ tenuredSize += moveElementsToTenured(dst, src, dstKind);
+
+ MOZ_ASSERT(!dst->getClass()->extObjectMovedOp());
+
+ RelocationOverlay* overlay = RelocationOverlay::forwardCell(src, dst);
+ insertIntoObjectFixupList(overlay);
+
+ gcprobes::PromoteToTenured(src, dst);
+ return dst;
+}
+
+size_t js::TenuringTracer::moveSlotsToTenured(NativeObject* dst,
+ NativeObject* src) {
+ /* Fixed slots have already been copied over. */
+ if (!src->hasDynamicSlots()) {
+ return 0;
+ }
+
+ Zone* zone = src->nurseryZone();
+ size_t count = src->numDynamicSlots();
+
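+  // Slots allocated outside the nursery are adopted as-is: just move the
+  // malloc accounting from the nursery to the tenured cell.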
+ if (!nursery().isInside(src->slots_)) {
+ AddCellMemory(dst, ObjectSlots::allocSize(count), MemoryUse::ObjectSlots);
+ nursery().removeMallocedBufferDuringMinorGC(src->getSlotsHeader());
+ return 0;
+ }
+
+ {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ HeapSlot* allocation =
+ zone->pod_malloc<HeapSlot>(ObjectSlots::allocCount(count));
+ if (!allocation) {
+ oomUnsafe.crash(ObjectSlots::allocSize(count),
+ "Failed to allocate slots while tenuring.");
+ }
+
+ ObjectSlots* slotsHeader = new (allocation)
+ ObjectSlots(count, src->getSlotsHeader()->dictionarySlotSpan());
+ dst->slots_ = slotsHeader->slots();
+ }
+
+ AddCellMemory(dst, ObjectSlots::allocSize(count), MemoryUse::ObjectSlots);
+
+ PodCopy(dst->slots_, src->slots_, count);
+ nursery().setSlotsForwardingPointer(src->slots_, dst->slots_, count);
+
+ return count * sizeof(HeapSlot);
+}
+
+size_t js::TenuringTracer::moveElementsToTenured(NativeObject* dst,
+ NativeObject* src,
+ AllocKind dstKind) {
+ if (src->hasEmptyElements()) {
+ return 0;
+ }
+
+ Zone* zone = src->nurseryZone();
+
+ ObjectElements* srcHeader = src->getElementsHeader();
+ size_t nslots = srcHeader->numAllocatedElements();
+
+ void* srcAllocatedHeader = src->getUnshiftedElementsHeader();
+
+ /* TODO Bug 874151: Prefer to put element data inline if we have space. */
+ if (!nursery().isInside(srcAllocatedHeader)) {
+ MOZ_ASSERT(src->elements_ == dst->elements_);
+ nursery().removeMallocedBufferDuringMinorGC(srcAllocatedHeader);
+
+ AddCellMemory(dst, nslots * sizeof(HeapSlot), MemoryUse::ObjectElements);
+
+ return 0;
+ }
+
+ // Shifted elements are copied too.
+ uint32_t numShifted = srcHeader->numShiftedElements();
+
+ /* Unlike other objects, Arrays can have fixed elements. */
+ if (src->is<ArrayObject>() && nslots <= GetGCKindSlots(dstKind)) {
+ dst->as<ArrayObject>().setFixedElements();
+ js_memcpy(dst->getElementsHeader(), srcAllocatedHeader,
+ nslots * sizeof(HeapSlot));
+ dst->elements_ += numShifted;
+ nursery().setElementsForwardingPointer(srcHeader, dst->getElementsHeader(),
+ srcHeader->capacity);
+ return nslots * sizeof(HeapSlot);
+ }
+
+ MOZ_ASSERT(nslots >= 2);
+
+ ObjectElements* dstHeader;
+ {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ dstHeader =
+ reinterpret_cast<ObjectElements*>(zone->pod_malloc<HeapSlot>(nslots));
+ if (!dstHeader) {
+ oomUnsafe.crash(sizeof(HeapSlot) * nslots,
+ "Failed to allocate elements while tenuring.");
+ }
+ }
+
+ AddCellMemory(dst, nslots * sizeof(HeapSlot), MemoryUse::ObjectElements);
+
+ js_memcpy(dstHeader, srcAllocatedHeader, nslots * sizeof(HeapSlot));
+ dst->elements_ = dstHeader->elements() + numShifted;
+ nursery().setElementsForwardingPointer(srcHeader, dst->getElementsHeader(),
+ srcHeader->capacity);
+ return nslots * sizeof(HeapSlot);
+}
+
+inline void js::TenuringTracer::insertIntoStringFixupList(
+ StringRelocationOverlay* entry) {
+ *stringTail = entry;
+ stringTail = &entry->nextRef();
+ *stringTail = nullptr;
+}
+
+JSString* js::TenuringTracer::moveToTenured(JSString* src) {
+ MOZ_ASSERT(IsInsideNursery(src));
+ MOZ_ASSERT(!src->nurseryZone()->usedByHelperThread());
+ MOZ_ASSERT(!src->isExternal());
+
+ AllocKind dstKind = src->getAllocKind();
+ Zone* zone = src->nurseryZone();
+
+ // If this string is in the StringToAtomCache, try to deduplicate it by using
+ // the atom. Don't do this for dependent strings because they're more
+ // complicated. See StringRelocationOverlay and DeduplicationStringHasher
+ // comments.
+ if (src->inStringToAtomCache() && src->isDeduplicatable() &&
+ !src->hasBase()) {
+ JSLinearString* linear = &src->asLinear();
+ JSAtom* atom = runtime()->caches().stringToAtomCache.lookup(linear);
+ MOZ_ASSERT(atom, "Why was the cache purged before minor GC?");
+
+ // Only deduplicate if both strings have the same encoding, to not confuse
+ // dependent strings.
+ if (src->hasTwoByteChars() == atom->hasTwoByteChars()) {
+ // The StringToAtomCache isn't used for inline strings (due to the minimum
+ // length) so canOwnDependentChars must be true for both src and atom.
+ // This means if there are dependent strings floating around using str's
+ // chars, they will be able to use the chars from the atom.
+ static_assert(StringToAtomCache::MinStringLength >
+ JSFatInlineString::MAX_LENGTH_LATIN1);
+ static_assert(StringToAtomCache::MinStringLength >
+ JSFatInlineString::MAX_LENGTH_TWO_BYTE);
+ MOZ_ASSERT(src->canOwnDependentChars());
+ MOZ_ASSERT(atom->canOwnDependentChars());
+
+ StringRelocationOverlay::forwardCell(src, atom);
+ gcprobes::PromoteToTenured(src, atom);
+ return atom;
+ }
+ }
+
+ JSString* dst;
+
+ // A live nursery string can only get deduplicated when:
+ // 1. Its length is smaller than MAX_DEDUPLICATABLE_STRING_LENGTH:
+ // Hashing a long string can affect performance.
+ // 2. It is linear:
+  //    Deduplicating every node of a rope would end up doing O(n^2) hashing
+  //    work.
+ // 3. It is deduplicatable:
+ // The JSString NON_DEDUP_BIT flag is unset.
+ // 4. It matches an entry in stringDeDupSet.
+
+ if (src->length() < MAX_DEDUPLICATABLE_STRING_LENGTH && src->isLinear() &&
+ src->isDeduplicatable() && nursery().stringDeDupSet.isSome()) {
+ if (auto p = nursery().stringDeDupSet->lookup(src)) {
+ // Deduplicate to the looked-up string!
+ dst = *p;
+ zone->stringStats.ref().noteDeduplicated(src->length(), src->allocSize());
+ StringRelocationOverlay::forwardCell(src, dst);
+ gcprobes::PromoteToTenured(src, dst);
+ return dst;
+ }
+
+ dst = allocTenuredString(src, zone, dstKind);
+
+ if (!nursery().stringDeDupSet->putNew(dst)) {
+      // If the stringDeDupSet runs out of memory, stop deduplicating
+      // strings.
+ nursery().stringDeDupSet.reset();
+ }
+ } else {
+ dst = allocTenuredString(src, zone, dstKind);
+ dst->clearNonDeduplicatable();
+ }
+
+ zone->stringStats.ref().noteTenured(src->allocSize());
+
+ auto* overlay = StringRelocationOverlay::forwardCell(src, dst);
+ MOZ_ASSERT(dst->isDeduplicatable());
+  // The root base might be deduplicated, so the non-inline chars might no
+  // longer be valid. Insert the overlay into this list so the chars can be
+  // relocated later.
+ insertIntoStringFixupList(overlay);
+
+ gcprobes::PromoteToTenured(src, dst);
+ return dst;
+}
+
+template <typename CharT>
+void js::Nursery::relocateDependentStringChars(
+ JSDependentString* tenuredDependentStr, JSLinearString* baseOrRelocOverlay,
+ size_t* offset, bool* rootBaseNotYetForwarded, JSLinearString** rootBase) {
+ MOZ_ASSERT(*offset == 0);
+ MOZ_ASSERT(*rootBaseNotYetForwarded == false);
+ MOZ_ASSERT(*rootBase == nullptr);
+
+ JS::AutoCheckCannotGC nogc;
+
+ const CharT* dependentStrChars =
+ tenuredDependentStr->nonInlineChars<CharT>(nogc);
+
+ // Traverse the dependent string nursery base chain to find the base that
+ // it's using chars from.
+ while (true) {
+ if (baseOrRelocOverlay->isForwarded()) {
+ JSLinearString* tenuredBase = Forwarded(baseOrRelocOverlay);
+ StringRelocationOverlay* relocOverlay =
+ StringRelocationOverlay::fromCell(baseOrRelocOverlay);
+
+ if (!tenuredBase->hasBase()) {
+          // The nursery root base is relocOverlay; it has been tenured to
+          // tenuredBase. Relocate tenuredDependentStr's chars and reassign
+          // the tenured root base as its base.
+ JSLinearString* tenuredRootBase = tenuredBase;
+ const CharT* rootBaseChars = relocOverlay->savedNurseryChars<CharT>();
+ *offset = dependentStrChars - rootBaseChars;
+ MOZ_ASSERT(*offset < tenuredRootBase->length());
+ tenuredDependentStr->relocateNonInlineChars<const CharT*>(
+ tenuredRootBase->nonInlineChars<CharT>(nogc), *offset);
+ tenuredDependentStr->setBase(tenuredRootBase);
+ return;
+ }
+
+ baseOrRelocOverlay = relocOverlay->savedNurseryBaseOrRelocOverlay();
+
+ } else {
+ JSLinearString* base = baseOrRelocOverlay;
+
+ if (!base->hasBase()) {
+        // The root base is not forwarded yet; it is simply base.
+ *rootBase = base;
+
+ // The root base can be in either the nursery or the tenured heap.
+        // dependentStr chars need to be relocated after traceString if the
+ // root base is in the nursery.
+ if (!(*rootBase)->isTenured()) {
+ *rootBaseNotYetForwarded = true;
+ const CharT* rootBaseChars = (*rootBase)->nonInlineChars<CharT>(nogc);
+ *offset = dependentStrChars - rootBaseChars;
+ MOZ_ASSERT(*offset < base->length(), "Tenured root base");
+ }
+
+ tenuredDependentStr->setBase(*rootBase);
+
+ return;
+ }
+
+ baseOrRelocOverlay = base->nurseryBaseOrRelocOverlay();
+ }
+ }
+}
+
+inline void js::TenuringTracer::insertIntoBigIntFixupList(
+ RelocationOverlay* entry) {
+ *bigIntTail = entry;
+ bigIntTail = &entry->nextRef();
+ *bigIntTail = nullptr;
+}
+
+JS::BigInt* js::TenuringTracer::moveToTenured(JS::BigInt* src) {
+ MOZ_ASSERT(IsInsideNursery(src));
+ MOZ_ASSERT(!src->nurseryZone()->usedByHelperThread());
+
+ AllocKind dstKind = src->getAllocKind();
+ Zone* zone = src->nurseryZone();
+ zone->tenuredBigInts++;
+
+ JS::BigInt* dst = allocTenured<JS::BigInt>(zone, dstKind);
+ tenuredSize += moveBigIntToTenured(dst, src, dstKind);
+ tenuredCells++;
+
+ RelocationOverlay* overlay = RelocationOverlay::forwardCell(src, dst);
+ insertIntoBigIntFixupList(overlay);
+
+ gcprobes::PromoteToTenured(src, dst);
+ return dst;
+}
+
+void js::Nursery::collectToFixedPoint(TenuringTracer& mover) {
+ for (RelocationOverlay* p = mover.objHead; p; p = p->next()) {
+ auto* obj = static_cast<JSObject*>(p->forwardingAddress());
+ mover.traceObject(obj);
+ }
+
+ for (StringRelocationOverlay* p = mover.stringHead; p; p = p->next()) {
+ auto* tenuredStr = static_cast<JSString*>(p->forwardingAddress());
+    // Ensure that the NON_DEDUP_BIT was reset properly.
+ MOZ_ASSERT(tenuredStr->isDeduplicatable());
+
+    // The nursery root base might not have been forwarded before
+    // traceString(tenuredStr); in that case traceString() will forward it.
+    // Dependent string chars need to be relocated after traceString() if the
+    // root base was not already forwarded.
+ size_t offset = 0;
+ bool rootBaseNotYetForwarded = false;
+ JSLinearString* rootBase = nullptr;
+
+ if (tenuredStr->isDependent()) {
+ if (tenuredStr->hasTwoByteChars()) {
+ relocateDependentStringChars<char16_t>(
+ &tenuredStr->asDependent(), p->savedNurseryBaseOrRelocOverlay(),
+ &offset, &rootBaseNotYetForwarded, &rootBase);
+ } else {
+ relocateDependentStringChars<JS::Latin1Char>(
+ &tenuredStr->asDependent(), p->savedNurseryBaseOrRelocOverlay(),
+ &offset, &rootBaseNotYetForwarded, &rootBase);
+ }
+ }
+
+ mover.traceString(tenuredStr);
+
+ if (rootBaseNotYetForwarded) {
+ MOZ_ASSERT(rootBase->isForwarded(),
+ "traceString() should make it forwarded");
+ JS::AutoCheckCannotGC nogc;
+
+ JSLinearString* tenuredRootBase = Forwarded(rootBase);
+ MOZ_ASSERT(offset < tenuredRootBase->length());
+
+ if (tenuredStr->hasTwoByteChars()) {
+ tenuredStr->asDependent().relocateNonInlineChars<const char16_t*>(
+ tenuredRootBase->twoByteChars(nogc), offset);
+ } else {
+ tenuredStr->asDependent().relocateNonInlineChars<const JS::Latin1Char*>(
+ tenuredRootBase->latin1Chars(nogc), offset);
+ }
+ tenuredStr->setBase(tenuredRootBase);
+ }
+ }
+
+ for (RelocationOverlay* p = mover.bigIntHead; p; p = p->next()) {
+ mover.traceBigInt(static_cast<JS::BigInt*>(p->forwardingAddress()));
+ }
+}
+
+size_t js::TenuringTracer::moveStringToTenured(JSString* dst, JSString* src,
+ AllocKind dstKind) {
+ size_t size = Arena::thingSize(dstKind);
+
+ // At the moment, strings always have the same AllocKind between src and
+ // dst. This may change in the future.
+ MOZ_ASSERT(dst->asTenured().getAllocKind() == src->getAllocKind());
+
+ // Copy the Cell contents.
+ MOZ_ASSERT(OffsetToChunkEnd(src) >= ptrdiff_t(size));
+ js_memcpy(dst, src, size);
+
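+  // Malloced chars are kept; remove them from the nursery's malloced buffer
+  // registry and account for them on the tenured string.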
+ if (src->ownsMallocedChars()) {
+ void* chars = src->asLinear().nonInlineCharsRaw();
+ nursery().removeMallocedBufferDuringMinorGC(chars);
+ AddCellMemory(dst, dst->asLinear().allocSize(), MemoryUse::StringContents);
+ }
+
+ return size;
+}
+
+size_t js::TenuringTracer::moveBigIntToTenured(JS::BigInt* dst, JS::BigInt* src,
+ AllocKind dstKind) {
+ size_t size = Arena::thingSize(dstKind);
+
+ // At the moment, BigInts always have the same AllocKind between src and
+ // dst. This may change in the future.
+ MOZ_ASSERT(dst->asTenured().getAllocKind() == src->getAllocKind());
+
+ // Copy the Cell contents.
+ MOZ_ASSERT(OffsetToChunkEnd(src) >= ptrdiff_t(size));
+ js_memcpy(dst, src, size);
+
+ MOZ_ASSERT(dst->zone() == src->nurseryZone());
+
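+  // Digits allocated outside the nursery are kept as-is; digits in nursery
+  // memory are copied into a new malloced buffer and a forwarding pointer is
+  // left behind.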
+ if (src->hasHeapDigits()) {
+ size_t length = dst->digitLength();
+ if (!nursery().isInside(src->heapDigits_)) {
+ nursery().removeMallocedBufferDuringMinorGC(src->heapDigits_);
+ } else {
+ Zone* zone = src->nurseryZone();
+ {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ dst->heapDigits_ = zone->pod_malloc<JS::BigInt::Digit>(length);
+ if (!dst->heapDigits_) {
+ oomUnsafe.crash(sizeof(JS::BigInt::Digit) * length,
+ "Failed to allocate digits while tenuring.");
+ }
+ }
+
+ PodCopy(dst->heapDigits_, src->heapDigits_, length);
+ nursery().setDirectForwardingPointer(src->heapDigits_, dst->heapDigits_);
+
+ size += length * sizeof(JS::BigInt::Digit);
+ }
+
+ AddCellMemory(dst, length * sizeof(JS::BigInt::Digit),
+ MemoryUse::BigIntDigits);
+ }
+
+ return size;
+}
+
+/*** IsMarked / IsAboutToBeFinalized ****************************************/
+
+template <typename T>
+static inline void CheckIsMarkedThing(T* thing) {
+#define IS_SAME_TYPE_OR(name, type, _, _1) std::is_same_v<type, T> ||
+ static_assert(JS_FOR_EACH_TRACEKIND(IS_SAME_TYPE_OR) false,
+ "Only the base cell layout types are allowed into "
+ "marking/tracing internals");
+#undef IS_SAME_TYPE_OR
+
+#ifdef DEBUG
+ MOZ_ASSERT(thing);
+
+ // Allow any thread access to uncollected things.
+ if (thing->isPermanentAndMayBeShared()) {
+ return;
+ }
+
+ // Allow the current thread access if it is sweeping or in sweep-marking, but
+ // try to check the zone. Some threads have access to all zones when sweeping.
+ JSContext* cx = TlsContext.get();
+ MOZ_ASSERT(cx->gcUse != JSContext::GCUse::Finalizing);
+ if (cx->gcUse == JSContext::GCUse::Sweeping ||
+ cx->gcUse == JSContext::GCUse::Marking) {
+ Zone* zone = thing->zoneFromAnyThread();
+ MOZ_ASSERT_IF(cx->gcSweepZone,
+ cx->gcSweepZone == zone || zone->isAtomsZone());
+ return;
+ }
+
+ // Otherwise only allow access from the main thread or this zone's associated
+ // thread.
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(thing->runtimeFromAnyThread()) ||
+ CurrentThreadCanAccessZone(thing->zoneFromAnyThread()));
+#endif
+}
+
+template <typename T>
+static inline bool ShouldCheckMarkState(JSRuntime* rt, T** thingp) {
+ MOZ_ASSERT(thingp);
+ CheckIsMarkedThing(*thingp);
+ MOZ_ASSERT(!IsInsideNursery(*thingp));
+
+ TenuredCell& thing = (*thingp)->asTenured();
+ Zone* zone = thing.zoneFromAnyThread();
+
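+  // Mark bits are only meaningful while the zone is being marked or swept;
+  // otherwise the caller treats the thing as marked.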
+ if (zone->gcState() <= Zone::Prepare || zone->isGCFinished()) {
+ return false;
+ }
+
+ if (zone->isGCCompacting() && IsForwarded(*thingp)) {
+ *thingp = Forwarded(*thingp);
+ return false;
+ }
+
+ return true;
+}
+
+template <typename T>
+bool js::gc::IsMarkedInternal(JSRuntime* rt, T** thingp) {
+ // Don't depend on the mark state of other cells during finalization.
+ MOZ_ASSERT(!CurrentThreadIsGCFinalizing());
+
+ T* thing = *thingp;
+ if (IsOwnedByOtherRuntime(rt, thing)) {
+ return true;
+ }
+
+ if (!thing->isTenured()) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ auto** cellp = reinterpret_cast<Cell**>(thingp);
+ return Nursery::getForwardedPointer(cellp);
+ }
+
+ if (!ShouldCheckMarkState(rt, thingp)) {
+ return true;
+ }
+
+ return (*thingp)->asTenured().isMarkedAny();
+}
+
+template <typename T>
+bool js::gc::IsAboutToBeFinalizedInternal(T** thingp) {
+ // Don't depend on the mark state of other cells during finalization.
+ MOZ_ASSERT(!CurrentThreadIsGCFinalizing());
+
+ MOZ_ASSERT(thingp);
+ T* thing = *thingp;
+ CheckIsMarkedThing(thing);
+ JSRuntime* rt = thing->runtimeFromAnyThread();
+
+ /* Permanent atoms are never finalized by non-owning runtimes. */
+ if (thing->isPermanentAndMayBeShared() && TlsContext.get()->runtime() != rt) {
+ return false;
+ }
+
+ if (!thing->isTenured()) {
+ return JS::RuntimeHeapIsMinorCollecting() &&
+ !Nursery::getForwardedPointer(reinterpret_cast<Cell**>(thingp));
+ }
+
+ Zone* zone = thing->asTenured().zoneFromAnyThread();
+ if (zone->isGCSweeping()) {
+ return !thing->asTenured().isMarkedAny();
+ }
+
+ if (zone->isGCCompacting() && IsForwarded(thing)) {
+ *thingp = Forwarded(thing);
+ return false;
+ }
+
+ return false;
+}
+
+template <typename T>
+bool js::gc::IsAboutToBeFinalizedInternal(T* thingp) {
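+  // Tagged pointer version: unwrap the value, check the underlying cell, and
+  // write back any pointer that was updated (e.g. by a compacting GC).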
+ bool dying = false;
+ auto thing = MapGCThingTyped(*thingp, [&dying](auto t) {
+ dying = IsAboutToBeFinalizedInternal(&t);
+ return TaggedPtr<T>::wrap(t);
+ });
+ if (thing.isSome() && thing.value() != *thingp) {
+ *thingp = thing.value();
+ }
+ return dying;
+}
+
+template <typename T>
+inline T* SweepingTracer::onEdge(T* thing) {
+ CheckIsMarkedThing(thing);
+
+ JSRuntime* rt = thing->runtimeFromAnyThread();
+
+ if (thing->isPermanentAndMayBeShared() && runtime() != rt) {
+ return thing;
+ }
+
+  // TODO: We should assert that the zone of the tenured cell is in the
+  // Sweeping state; however, we need to fix atoms and the JitcodeGlobalTable
+  // first.
+ // Bug 1501334 : IsAboutToBeFinalized doesn't work for atoms
+ // Bug 1071218 : Refactor Debugger::sweepAll and
+ // JitRuntime::SweepJitcodeGlobalTable to work per sweep group
+ if (!thing->isMarkedAny()) {
+ return nullptr;
+ }
+
+ return thing;
+}
+
+JSObject* SweepingTracer::onObjectEdge(JSObject* obj) { return onEdge(obj); }
+Shape* SweepingTracer::onShapeEdge(Shape* shape) { return onEdge(shape); }
+JSString* SweepingTracer::onStringEdge(JSString* string) {
+ return onEdge(string);
+}
+js::BaseScript* SweepingTracer::onScriptEdge(js::BaseScript* script) {
+ return onEdge(script);
+}
+BaseShape* SweepingTracer::onBaseShapeEdge(BaseShape* base) {
+ return onEdge(base);
+}
+jit::JitCode* SweepingTracer::onJitCodeEdge(jit::JitCode* jit) {
+ return onEdge(jit);
+}
+Scope* SweepingTracer::onScopeEdge(Scope* scope) { return onEdge(scope); }
+RegExpShared* SweepingTracer::onRegExpSharedEdge(RegExpShared* shared) {
+ return onEdge(shared);
+}
+ObjectGroup* SweepingTracer::onObjectGroupEdge(ObjectGroup* group) {
+ return onEdge(group);
+}
+BigInt* SweepingTracer::onBigIntEdge(BigInt* bi) { return onEdge(bi); }
+JS::Symbol* SweepingTracer::onSymbolEdge(JS::Symbol* sym) {
+ return onEdge(sym);
+}
+
+namespace js {
+namespace gc {
+
+template <typename T>
+JS_PUBLIC_API bool EdgeNeedsSweep(JS::Heap<T>* thingp) {
+ return IsAboutToBeFinalizedInternal(ConvertToBase(thingp->unsafeGet()));
+}
+
+template <typename T>
+JS_PUBLIC_API bool EdgeNeedsSweepUnbarrieredSlow(T* thingp) {
+ return IsAboutToBeFinalizedInternal(ConvertToBase(thingp));
+}
+
+// Instantiate a copy of the Tracing templates for each public GC type.
+#define INSTANTIATE_ALL_VALID_HEAP_TRACE_FUNCTIONS(type) \
+ template JS_PUBLIC_API bool EdgeNeedsSweep<type>(JS::Heap<type>*); \
+ template JS_PUBLIC_API bool EdgeNeedsSweepUnbarrieredSlow<type>(type*);
+JS_FOR_EACH_PUBLIC_GC_POINTER_TYPE(INSTANTIATE_ALL_VALID_HEAP_TRACE_FUNCTIONS)
+JS_FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(
+ INSTANTIATE_ALL_VALID_HEAP_TRACE_FUNCTIONS)
+
+#define INSTANTIATE_INTERNAL_IS_MARKED_FUNCTION(type) \
+ template bool IsMarkedInternal(JSRuntime* rt, type* thing);
+
+#define INSTANTIATE_INTERNAL_IATBF_FUNCTION(type) \
+ template bool IsAboutToBeFinalizedInternal(type* thingp);
+
+#define INSTANTIATE_INTERNAL_MARKING_FUNCTIONS_FROM_TRACEKIND(_1, type, _2, \
+ _3) \
+ INSTANTIATE_INTERNAL_IS_MARKED_FUNCTION(type*) \
+ INSTANTIATE_INTERNAL_IATBF_FUNCTION(type*)
+
+JS_FOR_EACH_TRACEKIND(INSTANTIATE_INTERNAL_MARKING_FUNCTIONS_FROM_TRACEKIND)
+
+JS_FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(INSTANTIATE_INTERNAL_IATBF_FUNCTION)
+
+#undef INSTANTIATE_INTERNAL_IS_MARKED_FUNCTION
+#undef INSTANTIATE_INTERNAL_IATBF_FUNCTION
+#undef INSTANTIATE_INTERNAL_MARKING_FUNCTIONS_FROM_TRACEKIND
+
+} /* namespace gc */
+} /* namespace js */
+
+/*** Cycle Collector Barrier Implementation *********************************/
+
+/*
+ * The GC and CC are run independently. Consequently, the following sequence of
+ * events can occur:
+ * 1. GC runs and marks an object gray.
+ * 2. The mutator runs (specifically, some C++ code with access to gray
+ * objects) and creates a pointer from a JS root or other black object to
+ * the gray object. If we re-ran a GC at this point, the object would now be
+ * black.
+ * 3. Now we run the CC. It may think it can collect the gray object, even
+ * though it's reachable from the JS heap.
+ *
+ * To prevent this badness, we unmark the gray bit of an object when it is
+ * accessed by callers outside XPConnect. This would cause the object to go
+ * black in step 2 above. This must be done on everything reachable from the
+ * object being returned. The following code takes care of the recursive
+ * re-coloring.
+ *
+ * There is an additional complication for certain kinds of edges that are not
+ * contained explicitly in the source object itself, such as from a weakmap key
+ * to its value. These "implicit edges" are represented in some other
+ * container object, such as the weakmap itself. In these
+ * cases, calling unmark gray on an object won't find all of its children.
+ *
+ * Handling these implicit edges has two parts:
+ * - A special pass enumerating all of the containers that know about the
+ * implicit edges to fix any black-gray edges that have been created. This
+ * is implemented in nsXPConnect::FixWeakMappingGrayBits.
+ * - To prevent any incorrectly gray objects from escaping to live JS outside
+ * of the containers, we must add unmark-graying read barriers to these
+ * containers.
+ */
+
+#ifdef DEBUG
+struct AssertNonGrayTracer final : public JS::CallbackTracer {
+ // This is used by the UnmarkGray tracer only, and needs to report itself as
+ // the non-gray tracer to not trigger assertions. Do not use it in another
+ // context without making this more generic.
+ explicit AssertNonGrayTracer(JSRuntime* rt)
+ : JS::CallbackTracer(rt, JS::TracerKind::UnmarkGray) {}
+ void onChild(const JS::GCCellPtr& thing) override {
+ MOZ_ASSERT(!thing.asCell()->isMarkedGray());
+ }
+};
+#endif
+
+class UnmarkGrayTracer final : public JS::CallbackTracer {
+ public:
+ // We set weakMapAction to WeakMapTraceAction::Skip because the cycle
+ // collector will fix up any color mismatches involving weakmaps when it runs.
+ explicit UnmarkGrayTracer(JSRuntime* rt)
+ : JS::CallbackTracer(rt, JS::TracerKind::UnmarkGray,
+ JS::WeakMapTraceAction::Skip),
+ unmarkedAny(false),
+ oom(false),
+ stack(rt->gc.unmarkGrayStack) {}
+
+ void unmark(JS::GCCellPtr cell);
+
+ // Whether we unmarked anything.
+ bool unmarkedAny;
+
+ // Whether we ran out of memory.
+ bool oom;
+
+ private:
+ // Stack of cells to traverse.
+ Vector<JS::GCCellPtr, 0, SystemAllocPolicy>& stack;
+
+ void onChild(const JS::GCCellPtr& thing) override;
+};
+
+void UnmarkGrayTracer::onChild(const JS::GCCellPtr& thing) {
+ Cell* cell = thing.asCell();
+
+  // Cells in the nursery cannot be gray, nor can certain kinds of tenured
+  // cells. Such cells cannot have edges to gray cells.
+ if (!cell->isTenured() ||
+ !TraceKindCanBeMarkedGray(cell->asTenured().getTraceKind())) {
+#ifdef DEBUG
+ MOZ_ASSERT(!cell->isMarkedGray());
+ AssertNonGrayTracer nongray(runtime());
+ JS::TraceChildren(&nongray, thing);
+#endif
+ return;
+ }
+
+ TenuredCell& tenured = cell->asTenured();
+ Zone* zone = tenured.zone();
+
+ // If the cell is in a zone whose mark bits are being cleared, then it will
+ // end up white.
+ if (zone->isGCPreparing()) {
+ return;
+ }
+
+ // If the cell is in a zone that we're currently marking, then it's possible
+ // that it is currently white but will end up gray. To handle this case, push
+ // any cells in zones that are currently being marked onto the mark stack and
+ // they will eventually get marked black.
+ if (zone->isGCMarking()) {
+ if (!cell->isMarkedBlack()) {
+ Cell* tmp = cell;
+ JSTracer* trc = &runtime()->gc.marker;
+ TraceManuallyBarrieredGenericPointerEdge(trc, &tmp, "read barrier");
+ MOZ_ASSERT(tmp == cell);
+ unmarkedAny = true;
+ }
+ return;
+ }
+
+ if (!tenured.isMarkedGray()) {
+ return;
+ }
+
+ tenured.markBlack();
+ unmarkedAny = true;
+
+ if (!stack.append(thing)) {
+ oom = true;
+ }
+}
+
+void UnmarkGrayTracer::unmark(JS::GCCellPtr cell) {
+ MOZ_ASSERT(stack.empty());
+
+ onChild(cell);
+
+ while (!stack.empty() && !oom) {
+ TraceChildren(this, stack.popCopy());
+ }
+
+ if (oom) {
+ // If we run out of memory, we take a drastic measure: require that we
+ // GC again before the next CC.
+ stack.clear();
+ runtime()->gc.setGrayBitsInvalid();
+ return;
+ }
+}
+
+bool js::gc::UnmarkGrayGCThingUnchecked(JSRuntime* rt, JS::GCCellPtr thing) {
+ MOZ_ASSERT(thing);
+ MOZ_ASSERT(thing.asCell()->isMarkedGray());
+
+ AutoGeckoProfilerEntry profilingStackFrame(
+ TlsContext.get(), "UnmarkGrayGCThing",
+ JS::ProfilingCategoryPair::GCCC_UnmarkGray);
+
+ UnmarkGrayTracer unmarker(rt);
+ unmarker.unmark(thing);
+ return unmarker.unmarkedAny;
+}
+
+JS_FRIEND_API bool JS::UnmarkGrayGCThingRecursively(JS::GCCellPtr thing) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
+ MOZ_ASSERT(!JS::RuntimeHeapIsCycleCollecting());
+
+ JSRuntime* rt = thing.asCell()->runtimeFromMainThread();
+ if (thing.asCell()->zone()->isGCPreparing()) {
+ // Mark bits are being cleared in preparation for GC.
+ return false;
+ }
+
+ gcstats::AutoPhase outerPhase(rt->gc.stats(), gcstats::PhaseKind::BARRIER);
+ gcstats::AutoPhase innerPhase(rt->gc.stats(),
+ gcstats::PhaseKind::UNMARK_GRAY);
+ return UnmarkGrayGCThingUnchecked(rt, thing);
+}
+
+void js::gc::UnmarkGrayGCThingRecursively(TenuredCell* cell) {
+ JS::UnmarkGrayGCThingRecursively(JS::GCCellPtr(cell, cell->getTraceKind()));
+}
+
+bool js::UnmarkGrayShapeRecursively(Shape* shape) {
+ return JS::UnmarkGrayGCThingRecursively(JS::GCCellPtr(shape));
+}
+
+#ifdef DEBUG
+Cell* js::gc::UninlinedForwarded(const Cell* cell) { return Forwarded(cell); }
+#endif
+
+namespace js {
+namespace debug {
+
+MarkInfo GetMarkInfo(Cell* rawCell) {
+ if (!rawCell->isTenured()) {
+ return MarkInfo::NURSERY;
+ }
+
+ TenuredCell* cell = &rawCell->asTenured();
+ if (cell->isMarkedGray()) {
+ return MarkInfo::GRAY;
+ }
+ if (cell->isMarkedBlack()) {
+ return MarkInfo::BLACK;
+ }
+ return MarkInfo::UNMARKED;
+}
+
+uintptr_t* GetMarkWordAddress(Cell* cell) {
+ if (!cell->isTenured()) {
+ return nullptr;
+ }
+
+ MarkBitmapWord* wordp;
+ uintptr_t mask;
+ TenuredChunkBase* chunk = gc::detail::GetCellChunkBase(&cell->asTenured());
+ chunk->markBits.getMarkWordAndMask(&cell->asTenured(), ColorBit::BlackBit,
+ &wordp, &mask);
+ return reinterpret_cast<uintptr_t*>(wordp);
+}
+
+uintptr_t GetMarkMask(Cell* cell, uint32_t colorBit) {
+ MOZ_ASSERT(colorBit == 0 || colorBit == 1);
+
+ if (!cell->isTenured()) {
+ return 0;
+ }
+
+ ColorBit bit = colorBit == 0 ? ColorBit::BlackBit : ColorBit::GrayOrBlackBit;
+ MarkBitmapWord* wordp;
+ uintptr_t mask;
+ TenuredChunkBase* chunk = gc::detail::GetCellChunkBase(&cell->asTenured());
+ chunk->markBits.getMarkWordAndMask(&cell->asTenured(), bit, &wordp, &mask);
+ return mask;
+}
+
+} // namespace debug
+} // namespace js
diff --git a/js/src/gc/Marking.h b/js/src/gc/Marking.h
new file mode 100644
index 0000000000..f92f9a4862
--- /dev/null
+++ b/js/src/gc/Marking.h
@@ -0,0 +1,178 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Marking and sweeping APIs for use by implementations of different GC cell
+ * kinds.
+ */
+
+#ifndef gc_Marking_h
+#define gc_Marking_h
+
+#include "js/TypeDecls.h"
+#include "vm/TaggedProto.h"
+
+class JSLinearString;
+class JSRope;
+class JSTracer;
+struct JSClass;
+
+namespace js {
+class BaseShape;
+class GCMarker;
+class NativeObject;
+class ObjectGroup;
+class Shape;
+class WeakMapBase;
+
+namespace jit {
+class JitCode;
+} // namespace jit
+
+namespace gc {
+
+struct Cell;
+class TenuredCell;
+
+/*** Liveness ***/
+
+// The IsMarkedInternal and IsAboutToBeFinalizedInternal function templates are
+// used to implement the IsMarked and IsAboutToBeFinalized set of functions.
+// These internal functions are instantiated for the base GC types and should
+// not be called directly.
+//
+// Note that there are two function templates declared for each, not one
+// template and a specialization. This is necessary so that pointer arguments
+// (e.g. JSObject**) and tagged value arguments (e.g. JS::Value*) are routed to
+// separate implementations.
+
+template <typename T>
+bool IsMarkedInternal(JSRuntime* rt, T** thing);
+
+template <typename T>
+bool IsAboutToBeFinalizedInternal(T* thingp);
+template <typename T>
+bool IsAboutToBeFinalizedInternal(T** thingp);
+
+// Report whether a GC thing has been marked with any color. Things which are in
+// zones that are not currently being collected or are owned by another runtime
+// are always reported as being marked.
+template <typename T>
+inline bool IsMarkedUnbarriered(JSRuntime* rt, T* thingp) {
+ return IsMarkedInternal(rt, ConvertToBase(thingp));
+}
+
+// Report whether a GC thing has been marked with any color. Things which are in
+// zones that are not currently being collected or are owned by another runtime
+// are always reported as being marked.
+template <typename T>
+inline bool IsMarked(JSRuntime* rt, BarrieredBase<T>* thingp) {
+ return IsMarkedInternal(rt, ConvertToBase(thingp->unbarrieredAddress()));
+}
+
+template <typename T>
+inline bool IsAboutToBeFinalizedUnbarriered(T* thingp) {
+ return IsAboutToBeFinalizedInternal(ConvertToBase(thingp));
+}
+
+template <typename T>
+inline bool IsAboutToBeFinalized(const BarrieredBase<T>* thingp) {
+ return IsAboutToBeFinalizedInternal(
+ ConvertToBase(thingp->unbarrieredAddress()));
+}
+
+inline bool IsAboutToBeFinalizedDuringMinorSweep(Cell* cell);
+
+inline Cell* ToMarkable(const Value& v) {
+ if (v.isGCThing()) {
+ return (Cell*)v.toGCThing();
+ }
+ return nullptr;
+}
+
+inline Cell* ToMarkable(Cell* cell) { return cell; }
+
+bool UnmarkGrayGCThingUnchecked(JSRuntime* rt, JS::GCCellPtr thing);
+
+} /* namespace gc */
+
+// The return value indicates if anything was unmarked.
+bool UnmarkGrayShapeRecursively(Shape* shape);
+
+namespace gc {
+
+// Functions for checking and updating GC thing pointers that might have been
+// moved by compacting GC. Overloads are also provided that work with Values.
+//
+// IsForwarded - check whether a pointer refers to a GC thing that has been
+// moved.
+//
+// Forwarded - return a pointer to the new location of a GC thing given a
+// pointer to old location.
+//
+// MaybeForwarded - used before dereferencing a pointer that may refer to a
+// moved GC thing without updating it. For JSObjects this will
+// also update the object's shape pointer if it has been moved
+// to allow slots to be accessed.
+
+template <typename T>
+inline bool IsForwarded(const T* t);
+
+template <typename T>
+inline T* Forwarded(const T* t);
+
+inline Value Forwarded(const JS::Value& value);
+
+template <typename T>
+inline T MaybeForwarded(T t);
+
+// Helper functions for use in situations where the object's group might be
+// forwarded, for example while marking.
+
+inline const JSClass* MaybeForwardedObjectClass(const JSObject* obj);
+
+template <typename T>
+inline bool MaybeForwardedObjectIs(JSObject* obj);
+
+template <typename T>
+inline T& MaybeForwardedObjectAs(JSObject* obj);
+
+// Trace TypedObject trace lists with specialised paths for GCMarker and
+// TenuringTracer.
+void VisitTraceList(JSTracer* trc, JSObject* obj, const uint32_t* traceList,
+ uint8_t* memory);
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+
+template <typename T>
+inline bool IsGCThingValidAfterMovingGC(T* t);
+
+template <typename T>
+inline void CheckGCThingAfterMovingGC(T* t);
+
+template <typename T>
+inline void CheckGCThingAfterMovingGC(const WeakHeapPtr<T*>& t);
+
+#endif // JSGC_HASH_TABLE_CHECKS
+
+} /* namespace gc */
+
+// Debugging functions to check tracing invariants.
+#ifdef DEBUG
+template <typename T>
+void CheckTracedThing(JSTracer* trc, T* thing);
+template <typename T>
+void CheckTracedThing(JSTracer* trc, const T& thing);
+#else
+template <typename T>
+inline void CheckTracedThing(JSTracer* trc, T* thing) {}
+template <typename T>
+inline void CheckTracedThing(JSTracer* trc, const T& thing) {}
+#endif
+
+} /* namespace js */
+
+#endif /* gc_Marking_h */
diff --git a/js/src/gc/MaybeRooted.h b/js/src/gc/MaybeRooted.h
new file mode 100644
index 0000000000..65373b467e
--- /dev/null
+++ b/js/src/gc/MaybeRooted.h
@@ -0,0 +1,152 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Template types for use in generic code: to use Rooted/Handle/MutableHandle in
+ * cases where GC may occur, or to use mock versions of those types that perform
+ * no rooting or root list manipulation when GC cannot occur.
+ */
+
+#ifndef gc_MaybeRooted_h
+#define gc_MaybeRooted_h
+
+#include "mozilla/Attributes.h" // MOZ_IMPLICIT, MOZ_RAII
+
+#include <type_traits> // std::true_type
+
+#include "gc/Allocator.h" // js::AllowGC, js::CanGC, js::NoGC
+#include "js/ComparisonOperators.h" // JS::detail::DefineComparisonOps
+#include "js/RootingAPI.h" // js::{Rooted,MutableHandle}Base, JS::SafelyInitialized, DECLARE_POINTER_{CONSTREF,ASSIGN}_OPS, DECLARE_NONPOINTER_{,MUTABLE_}ACCESSOR_METHODS, JS::Rooted, JS::{,Mutable}Handle
+
+namespace js {
+
+/**
+ * Interface substitute for Rooted<T> which does not root the variable's
+ * memory.
+ */
+template <typename T>
+class MOZ_RAII FakeRooted : public RootedBase<T, FakeRooted<T>> {
+ public:
+ using ElementType = T;
+
+ template <typename CX>
+ explicit FakeRooted(CX* cx) : ptr(JS::SafelyInitialized<T>()) {}
+
+ template <typename CX>
+ FakeRooted(CX* cx, T initial) : ptr(initial) {}
+
+ DECLARE_POINTER_CONSTREF_OPS(T);
+ DECLARE_POINTER_ASSIGN_OPS(FakeRooted, T);
+ DECLARE_NONPOINTER_ACCESSOR_METHODS(ptr);
+ DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(ptr);
+
+ private:
+ T ptr;
+
+ void set(const T& value) { ptr = value; }
+
+ FakeRooted(const FakeRooted&) = delete;
+};
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <typename T>
+struct DefineComparisonOps<js::FakeRooted<T>> : std::true_type {
+ static const T& get(const js::FakeRooted<T>& v) { return v.get(); }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+/**
+ * Interface substitute for MutableHandle<T> which is not required to point to
+ * rooted memory.
+ */
+template <typename T>
+class FakeMutableHandle
+ : public js::MutableHandleBase<T, FakeMutableHandle<T>> {
+ public:
+ using ElementType = T;
+
+ MOZ_IMPLICIT FakeMutableHandle(T* t) : ptr(t) {}
+
+ MOZ_IMPLICIT FakeMutableHandle(FakeRooted<T>* root) : ptr(root->address()) {}
+
+ void set(const T& v) { *ptr = v; }
+
+ DECLARE_POINTER_CONSTREF_OPS(T);
+ DECLARE_NONPOINTER_ACCESSOR_METHODS(*ptr);
+ DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(*ptr);
+
+ private:
+ FakeMutableHandle() : ptr(nullptr) {}
+ DELETE_ASSIGNMENT_OPS(FakeMutableHandle, T);
+
+ T* ptr;
+};
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <typename T>
+struct DefineComparisonOps<js::FakeMutableHandle<T>> : std::true_type {
+ static const T& get(const js::FakeMutableHandle<T>& v) { return v.get(); }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+/**
+ * Types for a variable that either should or shouldn't be rooted, depending on
+ * the template parameter allowGC. Used for implementing functions that can
+ * operate on either rooted or unrooted data.
+ */
+
+template <typename T, AllowGC allowGC>
+class MaybeRooted;
+
+template <typename T>
+class MaybeRooted<T, CanGC> {
+ public:
+ using HandleType = JS::Handle<T>;
+ using RootType = JS::Rooted<T>;
+ using MutableHandleType = JS::MutableHandle<T>;
+
+ template <typename T2>
+ static JS::Handle<T2*> downcastHandle(HandleType v) {
+ return v.template as<T2>();
+ }
+};
+
+template <typename T>
+class MaybeRooted<T, NoGC> {
+ public:
+ using HandleType = const T&;
+ using RootType = FakeRooted<T>;
+ using MutableHandleType = FakeMutableHandle<T>;
+
+ template <typename T2>
+ static T2* downcastHandle(HandleType v) {
+ return &v->template as<T2>();
+ }
+};
+
+} // namespace js
+
+#endif // gc_MaybeRooted_h
diff --git a/js/src/gc/Memory.cpp b/js/src/gc/Memory.cpp
new file mode 100644
index 0000000000..d59585f0c8
--- /dev/null
+++ b/js/src/gc/Memory.cpp
@@ -0,0 +1,1003 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Memory.h"
+
+#include "mozilla/Atomics.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/RandomNum.h"
+#include "mozilla/TaggedAnonymousMemory.h"
+
+#include "js/HeapAPI.h"
+#include "util/Memory.h"
+#include "vm/Runtime.h"
+
+#ifdef XP_WIN
+
+# include "util/Windows.h"
+# include <psapi.h>
+
+#else
+
+# include <algorithm>
+# include <errno.h>
+# include <sys/mman.h>
+# include <sys/resource.h>
+# include <sys/stat.h>
+# include <sys/types.h>
+# include <unistd.h>
+
+#endif
+
+namespace js {
+namespace gc {
+
+/*
+ * System allocation functions generally require the allocation size
+ * to be an integer multiple of the page size of the running process.
+ */
+static size_t pageSize = 0;
+
+/* The OS allocation granularity may not match the page size. */
+static size_t allocGranularity = 0;
+
+/* The number of bits used by addresses on this platform. */
+static size_t numAddressBits = 0;
+
+/* An estimate of the number of bytes available for virtual memory. */
+static size_t virtualMemoryLimit = size_t(-1);
+
+/*
+ * System allocation functions may hand out regions of memory in increasing or
+ * decreasing order. This ordering is used as a hint during chunk alignment to
+ * reduce the number of system calls. On systems with 48-bit addresses, our
+ * workarounds to obtain 47-bit pointers cause addresses to be handed out in
+ * increasing order.
+ *
+ * We do not use the growth direction on Windows, as constraints on VirtualAlloc
+ * would make its application failure prone and complex. Tests indicate that
+ * VirtualAlloc always hands out regions of memory in increasing order.
+ */
+#if defined(XP_DARWIN)
+static mozilla::Atomic<int, mozilla::Relaxed> growthDirection(1);
+#elif defined(XP_UNIX)
+static mozilla::Atomic<int, mozilla::Relaxed> growthDirection(0);
+#endif
+
+/*
+ * Data from OOM crashes shows there may be up to 24 chunk-sized but unusable
+ * chunks available in low memory situations. These chunks may all need to be
+ * used up before we gain access to remaining *alignable* chunk-sized regions,
+ * so we use a generous limit of 32 unusable chunks to ensure we reach them.
+ */
+static const int MaxLastDitchAttempts = 32;
+
+#ifdef JS_64BIT
+/*
+ * On some 64-bit platforms we can use a random, scattershot allocator that
+ * tries addresses from the available range at random. If the address range
+ * is large enough this will have a high chance of success and additionally
+ * makes the memory layout of our process less predictable.
+ *
+ * However, not all 64-bit platforms have a very large address range. For
+ * example, AArch64 on Linux defaults to using 39-bit addresses to limit the
+ * number of translation tables used. On such configurations the scattershot
+ * approach to allocation creates a conflict with our desire to reserve large
+ * regions of memory for applications like WebAssembly: Small allocations may
+ * inadvertently block off all available 4-6GiB regions, and conversely
+ * reserving such regions may lower the success rate for smaller allocations to
+ * unacceptable levels.
+ *
+ * So we make a compromise: Instead of using the scattershot on all 64-bit
+ * platforms, we only use it on platforms that meet a minimum requirement for
+ * the available address range. In addition we split the address range,
+ * reserving the upper half for huge allocations and the lower half for smaller
+ * allocations. We use a limit of 43 bits so that at least 42 bits are available
+ * for huge allocations - this matches the 8TiB per process address space limit
+ * that we're already subject to on Windows.
+ */
+static const size_t MinAddressBitsForRandomAlloc = 43;
+
+/* The lower limit for huge allocations. This is fairly arbitrary. */
+static const size_t HugeAllocationSize = 1024 * 1024 * 1024;
+
+/* The minimum and maximum valid addresses that can be allocated into. */
+static size_t minValidAddress = 0;
+static size_t maxValidAddress = 0;
+
+/* The upper limit for smaller allocations and the lower limit for huge ones. */
+static size_t hugeSplit = 0;
+#endif
+
+size_t SystemPageSize() { return pageSize; }
+
+size_t SystemAddressBits() { return numAddressBits; }
+
+size_t VirtualMemoryLimit() { return virtualMemoryLimit; }
+
+bool UsingScattershotAllocator() {
+#ifdef JS_64BIT
+ return numAddressBits >= MinAddressBitsForRandomAlloc;
+#else
+ return false;
+#endif
+}
+
+enum class Commit : bool {
+ No = false,
+ Yes = true,
+};
+
+#ifdef XP_WIN
+enum class PageAccess : DWORD {
+ None = PAGE_NOACCESS,
+ Read = PAGE_READONLY,
+ ReadWrite = PAGE_READWRITE,
+ Execute = PAGE_EXECUTE,
+ ReadExecute = PAGE_EXECUTE_READ,
+ ReadWriteExecute = PAGE_EXECUTE_READWRITE,
+};
+#else
+enum class PageAccess : int {
+ None = PROT_NONE,
+ Read = PROT_READ,
+ ReadWrite = PROT_READ | PROT_WRITE,
+ Execute = PROT_EXEC,
+ ReadExecute = PROT_READ | PROT_EXEC,
+ ReadWriteExecute = PROT_READ | PROT_WRITE | PROT_EXEC,
+};
+#endif
+
+template <bool AlwaysGetNew = true>
+static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
+ size_t length, size_t alignment);
+
+static void* MapAlignedPagesSlow(size_t length, size_t alignment);
+static void* MapAlignedPagesLastDitch(size_t length, size_t alignment);
+
+#ifdef JS_64BIT
+static void* MapAlignedPagesRandom(size_t length, size_t alignment);
+#endif
+
+void* TestMapAlignedPagesLastDitch(size_t length, size_t alignment) {
+ return MapAlignedPagesLastDitch(length, alignment);
+}
+
+/*
+ * We can only decommit unused pages if the hardcoded Arena
+ * size matches the page size for the running process.
+ */
+static inline bool DecommitEnabled() { return pageSize == ArenaSize; }
+
+/* Returns the offset from the nearest aligned address at or below |region|. */
+static inline size_t OffsetFromAligned(void* region, size_t alignment) {
+ return uintptr_t(region) % alignment;
+}
+
+template <Commit commit, PageAccess prot>
+static inline void* MapInternal(void* desired, size_t length) {
+ void* region = nullptr;
+#ifdef XP_WIN
+ DWORD flags =
+ (commit == Commit::Yes ? MEM_RESERVE | MEM_COMMIT : MEM_RESERVE);
+ region = VirtualAlloc(desired, length, flags, DWORD(prot));
+#else
+ int flags = MAP_PRIVATE | MAP_ANON;
+ region = MozTaggedAnonymousMmap(desired, length, int(prot), flags, -1, 0,
+ "js-gc-heap");
+ if (region == MAP_FAILED) {
+ return nullptr;
+ }
+#endif
+ return region;
+}
+
+static inline void UnmapInternal(void* region, size_t length) {
+ MOZ_ASSERT(region && OffsetFromAligned(region, allocGranularity) == 0);
+ MOZ_ASSERT(length > 0 && length % pageSize == 0);
+
+#ifdef XP_WIN
+ MOZ_RELEASE_ASSERT(VirtualFree(region, 0, MEM_RELEASE) != 0);
+#else
+ if (munmap(region, length)) {
+ MOZ_RELEASE_ASSERT(errno == ENOMEM);
+ }
+#endif
+}
+
+template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
+static inline void* MapMemory(size_t length) {
+ MOZ_ASSERT(length > 0);
+
+ return MapInternal<commit, prot>(nullptr, length);
+}
+
+/*
+ * Attempts to map memory at the given address, but allows the system
+ * to return a different address that may still be suitable.
+ */
+template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
+static inline void* MapMemoryAtFuzzy(void* desired, size_t length) {
+ MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
+ MOZ_ASSERT(length > 0);
+
+ // Note that some platforms treat the requested address as a hint, so the
+ // returned address might not match the requested address.
+ return MapInternal<commit, prot>(desired, length);
+}
+
+/*
+ * Attempts to map memory at the given address, returning nullptr if
+ * the system returns any address other than the requested one.
+ */
+template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
+static inline void* MapMemoryAt(void* desired, size_t length) {
+ MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
+ MOZ_ASSERT(length > 0);
+
+ void* region = MapInternal<commit, prot>(desired, length);
+ if (!region) {
+ return nullptr;
+ }
+
+ // On some platforms mmap treats the desired address as a hint, so
+ // check that the address we got is the address we requested.
+ if (region != desired) {
+ UnmapInternal(region, length);
+ return nullptr;
+ }
+ return region;
+}
+
+#ifdef JS_64BIT
+
+/* Returns a random number in the given range. */
+static inline uint64_t GetNumberInRange(uint64_t minNum, uint64_t maxNum) {
+ const uint64_t MaxRand = UINT64_C(0xffffffffffffffff);
+ maxNum -= minNum;
+ uint64_t binSize = 1 + (MaxRand - maxNum) / (maxNum + 1);
+
+ uint64_t rndNum;
+ do {
+ mozilla::Maybe<uint64_t> result;
+ do {
+ result = mozilla::RandomUint64();
+ } while (!result);
+ rndNum = result.value() / binSize;
+ } while (rndNum > maxNum);
+
+ return minNum + rndNum;
+}
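+
+/*
+ * Worked example: for GetNumberInRange(0, 3) the range spans 4 values, so
+ * binSize is 2^62 and every raw 64-bit random value maps to exactly one of
+ * {0, 1, 2, 3}. When the range size does not evenly divide 2^64, raw values
+ * that land above maxNum are rejected and we retry, which keeps the
+ * distribution unbiased.
+ */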
+
+# ifndef XP_WIN
+static inline uint64_t FindAddressLimitInner(size_t highBit, size_t tries);
+
+/*
+ * The address range available to applications depends on both hardware and
+ * kernel configuration. For example, AArch64 on Linux uses addresses with
+ * 39 significant bits by default, but can be configured to use addresses with
+ * 48 significant bits by enabling a 4th translation table. Unfortunately,
+ * there appears to be no standard way to query the limit at runtime
+ * (Windows exposes this via GetSystemInfo()).
+ *
+ * This function tries to find the address limit by performing a binary search
+ * on the index of the most significant set bit in the addresses it attempts to
+ * allocate. As the requested address is often treated as a hint by the
+ * operating system, we use the actual returned addresses to narrow the range.
+ * We return the number of bits of an address that may be set.
+ */
+static size_t FindAddressLimit() {
+ // Use 32 bits as a lower bound in case we keep getting nullptr.
+ uint64_t low = 31;
+ uint64_t highestSeen = (UINT64_C(1) << 32) - allocGranularity - 1;
+
+ // Exclude 48-bit and 47-bit addresses first.
+ uint64_t high = 47;
+ for (; high >= std::max(low, UINT64_C(46)); --high) {
+ highestSeen = std::max(FindAddressLimitInner(high, 4), highestSeen);
+ low = mozilla::FloorLog2(highestSeen);
+ }
+ // If those didn't work, perform a modified binary search.
+ while (high - 1 > low) {
+ uint64_t middle = low + (high - low) / 2;
+ highestSeen = std::max(FindAddressLimitInner(middle, 4), highestSeen);
+ low = mozilla::FloorLog2(highestSeen);
+ if (highestSeen < (UINT64_C(1) << middle)) {
+ high = middle;
+ }
+ }
+ // We can be sure of the lower bound, but check the upper bound again.
+ do {
+ high = low + 1;
+ highestSeen = std::max(FindAddressLimitInner(high, 8), highestSeen);
+ low = mozilla::FloorLog2(highestSeen);
+ } while (low >= high);
+
+ // `low` is the highest set bit, so `low + 1` is the number of bits.
+ return low + 1;
+}
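+
+/*
+ * Rough example: on a Linux/AArch64 kernel configured for 39-bit user
+ * addresses, the probes at bits 47 and 46 only ever see addresses below
+ * 2^39, so the search converges on low == 38 and the function returns 39.
+ */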
+
+static inline uint64_t FindAddressLimitInner(size_t highBit, size_t tries) {
+ const size_t length = allocGranularity; // Used as both length and alignment.
+
+ uint64_t highestSeen = 0;
+ uint64_t startRaw = UINT64_C(1) << highBit;
+ uint64_t endRaw = 2 * startRaw - length - 1;
+ uint64_t start = (startRaw + length - 1) / length;
+ uint64_t end = (endRaw - (length - 1)) / length;
+ for (size_t i = 0; i < tries; ++i) {
+ uint64_t desired = length * GetNumberInRange(start, end);
+ void* address = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
+ uint64_t actual = uint64_t(address);
+ if (address) {
+ UnmapInternal(address, length);
+ }
+ if (actual > highestSeen) {
+ highestSeen = actual;
+ if (actual >= startRaw) {
+ break;
+ }
+ }
+ }
+ return highestSeen;
+}
+# endif // !defined(XP_WIN)
+
+#endif // defined(JS_64BIT)
+
+void InitMemorySubsystem() {
+ if (pageSize == 0) {
+#ifdef XP_WIN
+ SYSTEM_INFO sysinfo;
+ GetSystemInfo(&sysinfo);
+ pageSize = sysinfo.dwPageSize;
+ allocGranularity = sysinfo.dwAllocationGranularity;
+#else
+ pageSize = size_t(sysconf(_SC_PAGESIZE));
+ allocGranularity = pageSize;
+#endif
+#ifdef JS_64BIT
+# ifdef XP_WIN
+ minValidAddress = size_t(sysinfo.lpMinimumApplicationAddress);
+ maxValidAddress = size_t(sysinfo.lpMaximumApplicationAddress);
+ numAddressBits = mozilla::FloorLog2(maxValidAddress) + 1;
+# else
+ // No standard way to determine these, so fall back to FindAddressLimit().
+ numAddressBits = FindAddressLimit();
+ minValidAddress = allocGranularity;
+ maxValidAddress = (UINT64_C(1) << numAddressBits) - 1 - allocGranularity;
+# endif
+ // Sanity check the address to ensure we don't use more than 47 bits.
+ uint64_t maxJSAddress = UINT64_C(0x00007fffffffffff) - allocGranularity;
+ if (maxValidAddress > maxJSAddress) {
+ maxValidAddress = maxJSAddress;
+ hugeSplit = UINT64_C(0x00003fffffffffff) - allocGranularity;
+ } else {
+ hugeSplit = (UINT64_C(1) << (numAddressBits - 1)) - 1 - allocGranularity;
+ }
+#else // !defined(JS_64BIT)
+ numAddressBits = 32;
+#endif
+#ifdef RLIMIT_AS
+ rlimit as_limit;
+ if (getrlimit(RLIMIT_AS, &as_limit) == 0 &&
+ as_limit.rlim_max != RLIM_INFINITY) {
+ virtualMemoryLimit = as_limit.rlim_max;
+ }
+#endif
+ }
+}
+
+#ifdef JS_64BIT
+/* The JS engine uses 47-bit pointers; all higher bits must be clear. */
+static inline bool IsInvalidRegion(void* region, size_t length) {
+ const uint64_t invalidPointerMask = UINT64_C(0xffff800000000000);
+ return (uintptr_t(region) + length - 1) & invalidPointerMask;
+}
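+
+/*
+ * For example, a region starting at 0x00007fffffffe000 with length 0x4000
+ * ends at 0x0000800000001fff; bit 47 of the last address is set, so the
+ * region is rejected even though its start looks valid.
+ */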
+#endif
+
+void* MapAlignedPages(size_t length, size_t alignment) {
+ MOZ_RELEASE_ASSERT(length > 0 && alignment > 0);
+ MOZ_RELEASE_ASSERT(length % pageSize == 0);
+ MOZ_RELEASE_ASSERT(std::max(alignment, allocGranularity) %
+ std::min(alignment, allocGranularity) ==
+ 0);
+
+ // Smaller alignments aren't supported by the allocation functions.
+ if (alignment < allocGranularity) {
+ alignment = allocGranularity;
+ }
+
+#ifdef JS_64BIT
+ // Use the scattershot allocator if the address range is large enough.
+ if (UsingScattershotAllocator()) {
+ void* region = MapAlignedPagesRandom(length, alignment);
+
+ MOZ_RELEASE_ASSERT(!IsInvalidRegion(region, length));
+ MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
+
+ return region;
+ }
+#endif
+
+ // Try to allocate the region. If the returned address is aligned,
+ // either we OOMed (region is nullptr) or we're done.
+ void* region = MapMemory(length);
+ if (OffsetFromAligned(region, alignment) == 0) {
+ return region;
+ }
+
+ // Try to align the region. On success, TryToAlignChunk() returns
+ // true and we can return the aligned region immediately.
+ void* retainedRegion;
+ if (TryToAlignChunk(&region, &retainedRegion, length, alignment)) {
+ MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
+ MOZ_ASSERT(!retainedRegion);
+ return region;
+ }
+
+ // On failure, the unaligned region is retained unless we OOMed. We don't
+ // use the retained region on this path (see the last ditch allocator).
+ if (retainedRegion) {
+ UnmapInternal(retainedRegion, length);
+ }
+
+ // If it fails to align the given region, TryToAlignChunk() returns the
+ // next valid region that we might be able to align (unless we OOMed).
+ if (region) {
+ MOZ_ASSERT(OffsetFromAligned(region, alignment) != 0);
+ UnmapInternal(region, length);
+ }
+
+ // Since we couldn't align the first region, fall back to allocating a
+ // region large enough that we can definitely align it.
+ region = MapAlignedPagesSlow(length, alignment);
+ if (!region) {
+ // If there wasn't enough contiguous address space left for that,
+ // try to find an alignable region using the last ditch allocator.
+ region = MapAlignedPagesLastDitch(length, alignment);
+ }
+
+ // At this point we should either have an aligned region or nullptr.
+ MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
+ return region;
+}
+
+#ifdef JS_64BIT
+
+/*
+ * This allocator takes advantage of the large address range on some 64-bit
+ * platforms to allocate in a scattershot manner, choosing addresses at random
+ * from the range. By controlling the range we can avoid returning addresses
+ * that have more than 47 significant bits (as required by SpiderMonkey).
+ * This approach also has some other advantages over the methods employed by
+ * the other allocation functions in this file:
+ * 1) Allocations are extremely likely to succeed on the first try.
+ *  2) The randomness makes our memory layout harder to predict.
+ * 3) The low probability of reusing regions guards against use-after-free.
+ *
+ * The main downside is that detecting physical OOM situations becomes more
+ * difficult; to guard against this, we occasionally try a regular allocation.
+ * In addition, sprinkling small allocations throughout the full address range
+ * might get in the way of large address space reservations such as those
+ * employed by WebAssembly. To avoid this (or the opposite problem of such
+ * reservations reducing the chance of success for smaller allocations) we
+ * split the address range in half, with one half reserved for huge allocations
+ * and the other for regular (usually chunk sized) allocations.
+ */
+static void* MapAlignedPagesRandom(size_t length, size_t alignment) {
+ uint64_t minNum, maxNum;
+ if (length < HugeAllocationSize) {
+ // Use the lower half of the range.
+ minNum = (minValidAddress + alignment - 1) / alignment;
+ maxNum = (hugeSplit - (length - 1)) / alignment;
+ } else {
+ // Use the upper half of the range.
+ minNum = (hugeSplit + 1 + alignment - 1) / alignment;
+ maxNum = (maxValidAddress - (length - 1)) / alignment;
+ }
+
+ // Try to allocate in random aligned locations.
+ void* region = nullptr;
+ for (size_t i = 1; i <= 1024; ++i) {
+ if (i & 0xf) {
+ uint64_t desired = alignment * GetNumberInRange(minNum, maxNum);
+ region = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
+ if (!region) {
+ continue;
+ }
+ } else {
+ // Check for OOM.
+ region = MapMemory(length);
+ if (!region) {
+ return nullptr;
+ }
+ }
+ if (IsInvalidRegion(region, length)) {
+ UnmapInternal(region, length);
+ continue;
+ }
+ if (OffsetFromAligned(region, alignment) == 0) {
+ return region;
+ }
+ void* retainedRegion = nullptr;
+ if (TryToAlignChunk<false>(&region, &retainedRegion, length, alignment)) {
+ MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
+ MOZ_ASSERT(!retainedRegion);
+ return region;
+ }
+ MOZ_ASSERT(region && !retainedRegion);
+ UnmapInternal(region, length);
+ }
+
+ if (numAddressBits < 48) {
+ // Try the reliable fallback of overallocating.
+ // Note: This will not respect the address space split.
+ region = MapAlignedPagesSlow(length, alignment);
+ if (region) {
+ return region;
+ }
+ }
+ if (length < HugeAllocationSize) {
+    MOZ_CRASH("Couldn't allocate even after 1024 tries!");
+ }
+
+ return nullptr;
+}
+
+#endif // defined(JS_64BIT)
+
+static void* MapAlignedPagesSlow(size_t length, size_t alignment) {
+ void* alignedRegion = nullptr;
+ do {
+ size_t reserveLength = length + alignment - pageSize;
+#ifdef XP_WIN
+ // Don't commit the requested pages as we won't use the region directly.
+ void* region = MapMemory<Commit::No>(reserveLength);
+#else
+ void* region = MapMemory(reserveLength);
+#endif
+ if (!region) {
+ return nullptr;
+ }
+ alignedRegion =
+ reinterpret_cast<void*>(AlignBytes(uintptr_t(region), alignment));
+#ifdef XP_WIN
+ // Windows requires that map and unmap calls be matched, so deallocate
+ // and immediately reallocate at the desired (aligned) address.
+ UnmapInternal(region, reserveLength);
+ alignedRegion = MapMemoryAt(alignedRegion, length);
+#else
+ // munmap allows us to simply unmap the pages that don't interest us.
+ if (alignedRegion != region) {
+ UnmapInternal(region, uintptr_t(alignedRegion) - uintptr_t(region));
+ }
+ void* regionEnd =
+ reinterpret_cast<void*>(uintptr_t(region) + reserveLength);
+ void* alignedEnd =
+ reinterpret_cast<void*>(uintptr_t(alignedRegion) + length);
+ if (alignedEnd != regionEnd) {
+ UnmapInternal(alignedEnd, uintptr_t(regionEnd) - uintptr_t(alignedEnd));
+ }
+#endif
+ // On Windows we may have raced with another thread; if so, try again.
+ } while (!alignedRegion);
+
+ return alignedRegion;
+}
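+
+/*
+ * For example, with length = alignment = 1 MiB and a 4 KiB page size, the
+ * code above reserves 2 MiB - 4 KiB. Any page-aligned region of that size
+ * contains a 1 MiB-aligned start at most 1 MiB - 4 KiB from its beginning,
+ * so the aligned 1 MiB block always fits inside the reservation.
+ */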
+
+/*
+ * In a low memory or high fragmentation situation, alignable chunks of the
+ * desired length may still be available, even if there are no more contiguous
+ * free chunks that meet the |length + alignment - pageSize| requirement of
+ * MapAlignedPagesSlow. In this case, try harder to find an alignable chunk
+ * by temporarily holding onto the unaligned parts of each chunk until the
+ * allocator gives us a chunk that either is, or can be aligned.
+ */
+static void* MapAlignedPagesLastDitch(size_t length, size_t alignment) {
+ void* tempMaps[MaxLastDitchAttempts];
+ int attempt = 0;
+ void* region = MapMemory(length);
+ if (OffsetFromAligned(region, alignment) == 0) {
+ return region;
+ }
+ for (; attempt < MaxLastDitchAttempts; ++attempt) {
+ if (TryToAlignChunk(&region, tempMaps + attempt, length, alignment)) {
+ MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
+ MOZ_ASSERT(!tempMaps[attempt]);
+ break; // Success!
+ }
+ if (!region || !tempMaps[attempt]) {
+ break; // We ran out of memory, so give up.
+ }
+ }
+ if (OffsetFromAligned(region, alignment)) {
+ UnmapInternal(region, length);
+ region = nullptr;
+ }
+ while (--attempt >= 0) {
+ UnmapInternal(tempMaps[attempt], length);
+ }
+ return region;
+}
+
+#ifdef XP_WIN
+
+/*
+ * On Windows, map and unmap calls must be matched, so we deallocate the
+ * unaligned chunk, then reallocate the unaligned part to block off the
+ * old address and force the allocator to give us a new one.
+ */
+template <bool>
+static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
+ size_t length, size_t alignment) {
+ void* region = *aRegion;
+ MOZ_ASSERT(region && OffsetFromAligned(region, alignment) != 0);
+
+ size_t retainedLength = 0;
+ void* retainedRegion = nullptr;
+ do {
+ size_t offset = OffsetFromAligned(region, alignment);
+ if (offset == 0) {
+ // If the address is aligned, either we hit OOM or we're done.
+ break;
+ }
+ UnmapInternal(region, length);
+ retainedLength = alignment - offset;
+ retainedRegion = MapMemoryAt<Commit::No>(region, retainedLength);
+ region = MapMemory(length);
+
+ // If retainedRegion is null here, we raced with another thread.
+ } while (!retainedRegion);
+
+ bool result = OffsetFromAligned(region, alignment) == 0;
+ if (result && retainedRegion) {
+ UnmapInternal(retainedRegion, retainedLength);
+ retainedRegion = nullptr;
+ }
+
+ *aRegion = region;
+ *aRetainedRegion = retainedRegion;
+ return region && result;
+}
+
+#else // !defined(XP_WIN)
+
+/*
+ * mmap calls don't have to be matched with calls to munmap, so we can unmap
+ * just the pages we don't need. However, as we don't know a priori if addresses
+ * are handed out in increasing or decreasing order, we have to try both
+ * directions (depending on the environment, one will always fail).
+ */
+template <bool AlwaysGetNew>
+static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
+ size_t length, size_t alignment) {
+ void* regionStart = *aRegion;
+ MOZ_ASSERT(regionStart && OffsetFromAligned(regionStart, alignment) != 0);
+
+ bool addressesGrowUpward = growthDirection > 0;
+ bool directionUncertain = -8 < growthDirection && growthDirection <= 8;
+ size_t offsetLower = OffsetFromAligned(regionStart, alignment);
+ size_t offsetUpper = alignment - offsetLower;
+ for (size_t i = 0; i < 2; ++i) {
+ if (addressesGrowUpward) {
+ void* upperStart =
+ reinterpret_cast<void*>(uintptr_t(regionStart) + offsetUpper);
+ void* regionEnd =
+ reinterpret_cast<void*>(uintptr_t(regionStart) + length);
+ if (MapMemoryAt(regionEnd, offsetUpper)) {
+ UnmapInternal(regionStart, offsetUpper);
+ if (directionUncertain) {
+ ++growthDirection;
+ }
+ regionStart = upperStart;
+ break;
+ }
+ } else {
+ auto* lowerStart =
+ reinterpret_cast<void*>(uintptr_t(regionStart) - offsetLower);
+ auto* lowerEnd = reinterpret_cast<void*>(uintptr_t(lowerStart) + length);
+ if (MapMemoryAt(lowerStart, offsetLower)) {
+ UnmapInternal(lowerEnd, offsetLower);
+ if (directionUncertain) {
+ --growthDirection;
+ }
+ regionStart = lowerStart;
+ break;
+ }
+ }
+ // If we're confident in the growth direction, don't try the other.
+ if (!directionUncertain) {
+ break;
+ }
+ addressesGrowUpward = !addressesGrowUpward;
+ }
+
+ void* retainedRegion = nullptr;
+ bool result = OffsetFromAligned(regionStart, alignment) == 0;
+ if (AlwaysGetNew && !result) {
+ // If our current chunk cannot be aligned, just get a new one.
+ retainedRegion = regionStart;
+ regionStart = MapMemory(length);
+ // Our new region might happen to already be aligned.
+ result = OffsetFromAligned(regionStart, alignment) == 0;
+ if (result) {
+ UnmapInternal(retainedRegion, length);
+ retainedRegion = nullptr;
+ }
+ }
+
+ *aRegion = regionStart;
+ *aRetainedRegion = retainedRegion;
+ return regionStart && result;
+}
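+
+/*
+ * For example, with a 1 MiB alignment and a region starting at ...40000, the
+ * code above computes offsetLower = 0x40000 and offsetUpper = 0xc0000. If
+ * addresses grow upward it maps 0xc0000 extra bytes just past the region and
+ * unmaps the first 0xc0000 bytes, sliding the usable start up to the next
+ * aligned address; if they grow downward it does the mirror image below.
+ */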
+
+#endif
+
+void UnmapPages(void* region, size_t length) {
+ MOZ_RELEASE_ASSERT(region &&
+ OffsetFromAligned(region, allocGranularity) == 0);
+ MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
+
+ // ASan does not automatically unpoison memory, so we have to do this here.
+ MOZ_MAKE_MEM_UNDEFINED(region, length);
+
+ UnmapInternal(region, length);
+}
+
+static void CheckDecommit(void* region, size_t length) {
+ MOZ_RELEASE_ASSERT(region);
+ MOZ_RELEASE_ASSERT(length > 0);
+
+ // pageSize == ArenaSize doesn't necessarily hold, but this function is
+ // used by the GC to decommit unused Arenas, so we don't want to assert
+ // if pageSize > ArenaSize.
+ MOZ_ASSERT(OffsetFromAligned(region, ArenaSize) == 0);
+ MOZ_ASSERT(length % ArenaSize == 0);
+
+ if (DecommitEnabled()) {
+ // We can't decommit part of a page.
+ MOZ_RELEASE_ASSERT(OffsetFromAligned(region, pageSize) == 0);
+ MOZ_RELEASE_ASSERT(length % pageSize == 0);
+ }
+}
+
+bool MarkPagesUnusedSoft(void* region, size_t length) {
+ CheckDecommit(region, length);
+
+ MOZ_MAKE_MEM_NOACCESS(region, length);
+
+ if (!DecommitEnabled()) {
+ return true;
+ }
+
+#if defined(XP_WIN)
+ return VirtualAlloc(region, length, MEM_RESET,
+ DWORD(PageAccess::ReadWrite)) == region;
+#else
+ int status;
+ do {
+# if defined(XP_DARWIN)
+ status = madvise(region, length, MADV_FREE_REUSABLE);
+# elif defined(XP_SOLARIS)
+ status = posix_madvise(region, length, POSIX_MADV_DONTNEED);
+# else
+ status = madvise(region, length, MADV_DONTNEED);
+# endif
+ } while (status == -1 && errno == EAGAIN);
+ return status == 0;
+#endif
+}
+
+bool MarkPagesUnusedHard(void* region, size_t length) {
+ CheckDecommit(region, length);
+
+ MOZ_MAKE_MEM_NOACCESS(region, length);
+
+ if (!DecommitEnabled()) {
+ return true;
+ }
+
+#if defined(XP_WIN)
+ return VirtualFree(region, length, MEM_DECOMMIT);
+#else
+ return MarkPagesUnusedSoft(region, length);
+#endif
+}
+
+void MarkPagesInUseSoft(void* region, size_t length) {
+ CheckDecommit(region, length);
+
+#if defined(XP_DARWIN)
+ while (madvise(region, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) {
+ }
+#endif
+
+ MOZ_MAKE_MEM_UNDEFINED(region, length);
+}
+
+bool MarkPagesInUseHard(void* region, size_t length) {
+ if (js::oom::ShouldFailWithOOM()) {
+ return false;
+ }
+
+ CheckDecommit(region, length);
+
+ MOZ_MAKE_MEM_UNDEFINED(region, length);
+
+ if (!DecommitEnabled()) {
+ return true;
+ }
+
+#if defined(XP_WIN)
+ return VirtualAlloc(region, length, MEM_COMMIT,
+ DWORD(PageAccess::ReadWrite)) == region;
+#else
+ return true;
+#endif
+}
+
+size_t GetPageFaultCount() {
+#ifdef XP_WIN
+ PROCESS_MEMORY_COUNTERS pmc;
+ if (GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)) == 0) {
+ return 0;
+ }
+ return pmc.PageFaultCount;
+#else
+ struct rusage usage;
+ int err = getrusage(RUSAGE_SELF, &usage);
+ if (err) {
+ return 0;
+ }
+ return usage.ru_majflt;
+#endif
+}
+
+void* AllocateMappedContent(int fd, size_t offset, size_t length,
+ size_t alignment) {
+ if (length == 0 || alignment == 0 || offset % alignment != 0 ||
+ std::max(alignment, allocGranularity) %
+ std::min(alignment, allocGranularity) !=
+ 0) {
+ return nullptr;
+ }
+
+ size_t alignedOffset = offset - (offset % allocGranularity);
+ size_t alignedLength = length + (offset % allocGranularity);
+
+ // We preallocate the mapping using MapAlignedPages, which expects
+ // the length parameter to be an integer multiple of the page size.
+ size_t mappedLength = alignedLength;
+ if (alignedLength % pageSize != 0) {
+ mappedLength += pageSize - alignedLength % pageSize;
+ }
+
+#ifdef XP_WIN
+ HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));
+
+ // This call will fail if the file does not exist.
+ HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr);
+ if (!hMap) {
+ return nullptr;
+ }
+
+ DWORD offsetH = uint32_t(uint64_t(alignedOffset) >> 32);
+ DWORD offsetL = uint32_t(alignedOffset);
+
+ uint8_t* map = nullptr;
+ for (;;) {
+ // The value of a pointer is technically only defined while the region
+ // it points to is allocated, so explicitly treat this one as a number.
+ uintptr_t region = uintptr_t(MapAlignedPages(mappedLength, alignment));
+ if (region == 0) {
+ break;
+ }
+ UnmapInternal(reinterpret_cast<void*>(region), mappedLength);
+ // If the offset or length are out of bounds, this call will fail.
+ map = static_cast<uint8_t*>(
+ MapViewOfFileEx(hMap, FILE_MAP_COPY, offsetH, offsetL, alignedLength,
+ reinterpret_cast<void*>(region)));
+
+ // Retry if another thread mapped the address we were trying to use.
+ if (map || GetLastError() != ERROR_INVALID_ADDRESS) {
+ break;
+ }
+ }
+
+ // This just decreases the file mapping object's internal reference count;
+ // it won't actually be destroyed until we unmap the associated view.
+ CloseHandle(hMap);
+
+ if (!map) {
+ return nullptr;
+ }
+#else // !defined(XP_WIN)
+ // Sanity check the offset and length, as mmap does not do this for us.
+ struct stat st;
+ if (fstat(fd, &st) || offset >= uint64_t(st.st_size) ||
+ length > uint64_t(st.st_size) - offset) {
+ return nullptr;
+ }
+
+ void* region = MapAlignedPages(mappedLength, alignment);
+ if (!region) {
+ return nullptr;
+ }
+
+ // Calling mmap with MAP_FIXED will replace the previous mapping, allowing
+ // us to reuse the region we obtained without racing with other threads.
+ uint8_t* map =
+ static_cast<uint8_t*>(mmap(region, alignedLength, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_FIXED, fd, alignedOffset));
+ if (map == MAP_FAILED) {
+ UnmapInternal(region, mappedLength);
+ return nullptr;
+ }
+#endif
+
+#ifdef DEBUG
+ // Zero out data before and after the desired mapping to catch errors early.
+ if (offset != alignedOffset) {
+ memset(map, 0, offset - alignedOffset);
+ }
+ if (alignedLength % pageSize) {
+ memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
+ }
+#endif
+
+ return map + (offset - alignedOffset);
+}
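+
+/*
+ * For example, on Windows with a 64 KiB allocation granularity and a 4 KiB
+ * alignment, a request for offset 69632 (17 * 4096) maps from alignedOffset
+ * 65536 with 4096 extra leading bytes, and the caller receives map + 4096.
+ */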
+
+void DeallocateMappedContent(void* region, size_t length) {
+ if (!region) {
+ return;
+ }
+
+ // Due to bug 1502562, the following assertion does not currently hold.
+ // MOZ_RELEASE_ASSERT(length > 0);
+
+ // Calculate the address originally returned by the system call.
+ // This is needed because AllocateMappedContent returns a pointer
+ // that might be offset from the mapping, as the beginning of a
+ // mapping must be aligned with the allocation granularity.
+ uintptr_t map = uintptr_t(region) - (uintptr_t(region) % allocGranularity);
+#ifdef XP_WIN
+ MOZ_RELEASE_ASSERT(UnmapViewOfFile(reinterpret_cast<void*>(map)) != 0);
+#else
+ size_t alignedLength = length + (uintptr_t(region) % allocGranularity);
+ if (munmap(reinterpret_cast<void*>(map), alignedLength)) {
+ MOZ_RELEASE_ASSERT(errno == ENOMEM);
+ }
+#endif
+}
+
+static inline void ProtectMemory(void* region, size_t length, PageAccess prot) {
+ MOZ_RELEASE_ASSERT(region && OffsetFromAligned(region, pageSize) == 0);
+ MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
+#ifdef XP_WIN
+ DWORD oldProtect;
+ MOZ_RELEASE_ASSERT(VirtualProtect(region, length, DWORD(prot), &oldProtect) !=
+ 0);
+#else
+ MOZ_RELEASE_ASSERT(mprotect(region, length, int(prot)) == 0);
+#endif
+}
+
+void ProtectPages(void* region, size_t length) {
+ ProtectMemory(region, length, PageAccess::None);
+}
+
+void MakePagesReadOnly(void* region, size_t length) {
+ ProtectMemory(region, length, PageAccess::Read);
+}
+
+void UnprotectPages(void* region, size_t length) {
+ ProtectMemory(region, length, PageAccess::ReadWrite);
+}
+
+} // namespace gc
+} // namespace js
diff --git a/js/src/gc/Memory.h b/js/src/gc/Memory.h
new file mode 100644
index 0000000000..1d1a24dee7
--- /dev/null
+++ b/js/src/gc/Memory.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Memory_h
+#define gc_Memory_h
+
+#include "mozilla/Attributes.h"
+
+#include <stddef.h>
+
+namespace js {
+namespace gc {
+
+// Sanity check that our compiled configuration matches the currently
+// running instance and initialize any runtime data needed for allocation.
+void InitMemorySubsystem();
+
+// The page size as reported by the operating system.
+size_t SystemPageSize();
+
+// The number of bits that may be set in a valid address, as
+// reported by the operating system or measured at startup.
+size_t SystemAddressBits();
+
+// The number of bytes of virtual memory that may be allocated or mapped, as
+// reported by the operating system on certain platforms. If no limit was able
+// to be determined, then it will be size_t(-1).
+size_t VirtualMemoryLimit();
+
+// The scattershot allocator is used on platforms that have a large address
+// range. On these platforms we allocate at random addresses.
+bool UsingScattershotAllocator();
+
+// Allocate or deallocate pages from the system with the given alignment.
+// Pages will be read/write-able.
+void* MapAlignedPages(size_t length, size_t alignment);
+void UnmapPages(void* region, size_t length);
+
+// Tell the OS that the given pages are not in use, so they should not be
+// written to a paging file. This may be a no-op on some platforms.
+bool MarkPagesUnusedSoft(void* region, size_t length);
+
+// Tell the OS that the given pages are not in use and it can decommit them
+// immediately. This may defer to MarkPagesUnusedSoft and must be paired with
+// MarkPagesInUse to use the pages again.
+bool MarkPagesUnusedHard(void* region, size_t length);
+
+// Undo |MarkPagesUnusedSoft|: tell the OS that the given pages are of interest
+// and should be paged in and out normally. This may be a no-op on some
+// platforms. May make pages read/write-able.
+void MarkPagesInUseSoft(void* region, size_t length);
+
+// Undo |MarkPagesUnusedHard|: tell the OS that the given pages are of interest
+// and should be paged in and out normally. This may be a no-op on some
+// platforms. Callers must check the result; false could mean that the pages
+// are not available. May make pages read/write.
+MOZ_MUST_USE bool MarkPagesInUseHard(void* region, size_t length);
+
+// Returns #(hard faults) + #(soft faults)
+size_t GetPageFaultCount();
+
+// Allocate memory mapped content.
+// The offset must be aligned according to the alignment requirement.
+void* AllocateMappedContent(int fd, size_t offset, size_t length,
+ size_t alignment);
+
+// Deallocate memory mapped content.
+void DeallocateMappedContent(void* region, size_t length);
+
+void* TestMapAlignedPagesLastDitch(size_t size, size_t alignment);
+
+void ProtectPages(void* region, size_t length);
+void MakePagesReadOnly(void* region, size_t length);
+void UnprotectPages(void* region, size_t length);
+
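+// A minimal usage sketch (illustrative only; the sizes chosen here are
+// arbitrary, not requirements of the API, and InitMemorySubsystem must have
+// run first):
+//
+//   js::gc::InitMemorySubsystem();
+//   size_t len = 16 * js::gc::SystemPageSize();
+//   if (void* p = js::gc::MapAlignedPages(len, len)) {
+//     // ... use the region ...
+//     js::gc::MarkPagesUnusedSoft(p, len);  // hint: contents no longer needed
+//     js::gc::MarkPagesInUseSoft(p, len);   // undo the hint before reuse
+//     js::gc::UnmapPages(p, len);
+//   }
+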
+} // namespace gc
+} // namespace js
+
+#endif /* gc_Memory_h */
diff --git a/js/src/gc/Nursery-inl.h b/js/src/gc/Nursery-inl.h
new file mode 100644
index 0000000000..510c40553d
--- /dev/null
+++ b/js/src/gc/Nursery-inl.h
@@ -0,0 +1,188 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=4 sw=2 et tw=80 ft=cpp:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Nursery_inl_h
+#define gc_Nursery_inl_h
+
+#include "gc/Nursery.h"
+
+#include "gc/Heap.h"
+#include "gc/RelocationOverlay.h"
+#include "gc/Zone.h"
+#include "js/TracingAPI.h"
+#include "vm/JSContext.h"
+#include "vm/Runtime.h"
+#include "vm/SharedMem.h"
+
+inline JSRuntime* js::Nursery::runtime() const { return gc->rt; }
+
+template <typename T>
+bool js::Nursery::isInside(const SharedMem<T>& p) const {
+ return isInside(p.unwrap(/*safe - used for value in comparison above*/));
+}
+
+MOZ_ALWAYS_INLINE /* static */ bool js::Nursery::getForwardedPointer(
+ js::gc::Cell** ref) {
+ js::gc::Cell* cell = (*ref);
+ MOZ_ASSERT(IsInsideNursery(cell));
+ if (!cell->isForwarded()) {
+ return false;
+ }
+ const gc::RelocationOverlay* overlay = gc::RelocationOverlay::fromCell(cell);
+ *ref = overlay->forwardingAddress();
+ return true;
+}
+
+inline void js::Nursery::maybeSetForwardingPointer(JSTracer* trc, void* oldData,
+ void* newData, bool direct) {
+ if (trc->isTenuringTracer()) {
+ setForwardingPointerWhileTenuring(oldData, newData, direct);
+ }
+}
+
+inline void js::Nursery::setForwardingPointerWhileTenuring(void* oldData,
+ void* newData,
+ bool direct) {
+ if (isInside(oldData)) {
+ setForwardingPointer(oldData, newData, direct);
+ }
+}
+
+inline void js::Nursery::setSlotsForwardingPointer(HeapSlot* oldSlots,
+ HeapSlot* newSlots,
+ uint32_t nslots) {
+ // Slot arrays always have enough space for a forwarding pointer, since the
+ // number of slots is never zero.
+ MOZ_ASSERT(nslots > 0);
+ setDirectForwardingPointer(oldSlots, newSlots);
+}
+
+inline void js::Nursery::setElementsForwardingPointer(ObjectElements* oldHeader,
+ ObjectElements* newHeader,
+ uint32_t capacity) {
+ // Only use a direct forwarding pointer if there is enough space for one.
+ setForwardingPointer(oldHeader->elements(), newHeader->elements(),
+ capacity > 0);
+}
+
+inline void js::Nursery::setForwardingPointer(void* oldData, void* newData,
+ bool direct) {
+ if (direct) {
+ setDirectForwardingPointer(oldData, newData);
+ return;
+ }
+
+ setIndirectForwardingPointer(oldData, newData);
+}
+
+inline void js::Nursery::setDirectForwardingPointer(void* oldData,
+ void* newData) {
+ MOZ_ASSERT(isInside(oldData));
+ MOZ_ASSERT(!isInside(newData));
+
+ new (oldData) BufferRelocationOverlay{newData};
+}
+
+namespace js {
+
+// The allocation methods below will not run the garbage collector. If the
+// nursery cannot accommodate the allocation, the malloc heap will be used
+// instead.
+
+template <typename T>
+static inline T* AllocateObjectBuffer(JSContext* cx, uint32_t count) {
+ size_t nbytes = RoundUp(count * sizeof(T), sizeof(Value));
+ auto* buffer =
+ static_cast<T*>(cx->nursery().allocateBuffer(cx->zone(), nbytes));
+ if (!buffer) {
+ ReportOutOfMemory(cx);
+ }
+ return buffer;
+}
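+
+// For example, a request for three uint32_t elements (12 bytes) is rounded up
+// to 16 bytes so that nursery buffers always stay Value-aligned.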
+
+template <typename T>
+static inline T* AllocateObjectBuffer(JSContext* cx, JSObject* obj,
+ uint32_t count) {
+ if (cx->isHelperThreadContext()) {
+ return cx->pod_malloc<T>(count);
+ }
+ size_t nbytes = RoundUp(count * sizeof(T), sizeof(Value));
+ auto* buffer = static_cast<T*>(cx->nursery().allocateBuffer(obj, nbytes));
+ if (!buffer) {
+ ReportOutOfMemory(cx);
+ }
+ return buffer;
+}
+
+// If this returns null then the old buffer will be left alone.
+template <typename T>
+static inline T* ReallocateObjectBuffer(JSContext* cx, JSObject* obj,
+ T* oldBuffer, uint32_t oldCount,
+ uint32_t newCount) {
+ T* buffer;
+ if (cx->isHelperThreadContext()) {
+ buffer = obj->zone()->pod_realloc<T>(oldBuffer, oldCount, newCount);
+ } else {
+ buffer = static_cast<T*>(cx->nursery().reallocateBuffer(
+ obj->zone(), obj, oldBuffer, oldCount * sizeof(T),
+ newCount * sizeof(T)));
+ }
+
+ if (!buffer) {
+ ReportOutOfMemory(cx);
+ }
+
+ return buffer;
+}
+
+static inline JS::BigInt::Digit* AllocateBigIntDigits(JSContext* cx,
+ JS::BigInt* bi,
+ uint32_t length) {
+ JS::BigInt::Digit* digits;
+ if (cx->isHelperThreadContext()) {
+ digits = cx->pod_malloc<JS::BigInt::Digit>(length);
+ } else {
+ size_t nbytes = RoundUp(length * sizeof(JS::BigInt::Digit), sizeof(Value));
+ digits = static_cast<JS::BigInt::Digit*>(
+ cx->nursery().allocateBuffer(bi, nbytes));
+ }
+
+ if (!digits) {
+ ReportOutOfMemory(cx);
+ }
+
+ return digits;
+}
+
+static inline JS::BigInt::Digit* ReallocateBigIntDigits(
+ JSContext* cx, JS::BigInt* bi, JS::BigInt::Digit* oldDigits,
+ uint32_t oldLength, uint32_t newLength) {
+ JS::BigInt::Digit* digits;
+ if (cx->isHelperThreadContext()) {
+ MOZ_ASSERT(!cx->nursery().isInside(oldDigits));
+ digits = bi->zone()->pod_realloc<JS::BigInt::Digit>(oldDigits, oldLength,
+ newLength);
+ } else {
+ size_t oldBytes =
+ RoundUp(oldLength * sizeof(JS::BigInt::Digit), sizeof(Value));
+ size_t newBytes =
+ RoundUp(newLength * sizeof(JS::BigInt::Digit), sizeof(Value));
+
+ digits = static_cast<JS::BigInt::Digit*>(cx->nursery().reallocateBuffer(
+ bi->zone(), bi, oldDigits, oldBytes, newBytes));
+ }
+
+ if (!digits) {
+ ReportOutOfMemory(cx);
+ }
+
+ return digits;
+}
+
+} // namespace js
+
+#endif /* gc_Nursery_inl_h */
diff --git a/js/src/gc/Nursery.cpp b/js/src/gc/Nursery.cpp
new file mode 100644
index 0000000000..a9a3ad6a12
--- /dev/null
+++ b/js/src/gc/Nursery.cpp
@@ -0,0 +1,1841 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sw=2 et tw=80:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Nursery-inl.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/Unused.h"
+
+#include <algorithm>
+#include <cmath>
+#include <utility>
+
+#include "builtin/MapObject.h"
+#include "debugger/DebugAPI.h"
+#include "gc/FreeOp.h"
+#include "gc/GCInternals.h"
+#include "gc/GCLock.h"
+#include "gc/Memory.h"
+#include "gc/PublicIterators.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRealm.h"
+#include "util/DifferentialTesting.h"
+#include "util/Poison.h"
+#include "vm/ArrayObject.h"
+#include "vm/JSONPrinter.h"
+#include "vm/Realm.h"
+#include "vm/Time.h"
+#include "vm/TypedArrayObject.h"
+
+#include "gc/Marking-inl.h"
+#include "gc/Zone-inl.h"
+#include "vm/NativeObject-inl.h"
+
+#ifdef XP_WIN
+# include <process.h>
+# define getpid _getpid
+#else
+# include <unistd.h>
+#endif
+
+using namespace js;
+using namespace gc;
+
+using mozilla::DebugOnly;
+using mozilla::PodCopy;
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
+#ifdef JS_GC_ZEAL
+constexpr uint32_t CanaryMagicValue = 0xDEADB15D;
+
+struct alignas(gc::CellAlignBytes) js::Nursery::Canary {
+ uint32_t magicValue;
+ Canary* next;
+};
+#endif
+
+namespace js {
+struct NurseryChunk : public ChunkBase {
+ char data[Nursery::NurseryChunkUsableSize];
+
+ static NurseryChunk* fromChunk(gc::TenuredChunk* chunk);
+
+ explicit NurseryChunk(JSRuntime* runtime)
+ : ChunkBase(runtime, &runtime->gc.storeBuffer()) {}
+
+ void poisonAndInit(JSRuntime* rt, size_t size = ChunkSize);
+ void poisonRange(size_t from, size_t size, uint8_t value,
+ MemCheckKind checkKind);
+ void poisonAfterEvict(size_t extent = ChunkSize);
+
+ // The end of the range is always ChunkSize.
+ void markPagesUnusedHard(size_t from);
+ // The start of the range is always the beginning of the chunk.
+ MOZ_MUST_USE bool markPagesInUseHard(size_t to);
+
+ uintptr_t start() const { return uintptr_t(&data); }
+ uintptr_t end() const { return uintptr_t(this) + ChunkSize; }
+};
+static_assert(sizeof(js::NurseryChunk) == gc::ChunkSize,
+ "Nursery chunk size must match gc::Chunk size.");
+
+} // namespace js
+
+inline void js::NurseryChunk::poisonAndInit(JSRuntime* rt, size_t size) {
+ MOZ_ASSERT(size >= sizeof(ChunkBase));
+ MOZ_ASSERT(size <= ChunkSize);
+ poisonRange(0, size, JS_FRESH_NURSERY_PATTERN, MemCheckKind::MakeUndefined);
+ new (this) NurseryChunk(rt);
+}
+
+inline void js::NurseryChunk::poisonRange(size_t from, size_t size,
+ uint8_t value,
+ MemCheckKind checkKind) {
+ MOZ_ASSERT(from + size <= ChunkSize);
+
+ auto* start = reinterpret_cast<uint8_t*>(this) + from;
+
+ // We can poison the same chunk more than once, so first make sure memory
+ // sanitizers will let us poison it.
+ MOZ_MAKE_MEM_UNDEFINED(start, size);
+ Poison(start, value, size, checkKind);
+}
+
+inline void js::NurseryChunk::poisonAfterEvict(size_t extent) {
+ MOZ_ASSERT(extent <= ChunkSize);
+ poisonRange(sizeof(ChunkBase), extent - sizeof(ChunkBase),
+ JS_SWEPT_NURSERY_PATTERN, MemCheckKind::MakeNoAccess);
+}
+
+inline void js::NurseryChunk::markPagesUnusedHard(size_t from) {
+ MOZ_ASSERT(from >= sizeof(ChunkBase)); // Don't touch the header.
+ MOZ_ASSERT(from <= ChunkSize);
+ uintptr_t start = uintptr_t(this) + from;
+ MarkPagesUnusedHard(reinterpret_cast<void*>(start), ChunkSize - from);
+}
+
+inline bool js::NurseryChunk::markPagesInUseHard(size_t to) {
+ MOZ_ASSERT(to >= sizeof(ChunkBase));
+ MOZ_ASSERT(to <= ChunkSize);
+ return MarkPagesInUseHard(this, to);
+}
+
+// static
+inline js::NurseryChunk* js::NurseryChunk::fromChunk(TenuredChunk* chunk) {
+ return reinterpret_cast<NurseryChunk*>(chunk);
+}
+
+js::NurseryDecommitTask::NurseryDecommitTask(gc::GCRuntime* gc)
+ : GCParallelTask(gc) {}
+
+bool js::NurseryDecommitTask::isEmpty(
+ const AutoLockHelperThreadState& lock) const {
+ return chunksToDecommit().empty() && !partialChunk;
+}
+
+bool js::NurseryDecommitTask::reserveSpaceForBytes(size_t nbytes) {
+ MOZ_ASSERT(isIdle());
+ size_t nchunks = HowMany(nbytes, ChunkSize);
+ return chunksToDecommit().reserve(nchunks);
+}
+
+void js::NurseryDecommitTask::queueChunk(
+ NurseryChunk* chunk, const AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(isIdle(lock));
+ MOZ_ALWAYS_TRUE(chunksToDecommit().append(chunk));
+}
+
+void js::NurseryDecommitTask::queueRange(
+ size_t newCapacity, NurseryChunk& newChunk,
+ const AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(isIdle(lock));
+ MOZ_ASSERT(!partialChunk);
+ MOZ_ASSERT(newCapacity < ChunkSize);
+ MOZ_ASSERT(newCapacity % SystemPageSize() == 0);
+
+ partialChunk = &newChunk;
+ partialCapacity = newCapacity;
+}
+
+void js::NurseryDecommitTask::run(AutoLockHelperThreadState& lock) {
+ while (!chunksToDecommit().empty()) {
+ NurseryChunk* nurseryChunk = chunksToDecommit().popCopy();
+ AutoUnlockHelperThreadState unlock(lock);
+ auto* tenuredChunk = reinterpret_cast<TenuredChunk*>(nurseryChunk);
+ tenuredChunk->init(gc);
+ AutoLockGC lock(gc);
+ gc->recycleChunk(tenuredChunk, lock);
+ }
+
+ if (partialChunk) {
+ {
+ AutoUnlockHelperThreadState unlock(lock);
+ partialChunk->markPagesUnusedHard(partialCapacity);
+ }
+ partialChunk = nullptr;
+ partialCapacity = 0;
+ }
+}
+
+js::Nursery::Nursery(GCRuntime* gc)
+ : gc(gc),
+ position_(0),
+ currentStartChunk_(0),
+ currentStartPosition_(0),
+ currentEnd_(0),
+ currentStringEnd_(0),
+ currentBigIntEnd_(0),
+ currentChunk_(0),
+ capacity_(0),
+ timeInChunkAlloc_(0),
+ profileThreshold_(0),
+ enableProfiling_(false),
+ canAllocateStrings_(true),
+ canAllocateBigInts_(true),
+ reportDeduplications_(false),
+ minorGCTriggerReason_(JS::GCReason::NO_REASON),
+ hasRecentGrowthData(false),
+ smoothedGrowthFactor(1.0),
+ decommitTask(gc)
+#ifdef JS_GC_ZEAL
+ ,
+ lastCanary_(nullptr)
+#endif
+{
+ const char* env = getenv("MOZ_NURSERY_STRINGS");
+ if (env && *env) {
+ canAllocateStrings_ = (*env == '1');
+ }
+ env = getenv("MOZ_NURSERY_BIGINTS");
+ if (env && *env) {
+ canAllocateBigInts_ = (*env == '1');
+ }
+}
+
+bool js::Nursery::init(AutoLockGCBgAlloc& lock) {
+ if (char* env = getenv("JS_GC_PROFILE_NURSERY")) {
+ if (0 == strcmp(env, "help")) {
+ fprintf(stderr,
+ "JS_GC_PROFILE_NURSERY=N\n"
+ "\tReport minor GC's taking at least N microseconds.\n");
+ exit(0);
+ }
+ enableProfiling_ = true;
+ profileThreshold_ = TimeDuration::FromMicroseconds(atoi(env));
+ }
+
+ if (char* env = getenv("JS_GC_REPORT_STATS")) {
+ if (0 == strcmp(env, "help")) {
+ fprintf(stderr,
+ "JS_GC_REPORT_STATS=1\n"
+ "\tAfter a minor GC, report how many strings were "
+ "deduplicated.\n");
+ exit(0);
+ }
+ reportDeduplications_ = !!atoi(env);
+ }
+
+ if (!gc->storeBuffer().enable()) {
+ return false;
+ }
+
+ return initFirstChunk(lock);
+}
+
+js::Nursery::~Nursery() { disable(); }
+
+void js::Nursery::enable() {
+ MOZ_ASSERT(isEmpty());
+ MOZ_ASSERT(!gc->isVerifyPreBarriersEnabled());
+ if (isEnabled()) {
+ return;
+ }
+
+ {
+ AutoLockGCBgAlloc lock(gc);
+ if (!initFirstChunk(lock)) {
+ // If we fail to allocate memory, the nursery will not be enabled.
+ return;
+ }
+ }
+
+#ifdef JS_GC_ZEAL
+ if (gc->hasZealMode(ZealMode::GenerationalGC)) {
+ enterZealMode();
+ }
+#endif
+
+ // This should always succeed after the first time it's called.
+ MOZ_ALWAYS_TRUE(gc->storeBuffer().enable());
+}
+
+bool js::Nursery::initFirstChunk(AutoLockGCBgAlloc& lock) {
+ MOZ_ASSERT(!isEnabled());
+
+ capacity_ = tunables().gcMinNurseryBytes();
+
+ if (!decommitTask.reserveSpaceForBytes(capacity_) ||
+ !allocateNextChunk(0, lock)) {
+ capacity_ = 0;
+ return false;
+ }
+
+ setCurrentChunk(0);
+ setStartPosition();
+ poisonAndInitCurrentChunk();
+
+ // Clear any information about previous collections.
+ clearRecentGrowthData();
+
+ return true;
+}
+
+void js::Nursery::disable() {
+ stringDeDupSet.reset();
+ MOZ_ASSERT(isEmpty());
+ if (!isEnabled()) {
+ return;
+ }
+
+ // Free all chunks.
+ decommitTask.join();
+ freeChunksFrom(0);
+ decommitTask.runFromMainThread();
+
+ capacity_ = 0;
+
+ // We must reset currentEnd_ so that there is no space for anything in the
+ // nursery. JIT'd code uses this even if the nursery is disabled.
+ currentEnd_ = 0;
+ currentStringEnd_ = 0;
+ currentBigIntEnd_ = 0;
+ position_ = 0;
+ gc->storeBuffer().disable();
+}
+
+void js::Nursery::enableStrings() {
+ MOZ_ASSERT(isEmpty());
+ canAllocateStrings_ = true;
+ currentStringEnd_ = currentEnd_;
+}
+
+void js::Nursery::disableStrings() {
+ MOZ_ASSERT(isEmpty());
+ canAllocateStrings_ = false;
+ currentStringEnd_ = 0;
+}
+
+void js::Nursery::enableBigInts() {
+ MOZ_ASSERT(isEmpty());
+ canAllocateBigInts_ = true;
+ currentBigIntEnd_ = currentEnd_;
+}
+
+void js::Nursery::disableBigInts() {
+ MOZ_ASSERT(isEmpty());
+ canAllocateBigInts_ = false;
+ currentBigIntEnd_ = 0;
+}
+
+bool js::Nursery::isEmpty() const {
+ if (!isEnabled()) {
+ return true;
+ }
+
+ if (!gc->hasZealMode(ZealMode::GenerationalGC)) {
+ MOZ_ASSERT(currentStartChunk_ == 0);
+ MOZ_ASSERT(currentStartPosition_ == chunk(0).start());
+ }
+ return position() == currentStartPosition_;
+}
+
+#ifdef JS_GC_ZEAL
+void js::Nursery::enterZealMode() {
+ if (!isEnabled()) {
+ return;
+ }
+
+ MOZ_ASSERT(isEmpty());
+
+ decommitTask.join();
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ if (isSubChunkMode()) {
+ {
+ if (!chunk(0).markPagesInUseHard(ChunkSize)) {
+ oomUnsafe.crash("Out of memory trying to extend chunk for zeal mode");
+ }
+ }
+
+ // It'd be simpler to poison the whole chunk, but we can't do that
+ // because the nursery might be partially used.
+ chunk(0).poisonRange(capacity_, ChunkSize - capacity_,
+ JS_FRESH_NURSERY_PATTERN, MemCheckKind::MakeUndefined);
+ }
+
+ capacity_ = RoundUp(tunables().gcMaxNurseryBytes(), ChunkSize);
+
+ if (!decommitTask.reserveSpaceForBytes(capacity_)) {
+ oomUnsafe.crash("Nursery::enterZealMode");
+ }
+
+ setCurrentEnd();
+}
+
+void js::Nursery::leaveZealMode() {
+ if (!isEnabled()) {
+ return;
+ }
+
+ MOZ_ASSERT(isEmpty());
+
+ setCurrentChunk(0);
+ setStartPosition();
+ poisonAndInitCurrentChunk();
+}
+#endif // JS_GC_ZEAL
+
+JSObject* js::Nursery::allocateObject(JSContext* cx, size_t size,
+ size_t nDynamicSlots,
+ const JSClass* clasp) {
+ // Ensure there's enough space to replace the contents with a
+ // RelocationOverlay.
+ MOZ_ASSERT(size >= sizeof(RelocationOverlay));
+
+ // Sanity check the finalizer.
+ MOZ_ASSERT_IF(clasp->hasFinalize(),
+ CanNurseryAllocateFinalizedClass(clasp) || clasp->isProxy());
+
+ auto* obj = reinterpret_cast<JSObject*>(
+ allocateCell(cx->zone(), size, JS::TraceKind::Object));
+ if (!obj) {
+ return nullptr;
+ }
+
+ // If we want external slots, add them.
+ ObjectSlots* slotsHeader = nullptr;
+ if (nDynamicSlots) {
+ MOZ_ASSERT(clasp->isNative());
+ void* allocation =
+ allocateBuffer(cx->zone(), ObjectSlots::allocSize(nDynamicSlots));
+ if (!allocation) {
+ // It is safe to leave the allocated object uninitialized, since we
+ // do not visit unallocated things in the nursery.
+ return nullptr;
+ }
+ slotsHeader = new (allocation) ObjectSlots(nDynamicSlots, 0);
+ }
+
+  // Store the slots pointer directly in the new object. If no dynamic slots
+  // were requested, the caller must initialize the slots_ field itself as
+  // needed; we don't know whether the caller is a native object or not.
+ if (nDynamicSlots) {
+ static_cast<NativeObject*>(obj)->initSlots(slotsHeader->slots());
+ }
+
+ gcprobes::NurseryAlloc(obj, size);
+ return obj;
+}
+
+Cell* js::Nursery::allocateCell(Zone* zone, size_t size, JS::TraceKind kind) {
+ // Ensure there's enough space to replace the contents with a
+ // RelocationOverlay.
+ MOZ_ASSERT(size >= sizeof(RelocationOverlay));
+ MOZ_ASSERT(size % CellAlignBytes == 0);
+
+ void* ptr = allocate(sizeof(NurseryCellHeader) + size);
+ if (!ptr) {
+ return nullptr;
+ }
+
+ new (ptr) NurseryCellHeader(zone, kind);
+
+ auto cell =
+ reinterpret_cast<Cell*>(uintptr_t(ptr) + sizeof(NurseryCellHeader));
+ gcprobes::NurseryAlloc(cell, kind);
+ return cell;
+}
+
+Cell* js::Nursery::allocateString(JS::Zone* zone, size_t size) {
+ Cell* cell = allocateCell(zone, size, JS::TraceKind::String);
+ if (cell) {
+ zone->nurseryAllocatedStrings++;
+ }
+ return cell;
+}
+
+inline void* js::Nursery::allocate(size_t size) {
+ MOZ_ASSERT(isEnabled());
+ MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
+ MOZ_ASSERT_IF(currentChunk_ == currentStartChunk_,
+ position() >= currentStartPosition_);
+ MOZ_ASSERT(position() % CellAlignBytes == 0);
+ MOZ_ASSERT(size % CellAlignBytes == 0);
+
+#ifdef JS_GC_ZEAL
+ if (gc->hasZealMode(ZealMode::CheckNursery)) {
+ size += sizeof(Canary);
+ }
+#endif
+
+ if (MOZ_UNLIKELY(currentEnd() < position() + size)) {
+ return moveToNextChunkAndAllocate(size);
+ }
+
+ void* thing = (void*)position();
+ position_ = position() + size;
+ // We count this regardless of the profiler's state, assuming that it costs
+  // just as much to count it as to check the profiler's state and decide not
+ // to count it.
+ stats().noteNurseryAlloc();
+
+ DebugOnlyPoison(thing, JS_ALLOCATED_NURSERY_PATTERN, size,
+ MemCheckKind::MakeUndefined);
+
+#ifdef JS_GC_ZEAL
+ if (gc->hasZealMode(ZealMode::CheckNursery)) {
+ writeCanary(position() - sizeof(Canary));
+ }
+#endif
+
+ return thing;
+}
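+
+/*
+ * For example, a 64-byte cell allocation simply checks that position() + 64
+ * still fits below currentEnd() and then bumps position_ by 64 (plus
+ * sizeof(Canary) when the CheckNursery zeal mode is active).
+ */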
+
+void* Nursery::moveToNextChunkAndAllocate(size_t size) {
+ MOZ_ASSERT(currentEnd() < position() + size);
+
+ unsigned chunkno = currentChunk_ + 1;
+ MOZ_ASSERT(chunkno <= maxChunkCount());
+ MOZ_ASSERT(chunkno <= allocatedChunkCount());
+ if (chunkno == maxChunkCount()) {
+ return nullptr;
+ }
+ if (chunkno == allocatedChunkCount()) {
+ mozilla::TimeStamp start = ReallyNow();
+ {
+ AutoLockGCBgAlloc lock(gc);
+ if (!allocateNextChunk(chunkno, lock)) {
+ return nullptr;
+ }
+ }
+ timeInChunkAlloc_ += ReallyNow() - start;
+ MOZ_ASSERT(chunkno < allocatedChunkCount());
+ }
+ setCurrentChunk(chunkno);
+ poisonAndInitCurrentChunk();
+
+  // We know there's enough space to allocate now, so we can call allocate()
+  // recursively. Adjust the size for the nursery canary, which it will add on.
+ MOZ_ASSERT(currentEnd() >= position() + size);
+#ifdef JS_GC_ZEAL
+ if (gc->hasZealMode(ZealMode::CheckNursery)) {
+ size -= sizeof(Canary);
+ }
+#endif
+ return allocate(size);
+}
+
+#ifdef JS_GC_ZEAL
+inline void Nursery::writeCanary(uintptr_t address) {
+ auto* canary = reinterpret_cast<Canary*>(address);
+ new (canary) Canary{CanaryMagicValue, nullptr};
+ if (lastCanary_) {
+ MOZ_ASSERT(!lastCanary_->next);
+ lastCanary_->next = canary;
+ }
+ lastCanary_ = canary;
+}
+#endif
+
+void* js::Nursery::allocateBuffer(Zone* zone, size_t nbytes) {
+ MOZ_ASSERT(nbytes > 0);
+
+ if (nbytes <= MaxNurseryBufferSize) {
+ void* buffer = allocate(nbytes);
+ if (buffer) {
+ return buffer;
+ }
+ }
+
+ void* buffer = zone->pod_malloc<uint8_t>(nbytes);
+ if (buffer && !registerMallocedBuffer(buffer, nbytes)) {
+ js_free(buffer);
+ return nullptr;
+ }
+ return buffer;
+}
+
+void* js::Nursery::allocateBuffer(JSObject* obj, size_t nbytes) {
+ MOZ_ASSERT(obj);
+ MOZ_ASSERT(nbytes > 0);
+
+ if (!IsInsideNursery(obj)) {
+ return obj->zone()->pod_malloc<uint8_t>(nbytes);
+ }
+ return allocateBuffer(obj->zone(), nbytes);
+}
+
+void* js::Nursery::allocateBufferSameLocation(JSObject* obj, size_t nbytes) {
+ MOZ_ASSERT(obj);
+ MOZ_ASSERT(nbytes > 0);
+ MOZ_ASSERT(nbytes <= MaxNurseryBufferSize);
+
+ if (!IsInsideNursery(obj)) {
+ return obj->zone()->pod_malloc<uint8_t>(nbytes);
+ }
+
+ return allocate(nbytes);
+}
+
+void* js::Nursery::allocateZeroedBuffer(
+ Zone* zone, size_t nbytes, arena_id_t arena /*= js::MallocArena*/) {
+ MOZ_ASSERT(nbytes > 0);
+
+ if (nbytes <= MaxNurseryBufferSize) {
+ void* buffer = allocate(nbytes);
+ if (buffer) {
+ memset(buffer, 0, nbytes);
+ return buffer;
+ }
+ }
+
+ void* buffer = zone->pod_arena_calloc<uint8_t>(arena, nbytes);
+ if (buffer && !registerMallocedBuffer(buffer, nbytes)) {
+ js_free(buffer);
+ return nullptr;
+ }
+ return buffer;
+}
+
+void* js::Nursery::allocateZeroedBuffer(
+ JSObject* obj, size_t nbytes, arena_id_t arena /*= js::MallocArena*/) {
+ MOZ_ASSERT(obj);
+ MOZ_ASSERT(nbytes > 0);
+
+ if (!IsInsideNursery(obj)) {
+ return obj->zone()->pod_arena_calloc<uint8_t>(arena, nbytes);
+ }
+ return allocateZeroedBuffer(obj->zone(), nbytes, arena);
+}
+
+void* js::Nursery::reallocateBuffer(Zone* zone, Cell* cell, void* oldBuffer,
+ size_t oldBytes, size_t newBytes) {
+ if (!IsInsideNursery(cell)) {
+ return zone->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
+ }
+
+ if (!isInside(oldBuffer)) {
+ MOZ_ASSERT(mallocedBufferBytes >= oldBytes);
+ void* newBuffer =
+ zone->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
+ if (newBuffer) {
+ if (oldBuffer != newBuffer) {
+ MOZ_ALWAYS_TRUE(
+ mallocedBuffers.rekeyAs(oldBuffer, newBuffer, newBuffer));
+ }
+ mallocedBufferBytes -= oldBytes;
+ mallocedBufferBytes += newBytes;
+ }
+ return newBuffer;
+ }
+
+ // The nursery cannot make use of the returned slots data.
+ if (newBytes < oldBytes) {
+ return oldBuffer;
+ }
+
+ void* newBuffer = allocateBuffer(zone, newBytes);
+ if (newBuffer) {
+ PodCopy((uint8_t*)newBuffer, (uint8_t*)oldBuffer, oldBytes);
+ }
+ return newBuffer;
+}
+
+void* js::Nursery::allocateBuffer(JS::BigInt* bi, size_t nbytes) {
+ MOZ_ASSERT(bi);
+ MOZ_ASSERT(nbytes > 0);
+
+ if (!IsInsideNursery(bi)) {
+ return bi->zone()->pod_malloc<uint8_t>(nbytes);
+ }
+ return allocateBuffer(bi->zone(), nbytes);
+}
+
+void js::Nursery::freeBuffer(void* buffer, size_t nbytes) {
+ if (!isInside(buffer)) {
+ removeMallocedBuffer(buffer, nbytes);
+ js_free(buffer);
+ }
+}
+
+#ifdef DEBUG
+/* static */
+inline bool Nursery::checkForwardingPointerLocation(void* ptr,
+ bool expectedInside) {
+ if (isInside(ptr) == expectedInside) {
+ return true;
+ }
+
+ // If a zero-capacity elements header lands right at the end of a chunk then
+ // elements data will appear to be in the next chunk. If we have a pointer to
+ // the very start of a chunk, check the previous chunk.
+ if ((uintptr_t(ptr) & ChunkMask) == 0 &&
+ isInside(reinterpret_cast<uint8_t*>(ptr) - 1) == expectedInside) {
+ return true;
+ }
+
+ return false;
+}
+#endif
+
+void Nursery::setIndirectForwardingPointer(void* oldData, void* newData) {
+ MOZ_ASSERT(checkForwardingPointerLocation(oldData, true));
+ MOZ_ASSERT(checkForwardingPointerLocation(newData, false));
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+#ifdef DEBUG
+ if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(oldData)) {
+ MOZ_ASSERT(p->value() == newData);
+ }
+#endif
+ if (!forwardedBuffers.put(oldData, newData)) {
+ oomUnsafe.crash("Nursery::setForwardingPointer");
+ }
+}
+
+#ifdef DEBUG
+static bool IsWriteableAddress(void* ptr) {
+ auto* vPtr = reinterpret_cast<volatile uint64_t*>(ptr);
+ *vPtr = *vPtr;
+ return true;
+}
+#endif
+
+void js::Nursery::forwardBufferPointer(uintptr_t* pSlotsElems) {
+ // Read the current pointer value which may be one of:
+ // - Non-nursery pointer
+ // - Nursery-allocated buffer
+ // - A BufferRelocationOverlay inside the nursery
+ //
+  // Note: The buffer has already been relocated. We are just patching stale
+  // pointers now.
+ auto* buffer = reinterpret_cast<void*>(*pSlotsElems);
+
+ if (!isInside(buffer)) {
+ return;
+ }
+
+ // The new location for this buffer is either stored inline with it or in
+ // the forwardedBuffers table.
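+  // Buffers that were too small to hold an inline forwarding pointer, or that
+  // might overlap other buffers, are recorded in forwardedBuffers by
+  // setIndirectForwardingPointer() above instead.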
+ if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(buffer)) {
+ buffer = p->value();
+ // It's not valid to assert IsWriteableAddress for indirect forwarding
+ // pointers because the size of the allocation could be less than a word.
+ } else {
+ BufferRelocationOverlay* reloc =
+ static_cast<BufferRelocationOverlay*>(buffer);
+ buffer = *reloc;
+ MOZ_ASSERT(IsWriteableAddress(buffer));
+ }
+
+ MOZ_ASSERT(!isInside(buffer));
+ *pSlotsElems = reinterpret_cast<uintptr_t>(buffer);
+}
+
+js::TenuringTracer::TenuringTracer(JSRuntime* rt, Nursery* nursery)
+ : GenericTracer(rt, JS::TracerKind::Tenuring,
+ JS::WeakMapTraceAction::TraceKeysAndValues),
+ nursery_(*nursery),
+ tenuredSize(0),
+ tenuredCells(0),
+ objHead(nullptr),
+ objTail(&objHead),
+ stringHead(nullptr),
+ stringTail(&stringHead),
+ bigIntHead(nullptr),
+ bigIntTail(&bigIntHead) {}
+
+inline double js::Nursery::calcPromotionRate(bool* validForTenuring) const {
+ double used = double(previousGC.nurseryUsedBytes);
+ double capacity = double(previousGC.nurseryCapacity);
+ double tenured = double(previousGC.tenuredBytes);
+ double rate;
+
+ if (previousGC.nurseryUsedBytes > 0) {
+ if (validForTenuring) {
+      // We can only use promotion rates if they're likely to be valid;
+      // they're only valid if the nursery was at least 90% full.
+ *validForTenuring = used > capacity * 0.9;
+ }
+ rate = tenured / used;
+ } else {
+ if (validForTenuring) {
+ *validForTenuring = false;
+ }
+ rate = 0.0;
+ }
+
+ return rate;
+}
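+
+// Worked example for calcPromotionRate() (illustrative figures only): with a
+// 1 MiB nursery of which 950 KiB was used and 95 KiB was tenured, the
+// promotion rate is 95 / 950 = 0.1, and it is considered valid for tenuring
+// decisions because 950 KiB is more than 90% of 1 MiB.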
+
+void js::Nursery::renderProfileJSON(JSONPrinter& json) const {
+ if (!isEnabled()) {
+ json.beginObject();
+ json.property("status", "nursery disabled");
+ json.endObject();
+ return;
+ }
+
+ if (previousGC.reason == JS::GCReason::NO_REASON) {
+ // If the nursery was empty when the last minorGC was requested, then
+ // no nursery collection will have been performed but JSON may still be
+ // requested. (And as a public API, this function should not crash in
+ // such a case.)
+ json.beginObject();
+ json.property("status", "nursery empty");
+ json.endObject();
+ return;
+ }
+
+ json.beginObject();
+
+ json.property("status", "complete");
+
+ json.property("reason", JS::ExplainGCReason(previousGC.reason));
+ json.property("bytes_tenured", previousGC.tenuredBytes);
+ json.property("cells_tenured", previousGC.tenuredCells);
+ json.property("strings_tenured",
+ stats().getStat(gcstats::STAT_STRINGS_TENURED));
+ json.property("strings_deduplicated",
+ stats().getStat(gcstats::STAT_STRINGS_DEDUPLICATED));
+ json.property("bigints_tenured",
+ stats().getStat(gcstats::STAT_BIGINTS_TENURED));
+ json.property("bytes_used", previousGC.nurseryUsedBytes);
+ json.property("cur_capacity", previousGC.nurseryCapacity);
+ const size_t newCapacity = capacity();
+ if (newCapacity != previousGC.nurseryCapacity) {
+ json.property("new_capacity", newCapacity);
+ }
+ if (previousGC.nurseryCommitted != previousGC.nurseryCapacity) {
+ json.property("lazy_capacity", previousGC.nurseryCommitted);
+ }
+ if (!timeInChunkAlloc_.IsZero()) {
+ json.property("chunk_alloc_us", timeInChunkAlloc_, json.MICROSECONDS);
+ }
+
+  // These counters only contain consistent data if the profiler is enabled,
+  // and even then there's no guarantee.
+ if (runtime()->geckoProfiler().enabled()) {
+ json.property("cells_allocated_nursery",
+ stats().allocsSinceMinorGCNursery());
+ json.property("cells_allocated_tenured",
+ stats().allocsSinceMinorGCTenured());
+ }
+
+ if (stats().getStat(gcstats::STAT_NURSERY_STRING_REALMS_DISABLED)) {
+ json.property(
+ "nursery_string_realms_disabled",
+ stats().getStat(gcstats::STAT_NURSERY_STRING_REALMS_DISABLED));
+ }
+ if (stats().getStat(gcstats::STAT_NURSERY_BIGINT_REALMS_DISABLED)) {
+ json.property(
+ "nursery_bigint_realms_disabled",
+ stats().getStat(gcstats::STAT_NURSERY_BIGINT_REALMS_DISABLED));
+ }
+
+ json.beginObjectProperty("phase_times");
+
+#define EXTRACT_NAME(name, text) #name,
+ static const char* const names[] = {
+ FOR_EACH_NURSERY_PROFILE_TIME(EXTRACT_NAME)
+#undef EXTRACT_NAME
+ ""};
+
+ size_t i = 0;
+ for (auto time : profileDurations_) {
+ json.property(names[i++], time, json.MICROSECONDS);
+ }
+
+ json.endObject(); // timings value
+
+ json.endObject();
+}
+
+void js::Nursery::printCollectionProfile(JS::GCReason reason,
+ double promotionRate) {
+ stats().maybePrintProfileHeaders();
+
+ TimeDuration ts = collectionStartTime() - stats().creationTime();
+
+ fprintf(stderr, "MinorGC: %12p %10.6f %-20.20s %4.1f%% %5zu %5zu %6" PRIu32,
+ runtime(), ts.ToSeconds(), JS::ExplainGCReason(reason),
+ promotionRate * 100, previousGC.nurseryCapacity / 1024,
+ capacity() / 1024,
+ stats().getStat(gcstats::STAT_STRINGS_DEDUPLICATED));
+
+ printProfileDurations(profileDurations_);
+}
+
+// static
+void js::Nursery::printProfileHeader() {
+ fprintf(stderr,
+ "MinorGC: Runtime Timestamp Reason PRate OldSz "
+ "NewSz Dedup");
+#define PRINT_HEADER(name, text) fprintf(stderr, " %6s", text);
+ FOR_EACH_NURSERY_PROFILE_TIME(PRINT_HEADER)
+#undef PRINT_HEADER
+ fprintf(stderr, "\n");
+}
+
+// static
+void js::Nursery::printProfileDurations(const ProfileDurations& times) {
+ for (auto time : times) {
+ fprintf(stderr, " %6" PRIi64, static_cast<int64_t>(time.ToMicroseconds()));
+ }
+ fprintf(stderr, "\n");
+}
+
+void js::Nursery::printTotalProfileTimes() {
+ if (enableProfiling_) {
+ fprintf(stderr,
+ "MinorGC TOTALS: %7" PRIu64
+ " collections: %16" PRIu64,
+ gc->stringStats.deduplicatedStrings, gc->minorGCCount());
+ printProfileDurations(totalDurations_);
+ }
+}
+
+void js::Nursery::maybeClearProfileDurations() {
+ for (auto& duration : profileDurations_) {
+ duration = mozilla::TimeDuration();
+ }
+}
+
+inline void js::Nursery::startProfile(ProfileKey key) {
+ startTimes_[key] = ReallyNow();
+}
+
+inline void js::Nursery::endProfile(ProfileKey key) {
+ profileDurations_[key] = ReallyNow() - startTimes_[key];
+ totalDurations_[key] += profileDurations_[key];
+}
+
+inline TimeStamp js::Nursery::collectionStartTime() const {
+ return startTimes_[ProfileKey::Total];
+}
+
+inline TimeStamp js::Nursery::lastCollectionEndTime() const {
+ return previousGC.endTime;
+}
+
+bool js::Nursery::shouldCollect() const {
+ if (minorGCRequested()) {
+ return true;
+ }
+
+ if (isEmpty() && capacity() == tunables().gcMinNurseryBytes()) {
+ return false;
+ }
+
+ // Eagerly collect the nursery in idle time if it's nearly full.
+ if (isNearlyFull()) {
+ return true;
+ }
+
+ // If the nursery is not being collected often then it may be taking up more
+ // space than necessary.
+ return isUnderused();
+}
+
+inline bool js::Nursery::isNearlyFull() const {
+ bool belowBytesThreshold =
+ freeSpace() < tunables().nurseryFreeThresholdForIdleCollection();
+ bool belowFractionThreshold =
+ double(freeSpace()) / double(capacity()) <
+ tunables().nurseryFreeThresholdForIdleCollectionFraction();
+
+ // We want to use belowBytesThreshold when the nursery is sufficiently large,
+ // and belowFractionThreshold when it's small.
+ //
+ // When the nursery is small then belowBytesThreshold is a lower threshold
+ // (triggered earlier) than belowFractionThreshold. So if the fraction
+ // threshold is true, the bytes one will be true also. The opposite is true
+ // when the nursery is large.
+ //
+ // Therefore, by the time we cross the threshold we care about, we've already
+ // crossed the other one, and we can boolean AND to use either condition
+ // without encoding any "is the nursery big/small" test/threshold. The point
+ // at which they cross is when the nursery is: BytesThreshold /
+ // FractionThreshold large.
+ //
+ // With defaults that's:
+ //
+ // 1MB = 256KB / 0.25
+ //
+ return belowBytesThreshold && belowFractionThreshold;
+}
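+
+// Worked example for isNearlyFull() (illustrative, using the default 256KB /
+// 0.25 thresholds mentioned above): for a 4MB nursery the fraction test is
+// met once free space drops below 1MB, but both tests are only met below
+// 256KB; for a 512KB nursery the bytes test is met below 256KB, but both are
+// only met below 128KB. ANDing the two therefore always picks the stricter
+// condition without needing an explicit big/small nursery check.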
+
+// If the nursery is above its minimum size, collect it at least this often if
+// we have idle time. This allows the nursery to shrink when it's not being
+// used. There are other heuristics we could use for this, but this is the
+// simplest.
+static const TimeDuration UnderuseTimeout = TimeDuration::FromSeconds(2.0);
+
+inline bool js::Nursery::isUnderused() const {
+ if (js::SupportDifferentialTesting() || !previousGC.endTime) {
+ return false;
+ }
+
+ if (capacity() == tunables().gcMinNurseryBytes()) {
+ return false;
+ }
+
+ TimeDuration timeSinceLastCollection = ReallyNow() - previousGC.endTime;
+ return timeSinceLastCollection > UnderuseTimeout;
+}
+
+// typeReason is the gcReason for specified type, for example,
+// FULL_CELL_PTR_OBJ_BUFFER is the gcReason for JSObject.
+static inline bool IsFullStoreBufferReason(JS::GCReason reason,
+ JS::GCReason typeReason) {
+ return reason == typeReason ||
+ reason == JS::GCReason::FULL_WHOLE_CELL_BUFFER ||
+ reason == JS::GCReason::FULL_GENERIC_BUFFER ||
+ reason == JS::GCReason::FULL_VALUE_BUFFER ||
+ reason == JS::GCReason::FULL_SLOT_BUFFER ||
+ reason == JS::GCReason::FULL_SHAPE_BUFFER;
+}
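+
+// For example, doPretenuring() below passes FULL_CELL_PTR_STR_BUFFER or
+// FULL_CELL_PTR_BIGINT_BUFFER as typeReason when deciding whether string or
+// BigInt pretenuring should be considered for this collection.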
+
+void js::Nursery::collect(JSGCInvocationKind kind, JS::GCReason reason) {
+ JSRuntime* rt = runtime();
+ MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
+
+ if (!isEnabled() || isEmpty()) {
+ // Our barriers are not always exact, and there may be entries in the
+    // store buffer even when the nursery is disabled or empty. It's not safe
+ // to keep these entries as they may refer to tenured cells which may be
+ // freed after this point.
+ gc->storeBuffer().clear();
+ }
+
+ if (!isEnabled()) {
+ return;
+ }
+
+#ifdef JS_GC_ZEAL
+ if (gc->hasZealMode(ZealMode::CheckNursery)) {
+ for (auto canary = lastCanary_; canary; canary = canary->next) {
+ MOZ_ASSERT(canary->magicValue == CanaryMagicValue);
+ }
+ }
+ lastCanary_ = nullptr;
+#endif
+
+ stats().beginNurseryCollection(reason);
+ gcprobes::MinorGCStart();
+
+ stringDeDupSet.emplace();
+ auto guardStringDedupSet =
+ mozilla::MakeScopeExit([&] { stringDeDupSet.reset(); });
+
+ maybeClearProfileDurations();
+ startProfile(ProfileKey::Total);
+
+ previousGC.reason = JS::GCReason::NO_REASON;
+ previousGC.nurseryUsedBytes = usedSpace();
+ previousGC.nurseryCapacity = capacity();
+ previousGC.nurseryCommitted = committed();
+ previousGC.tenuredBytes = 0;
+ previousGC.tenuredCells = 0;
+
+  // If the nursery isn't empty, doCollection() is called below and isEmpty()
+  // may become true afterwards, so use another variable to keep track of the
+  // old empty state.
+ bool wasEmpty = isEmpty();
+ if (!wasEmpty) {
+ CollectionResult result = doCollection(reason);
+ previousGC.reason = reason;
+ previousGC.tenuredBytes = result.tenuredBytes;
+ previousGC.tenuredCells = result.tenuredCells;
+ }
+
+ // Resize the nursery.
+ maybeResizeNursery(kind, reason);
+
+ // Poison/initialise the first chunk.
+ if (previousGC.nurseryUsedBytes) {
+ // In most cases Nursery::clear() has not poisoned this chunk or marked it
+ // as NoAccess; so we only need to poison the region used during the last
+ // cycle. Also, if the heap was recently expanded we don't want to
+ // re-poison the new memory. In both cases we only need to poison until
+ // previousGC.nurseryUsedBytes.
+ //
+ // In cases where this is not true, like generational zeal mode or subchunk
+ // mode, poisonAndInitCurrentChunk() will ignore its parameter. It will
+ // also clamp the parameter.
+ poisonAndInitCurrentChunk(previousGC.nurseryUsedBytes);
+ }
+
+ bool validPromotionRate;
+ const double promotionRate = calcPromotionRate(&validPromotionRate);
+ bool highPromotionRate =
+ validPromotionRate && promotionRate > tunables().pretenureThreshold();
+
+ startProfile(ProfileKey::Pretenure);
+ doPretenuring(rt, reason, highPromotionRate);
+ endProfile(ProfileKey::Pretenure);
+
+ // We ignore gcMaxBytes when allocating for minor collection. However, if we
+ // overflowed, we disable the nursery. The next time we allocate, we'll fail
+ // because bytes >= gcMaxBytes.
+ if (gc->heapSize.bytes() >= tunables().gcMaxBytes()) {
+ disable();
+ }
+
+ previousGC.endTime = ReallyNow(); // Must happen after maybeResizeNursery.
+ endProfile(ProfileKey::Total);
+ gc->incMinorGcNumber();
+
+ TimeDuration totalTime = profileDurations_[ProfileKey::Total];
+ sendTelemetry(reason, totalTime, wasEmpty, promotionRate);
+
+ stats().endNurseryCollection(reason);
+ gcprobes::MinorGCEnd();
+
+ timeInChunkAlloc_ = mozilla::TimeDuration();
+
+ js::StringStats prevStats = gc->stringStats;
+ js::StringStats& currStats = gc->stringStats;
+ currStats = js::StringStats();
+ for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
+ currStats += zone->stringStats;
+ zone->previousGCStringStats = zone->stringStats;
+ }
+ stats().setStat(
+ gcstats::STAT_STRINGS_DEDUPLICATED,
+ currStats.deduplicatedStrings - prevStats.deduplicatedStrings);
+ if (enableProfiling_ && totalTime >= profileThreshold_) {
+ printCollectionProfile(reason, promotionRate);
+ }
+
+ if (reportDeduplications_) {
+ printDeduplicationData(prevStats, currStats);
+ }
+}
+
+void js::Nursery::sendTelemetry(JS::GCReason reason, TimeDuration totalTime,
+ bool wasEmpty, double promotionRate) {
+ JSRuntime* rt = runtime();
+ rt->addTelemetry(JS_TELEMETRY_GC_MINOR_REASON, uint32_t(reason));
+ if (totalTime.ToMilliseconds() > 1.0) {
+ rt->addTelemetry(JS_TELEMETRY_GC_MINOR_REASON_LONG, uint32_t(reason));
+ }
+ rt->addTelemetry(JS_TELEMETRY_GC_MINOR_US, totalTime.ToMicroseconds());
+ rt->addTelemetry(JS_TELEMETRY_GC_NURSERY_BYTES, committed());
+
+ if (!wasEmpty) {
+ rt->addTelemetry(JS_TELEMETRY_GC_PRETENURE_COUNT_2, 0);
+ rt->addTelemetry(JS_TELEMETRY_GC_NURSERY_PROMOTION_RATE,
+ promotionRate * 100);
+ }
+}
+
+void js::Nursery::printDeduplicationData(js::StringStats& prev,
+ js::StringStats& curr) {
+ if (curr.deduplicatedStrings > prev.deduplicatedStrings) {
+ fprintf(stderr,
+ "pid %zu: deduplicated %" PRIi64 " strings, %" PRIu64
+ " chars, %" PRIu64 " malloc bytes\n",
+ size_t(getpid()),
+ curr.deduplicatedStrings - prev.deduplicatedStrings,
+ curr.deduplicatedChars - prev.deduplicatedChars,
+ curr.deduplicatedBytes - prev.deduplicatedBytes);
+ }
+}
+
+js::Nursery::CollectionResult js::Nursery::doCollection(JS::GCReason reason) {
+ JSRuntime* rt = runtime();
+ AutoGCSession session(gc, JS::HeapState::MinorCollecting);
+ AutoSetThreadIsPerformingGC performingGC;
+ AutoStopVerifyingBarriers av(rt, false);
+ AutoDisableProxyCheck disableStrictProxyChecking;
+ mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;
+
+ // Move objects pointed to by roots from the nursery to the major heap.
+ TenuringTracer mover(rt, this);
+
+ // Mark the store buffer. This must happen first.
+ StoreBuffer& sb = gc->storeBuffer();
+
+ // Strings in the whole cell buffer must be traced first, in order to mark
+ // tenured dependent strings' bases as non-deduplicatable. The rest of
+ // nursery collection (whole non-string cells, edges, etc.) can happen later.
+ startProfile(ProfileKey::TraceWholeCells);
+ sb.traceWholeCells(mover);
+ endProfile(ProfileKey::TraceWholeCells);
+
+ startProfile(ProfileKey::TraceValues);
+ sb.traceValues(mover);
+ endProfile(ProfileKey::TraceValues);
+
+ startProfile(ProfileKey::TraceCells);
+ sb.traceCells(mover);
+ endProfile(ProfileKey::TraceCells);
+
+ startProfile(ProfileKey::TraceSlots);
+ sb.traceSlots(mover);
+ endProfile(ProfileKey::TraceSlots);
+
+ startProfile(ProfileKey::TraceGenericEntries);
+ sb.traceGenericEntries(&mover);
+ endProfile(ProfileKey::TraceGenericEntries);
+
+ startProfile(ProfileKey::MarkRuntime);
+ gc->traceRuntimeForMinorGC(&mover, session);
+ endProfile(ProfileKey::MarkRuntime);
+
+ startProfile(ProfileKey::MarkDebugger);
+ {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
+ DebugAPI::traceAllForMovingGC(&mover);
+ }
+ endProfile(ProfileKey::MarkDebugger);
+
+ startProfile(ProfileKey::SweepCaches);
+ gc->purgeRuntimeForMinorGC();
+ endProfile(ProfileKey::SweepCaches);
+
+ // Most of the work is done here. This loop iterates over objects that have
+ // been moved to the major heap. If these objects have any outgoing pointers
+ // to the nursery, then those nursery objects get moved as well, until no
+ // objects are left to move. That is, we iterate to a fixed point.
+ startProfile(ProfileKey::CollectToFP);
+ collectToFixedPoint(mover);
+ endProfile(ProfileKey::CollectToFP);
+
+ // Sweep to update any pointers to nursery objects that have now been
+ // tenured.
+ startProfile(ProfileKey::Sweep);
+ sweep(&mover);
+ endProfile(ProfileKey::Sweep);
+
+ // Update any slot or element pointers whose destination has been tenured.
+ startProfile(ProfileKey::UpdateJitActivations);
+ js::jit::UpdateJitActivationsForMinorGC(rt);
+ forwardedBuffers.clearAndCompact();
+ endProfile(ProfileKey::UpdateJitActivations);
+
+ startProfile(ProfileKey::ObjectsTenuredCallback);
+ gc->callObjectsTenuredCallback();
+ endProfile(ProfileKey::ObjectsTenuredCallback);
+
+ // Sweep.
+ startProfile(ProfileKey::FreeMallocedBuffers);
+ gc->queueBuffersForFreeAfterMinorGC(mallocedBuffers);
+ mallocedBufferBytes = 0;
+ endProfile(ProfileKey::FreeMallocedBuffers);
+
+ startProfile(ProfileKey::ClearNursery);
+ clear();
+ endProfile(ProfileKey::ClearNursery);
+
+ startProfile(ProfileKey::ClearStoreBuffer);
+ gc->storeBuffer().clear();
+ endProfile(ProfileKey::ClearStoreBuffer);
+
+ // Purge the StringToAtomCache. This has to happen at the end because the
+ // cache is used when tenuring strings.
+ startProfile(ProfileKey::PurgeStringToAtomCache);
+ runtime()->caches().stringToAtomCache.purge();
+ endProfile(ProfileKey::PurgeStringToAtomCache);
+
+ // Make sure hashtables have been updated after the collection.
+ startProfile(ProfileKey::CheckHashTables);
+#ifdef JS_GC_ZEAL
+ if (gc->hasZealMode(ZealMode::CheckHashTablesOnMinorGC)) {
+ gc->checkHashTablesAfterMovingGC();
+ }
+#endif
+ endProfile(ProfileKey::CheckHashTables);
+
+ return {mover.tenuredSize, mover.tenuredCells};
+}
+
+void js::Nursery::doPretenuring(JSRuntime* rt, JS::GCReason reason,
+ bool highPromotionRate) {
+  // If the promotion rate is high, or we exhausted the store buffer with
+  // pointers to nursery things (which forces a collection well before the
+  // nursery is full), look for object groups that are getting promoted
+  // excessively and try to pretenure them.
+
+ bool pretenureStr = false;
+ bool pretenureBigInt = false;
+ if (tunables().attemptPretenuring()) {
+ // Should we check for pretenuring regardless of GCReason?
+ // Use 3MB as the threshold so the pretenuring can be applied on Android.
+ bool pretenureAll =
+ highPromotionRate && previousGC.nurseryUsedBytes >= 3 * 1024 * 1024;
+
+ pretenureStr =
+ pretenureAll ||
+ IsFullStoreBufferReason(reason, JS::GCReason::FULL_CELL_PTR_STR_BUFFER);
+ pretenureBigInt =
+ pretenureAll || IsFullStoreBufferReason(
+ reason, JS::GCReason::FULL_CELL_PTR_BIGINT_BUFFER);
+ }
+
+ mozilla::Maybe<AutoGCSession> session;
+ uint32_t numStringsTenured = 0;
+ uint32_t numNurseryStringRealmsDisabled = 0;
+ uint32_t numBigIntsTenured = 0;
+ uint32_t numNurseryBigIntRealmsDisabled = 0;
+ for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
+    // For some tests in JetStream2 and Kraken, the tenuredRate is high but the
+    // number of allocated strings is low. So we only calculate the tenuredRate
+    // when enough strings have been allocated.
+ bool allocThreshold = zone->nurseryAllocatedStrings > 30000;
+ uint64_t zoneTenuredStrings =
+ zone->stringStats.ref().liveNurseryStrings -
+ zone->previousGCStringStats.ref().liveNurseryStrings;
+ double tenuredRate =
+ allocThreshold
+ ? double(zoneTenuredStrings) / double(zone->nurseryAllocatedStrings)
+ : 0.0;
+ bool disableNurseryStrings =
+ pretenureStr && zone->allocNurseryStrings &&
+ tenuredRate > tunables().pretenureStringThreshold();
+ bool disableNurseryBigInts = pretenureBigInt && zone->allocNurseryBigInts &&
+ zone->tenuredBigInts >= 30 * 1000;
+ if (disableNurseryStrings || disableNurseryBigInts) {
+ if (!session.isSome()) {
+ session.emplace(gc, JS::HeapState::MinorCollecting);
+ }
+ CancelOffThreadIonCompile(zone);
+ bool preserving = zone->isPreservingCode();
+ zone->setPreservingCode(false);
+ zone->discardJitCode(rt->defaultFreeOp());
+ zone->setPreservingCode(preserving);
+ for (RealmsInZoneIter r(zone); !r.done(); r.next()) {
+ if (jit::JitRealm* jitRealm = r->jitRealm()) {
+ jitRealm->discardStubs();
+ if (disableNurseryStrings) {
+ jitRealm->setStringsCanBeInNursery(false);
+ numNurseryStringRealmsDisabled++;
+ }
+ if (disableNurseryBigInts) {
+ numNurseryBigIntRealmsDisabled++;
+ }
+ }
+ }
+ if (disableNurseryStrings) {
+ zone->allocNurseryStrings = false;
+ }
+ if (disableNurseryBigInts) {
+ zone->allocNurseryBigInts = false;
+ }
+ }
+ numStringsTenured += zoneTenuredStrings;
+ numBigIntsTenured += zone->tenuredBigInts;
+ zone->tenuredBigInts = 0;
+ zone->nurseryAllocatedStrings = 0;
+ }
+ session.reset(); // End the minor GC session, if running one.
+ stats().setStat(gcstats::STAT_NURSERY_STRING_REALMS_DISABLED,
+ numNurseryStringRealmsDisabled);
+ stats().setStat(gcstats::STAT_STRINGS_TENURED, numStringsTenured);
+ stats().setStat(gcstats::STAT_NURSERY_BIGINT_REALMS_DISABLED,
+ numNurseryBigIntRealmsDisabled);
+ stats().setStat(gcstats::STAT_BIGINTS_TENURED, numBigIntsTenured);
+}
+
+bool js::Nursery::registerMallocedBuffer(void* buffer, size_t nbytes) {
+ MOZ_ASSERT(buffer);
+ MOZ_ASSERT(nbytes > 0);
+ if (!mallocedBuffers.putNew(buffer)) {
+ return false;
+ }
+
+ mallocedBufferBytes += nbytes;
+ if (MOZ_UNLIKELY(mallocedBufferBytes > capacity() * 8)) {
+ requestMinorGC(JS::GCReason::NURSERY_MALLOC_BUFFERS);
+ }
+
+ return true;
+}
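+
+// Note on the trigger above (illustrative figures): with, say, a 16MB nursery
+// capacity, registering more than 128MB of external malloced buffer memory
+// requests an eager minor GC; after that collection, buffers whose owners were
+// not tenured are freed and the rest have been unregistered.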
+
+void js::Nursery::sweep(JSTracer* trc) {
+ // Sweep unique IDs first before we sweep any tables that may be keyed based
+ // on them.
+ for (Cell* cell : cellsWithUid_) {
+ auto* obj = static_cast<JSObject*>(cell);
+ if (!IsForwarded(obj)) {
+ obj->nurseryZone()->removeUniqueId(obj);
+ } else {
+ JSObject* dst = Forwarded(obj);
+ obj->nurseryZone()->transferUniqueId(dst, obj);
+ }
+ }
+ cellsWithUid_.clear();
+
+ for (CompartmentsIter c(runtime()); !c.done(); c.next()) {
+ c->sweepAfterMinorGC(trc);
+ }
+
+ for (ZonesIter zone(trc->runtime(), SkipAtoms); !zone.done(); zone.next()) {
+ zone->sweepAfterMinorGC(trc);
+ }
+
+ sweepDictionaryModeObjects();
+ sweepMapAndSetObjects();
+}
+
+void js::Nursery::clear() {
+ // Poison the nursery contents so touching a freed object will crash.
+ unsigned firstClearChunk;
+ if (gc->hasZealMode(ZealMode::GenerationalGC)) {
+    // Poison all the chunks used in this cycle. The new start chunk is
+    // re-poisoned in Nursery::collect() but there's no point optimising that
+    // in this case.
+ firstClearChunk = currentStartChunk_;
+ } else {
+    // In normal mode we start at the second chunk; the first one will be used
+    // in the next cycle and poisoned in Nursery::collect().
+ MOZ_ASSERT(currentStartChunk_ == 0);
+ firstClearChunk = 1;
+ }
+ for (unsigned i = firstClearChunk; i < currentChunk_; ++i) {
+ chunk(i).poisonAfterEvict();
+ }
+ // Clear only the used part of the chunk because that's the part we touched,
+ // but only if it's not going to be re-used immediately (>= firstClearChunk).
+ if (currentChunk_ >= firstClearChunk) {
+ chunk(currentChunk_)
+ .poisonAfterEvict(position() - chunk(currentChunk_).start());
+ }
+
+ // Reset the start chunk & position if we're not in this zeal mode, or we're
+ // in it and close to the end of the nursery.
+ MOZ_ASSERT(maxChunkCount() > 0);
+ if (!gc->hasZealMode(ZealMode::GenerationalGC) ||
+ (gc->hasZealMode(ZealMode::GenerationalGC) &&
+ currentChunk_ + 1 == maxChunkCount())) {
+ setCurrentChunk(0);
+ }
+
+ // Set current start position for isEmpty checks.
+ setStartPosition();
+}
+
+size_t js::Nursery::spaceToEnd(unsigned chunkCount) const {
+ if (chunkCount == 0) {
+ return 0;
+ }
+
+ unsigned lastChunk = chunkCount - 1;
+
+ MOZ_ASSERT(lastChunk >= currentStartChunk_);
+ MOZ_ASSERT(currentStartPosition_ - chunk(currentStartChunk_).start() <=
+ NurseryChunkUsableSize);
+
+ size_t bytes;
+
+ if (chunkCount != 1) {
+    // In the general case we have to add:
+    //  + the bytes used in the first chunk, which may be less than the total
+    //    size of a chunk since in some zeal modes we start the first chunk at
+    //    some later position (currentStartPosition_);
+    //  + the size of all the other chunks.
+ bytes = (chunk(currentStartChunk_).end() - currentStartPosition_) +
+ ((lastChunk - currentStartChunk_) * ChunkSize);
+ } else {
+    // In sub-chunk mode (though this also applies whenever chunkCount == 1) we
+    // need to use currentEnd_ since it may not refer to a full chunk.
+ bytes = currentEnd_ - currentStartPosition_;
+ }
+
+ MOZ_ASSERT(bytes <= maxChunkCount() * ChunkSize);
+
+ return bytes;
+}
+
+MOZ_ALWAYS_INLINE void js::Nursery::setCurrentChunk(unsigned chunkno) {
+ MOZ_ASSERT(chunkno < allocatedChunkCount());
+
+ currentChunk_ = chunkno;
+ position_ = chunk(chunkno).start();
+ setCurrentEnd();
+}
+
+void js::Nursery::poisonAndInitCurrentChunk(size_t extent) {
+ if (gc->hasZealMode(ZealMode::GenerationalGC) || !isSubChunkMode()) {
+ chunk(currentChunk_).poisonAndInit(runtime());
+ } else {
+ extent = std::min(capacity_, extent);
+ chunk(currentChunk_).poisonAndInit(runtime(), extent);
+ }
+}
+
+MOZ_ALWAYS_INLINE void js::Nursery::setCurrentEnd() {
+ MOZ_ASSERT_IF(isSubChunkMode(),
+ currentChunk_ == 0 && currentEnd_ <= chunk(0).end());
+ currentEnd_ =
+ uintptr_t(&chunk(currentChunk_)) + std::min(capacity_, ChunkSize);
+ if (canAllocateStrings_) {
+ currentStringEnd_ = currentEnd_;
+ }
+ if (canAllocateBigInts_) {
+ currentBigIntEnd_ = currentEnd_;
+ }
+}
+
+bool js::Nursery::allocateNextChunk(const unsigned chunkno,
+ AutoLockGCBgAlloc& lock) {
+ const unsigned priorCount = allocatedChunkCount();
+ const unsigned newCount = priorCount + 1;
+
+ MOZ_ASSERT((chunkno == currentChunk_ + 1) ||
+ (chunkno == 0 && allocatedChunkCount() == 0));
+ MOZ_ASSERT(chunkno == allocatedChunkCount());
+ MOZ_ASSERT(chunkno < HowMany(capacity(), ChunkSize));
+
+ if (!chunks_.resize(newCount)) {
+ return false;
+ }
+
+ TenuredChunk* newChunk;
+ newChunk = gc->getOrAllocChunk(lock);
+ if (!newChunk) {
+ chunks_.shrinkTo(priorCount);
+ return false;
+ }
+
+ chunks_[chunkno] = NurseryChunk::fromChunk(newChunk);
+ return true;
+}
+
+MOZ_ALWAYS_INLINE void js::Nursery::setStartPosition() {
+ currentStartChunk_ = currentChunk_;
+ currentStartPosition_ = position();
+}
+
+void js::Nursery::maybeResizeNursery(JSGCInvocationKind kind,
+ JS::GCReason reason) {
+#ifdef JS_GC_ZEAL
+  // This zeal mode disables nursery resizing.
+ if (gc->hasZealMode(ZealMode::GenerationalGC)) {
+ return;
+ }
+#endif
+
+ decommitTask.join();
+
+ size_t newCapacity =
+ mozilla::Clamp(targetSize(kind, reason), tunables().gcMinNurseryBytes(),
+ tunables().gcMaxNurseryBytes());
+
+ MOZ_ASSERT(roundSize(newCapacity) == newCapacity);
+
+ if (newCapacity > capacity()) {
+ growAllocableSpace(newCapacity);
+ } else if (newCapacity < capacity()) {
+ shrinkAllocableSpace(newCapacity);
+ }
+
+ AutoLockHelperThreadState lock;
+ if (!decommitTask.isEmpty(lock)) {
+ decommitTask.startOrRunIfIdle(lock);
+ }
+}
+
+static inline double ClampDouble(double value, double min, double max) {
+ MOZ_ASSERT(!std::isnan(value) && !std::isnan(min) && !std::isnan(max));
+ MOZ_ASSERT(max >= min);
+
+ if (value <= min) {
+ return min;
+ }
+
+ if (value >= max) {
+ return max;
+ }
+
+ return value;
+}
+
+size_t js::Nursery::targetSize(JSGCInvocationKind kind, JS::GCReason reason) {
+  // Shrink the nursery as much as possible if shrinking was requested or we
+  // are in a low-memory situation.
+ if (kind == GC_SHRINK || gc::IsOOMReason(reason) ||
+ gc->systemHasLowMemory()) {
+ clearRecentGrowthData();
+ return 0;
+ }
+
+ // Don't resize the nursery during shutdown.
+ if (gc::IsShutdownReason(reason)) {
+ clearRecentGrowthData();
+ return capacity();
+ }
+
+ TimeStamp now = ReallyNow();
+
+ // If the nursery is completely unused then minimise it.
+ if (hasRecentGrowthData && previousGC.nurseryUsedBytes == 0 &&
+ now - lastCollectionEndTime() > UnderuseTimeout &&
+ !js::SupportDifferentialTesting()) {
+ clearRecentGrowthData();
+ return 0;
+ }
+
+ // Calculate the fraction of the nursery promoted out of its entire
+ // capacity. This gives better results than using the promotion rate (based on
+ // the amount of nursery used) in cases where we collect before the nursery is
+ // full.
+ double fractionPromoted =
+ double(previousGC.tenuredBytes) / double(previousGC.nurseryCapacity);
+
+ // Calculate the fraction of time spent collecting the nursery.
+ double timeFraction = 0.0;
+ if (hasRecentGrowthData && !js::SupportDifferentialTesting()) {
+ TimeDuration collectorTime = now - collectionStartTime();
+ TimeDuration totalTime = now - lastCollectionEndTime();
+ timeFraction = collectorTime.ToSeconds() / totalTime.ToSeconds();
+ }
+
+ // Adjust the nursery size to try to achieve a target promotion rate and
+ // collector time goals.
+ static const double PromotionGoal = 0.02;
+ static const double TimeGoal = 0.01;
+ double growthFactor =
+ std::max(fractionPromoted / PromotionGoal, timeFraction / TimeGoal);
+
+ // Limit the range of the growth factor to prevent transient high promotion
+ // rates from affecting the nursery size too far into the future.
+ static const double GrowthRange = 2.0;
+ growthFactor = ClampDouble(growthFactor, 1.0 / GrowthRange, GrowthRange);
+
+ // Use exponential smoothing on the desired growth rate to take into account
+ // the promotion rate from recent previous collections.
+ if (hasRecentGrowthData &&
+ now - lastCollectionEndTime() < TimeDuration::FromMilliseconds(200) &&
+ !js::SupportDifferentialTesting()) {
+ growthFactor = 0.75 * smoothedGrowthFactor + 0.25 * growthFactor;
+ }
+
+ hasRecentGrowthData = true;
+ smoothedGrowthFactor = growthFactor;
+
+ // Leave size untouched if we are close to the promotion goal.
+ static const double GoalWidth = 1.5;
+ if (growthFactor > (1.0 / GoalWidth) && growthFactor < GoalWidth) {
+ return capacity();
+ }
+
+ // The multiplication below cannot overflow because growthFactor is at
+ // most two.
+ MOZ_ASSERT(growthFactor <= 2.0);
+ MOZ_ASSERT(capacity() < SIZE_MAX / 2);
+
+ return roundSize(size_t(double(capacity()) * growthFactor));
+}
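+
+// Worked example for targetSize() (illustrative figures, ignoring the
+// smoothing step): if 8% of the previous nursery capacity was promoted,
+// fractionPromoted / PromotionGoal is 4.0; clamping to GrowthRange gives a
+// growth factor of 2.0, which is outside the GoalWidth band, so the nursery
+// roughly doubles, subject to roundSize() here and the min/max clamping in
+// maybeResizeNursery().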
+
+void js::Nursery::clearRecentGrowthData() {
+ if (js::SupportDifferentialTesting()) {
+ return;
+ }
+
+ hasRecentGrowthData = false;
+ smoothedGrowthFactor = 1.0;
+}
+
+/* static */
+size_t js::Nursery::roundSize(size_t size) {
+ size_t step = size >= ChunkSize ? ChunkSize : SystemPageSize();
+ size = Round(size, step);
+
+ MOZ_ASSERT(size >= SystemPageSize());
+
+ return size;
+}
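+
+// For example, a requested sub-chunk capacity is rounded to a multiple of the
+// system page size, while any capacity of at least ChunkSize is rounded to a
+// whole number of chunks.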
+
+void js::Nursery::growAllocableSpace(size_t newCapacity) {
+ MOZ_ASSERT_IF(!isSubChunkMode(), newCapacity > currentChunk_ * ChunkSize);
+ MOZ_ASSERT(newCapacity <= tunables().gcMaxNurseryBytes());
+ MOZ_ASSERT(newCapacity > capacity());
+
+ if (!decommitTask.reserveSpaceForBytes(newCapacity)) {
+ return;
+ }
+
+ if (isSubChunkMode()) {
+ MOZ_ASSERT(currentChunk_ == 0);
+
+ // The remainder of the chunk may have been decommitted.
+ if (!chunk(0).markPagesInUseHard(std::min(newCapacity, ChunkSize))) {
+ // The OS won't give us the memory we need, we can't grow.
+ return;
+ }
+
+ // The capacity has changed and since we were in sub-chunk mode we need to
+ // update the poison values / asan information for the now-valid region of
+ // this chunk.
+ size_t size = std::min(newCapacity, ChunkSize) - capacity();
+ chunk(0).poisonRange(capacity(), size, JS_FRESH_NURSERY_PATTERN,
+ MemCheckKind::MakeUndefined);
+ }
+
+ capacity_ = newCapacity;
+
+ setCurrentEnd();
+}
+
+void js::Nursery::freeChunksFrom(const unsigned firstFreeChunk) {
+ MOZ_ASSERT(firstFreeChunk < chunks_.length());
+
+  // The loop below may need to skip the first chunk, so track the first chunk
+  // to decommit in a local variable that we can modify.
+ unsigned firstChunkToDecommit = firstFreeChunk;
+
+ if ((firstChunkToDecommit == 0) && isSubChunkMode()) {
+    // Part of the first chunk may be hard-decommitted; un-decommit it so that
+    // the GC's normal chunk-handling doesn't segfault.
+ MOZ_ASSERT(currentChunk_ == 0);
+ if (!chunk(0).markPagesInUseHard(ChunkSize)) {
+ // Free the chunk if we can't allocate its pages.
+ UnmapPages(static_cast<void*>(&chunk(0)), ChunkSize);
+ firstChunkToDecommit = 1;
+ }
+ }
+
+ {
+ AutoLockHelperThreadState lock;
+ for (size_t i = firstChunkToDecommit; i < chunks_.length(); i++) {
+ decommitTask.queueChunk(chunks_[i], lock);
+ }
+ }
+
+ chunks_.shrinkTo(firstFreeChunk);
+}
+
+void js::Nursery::shrinkAllocableSpace(size_t newCapacity) {
+#ifdef JS_GC_ZEAL
+ if (gc->hasZealMode(ZealMode::GenerationalGC)) {
+ return;
+ }
+#endif
+
+  // Don't shrink the nursery to zero (use Nursery::disable() instead).
+  // This can't happen here because the new capacity has already been rounded
+  // and clamped to at least the minimum nursery size in maybeResizeNursery().
+ MOZ_ASSERT(newCapacity != 0);
+ // Don't attempt to shrink it to the same size.
+ if (newCapacity == capacity_) {
+ return;
+ }
+ MOZ_ASSERT(newCapacity < capacity_);
+
+ unsigned newCount = HowMany(newCapacity, ChunkSize);
+ if (newCount < allocatedChunkCount()) {
+ freeChunksFrom(newCount);
+ }
+
+ size_t oldCapacity = capacity_;
+ capacity_ = newCapacity;
+
+ setCurrentEnd();
+
+ if (isSubChunkMode()) {
+ MOZ_ASSERT(currentChunk_ == 0);
+ size_t size = std::min(oldCapacity, ChunkSize) - newCapacity;
+ chunk(0).poisonRange(newCapacity, size, JS_SWEPT_NURSERY_PATTERN,
+ MemCheckKind::MakeNoAccess);
+
+ AutoLockHelperThreadState lock;
+ decommitTask.queueRange(capacity_, chunk(0), lock);
+ }
+}
+
+bool js::Nursery::queueDictionaryModeObjectToSweep(NativeObject* obj) {
+ MOZ_ASSERT(IsInsideNursery(obj));
+ return dictionaryModeObjects_.append(obj);
+}
+
+uintptr_t js::Nursery::currentEnd() const {
+ // These are separate asserts because it can be useful to see which one
+ // failed.
+ MOZ_ASSERT_IF(isSubChunkMode(), currentChunk_ == 0);
+ MOZ_ASSERT_IF(isSubChunkMode(), currentEnd_ <= chunk(currentChunk_).end());
+ MOZ_ASSERT_IF(!isSubChunkMode(), currentEnd_ == chunk(currentChunk_).end());
+ MOZ_ASSERT(currentEnd_ != chunk(currentChunk_).start());
+ return currentEnd_;
+}
+
+gcstats::Statistics& js::Nursery::stats() const { return gc->stats(); }
+
+MOZ_ALWAYS_INLINE const js::gc::GCSchedulingTunables& js::Nursery::tunables()
+ const {
+ return gc->tunables;
+}
+
+bool js::Nursery::isSubChunkMode() const {
+ return capacity() <= NurseryChunkUsableSize;
+}
+
+void js::Nursery::sweepDictionaryModeObjects() {
+ for (auto obj : dictionaryModeObjects_) {
+ if (!IsForwarded(obj)) {
+ obj->sweepDictionaryListPointer();
+ } else {
+ Forwarded(obj)->updateDictionaryListPointerAfterMinorGC(obj);
+ }
+ }
+ dictionaryModeObjects_.clear();
+}
+
+void js::Nursery::sweepMapAndSetObjects() {
+ auto fop = runtime()->defaultFreeOp();
+
+ for (auto mapobj : mapsWithNurseryMemory_) {
+ MapObject::sweepAfterMinorGC(fop, mapobj);
+ }
+ mapsWithNurseryMemory_.clearAndFree();
+
+ for (auto setobj : setsWithNurseryMemory_) {
+ SetObject::sweepAfterMinorGC(fop, setobj);
+ }
+ setsWithNurseryMemory_.clearAndFree();
+}
+
+JS_PUBLIC_API void JS::EnableNurseryStrings(JSContext* cx) {
+ AutoEmptyNursery empty(cx);
+ ReleaseAllJITCode(cx->defaultFreeOp());
+ cx->runtime()->gc.nursery().enableStrings();
+}
+
+JS_PUBLIC_API void JS::DisableNurseryStrings(JSContext* cx) {
+ AutoEmptyNursery empty(cx);
+ ReleaseAllJITCode(cx->defaultFreeOp());
+ cx->runtime()->gc.nursery().disableStrings();
+}
+
+JS_PUBLIC_API void JS::EnableNurseryBigInts(JSContext* cx) {
+ AutoEmptyNursery empty(cx);
+ ReleaseAllJITCode(cx->defaultFreeOp());
+ cx->runtime()->gc.nursery().enableBigInts();
+}
+
+JS_PUBLIC_API void JS::DisableNurseryBigInts(JSContext* cx) {
+ AutoEmptyNursery empty(cx);
+ ReleaseAllJITCode(cx->defaultFreeOp());
+ cx->runtime()->gc.nursery().disableBigInts();
+}
diff --git a/js/src/gc/Nursery.h b/js/src/gc/Nursery.h
new file mode 100644
index 0000000000..740408f594
--- /dev/null
+++ b/js/src/gc/Nursery.h
@@ -0,0 +1,775 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sw=2 et tw=80:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Nursery_h
+#define gc_Nursery_h
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/TimeStamp.h"
+
+#include "gc/GCParallelTask.h"
+#include "gc/Heap.h"
+#include "js/AllocPolicy.h"
+#include "js/Class.h"
+#include "js/HeapAPI.h"
+#include "js/TracingAPI.h"
+#include "js/TypeDecls.h"
+#include "js/Vector.h"
+#include "util/Text.h"
+
+#define FOR_EACH_NURSERY_PROFILE_TIME(_) \
+ /* Key Header text */ \
+ _(Total, "total") \
+ _(TraceValues, "mkVals") \
+ _(TraceCells, "mkClls") \
+ _(TraceSlots, "mkSlts") \
+ _(TraceWholeCells, "mcWCll") \
+ _(TraceGenericEntries, "mkGnrc") \
+ _(CheckHashTables, "ckTbls") \
+ _(MarkRuntime, "mkRntm") \
+ _(MarkDebugger, "mkDbgr") \
+ _(SweepCaches, "swpCch") \
+ _(CollectToFP, "collct") \
+ _(ObjectsTenuredCallback, "tenCB") \
+ _(Sweep, "sweep") \
+ _(UpdateJitActivations, "updtIn") \
+ _(FreeMallocedBuffers, "frSlts") \
+ _(ClearStoreBuffer, "clrSB") \
+ _(ClearNursery, "clear") \
+ _(PurgeStringToAtomCache, "pStoA") \
+ _(Pretenure, "pretnr")
+
+template <typename T>
+class SharedMem;
+class JSDependentString;
+
+namespace js {
+
+struct StringStats;
+class AutoLockGCBgAlloc;
+class ObjectElements;
+class PlainObject;
+class NativeObject;
+class Nursery;
+struct NurseryChunk;
+class HeapSlot;
+class JSONPrinter;
+class MapObject;
+class SetObject;
+
+namespace gc {
+class AutoMaybeStartBackgroundAllocation;
+class AutoTraceSession;
+struct Cell;
+class GCSchedulingTunables;
+class MinorCollectionTracer;
+class RelocationOverlay;
+class StringRelocationOverlay;
+enum class AllocKind : uint8_t;
+class TenuredCell;
+} // namespace gc
+
+namespace jit {
+class MacroAssembler;
+} // namespace jit
+
+class NurseryDecommitTask : public GCParallelTask {
+ public:
+ explicit NurseryDecommitTask(gc::GCRuntime* gc);
+ bool reserveSpaceForBytes(size_t nbytes);
+
+ bool isEmpty(const AutoLockHelperThreadState& lock) const;
+
+ void queueChunk(NurseryChunk* chunk, const AutoLockHelperThreadState& lock);
+ void queueRange(size_t newCapacity, NurseryChunk& chunk,
+ const AutoLockHelperThreadState& lock);
+
+ private:
+ using NurseryChunkVector = Vector<NurseryChunk*, 0, SystemAllocPolicy>;
+
+ void run(AutoLockHelperThreadState& lock) override;
+
+ NurseryChunkVector& chunksToDecommit() { return chunksToDecommit_.ref(); }
+ const NurseryChunkVector& chunksToDecommit() const {
+ return chunksToDecommit_.ref();
+ }
+
+ MainThreadOrGCTaskData<NurseryChunkVector> chunksToDecommit_;
+
+ MainThreadOrGCTaskData<NurseryChunk*> partialChunk;
+ MainThreadOrGCTaskData<size_t> partialCapacity;
+};
+
+class TenuringTracer final : public GenericTracer {
+ friend class Nursery;
+ Nursery& nursery_;
+
+ // Amount of data moved to the tenured generation during collection.
+ size_t tenuredSize;
+ // Number of cells moved to the tenured generation.
+ size_t tenuredCells;
+
+ // These lists are threaded through the Nursery using the space from
+ // already moved things. The lists are used to fix up the moved things and
+ // to find things held live by intra-Nursery pointers.
+ gc::RelocationOverlay* objHead;
+ gc::RelocationOverlay** objTail;
+ gc::StringRelocationOverlay* stringHead;
+ gc::StringRelocationOverlay** stringTail;
+ gc::RelocationOverlay* bigIntHead;
+ gc::RelocationOverlay** bigIntTail;
+
+ TenuringTracer(JSRuntime* rt, Nursery* nursery);
+
+ JSObject* onObjectEdge(JSObject* obj) override;
+ JSString* onStringEdge(JSString* str) override;
+ JS::Symbol* onSymbolEdge(JS::Symbol* sym) override;
+ JS::BigInt* onBigIntEdge(JS::BigInt* bi) override;
+ js::BaseScript* onScriptEdge(BaseScript* script) override;
+ js::Shape* onShapeEdge(Shape* shape) override;
+ js::RegExpShared* onRegExpSharedEdge(RegExpShared* shared) override;
+ js::ObjectGroup* onObjectGroupEdge(ObjectGroup* group) override;
+ js::BaseShape* onBaseShapeEdge(BaseShape* base) override;
+ js::jit::JitCode* onJitCodeEdge(jit::JitCode* code) override;
+ js::Scope* onScopeEdge(Scope* scope) override;
+
+ public:
+ Nursery& nursery() { return nursery_; }
+
+ template <typename T>
+ void traverse(T** thingp);
+ void traverse(JS::Value* thingp);
+
+ // The store buffers need to be able to call these directly.
+ void traceObject(JSObject* src);
+ void traceObjectSlots(NativeObject* nobj, uint32_t start, uint32_t end);
+ void traceSlots(JS::Value* vp, uint32_t nslots);
+ void traceString(JSString* src);
+ void traceBigInt(JS::BigInt* src);
+
+ private:
+ inline void insertIntoObjectFixupList(gc::RelocationOverlay* entry);
+ inline void insertIntoStringFixupList(gc::StringRelocationOverlay* entry);
+ inline void insertIntoBigIntFixupList(gc::RelocationOverlay* entry);
+
+ template <typename T>
+ inline T* allocTenured(JS::Zone* zone, gc::AllocKind kind);
+ JSString* allocTenuredString(JSString* src, JS::Zone* zone,
+ gc::AllocKind dstKind);
+
+ inline JSObject* movePlainObjectToTenured(PlainObject* src);
+ JSObject* moveToTenuredSlow(JSObject* src);
+ JSString* moveToTenured(JSString* src);
+ JS::BigInt* moveToTenured(JS::BigInt* src);
+
+ size_t moveElementsToTenured(NativeObject* dst, NativeObject* src,
+ gc::AllocKind dstKind);
+ size_t moveSlotsToTenured(NativeObject* dst, NativeObject* src);
+ size_t moveStringToTenured(JSString* dst, JSString* src,
+ gc::AllocKind dstKind);
+ size_t moveBigIntToTenured(JS::BigInt* dst, JS::BigInt* src,
+ gc::AllocKind dstKind);
+
+ void traceSlots(JS::Value* vp, JS::Value* end);
+};
+
+// Classes with JSCLASS_SKIP_NURSERY_FINALIZE or Wrapper classes with
+// CROSS_COMPARTMENT flags will not have their finalizer called if they are
+// nursery allocated and not promoted to the tenured heap. The finalizers for
+// these classes must do nothing except free data which was allocated via
+// Nursery::allocateBuffer.
+inline bool CanNurseryAllocateFinalizedClass(const JSClass* const clasp) {
+ MOZ_ASSERT(clasp->hasFinalize());
+ return clasp->flags & JSCLASS_SKIP_NURSERY_FINALIZE;
+}
+
+class Nursery {
+ public:
+ static const size_t Alignment = gc::ChunkSize;
+ static const size_t ChunkShift = gc::ChunkShift;
+
+ using BufferRelocationOverlay = void*;
+ using BufferSet = HashSet<void*, PointerHasher<void*>, SystemAllocPolicy>;
+
+ explicit Nursery(gc::GCRuntime* gc);
+ ~Nursery();
+
+ MOZ_MUST_USE bool init(AutoLockGCBgAlloc& lock);
+
+ // Number of allocated (ready to use) chunks.
+ unsigned allocatedChunkCount() const { return chunks_.length(); }
+
+  // Total number of chunks and the capacity of the nursery. Chunks will be
+  // lazily allocated and added to the chunks array up to this limit; after
+  // that the nursery must be collected. This limit may be raised during
+  // collection.
+ unsigned maxChunkCount() const {
+ MOZ_ASSERT(capacity());
+ return HowMany(capacity(), gc::ChunkSize);
+ }
+
+ void enable();
+ void disable();
+ bool isEnabled() const { return capacity() != 0; }
+
+ void enableStrings();
+ void disableStrings();
+ bool canAllocateStrings() const { return canAllocateStrings_; }
+
+ void enableBigInts();
+ void disableBigInts();
+ bool canAllocateBigInts() const { return canAllocateBigInts_; }
+
+ // Return true if no allocations have been made since the last collection.
+ bool isEmpty() const;
+
+ // Check whether an arbitrary pointer is within the nursery. This is
+ // slower than IsInsideNursery(Cell*), but works on all types of pointers.
+ MOZ_ALWAYS_INLINE bool isInside(gc::Cell* cellp) const = delete;
+ MOZ_ALWAYS_INLINE bool isInside(const void* p) const {
+ for (auto chunk : chunks_) {
+ if (uintptr_t(p) - uintptr_t(chunk) < gc::ChunkSize) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ template <typename T>
+ inline bool isInside(const SharedMem<T>& p) const;
+
+ // Allocate and return a pointer to a new GC object with its |slots|
+ // pointer pre-filled. Returns nullptr if the Nursery is full.
+ JSObject* allocateObject(JSContext* cx, size_t size, size_t numDynamic,
+ const JSClass* clasp);
+
+ // Allocate and return a pointer to a new GC thing. Returns nullptr if the
+ // Nursery is full.
+ gc::Cell* allocateCell(JS::Zone* zone, size_t size, JS::TraceKind kind);
+
+ gc::Cell* allocateBigInt(JS::Zone* zone, size_t size) {
+ return allocateCell(zone, size, JS::TraceKind::BigInt);
+ }
+ gc::Cell* allocateString(JS::Zone* zone, size_t size);
+
+ static size_t nurseryCellHeaderSize() {
+ return sizeof(gc::NurseryCellHeader);
+ }
+
+ // Allocate a buffer for a given zone, using the nursery if possible.
+ void* allocateBuffer(JS::Zone* zone, size_t nbytes);
+
+ // Allocate a buffer for a given object, using the nursery if possible and
+ // obj is in the nursery.
+ void* allocateBuffer(JSObject* obj, size_t nbytes);
+
+ // Allocate a buffer for a given object, always using the nursery if obj is
+ // in the nursery. The requested size must be less than or equal to
+ // MaxNurseryBufferSize.
+ void* allocateBufferSameLocation(JSObject* obj, size_t nbytes);
+
+ // Allocate a zero-initialized buffer for a given zone, using the nursery if
+ // possible. If the buffer isn't allocated in the nursery, the given arena is
+ // used.
+ void* allocateZeroedBuffer(JS::Zone* zone, size_t nbytes,
+ arena_id_t arena = js::MallocArena);
+
+ // Allocate a zero-initialized buffer for a given object, using the nursery if
+ // possible and obj is in the nursery. If the buffer isn't allocated in the
+ // nursery, the given arena is used.
+ void* allocateZeroedBuffer(JSObject* obj, size_t nbytes,
+ arena_id_t arena = js::MallocArena);
+
+ // Resize an existing buffer.
+ void* reallocateBuffer(JS::Zone* zone, gc::Cell* cell, void* oldBuffer,
+ size_t oldBytes, size_t newBytes);
+
+ // Allocate a digits buffer for a given BigInt, using the nursery if possible
+ // and |bi| is in the nursery.
+ void* allocateBuffer(JS::BigInt* bi, size_t nbytes);
+
+ // Free an object buffer.
+ void freeBuffer(void* buffer, size_t nbytes);
+
+ // The maximum number of bytes allowed to reside in nursery buffers.
+ static const size_t MaxNurseryBufferSize = 1024;
+
+ // Do a minor collection.
+ void collect(JSGCInvocationKind kind, JS::GCReason reason);
+
+ // If the thing at |*ref| in the Nursery has been forwarded, set |*ref| to
+ // the new location and return true. Otherwise return false and leave
+ // |*ref| unset.
+ MOZ_ALWAYS_INLINE MOZ_MUST_USE static bool getForwardedPointer(
+ js::gc::Cell** ref);
+
+ // Forward a slots/elements pointer stored in an Ion frame.
+ void forwardBufferPointer(uintptr_t* pSlotsElems);
+
+ inline void maybeSetForwardingPointer(JSTracer* trc, void* oldData,
+ void* newData, bool direct);
+ inline void setForwardingPointerWhileTenuring(void* oldData, void* newData,
+ bool direct);
+
+ // Register a malloced buffer that is held by a nursery object, which
+ // should be freed at the end of a minor GC. Buffers are unregistered when
+ // their owning objects are tenured.
+ MOZ_MUST_USE bool registerMallocedBuffer(void* buffer, size_t nbytes);
+
+ // Mark a malloced buffer as no longer needing to be freed.
+ void removeMallocedBuffer(void* buffer, size_t nbytes) {
+ MOZ_ASSERT(mallocedBuffers.has(buffer));
+ MOZ_ASSERT(nbytes > 0);
+ MOZ_ASSERT(mallocedBufferBytes >= nbytes);
+ mallocedBuffers.remove(buffer);
+ mallocedBufferBytes -= nbytes;
+ }
+
+ // Mark a malloced buffer as no longer needing to be freed during minor
+ // GC. There's no need to account for the size here since all remaining
+ // buffers will soon be freed.
+ void removeMallocedBufferDuringMinorGC(void* buffer) {
+ MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());
+ MOZ_ASSERT(mallocedBuffers.has(buffer));
+ mallocedBuffers.remove(buffer);
+ }
+
+ MOZ_MUST_USE bool addedUniqueIdToCell(gc::Cell* cell) {
+ MOZ_ASSERT(IsInsideNursery(cell));
+ MOZ_ASSERT(isEnabled());
+ return cellsWithUid_.append(cell);
+ }
+
+ MOZ_MUST_USE bool queueDictionaryModeObjectToSweep(NativeObject* obj);
+
+ size_t sizeOfMallocedBuffers(mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t total = 0;
+ for (BufferSet::Range r = mallocedBuffers.all(); !r.empty(); r.popFront()) {
+ total += mallocSizeOf(r.front());
+ }
+ total += mallocedBuffers.shallowSizeOfExcludingThis(mallocSizeOf);
+ return total;
+ }
+
+  // The number of bytes from the start position to the end of the nursery.
+  // Pass maxChunkCount(), allocatedChunkCount() or chunkCountLimit() to
+  // calculate the nursery size, the current lazy-allocated size or the
+  // nursery limit respectively.
+ size_t spaceToEnd(unsigned chunkCount) const;
+
+ size_t capacity() const { return capacity_; }
+ size_t committed() const { return spaceToEnd(allocatedChunkCount()); }
+
+ // Used and free space both include chunk headers for that part of the
+ // nursery.
+ //
+ // usedSpace() + freeSpace() == capacity()
+ //
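+  // For illustration: with a four-chunk capacity, currentChunk_ == 1 and the
+  // allocation position halfway through that chunk, freeSpace() is roughly
+  // the remainder of chunk 1 plus the two chunks that follow it.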
+ MOZ_ALWAYS_INLINE size_t usedSpace() const {
+ return capacity() - freeSpace();
+ }
+ MOZ_ALWAYS_INLINE size_t freeSpace() const {
+ MOZ_ASSERT(isEnabled());
+ MOZ_ASSERT(currentEnd_ - position_ <= NurseryChunkUsableSize);
+ MOZ_ASSERT(currentChunk_ < maxChunkCount());
+ return (currentEnd_ - position_) +
+ (maxChunkCount() - currentChunk_ - 1) * gc::ChunkSize;
+ }
+
+#ifdef JS_GC_ZEAL
+ void enterZealMode();
+ void leaveZealMode();
+#endif
+
+ // Write profile time JSON on JSONPrinter.
+ void renderProfileJSON(JSONPrinter& json) const;
+
+ // Print header line for profile times.
+ static void printProfileHeader();
+
+ // Print total profile times on shutdown.
+ void printTotalProfileTimes();
+
+ void* addressOfPosition() const { return (void**)&position_; }
+ const void* addressOfCurrentEnd() const { return (void**)&currentEnd_; }
+ const void* addressOfCurrentStringEnd() const {
+ return (void*)&currentStringEnd_;
+ }
+ const void* addressOfCurrentBigIntEnd() const {
+ return (void*)&currentBigIntEnd_;
+ }
+
+ void requestMinorGC(JS::GCReason reason) const;
+
+ bool minorGCRequested() const {
+ return minorGCTriggerReason_ != JS::GCReason::NO_REASON;
+ }
+ JS::GCReason minorGCTriggerReason() const { return minorGCTriggerReason_; }
+ void clearMinorGCRequest() {
+ minorGCTriggerReason_ = JS::GCReason::NO_REASON;
+ }
+
+ bool shouldCollect() const;
+ bool isNearlyFull() const;
+ bool isUnderused() const;
+
+ bool enableProfiling() const { return enableProfiling_; }
+
+ bool addMapWithNurseryMemory(MapObject* obj) {
+ MOZ_ASSERT_IF(!mapsWithNurseryMemory_.empty(),
+ mapsWithNurseryMemory_.back() != obj);
+ return mapsWithNurseryMemory_.append(obj);
+ }
+ bool addSetWithNurseryMemory(SetObject* obj) {
+ MOZ_ASSERT_IF(!setsWithNurseryMemory_.empty(),
+ setsWithNurseryMemory_.back() != obj);
+ return setsWithNurseryMemory_.append(obj);
+ }
+
+ // The amount of space in the mapped nursery available to allocations.
+ static const size_t NurseryChunkUsableSize =
+ gc::ChunkSize - sizeof(gc::ChunkBase);
+
+ void joinDecommitTask() { decommitTask.join(); }
+
+ mozilla::TimeStamp collectionStartTime() {
+ return startTimes_[ProfileKey::Total];
+ }
+
+ // Round a size in bytes to the nearest valid nursery size.
+ static size_t roundSize(size_t size);
+
+ private:
+ gc::GCRuntime* const gc;
+
+ // Vector of allocated chunks to allocate from.
+ Vector<NurseryChunk*, 0, SystemAllocPolicy> chunks_;
+
+ // Pointer to the first unallocated byte in the nursery.
+ uintptr_t position_;
+
+  // These fields refer to the beginning of the nursery. They're normally 0
+  // and chunk(0).start() respectively, except when a generational GC zeal
+  // mode is active, in which case they may be arbitrary (see
+  // Nursery::clear()).
+ unsigned currentStartChunk_;
+ uintptr_t currentStartPosition_;
+
+ // Pointer to the last byte of space in the current chunk.
+ uintptr_t currentEnd_;
+
+ // Pointer to the last byte of space in the current chunk, or nullptr if we
+ // are not allocating strings in the nursery.
+ uintptr_t currentStringEnd_;
+
+ // Pointer to the last byte of space in the current chunk, or nullptr if we
+ // are not allocating BigInts in the nursery.
+ uintptr_t currentBigIntEnd_;
+
+ // The index of the chunk that is currently being allocated from.
+ unsigned currentChunk_;
+
+  // The current nursery capacity measured in bytes. The nursery may grow up
+  // to this value without a collection, allocating chunks on demand. This
+  // limit may be changed by maybeResizeNursery() each collection. It includes
+  // chunk headers.
+ size_t capacity_;
+
+ mozilla::TimeDuration timeInChunkAlloc_;
+
+ // Report minor collections taking at least this long, if enabled.
+ mozilla::TimeDuration profileThreshold_;
+ bool enableProfiling_;
+
+ // Whether we will nursery-allocate strings.
+ bool canAllocateStrings_;
+
+ // Whether we will nursery-allocate BigInts.
+ bool canAllocateBigInts_;
+
+ // Report how many strings were deduplicated.
+ bool reportDeduplications_;
+
+ // Whether and why a collection of this nursery has been requested. This is
+ // mutable as it is set by the store buffer, which otherwise cannot modify
+ // anything in the nursery.
+ mutable JS::GCReason minorGCTriggerReason_;
+
+ // Profiling data.
+
+ enum class ProfileKey {
+#define DEFINE_TIME_KEY(name, text) name,
+ FOR_EACH_NURSERY_PROFILE_TIME(DEFINE_TIME_KEY)
+#undef DEFINE_TIME_KEY
+ KeyCount
+ };
+
+ using ProfileTimes =
+ mozilla::EnumeratedArray<ProfileKey, ProfileKey::KeyCount,
+ mozilla::TimeStamp>;
+ using ProfileDurations =
+ mozilla::EnumeratedArray<ProfileKey, ProfileKey::KeyCount,
+ mozilla::TimeDuration>;
+
+ ProfileTimes startTimes_;
+ ProfileDurations profileDurations_;
+ ProfileDurations totalDurations_;
+
+ // Data about the previous collection.
+ struct PreviousGC {
+ JS::GCReason reason = JS::GCReason::NO_REASON;
+ size_t nurseryCapacity = 0;
+ size_t nurseryCommitted = 0;
+ size_t nurseryUsedBytes = 0;
+ size_t tenuredBytes = 0;
+ size_t tenuredCells = 0;
+ mozilla::TimeStamp endTime;
+ };
+ PreviousGC previousGC;
+
+ bool hasRecentGrowthData;
+ double smoothedGrowthFactor;
+
+ // Calculate the promotion rate of the most recent minor GC.
+  // The validForTenuring parameter is used to return whether this
+ // promotion rate is accurate enough (the nursery was full enough) to be
+ // used for tenuring and other decisions.
+ //
+ // Must only be called if the previousGC data is initialised.
+ double calcPromotionRate(bool* validForTenuring) const;
+
+ // The set of externally malloced buffers potentially kept live by objects
+ // stored in the nursery. Any external buffers that do not belong to a
+ // tenured thing at the end of a minor GC must be freed.
+ BufferSet mallocedBuffers;
+ size_t mallocedBufferBytes = 0;
+
+ // During a collection most hoisted slot and element buffers indicate their
+ // new location with a forwarding pointer at the base. This does not work
+ // for buffers whose length is less than pointer width, or when different
+ // buffers might overlap each other. For these, an entry in the following
+ // table is used.
+ typedef HashMap<void*, void*, PointerHasher<void*>, SystemAllocPolicy>
+ ForwardedBufferMap;
+ ForwardedBufferMap forwardedBuffers;
+
+  // When we assign a unique id to a cell in the nursery, that almost always
+  // means that the cell will be in a hash table, and thus held live,
+  // automatically moving the uid from the nursery to its new home in the
+  // tenured heap. It is possible, if rare, for an object that acquired a uid
+  // to be dead before the next collection, in which case we need to know to
+  // remove it when we sweep.
+ //
+ // Note: we store the pointers as Cell* here, resulting in an ugly cast in
+ // sweep. This is because this structure is used to help implement
+ // stable object hashing and we have to break the cycle somehow.
+ using CellsWithUniqueIdVector = Vector<gc::Cell*, 8, SystemAllocPolicy>;
+ CellsWithUniqueIdVector cellsWithUid_;
+
+ using NativeObjectVector = Vector<NativeObject*, 0, SystemAllocPolicy>;
+ NativeObjectVector dictionaryModeObjects_;
+
+ template <typename Key>
+ struct DeduplicationStringHasher {
+ using Lookup = Key;
+
+ static inline HashNumber hash(const Lookup& lookup) {
+ JS::AutoCheckCannotGC nogc;
+ HashNumber strHash;
+
+      // Include flags in the hash. A string relocation overlay stores either
+      // the nursery root base chars or the dependent string nursery base, but
+      // does not indicate which one. If strings with different string types
+      // were deduplicated (for example, a dependent string deduplicated into
+      // an extensible string), the base chain would be broken and the root
+      // base would be unreachable.
+
+ if (lookup->asLinear().hasLatin1Chars()) {
+ strHash = mozilla::HashString(lookup->asLinear().latin1Chars(nogc),
+ lookup->length());
+ } else {
+ MOZ_ASSERT(lookup->asLinear().hasTwoByteChars());
+ strHash = mozilla::HashString(lookup->asLinear().twoByteChars(nogc),
+ lookup->length());
+ }
+
+ return mozilla::HashGeneric(strHash, lookup->zone(), lookup->flags());
+ }
+
+ static MOZ_ALWAYS_INLINE bool match(const Key& key, const Lookup& lookup) {
+ if (!key->sameLengthAndFlags(*lookup) ||
+ key->asTenured().zone() != lookup->zone() ||
+ key->asTenured().getAllocKind() != lookup->getAllocKind()) {
+ return false;
+ }
+
+ JS::AutoCheckCannotGC nogc;
+
+ if (key->asLinear().hasLatin1Chars()) {
+ MOZ_ASSERT(lookup->asLinear().hasLatin1Chars());
+ return mozilla::ArrayEqual(key->asLinear().latin1Chars(nogc),
+ lookup->asLinear().latin1Chars(nogc),
+ lookup->length());
+ } else {
+ MOZ_ASSERT(key->asLinear().hasTwoByteChars());
+ MOZ_ASSERT(lookup->asLinear().hasTwoByteChars());
+ return EqualChars(key->asLinear().twoByteChars(nogc),
+ lookup->asLinear().twoByteChars(nogc),
+ lookup->length());
+ }
+ }
+ };
+
+ using StringDeDupSet =
+ HashSet<JSString*, DeduplicationStringHasher<JSString*>,
+ SystemAllocPolicy>;
+
+  // stringDeDupSet is emplaced at the beginning of a nursery collection and
+  // reset at the end. It can also be reset during the collection if we run
+  // out of memory while inserting new entries.
+ mozilla::Maybe<StringDeDupSet> stringDeDupSet;
+
+ // Lists of map and set objects allocated in the nursery or with iterators
+ // allocated there. Such objects need to be swept after minor GC.
+ Vector<MapObject*, 0, SystemAllocPolicy> mapsWithNurseryMemory_;
+ Vector<SetObject*, 0, SystemAllocPolicy> setsWithNurseryMemory_;
+
+ NurseryDecommitTask decommitTask;
+
+#ifdef JS_GC_ZEAL
+ struct Canary;
+ Canary* lastCanary_;
+#endif
+
+ NurseryChunk& chunk(unsigned index) const { return *chunks_[index]; }
+
+  // Set the current chunk. This updates the currentChunk_, position_,
+  // currentEnd_ and currentStringEnd_ values as appropriate. It will also
+  // poison the chunk, either a portion of the chunk if it is already the
+  // current chunk, or the whole chunk if fullPoison is true or it is not
+  // the current chunk.
+ void setCurrentChunk(unsigned chunkno);
+
+ bool initFirstChunk(AutoLockGCBgAlloc& lock);
+
+  // The |extent| parameter is advisory: it is ignored in sub-chunk and
+  // generational zeal modes, and it is clamped to
+  // Min(NurseryChunkUsableSize, capacity_).
+ void poisonAndInitCurrentChunk(size_t extent = gc::ChunkSize);
+
+ void setCurrentEnd();
+ void setStartPosition();
+
+ // Allocate the next chunk, or the first chunk for initialization.
+ // Callers will probably want to call setCurrentChunk(0) next.
+ MOZ_MUST_USE bool allocateNextChunk(unsigned chunkno,
+ AutoLockGCBgAlloc& lock);
+
+ MOZ_ALWAYS_INLINE uintptr_t currentEnd() const;
+
+ uintptr_t position() const { return position_; }
+
+ MOZ_ALWAYS_INLINE bool isSubChunkMode() const;
+
+ JSRuntime* runtime() const;
+ gcstats::Statistics& stats() const;
+
+ const js::gc::GCSchedulingTunables& tunables() const;
+
+ // Common internal allocator function.
+ void* allocate(size_t size);
+
+ void* moveToNextChunkAndAllocate(size_t size);
+
+#ifdef JS_GC_ZEAL
+ void writeCanary(uintptr_t address);
+#endif
+
+ struct CollectionResult {
+ size_t tenuredBytes;
+ size_t tenuredCells;
+ };
+ CollectionResult doCollection(JS::GCReason reason);
+
+ void doPretenuring(JSRuntime* rt, JS::GCReason reason,
+ bool highPromotionRate);
+
+  // Trace everything that |trc| has already moved to the tenured heap,
+  // tenuring anything else it reaches, and repeat until no more nursery
+  // things are found (a fixed point).
+ void collectToFixedPoint(TenuringTracer& trc);
+
+  // A dependent string's chars need to be relocated if the base it borrows
+  // chars from has been deduplicated.
+ template <typename CharT>
+ void relocateDependentStringChars(JSDependentString* tenuredDependentStr,
+ JSLinearString* baseOrRelocOverlay,
+ size_t* offset,
+ bool* rootBaseNotYetForwarded,
+ JSLinearString** rootBase);
+
+ // Handle relocation of slots/elements pointers stored in Ion frames.
+ inline void setForwardingPointer(void* oldData, void* newData, bool direct);
+
+ inline void setDirectForwardingPointer(void* oldData, void* newData);
+ void setIndirectForwardingPointer(void* oldData, void* newData);
+
+ inline void setSlotsForwardingPointer(HeapSlot* oldSlots, HeapSlot* newSlots,
+ uint32_t nslots);
+ inline void setElementsForwardingPointer(ObjectElements* oldHeader,
+ ObjectElements* newHeader,
+ uint32_t capacity);
+
+#ifdef DEBUG
+ bool checkForwardingPointerLocation(void* ptr, bool expectedInside);
+#endif
+
+ // Updates pointers to nursery objects that have been tenured and discards
+ // pointers to objects that have been freed.
+ void sweep(JSTracer* trc);
+
+ // Reset the current chunk and position after a minor collection. Also poison
+ // the nursery on debug & nightly builds.
+ void clear();
+
+ void sweepDictionaryModeObjects();
+ void sweepMapAndSetObjects();
+
+ // Change the allocable space provided by the nursery.
+ void maybeResizeNursery(JSGCInvocationKind kind, JS::GCReason reason);
+ size_t targetSize(JSGCInvocationKind kind, JS::GCReason reason);
+ void clearRecentGrowthData();
+ void growAllocableSpace(size_t newCapacity);
+ void shrinkAllocableSpace(size_t newCapacity);
+ void minimizeAllocableSpace();
+
+ // Free the chunks starting at firstFreeChunk until the end of the chunks
+ // vector. Shrinks the vector but does not update maxChunkCount().
+ void freeChunksFrom(unsigned firstFreeChunk);
+
+ void sendTelemetry(JS::GCReason reason, mozilla::TimeDuration totalTime,
+ bool wasEmpty, double promotionRate);
+
+ void printCollectionProfile(JS::GCReason reason, double promotionRate);
+ void printDeduplicationData(js::StringStats& prev, js::StringStats& curr);
+
+ // Profile recording and printing.
+ void maybeClearProfileDurations();
+ void startProfile(ProfileKey key);
+ void endProfile(ProfileKey key);
+ static void printProfileDurations(const ProfileDurations& times);
+
+ mozilla::TimeStamp collectionStartTime() const;
+ mozilla::TimeStamp lastCollectionEndTime() const;
+
+ friend class TenuringTracer;
+ friend class gc::MinorCollectionTracer;
+ friend class jit::MacroAssembler;
+ friend struct NurseryChunk;
+};
+
+} // namespace js
+
+#endif // gc_Nursery_h
diff --git a/js/src/gc/NurseryAwareHashMap.h b/js/src/gc/NurseryAwareHashMap.h
new file mode 100644
index 0000000000..5b2e1f02fa
--- /dev/null
+++ b/js/src/gc/NurseryAwareHashMap.h
@@ -0,0 +1,218 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_NurseryAwareHashMap_h
+#define gc_NurseryAwareHashMap_h
+
+#include "gc/Barrier.h"
+#include "gc/Marking.h"
+#include "js/GCHashTable.h"
+#include "js/GCPolicyAPI.h"
+#include "js/HashTable.h"
+
+namespace js {
+
+namespace detail {
+// This class only handles the incremental case and does not deal with nursery
+// pointers. The only user should be NurseryAwareHashMap; it is defined
+// externally because we need a GCPolicy for its use in the contained map.
+template <typename T>
+class UnsafeBareWeakHeapPtr : public ReadBarriered<T> {
+ public:
+ UnsafeBareWeakHeapPtr() : ReadBarriered<T>(JS::SafelyInitialized<T>()) {}
+ MOZ_IMPLICIT UnsafeBareWeakHeapPtr(const T& v) : ReadBarriered<T>(v) {}
+ explicit UnsafeBareWeakHeapPtr(const UnsafeBareWeakHeapPtr& v)
+ : ReadBarriered<T>(v) {}
+ UnsafeBareWeakHeapPtr(UnsafeBareWeakHeapPtr&& v)
+ : ReadBarriered<T>(std::move(v)) {}
+
+ UnsafeBareWeakHeapPtr& operator=(const UnsafeBareWeakHeapPtr& v) {
+ this->value = v.value;
+ return *this;
+ }
+
+ UnsafeBareWeakHeapPtr& operator=(const T& v) {
+ this->value = v;
+ return *this;
+ }
+
+ const T get() const {
+ if (!InternalBarrierMethods<T>::isMarkable(this->value)) {
+ return JS::SafelyInitialized<T>();
+ }
+ this->read();
+ return this->value;
+ }
+
+ explicit operator bool() const { return bool(this->value); }
+
+ const T unbarrieredGet() const { return this->value; }
+ T* unsafeGet() { return &this->value; }
+ T const* unsafeGet() const { return &this->value; }
+};
+} // namespace detail
+
+enum : bool { DuplicatesNotPossible, DuplicatesPossible };
+
+// The "nursery aware" hash map is a special case of GCHashMap that is able to
+// treat nursery allocated members weakly during a minor GC: e.g. it allows for
+// nursery allocated objects to be collected during nursery GC where a normal
+// hash table treats such edges strongly.
+//
+// Doing this requires some strong constraints on what can be stored in this
+// table and how it can be accessed. At the moment, this table assumes that
+// all values contain a strong reference to the key. It also requires the
+// policy to provide |isTenured| and |needsSweep| members, which is fairly
+// non-standard. This limits its usefulness to the CrossCompartmentMap at the
+// moment, but might serve as a useful base for other tables in future.
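+//
+// A minimal usage sketch (illustrative only; the names below are hypothetical
+// and the canonical user is the cross-compartment wrapper map):
+//
+//   NurseryAwareHashMap<JSObject*, JSObject*> wrappers;
+//   if (!wrappers.put(wrapped, wrapper)) {
+//     return false;  // OOM.
+//   }
+//   // After each minor GC, drop or rekey entries whose key/value moved:
+//   wrappers.sweepAfterMinorGC(trc);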
+template <typename Key, typename Value,
+ typename HashPolicy = DefaultHasher<Key>,
+ typename AllocPolicy = TempAllocPolicy,
+ bool AllowDuplicates = DuplicatesNotPossible>
+class NurseryAwareHashMap {
+ using BarrieredValue = detail::UnsafeBareWeakHeapPtr<Value>;
+ using MapType =
+ GCRekeyableHashMap<Key, BarrieredValue, HashPolicy, AllocPolicy>;
+ MapType map;
+
+ // Keep a list of all keys for which JS::GCPolicy<Key>::isTenured is false.
+  // This lets us avoid a full traversal of the map on each minor GC, keeping
+ // the minor GC times proportional to the nursery heap size.
+ Vector<Key, 0, AllocPolicy> nurseryEntries;
+
+ public:
+ using Lookup = typename MapType::Lookup;
+ using Ptr = typename MapType::Ptr;
+ using Range = typename MapType::Range;
+ using Entry = typename MapType::Entry;
+
+ explicit NurseryAwareHashMap(AllocPolicy a = AllocPolicy())
+ : map(a), nurseryEntries(std::move(a)) {}
+ explicit NurseryAwareHashMap(size_t length) : map(length) {}
+ NurseryAwareHashMap(AllocPolicy a, size_t length)
+ : map(a, length), nurseryEntries(std::move(a)) {}
+
+ bool empty() const { return map.empty(); }
+ Ptr lookup(const Lookup& l) const { return map.lookup(l); }
+ void remove(Ptr p) { map.remove(p); }
+ Range all() const { return map.all(); }
+ struct Enum : public MapType::Enum {
+ explicit Enum(NurseryAwareHashMap& namap) : MapType::Enum(namap.map) {}
+ };
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return map.shallowSizeOfExcludingThis(mallocSizeOf) +
+ nurseryEntries.sizeOfExcludingThis(mallocSizeOf);
+ }
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return map.shallowSizeOfIncludingThis(mallocSizeOf) +
+ nurseryEntries.sizeOfIncludingThis(mallocSizeOf);
+ }
+
+ MOZ_MUST_USE bool put(const Key& k, const Value& v) {
+ auto p = map.lookupForAdd(k);
+ if (p) {
+ if (!JS::GCPolicy<Key>::isTenured(k) ||
+ !JS::GCPolicy<Value>::isTenured(v)) {
+ if (!nurseryEntries.append(k)) {
+ return false;
+ }
+ }
+ p->value() = v;
+ return true;
+ }
+
+ bool ok = map.add(p, k, v);
+ if (!ok) {
+ return false;
+ }
+
+ if (!JS::GCPolicy<Key>::isTenured(k) ||
+ !JS::GCPolicy<Value>::isTenured(v)) {
+ if (!nurseryEntries.append(k)) {
+ map.remove(k);
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ void sweepAfterMinorGC(JSTracer* trc) {
+ for (auto& key : nurseryEntries) {
+ auto p = map.lookup(key);
+ if (!p) {
+ continue;
+ }
+
+ // Drop the entry if the value is not marked.
+ if (JS::GCPolicy<BarrieredValue>::needsSweep(&p->value())) {
+ map.remove(key);
+ continue;
+ }
+
+ // Update and relocate the key, if the value is still needed.
+ //
+ // Non-string Values will contain a strong reference to Key, as per
+ // its use in the CrossCompartmentWrapperMap, so the key will never
+ // be dying here. Strings do *not* have any sort of pointer from
+ // wrapper to wrappee, as they are just copies. The wrapper map
+ // entry is merely used as a cache to avoid re-copying the string,
+ // and currently that entire cache is flushed on major GC.
+ Key copy(key);
+ bool sweepKey = JS::GCPolicy<Key>::needsSweep(&copy);
+ if (sweepKey) {
+ map.remove(key);
+ continue;
+ }
+ if (AllowDuplicates) {
+ // Drop duplicated keys.
+ //
+ // A key can be forwarded to another place. In this case, rekey the
+ // item. If two or more different keys are forwarded to the same new
+ // key, simply drop the later ones.
+ if (key == copy) {
+ // No rekey needed.
+ } else if (map.has(copy)) {
+ // Key was forwarded to the same place that another key was already
+ // forwarded to.
+ map.remove(key);
+ } else {
+ map.rekeyAs(key, copy, copy);
+ }
+ } else {
+ MOZ_ASSERT(key == copy || !map.has(copy));
+ map.rekeyIfMoved(key, copy);
+ }
+ }
+ nurseryEntries.clear();
+ }
+
+ void sweep() { map.sweep(); }
+
+ void clear() {
+ map.clear();
+ nurseryEntries.clear();
+ }
+
+ bool hasNurseryEntries() const { return !nurseryEntries.empty(); }
+};
+
+} // namespace js
+
+namespace JS {
+template <typename T>
+struct GCPolicy<js::detail::UnsafeBareWeakHeapPtr<T>> {
+ static void trace(JSTracer* trc, js::detail::UnsafeBareWeakHeapPtr<T>* thingp,
+ const char* name) {
+ js::TraceEdge(trc, thingp, name);
+ }
+ static bool needsSweep(js::detail::UnsafeBareWeakHeapPtr<T>* thingp) {
+ return js::gc::IsAboutToBeFinalized(thingp);
+ }
+};
+} // namespace JS
+
+#endif // gc_NurseryAwareHashMap_h
diff --git a/js/src/gc/ObjectKind-inl.h b/js/src/gc/ObjectKind-inl.h
new file mode 100644
index 0000000000..2bf109d190
--- /dev/null
+++ b/js/src/gc/ObjectKind-inl.h
@@ -0,0 +1,176 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal helper functions for getting the AllocKind used to allocate a
+ * JSObject and related information.
+ */
+
+#ifndef gc_ObjectKind_inl_h
+#define gc_ObjectKind_inl_h
+
+#include "util/Memory.h"
+#include "vm/NativeObject.h"
+
+namespace js {
+namespace gc {
+
+/* Capacity for slotsToThingKind */
+const size_t SLOTS_TO_THING_KIND_LIMIT = 17;
+
+extern const AllocKind slotsToThingKind[];
+
+/* Get the best kind to use when making an object with the given slot count. */
+static inline AllocKind GetGCObjectKind(size_t numSlots) {
+ if (numSlots >= SLOTS_TO_THING_KIND_LIMIT) {
+ return AllocKind::OBJECT16;
+ }
+ return slotsToThingKind[numSlots];
+}
+
+static inline AllocKind GetGCObjectKind(const JSClass* clasp) {
+ if (clasp == FunctionClassPtr) {
+ return AllocKind::FUNCTION;
+ }
+
+ MOZ_ASSERT(!clasp->isProxy(), "Proxies should use GetProxyGCObjectKind");
+
+ uint32_t nslots = JSCLASS_RESERVED_SLOTS(clasp);
+ if (clasp->flags & JSCLASS_HAS_PRIVATE) {
+ nslots++;
+ }
+ return GetGCObjectKind(nslots);
+}
+
+/* As for GetGCObjectKind, but for dense array allocation. */
+static inline AllocKind GetGCArrayKind(size_t numElements) {
+ /*
+ * Dense arrays can use their fixed slots to hold their elements array
+ * (less two Values worth of ObjectElements header), but if more than the
+ * maximum number of fixed slots is needed then the fixed slots will be
+ * unused.
+ */
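+  // For example, a request for 6 elements looks up slotsToThingKind[6 + 2],
+  // i.e. the smallest object kind with at least 8 fixed slots.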
+ static_assert(ObjectElements::VALUES_PER_HEADER == 2);
+ if (numElements > NativeObject::MAX_DENSE_ELEMENTS_COUNT ||
+ numElements + ObjectElements::VALUES_PER_HEADER >=
+ SLOTS_TO_THING_KIND_LIMIT) {
+ return AllocKind::OBJECT2;
+ }
+ return slotsToThingKind[numElements + ObjectElements::VALUES_PER_HEADER];
+}
+
+static inline AllocKind GetGCObjectFixedSlotsKind(size_t numFixedSlots) {
+ MOZ_ASSERT(numFixedSlots < SLOTS_TO_THING_KIND_LIMIT);
+ return slotsToThingKind[numFixedSlots];
+}
+
+// Get the best kind to use when allocating an object that needs a specific
+// number of bytes.
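+//
+// For example (sketch, assuming 8-byte Values): a request for
+// sizeof(NativeObject) + 20 bytes leaves 20 data bytes, which round up to
+// 3 slot-sized words, so the result is GetGCObjectKind(3).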
+static inline AllocKind GetGCObjectKindForBytes(size_t nbytes) {
+ MOZ_ASSERT(nbytes <= JSObject::MAX_BYTE_SIZE);
+
+ if (nbytes <= sizeof(NativeObject)) {
+ return AllocKind::OBJECT0;
+ }
+ nbytes -= sizeof(NativeObject);
+
+ size_t dataSlots = AlignBytes(nbytes, sizeof(Value)) / sizeof(Value);
+ MOZ_ASSERT(nbytes <= dataSlots * sizeof(Value));
+ return GetGCObjectKind(dataSlots);
+}
+
+/* Get the number of fixed slots and initial capacity associated with a kind. */
+static inline size_t GetGCKindSlots(AllocKind thingKind) {
+ // Using a switch in hopes that thingKind will usually be a compile-time
+ // constant.
+ switch (thingKind) {
+ case AllocKind::FUNCTION:
+ case AllocKind::OBJECT0:
+ case AllocKind::OBJECT0_BACKGROUND:
+ return 0;
+ case AllocKind::FUNCTION_EXTENDED:
+ case AllocKind::OBJECT2:
+ case AllocKind::OBJECT2_BACKGROUND:
+ return 2;
+ case AllocKind::OBJECT4:
+ case AllocKind::OBJECT4_BACKGROUND:
+ return 4;
+ case AllocKind::OBJECT8:
+ case AllocKind::OBJECT8_BACKGROUND:
+ return 8;
+ case AllocKind::OBJECT12:
+ case AllocKind::OBJECT12_BACKGROUND:
+ return 12;
+ case AllocKind::OBJECT16:
+ case AllocKind::OBJECT16_BACKGROUND:
+ return 16;
+ default:
+ MOZ_CRASH("Bad object alloc kind");
+ }
+}
+
+static inline size_t GetGCKindSlots(AllocKind thingKind, const JSClass* clasp) {
+ size_t nslots = GetGCKindSlots(thingKind);
+
+ /* An object's private data uses the space taken by its last fixed slot. */
+ if (clasp->flags & JSCLASS_HAS_PRIVATE) {
+ MOZ_ASSERT(nslots > 0);
+ nslots--;
+ }
+
+ /*
+ * Functions have a larger alloc kind than AllocKind::OBJECT to reserve
+ * space for the extra fields in JSFunction, but have no fixed slots.
+ */
+ if (clasp == FunctionClassPtr) {
+ nslots = 0;
+ }
+
+ return nslots;
+}
+
+static inline size_t GetGCKindBytes(AllocKind thingKind) {
+ return sizeof(JSObject_Slots0) + GetGCKindSlots(thingKind) * sizeof(Value);
+}
+
+static inline bool CanChangeToBackgroundAllocKind(AllocKind kind,
+ const JSClass* clasp) {
+ // If a foreground alloc kind is specified but the class has no finalizer or a
+ // finalizer that is safe to call on a different thread, we can change the
+ // alloc kind to one which is finalized on a background thread.
+ //
+  // For example, AllocKind::OBJECT0 calls the finalizer on the main thread,
+  // and AllocKind::OBJECT0_BACKGROUND calls the finalizer on a helper thread.
+
+ MOZ_ASSERT(IsObjectAllocKind(kind));
+
+ if (IsBackgroundFinalized(kind)) {
+ return false; // This kind is already a background finalized kind.
+ }
+
+ return !clasp->hasFinalize() || (clasp->flags & JSCLASS_BACKGROUND_FINALIZE);
+}
+
+static inline AllocKind ForegroundToBackgroundAllocKind(AllocKind fgKind) {
+ MOZ_ASSERT(IsObjectAllocKind(fgKind));
+ MOZ_ASSERT(IsForegroundFinalized(fgKind));
+
+ // For objects, each background alloc kind is defined just after the
+ // corresponding foreground alloc kind so we can convert between them by
+ // incrementing or decrementing as appropriate.
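+  // For example, AllocKind::OBJECT4 maps to AllocKind::OBJECT4_BACKGROUND.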
+ AllocKind bgKind = AllocKind(size_t(fgKind) + 1);
+
+ MOZ_ASSERT(IsObjectAllocKind(bgKind));
+ MOZ_ASSERT(IsBackgroundFinalized(bgKind));
+ MOZ_ASSERT(GetGCKindSlots(bgKind) == GetGCKindSlots(fgKind));
+
+ return bgKind;
+}
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_ObjectKind_inl_h
diff --git a/js/src/gc/ParallelWork.h b/js/src/gc/ParallelWork.h
new file mode 100644
index 0000000000..9de9561c35
--- /dev/null
+++ b/js/src/gc/ParallelWork.h
@@ -0,0 +1,139 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_ParallelWork_h
+#define gc_ParallelWork_h
+
+#include "mozilla/Maybe.h"
+
+#include "gc/GC.h"
+#include "gc/GCParallelTask.h"
+#include "gc/GCRuntime.h"
+#include "js/SliceBudget.h"
+#include "vm/HelperThreads.h"
+
+namespace js {
+
+namespace gcstats {
+enum class PhaseKind : uint8_t;
+}
+
+namespace gc {
+
+template <typename WorkItem>
+using ParallelWorkFunc = size_t (*)(GCRuntime*, const WorkItem&);
+
+// A GCParallelTask that executes WorkItems from a WorkItemIterator.
+//
+// The WorkItemIterator class must supply done(), next() and get() methods. The
+// get() method must return WorkItem objects.
+template <typename WorkItem, typename WorkItemIterator>
+class ParallelWorker : public GCParallelTask {
+ public:
+ using WorkFunc = ParallelWorkFunc<WorkItem>;
+
+ ParallelWorker(GCRuntime* gc, WorkFunc func, WorkItemIterator& work,
+ const SliceBudget& budget, AutoLockHelperThreadState& lock)
+ : GCParallelTask(gc),
+ func_(func),
+ work_(work),
+ budget_(budget),
+ item_(work.get()) {
+ // Consume a work item on creation so that we can stop creating workers if
+ // the number of workers exceeds the number of work items.
+ work.next();
+ }
+
+ void run(AutoLockHelperThreadState& lock) {
+ AutoUnlockHelperThreadState unlock(lock);
+
+ // These checks assert when run in parallel.
+ AutoDisableProxyCheck noProxyCheck;
+
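+    // Run work items until the slice budget is exhausted or the iterator is
+    // drained, re-taking the helper thread lock only to fetch the next item.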
+ for (;;) {
+ size_t steps = func_(gc, item_);
+ budget_.step(steps);
+ if (budget_.isOverBudget()) {
+ break;
+ }
+
+ AutoLockHelperThreadState lock;
+ if (work().done()) {
+ break;
+ }
+
+ item_ = work().get();
+ work().next();
+ }
+ }
+
+ private:
+ WorkItemIterator& work() { return work_.ref(); }
+
+ // A function to execute work items on the helper thread.
+ WorkFunc func_;
+
+ // An iterator which produces work items to execute.
+ HelperThreadLockData<WorkItemIterator&> work_;
+
+ // The budget that determines how long to run for.
+ SliceBudget budget_;
+
+ // The next work item to process.
+ WorkItem item_;
+};
+
+static constexpr size_t MaxParallelWorkers = 8;
+
+extern size_t ParallelWorkerCount();
+
+// An RAII class that starts a number of ParallelWorkers and waits for them to
+// finish.
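+//
+// A rough usage sketch (names are illustrative, not real call sites):
+//
+//   AutoLockHelperThreadState lock;
+//   ArenaIter iter(...);  // hypothetical WorkItemIterator
+//   AutoRunParallelWork<Arena*, ArenaIter> work(
+//       gc, UpdateArenaFunc, gcstats::PhaseKind::SOME_PHASE, iter, budget,
+//       lock);
+//   // Workers are started in the constructor and joined in the destructor.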
+template <typename WorkItem, typename WorkItemIterator>
+class MOZ_RAII AutoRunParallelWork {
+ public:
+ using Worker = ParallelWorker<WorkItem, WorkItemIterator>;
+ using WorkFunc = ParallelWorkFunc<WorkItem>;
+
+ AutoRunParallelWork(GCRuntime* gc, WorkFunc func,
+ gcstats::PhaseKind phaseKind, WorkItemIterator& work,
+ const SliceBudget& budget,
+ AutoLockHelperThreadState& lock)
+ : gc(gc), phaseKind(phaseKind), lock(lock), tasksStarted(0) {
+ size_t workerCount = gc->parallelWorkerCount();
+ MOZ_ASSERT(workerCount <= MaxParallelWorkers);
+ MOZ_ASSERT_IF(workerCount == 0, work.done());
+
+ for (size_t i = 0; i < workerCount && !work.done(); i++) {
+ tasks[i].emplace(gc, func, work, budget, lock);
+ gc->startTask(*tasks[i], phaseKind, lock);
+ tasksStarted++;
+ }
+ }
+
+ ~AutoRunParallelWork() {
+ gHelperThreadLock.assertOwnedByCurrentThread();
+
+ for (size_t i = 0; i < tasksStarted; i++) {
+ gc->joinTask(*tasks[i], phaseKind, lock);
+ }
+ for (size_t i = tasksStarted; i < MaxParallelWorkers; i++) {
+ MOZ_ASSERT(tasks[i].isNothing());
+ }
+ }
+
+ private:
+ GCRuntime* gc;
+ gcstats::PhaseKind phaseKind;
+ AutoLockHelperThreadState& lock;
+ size_t tasksStarted;
+ mozilla::Maybe<Worker> tasks[MaxParallelWorkers];
+};
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_ParallelWork_h */
diff --git a/js/src/gc/Policy.h b/js/src/gc/Policy.h
new file mode 100644
index 0000000000..d30e4df379
--- /dev/null
+++ b/js/src/gc/Policy.h
@@ -0,0 +1,99 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JS Garbage Collector. */
+
+#ifndef gc_Policy_h
+#define gc_Policy_h
+
+#include <type_traits>
+
+#include "gc/Barrier.h"
+#include "gc/Marking.h"
+#include "js/GCPolicyAPI.h"
+
+namespace js {
+
+// Define the GCPolicy for all internal pointers.
+template <typename T>
+struct InternalGCPointerPolicy : public JS::GCPointerPolicy<T> {
+ using Type = std::remove_pointer_t<T>;
+
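+  // The macro chain below expands to
+  //   "std::is_base_of_v<Base1, Type> || ... || false"
+  // over every trace kind's base class, so the assertion fails for any
+  // pointer type that is not a GC thing.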
+#define IS_BASE_OF_OR(_1, BaseType, _2, _3) std::is_base_of_v<BaseType, Type> ||
+ static_assert(
+ JS_FOR_EACH_TRACEKIND(IS_BASE_OF_OR) false,
+ "InternalGCPointerPolicy must only be used for GC thing pointers");
+#undef IS_BASE_OF_OR
+
+ static void trace(JSTracer* trc, T* vp, const char* name) {
+ // It's not safe to trace unbarriered pointers except as part of root
+ // marking. If you get an assertion here you probably need to add a barrier,
+ // e.g. HeapPtr<T>.
+ TraceNullableRoot(trc, vp, name);
+ }
+};
+
+} // namespace js
+
+namespace JS {
+
+// Internally, all pointer types are treated as pointers to GC things by
+// default.
+template <typename T>
+struct GCPolicy<T*> : public js::InternalGCPointerPolicy<T*> {};
+template <typename T>
+struct GCPolicy<T* const> : public js::InternalGCPointerPolicy<T* const> {};
+
+template <typename T>
+struct GCPolicy<js::HeapPtr<T>> {
+ static void trace(JSTracer* trc, js::HeapPtr<T>* thingp, const char* name) {
+ js::TraceNullableEdge(trc, thingp, name);
+ }
+ static bool needsSweep(js::HeapPtr<T>* thingp) {
+ return js::gc::IsAboutToBeFinalized(thingp);
+ }
+ static bool traceWeak(JSTracer* trc, js::HeapPtr<T>* thingp) {
+ return js::TraceWeakEdge(trc, thingp, "traceWeak");
+ }
+};
+
+template <typename T>
+struct GCPolicy<js::PreBarriered<T>> {
+ static void trace(JSTracer* trc, js::PreBarriered<T>* thingp,
+ const char* name) {
+ js::TraceNullableEdge(trc, thingp, name);
+ }
+ static bool needsSweep(js::PreBarriered<T>* thingp) {
+ return js::gc::IsAboutToBeFinalized(thingp);
+ }
+};
+
+template <typename T>
+struct GCPolicy<js::WeakHeapPtr<T>> {
+ static void trace(JSTracer* trc, js::WeakHeapPtr<T>* thingp,
+ const char* name) {
+ js::TraceEdge(trc, thingp, name);
+ }
+ static bool needsSweep(js::WeakHeapPtr<T>* thingp) {
+ return js::gc::IsAboutToBeFinalized(thingp);
+ }
+ static bool traceWeak(JSTracer* trc, js::WeakHeapPtr<T>* thingp) {
+ return js::TraceWeakEdge(trc, thingp, "traceWeak");
+ }
+};
+
+template <>
+struct GCPolicy<JS::GCCellPtr> {
+ static void trace(JSTracer* trc, JS::GCCellPtr* thingp, const char* name) {
+ // It's not safe to trace unbarriered pointers except as part of root
+ // marking.
+ js::TraceGCCellPtrRoot(trc, thingp, name);
+ }
+};
+
+} // namespace JS
+
+#endif // gc_Policy_h
diff --git a/js/src/gc/PrivateIterators-inl.h b/js/src/gc/PrivateIterators-inl.h
new file mode 100644
index 0000000000..8a3ea3de07
--- /dev/null
+++ b/js/src/gc/PrivateIterators-inl.h
@@ -0,0 +1,167 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal iterators for various data structures.
+ */
+
+#ifndef gc_PrivateIterators_inl_h
+#define gc_PrivateIterators_inl_h
+
+#include "gc/PublicIterators.h"
+
+#include "gc/GC-inl.h"
+
+namespace js {
+namespace gc {
+
+class ArenaCellIterUnderGC : public ArenaCellIter {
+ public:
+ explicit ArenaCellIterUnderGC(Arena* arena) : ArenaCellIter(arena) {
+ MOZ_ASSERT(CurrentThreadIsPerformingGC());
+ }
+};
+
+class ArenaCellIterUnderFinalize : public ArenaCellIter {
+ public:
+ explicit ArenaCellIterUnderFinalize(Arena* arena) : ArenaCellIter(arena) {
+ MOZ_ASSERT(CurrentThreadIsGCFinalizing());
+ }
+};
+
+class GrayObjectIter : public ZoneAllCellIter<js::gc::TenuredCell> {
+ public:
+ explicit GrayObjectIter(JS::Zone* zone, AllocKind kind)
+ : ZoneAllCellIter<js::gc::TenuredCell>() {
+ initForTenuredIteration(zone, kind);
+ }
+
+ JSObject* get() const {
+ return ZoneAllCellIter<js::gc::TenuredCell>::get<JSObject>();
+ }
+ operator JSObject*() const { return get(); }
+ JSObject* operator->() const { return get(); }
+};
+
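+// Iterate over the zones currently being collected, i.e. those for which
+// wasGCStarted() returns true.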
+class GCZonesIter {
+ AllZonesIter zone;
+
+ public:
+ explicit GCZonesIter(GCRuntime* gc) : zone(gc) {
+ MOZ_ASSERT(JS::RuntimeHeapIsBusy());
+ MOZ_ASSERT_IF(gc->atomsZone->wasGCStarted(),
+ !gc->rt->hasHelperThreadZones());
+
+ if (!done() && !zone->wasGCStarted()) {
+ next();
+ }
+ }
+ explicit GCZonesIter(JSRuntime* rt) : GCZonesIter(&rt->gc) {}
+
+ bool done() const { return zone.done(); }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ do {
+ zone.next();
+ } while (!zone.done() && !zone->wasGCStarted());
+ }
+
+ JS::Zone* get() const {
+ MOZ_ASSERT(!done());
+ return zone;
+ }
+
+ operator JS::Zone*() const { return get(); }
+ JS::Zone* operator->() const { return get(); }
+};
+
+using GCCompartmentsIter =
+ CompartmentsOrRealmsIterT<GCZonesIter, CompartmentsInZoneIter>;
+using GCRealmsIter = CompartmentsOrRealmsIterT<GCZonesIter, RealmsInZoneIter>;
+
+/* Iterates over all zones in the current sweep group. */
+class SweepGroupZonesIter {
+ JS::Zone* current;
+
+ public:
+ explicit SweepGroupZonesIter(GCRuntime* gc)
+ : current(gc->getCurrentSweepGroup()) {
+ MOZ_ASSERT(CurrentThreadIsPerformingGC());
+ }
+ explicit SweepGroupZonesIter(JSRuntime* rt) : SweepGroupZonesIter(&rt->gc) {}
+
+ bool done() const { return !current; }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ current = current->nextNodeInGroup();
+ }
+
+ JS::Zone* get() const {
+ MOZ_ASSERT(!done());
+ return current;
+ }
+
+ operator JS::Zone*() const { return get(); }
+ JS::Zone* operator->() const { return get(); }
+};
+
+using SweepGroupCompartmentsIter =
+ CompartmentsOrRealmsIterT<SweepGroupZonesIter, CompartmentsInZoneIter>;
+using SweepGroupRealmsIter =
+ CompartmentsOrRealmsIterT<SweepGroupZonesIter, RealmsInZoneIter>;
+
+// Iterate the free cells in an arena. See also ArenaCellIter which iterates
+// the allocated cells.
+class ArenaFreeCellIter {
+ Arena* arena;
+ size_t thingSize;
+ FreeSpan span;
+ uint_fast16_t thing;
+
+ public:
+ explicit ArenaFreeCellIter(Arena* arena)
+ : arena(arena),
+ thingSize(arena->getThingSize()),
+ span(*arena->getFirstFreeSpan()),
+ thing(span.first) {
+ MOZ_ASSERT(arena);
+ MOZ_ASSERT(thing < ArenaSize);
+ }
+
+ bool done() const {
+ MOZ_ASSERT(thing < ArenaSize);
+ return !thing;
+ }
+
+ TenuredCell* get() const {
+ MOZ_ASSERT(!done());
+ return reinterpret_cast<TenuredCell*>(uintptr_t(arena) + thing);
+ }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(thing >= span.first && thing <= span.last);
+
+ if (thing == span.last) {
+ span = *span.nextSpan(arena);
+ thing = span.first;
+ } else {
+ thing += thingSize;
+ }
+
+ MOZ_ASSERT(thing < ArenaSize);
+ }
+
+ operator TenuredCell*() const { return get(); }
+ TenuredCell* operator->() const { return get(); }
+};
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_PrivateIterators_inl_h
diff --git a/js/src/gc/PublicIterators.cpp b/js/src/gc/PublicIterators.cpp
new file mode 100644
index 0000000000..78919a21a3
--- /dev/null
+++ b/js/src/gc/PublicIterators.cpp
@@ -0,0 +1,249 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/DebugOnly.h"
+
+#include "gc/GCInternals.h"
+#include "gc/GCLock.h"
+#include "js/HashTable.h"
+#include "vm/Realm.h"
+#include "vm/Runtime.h"
+
+#include "gc/PrivateIterators-inl.h"
+#include "vm/JSContext-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+static void IterateRealmsArenasCellsUnbarriered(
+ JSContext* cx, Zone* zone, void* data,
+ JS::IterateRealmCallback realmCallback, IterateArenaCallback arenaCallback,
+ IterateCellCallback cellCallback, const JS::AutoRequireNoGC& nogc) {
+ {
+ Rooted<Realm*> realm(cx);
+ for (RealmsInZoneIter r(zone); !r.done(); r.next()) {
+ realm = r;
+ (*realmCallback)(cx, data, realm, nogc);
+ }
+ }
+
+ for (auto thingKind : AllAllocKinds()) {
+ JS::TraceKind traceKind = MapAllocToTraceKind(thingKind);
+ size_t thingSize = Arena::thingSize(thingKind);
+
+ for (ArenaIter aiter(zone, thingKind); !aiter.done(); aiter.next()) {
+ Arena* arena = aiter.get();
+ (*arenaCallback)(cx->runtime(), data, arena, traceKind, thingSize, nogc);
+ for (ArenaCellIter cell(arena); !cell.done(); cell.next()) {
+ (*cellCallback)(cx->runtime(), data, JS::GCCellPtr(cell, traceKind),
+ thingSize, nogc);
+ }
+ }
+ }
+}
+
+void js::IterateHeapUnbarriered(JSContext* cx, void* data,
+ IterateZoneCallback zoneCallback,
+ JS::IterateRealmCallback realmCallback,
+ IterateArenaCallback arenaCallback,
+ IterateCellCallback cellCallback) {
+ AutoPrepareForTracing prep(cx);
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
+ (*zoneCallback)(cx->runtime(), data, zone, nogc);
+ IterateRealmsArenasCellsUnbarriered(cx, zone, data, realmCallback,
+ arenaCallback, cellCallback, nogc);
+ }
+}
+
+void js::IterateHeapUnbarrieredForZone(JSContext* cx, Zone* zone, void* data,
+ IterateZoneCallback zoneCallback,
+ JS::IterateRealmCallback realmCallback,
+ IterateArenaCallback arenaCallback,
+ IterateCellCallback cellCallback) {
+ AutoPrepareForTracing prep(cx);
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ (*zoneCallback)(cx->runtime(), data, zone, nogc);
+ IterateRealmsArenasCellsUnbarriered(cx, zone, data, realmCallback,
+ arenaCallback, cellCallback, nogc);
+}
+
+void js::IterateChunks(JSContext* cx, void* data,
+ IterateChunkCallback chunkCallback) {
+ AutoPrepareForTracing prep(cx);
+ AutoLockGC lock(cx->runtime());
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ for (auto chunk = cx->runtime()->gc.allNonEmptyChunks(lock); !chunk.done();
+ chunk.next()) {
+ chunkCallback(cx->runtime(), data, chunk, nogc);
+ }
+}
+
+static void TraverseInnerLazyScriptsForLazyScript(
+ JSContext* cx, void* data, BaseScript* enclosingScript,
+ IterateScriptCallback lazyScriptCallback, const JS::AutoRequireNoGC& nogc) {
+ for (JS::GCCellPtr gcThing : enclosingScript->gcthings()) {
+ if (!gcThing.is<JSObject>()) {
+ continue;
+ }
+ JSObject* obj = &gcThing.as<JSObject>();
+
+ MOZ_ASSERT(obj->is<JSFunction>(),
+ "All objects in lazy scripts should be functions");
+ JSFunction* fun = &obj->as<JSFunction>();
+
+ if (!fun->hasBaseScript() || fun->hasBytecode()) {
+ continue;
+ }
+
+ BaseScript* script = fun->baseScript();
+ MOZ_ASSERT_IF(script->hasEnclosingScript(),
+ script->enclosingScript() == enclosingScript);
+
+ lazyScriptCallback(cx->runtime(), data, script, nogc);
+
+ TraverseInnerLazyScriptsForLazyScript(cx, data, script, lazyScriptCallback,
+ nogc);
+ }
+}
+
+static inline void DoScriptCallback(JSContext* cx, void* data,
+ BaseScript* script,
+ IterateScriptCallback callback,
+ const JS::AutoRequireNoGC& nogc) {
+ // Exclude any scripts that may be the result of a failed compile. Check that
+ // script either has bytecode or is ready to delazify.
+ //
+ // This excludes lazy scripts that do not have an enclosing scope because we
+ // cannot distinguish a failed compile fragment from a lazy script with a lazy
+ // parent.
+ if (!script->hasBytecode() && !script->isReadyForDelazification()) {
+ return;
+ }
+
+ // Invoke callback.
+ callback(cx->runtime(), data, script, nogc);
+
+ // The check above excluded lazy scripts with lazy parents, so explicitly
+ // visit inner scripts now if we are lazy with a successfully compiled parent.
+ if (!script->hasBytecode()) {
+ TraverseInnerLazyScriptsForLazyScript(cx, data, script, callback, nogc);
+ }
+}
+
+void js::IterateScripts(JSContext* cx, Realm* realm, void* data,
+ IterateScriptCallback scriptCallback) {
+ MOZ_ASSERT(!cx->suppressGC);
+ AutoEmptyNurseryAndPrepareForTracing prep(cx);
+ JS::AutoSuppressGCAnalysis nogc;
+
+ if (realm) {
+ Zone* zone = realm->zone();
+ for (auto iter = zone->cellIter<BaseScript>(prep); !iter.done();
+ iter.next()) {
+ if (iter->realm() != realm) {
+ continue;
+ }
+ DoScriptCallback(cx, data, iter.get(), scriptCallback, nogc);
+ }
+ } else {
+ for (ZonesIter zone(cx->runtime(), SkipAtoms); !zone.done(); zone.next()) {
+ for (auto iter = zone->cellIter<BaseScript>(prep); !iter.done();
+ iter.next()) {
+ DoScriptCallback(cx, data, iter.get(), scriptCallback, nogc);
+ }
+ }
+ }
+}
+
+void js::IterateGrayObjects(Zone* zone, IterateGCThingCallback cellCallback,
+ void* data) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
+
+ JSContext* cx = TlsContext.get();
+ AutoPrepareForTracing prep(cx);
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ for (auto kind : ObjectAllocKinds()) {
+ for (GrayObjectIter obj(zone, kind); !obj.done(); obj.next()) {
+ if (obj->asTenured().isMarkedGray()) {
+ cellCallback(data, JS::GCCellPtr(obj.get()), nogc);
+ }
+ }
+ }
+}
+
+JS_PUBLIC_API void JS_IterateCompartments(
+ JSContext* cx, void* data,
+ JSIterateCompartmentCallback compartmentCallback) {
+ AutoTraceSession session(cx->runtime());
+
+ for (CompartmentsIter c(cx->runtime()); !c.done(); c.next()) {
+ if ((*compartmentCallback)(cx, data, c) ==
+ JS::CompartmentIterResult::Stop) {
+ break;
+ }
+ }
+}
+
+JS_PUBLIC_API void JS_IterateCompartmentsInZone(
+ JSContext* cx, JS::Zone* zone, void* data,
+ JSIterateCompartmentCallback compartmentCallback) {
+ AutoTraceSession session(cx->runtime());
+
+ for (CompartmentsInZoneIter c(zone); !c.done(); c.next()) {
+ if ((*compartmentCallback)(cx, data, c) ==
+ JS::CompartmentIterResult::Stop) {
+ break;
+ }
+ }
+}
+
+JS_PUBLIC_API void JS::IterateRealms(JSContext* cx, void* data,
+ JS::IterateRealmCallback realmCallback) {
+ AutoTraceSession session(cx->runtime());
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ Rooted<Realm*> realm(cx);
+ for (RealmsIter r(cx->runtime()); !r.done(); r.next()) {
+ realm = r;
+ (*realmCallback)(cx, data, realm, nogc);
+ }
+}
+
+JS_PUBLIC_API void JS::IterateRealmsWithPrincipals(
+ JSContext* cx, JSPrincipals* principals, void* data,
+ JS::IterateRealmCallback realmCallback) {
+ MOZ_ASSERT(principals);
+
+ AutoTraceSession session(cx->runtime());
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ Rooted<Realm*> realm(cx);
+ for (RealmsIter r(cx->runtime()); !r.done(); r.next()) {
+ if (r->principals() != principals) {
+ continue;
+ }
+ realm = r;
+ (*realmCallback)(cx, data, realm, nogc);
+ }
+}
+
+JS_PUBLIC_API void JS::IterateRealmsInCompartment(
+ JSContext* cx, JS::Compartment* compartment, void* data,
+ JS::IterateRealmCallback realmCallback) {
+ AutoTraceSession session(cx->runtime());
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ Rooted<Realm*> realm(cx);
+ for (RealmsInCompartmentIter r(compartment); !r.done(); r.next()) {
+ realm = r;
+ (*realmCallback)(cx, data, realm, nogc);
+ }
+}
diff --git a/js/src/gc/PublicIterators.h b/js/src/gc/PublicIterators.h
new file mode 100644
index 0000000000..3df7926438
--- /dev/null
+++ b/js/src/gc/PublicIterators.h
@@ -0,0 +1,197 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Iterators for various data structures.
+ */
+
+#ifndef gc_PublicIterators_h
+#define gc_PublicIterators_h
+
+#include "mozilla/Maybe.h"
+
+#include "jstypes.h"
+#include "gc/GCRuntime.h"
+#include "gc/IteratorUtils.h"
+#include "gc/Zone.h"
+#include "vm/Compartment.h"
+#include "vm/Runtime.h"
+
+struct JSRuntime;
+
+namespace JS {
+class JS_PUBLIC_API Realm;
+}
+
+namespace js {
+
+// Accessing the atoms zone can be dangerous because helper threads may be
+// accessing it concurrently to the main thread, so it's better to skip the
+// atoms zone when iterating over zones. If you need to iterate over the atoms
+// zone, consider using AutoLockAllAtoms.
+enum ZoneSelector { WithAtoms, SkipAtoms };
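+
+// The iterators below all follow the same done()/next()/get() protocol, for
+// example (sketch):
+//
+//   for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
+//     // ... use |zone| ...
+//   }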
+
+// Iterate over all zones in the runtime apart from the atoms zone and those
+// which may be in use by parse threads.
+class NonAtomZonesIter {
+ gc::AutoEnterIteration iterMarker;
+ JS::Zone** it;
+ JS::Zone** end;
+
+ public:
+ explicit NonAtomZonesIter(gc::GCRuntime* gc)
+ : iterMarker(gc), it(gc->zones().begin()), end(gc->zones().end()) {
+ skipHelperThreadZones();
+ }
+ explicit NonAtomZonesIter(JSRuntime* rt) : NonAtomZonesIter(&rt->gc) {}
+
+ bool done() const { return it == end; }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ it++;
+ skipHelperThreadZones();
+ }
+
+ void skipHelperThreadZones() {
+ while (!done() && get()->usedByHelperThread()) {
+ it++;
+ }
+ }
+
+ JS::Zone* get() const {
+ MOZ_ASSERT(!done());
+ return *it;
+ }
+
+ operator JS::Zone*() const { return get(); }
+ JS::Zone* operator->() const { return get(); }
+};
+
+// Iterate over all zones in the runtime, except those which may be in use by
+// parse threads. May or may not include the atoms zone.
+class ZonesIter {
+ JS::Zone* atomsZone;
+ NonAtomZonesIter otherZones;
+
+ public:
+ ZonesIter(gc::GCRuntime* gc, ZoneSelector selector)
+ : atomsZone(selector == WithAtoms ? gc->atomsZone.ref() : nullptr),
+ otherZones(gc) {}
+ ZonesIter(JSRuntime* rt, ZoneSelector selector)
+ : ZonesIter(&rt->gc, selector) {}
+
+ bool done() const { return !atomsZone && otherZones.done(); }
+
+ JS::Zone* get() const {
+ MOZ_ASSERT(!done());
+ return atomsZone ? atomsZone : otherZones.get();
+ }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ if (atomsZone) {
+ atomsZone = nullptr;
+ return;
+ }
+
+ otherZones.next();
+ }
+
+ operator JS::Zone*() const { return get(); }
+ JS::Zone* operator->() const { return get(); }
+};
+
+// Iterate over all zones in the runtime, including the atoms zone, except
+// those which may be in use by parse threads.
+class AllZonesIter : public ZonesIter {
+ public:
+ explicit AllZonesIter(gc::GCRuntime* gc) : ZonesIter(gc, WithAtoms) {}
+ explicit AllZonesIter(JSRuntime* rt) : AllZonesIter(&rt->gc) {}
+};
+
+struct CompartmentsInZoneIter {
+ explicit CompartmentsInZoneIter(JS::Zone* zone) : zone(zone) {
+ it = zone->compartments().begin();
+ }
+
+ bool done() const {
+ MOZ_ASSERT(it);
+ return it < zone->compartments().begin() ||
+ it >= zone->compartments().end();
+ }
+ void next() {
+ MOZ_ASSERT(!done());
+ it++;
+ }
+
+ JS::Compartment* get() const {
+ MOZ_ASSERT(it);
+ return *it;
+ }
+
+ operator JS::Compartment*() const { return get(); }
+ JS::Compartment* operator->() const { return get(); }
+
+ private:
+ JS::Zone* zone;
+ JS::Compartment** it;
+};
+
+class RealmsInCompartmentIter {
+ JS::Compartment* comp;
+ JS::Realm** it;
+
+ public:
+ explicit RealmsInCompartmentIter(JS::Compartment* comp) : comp(comp) {
+ it = comp->realms().begin();
+ MOZ_ASSERT(!done(), "Compartments must have at least one realm");
+ }
+
+ bool done() const {
+ MOZ_ASSERT(it);
+ return it < comp->realms().begin() || it >= comp->realms().end();
+ }
+ void next() {
+ MOZ_ASSERT(!done());
+ it++;
+ }
+
+ JS::Realm* get() const {
+ MOZ_ASSERT(!done());
+ return *it;
+ }
+
+ operator JS::Realm*() const { return get(); }
+ JS::Realm* operator->() const { return get(); }
+};
+
+using RealmsInZoneIter =
+ NestedIterator<CompartmentsInZoneIter, RealmsInCompartmentIter>;
+
+// This iterator iterates over all the compartments or realms in a given set of
+// zones. The set of zones is determined by iterating ZonesIterT. The set of
+// compartments or realms is determined by InnerIterT.
+template <class ZonesIterT, class InnerIterT>
+class CompartmentsOrRealmsIterT
+ : public NestedIterator<ZonesIterT, InnerIterT> {
+ gc::AutoEnterIteration iterMarker;
+
+ public:
+ explicit CompartmentsOrRealmsIterT(gc::GCRuntime* gc)
+ : NestedIterator<ZonesIterT, InnerIterT>(gc), iterMarker(gc) {}
+ explicit CompartmentsOrRealmsIterT(JSRuntime* rt)
+ : CompartmentsOrRealmsIterT(&rt->gc) {}
+};
+
+using CompartmentsIter =
+ CompartmentsOrRealmsIterT<NonAtomZonesIter, CompartmentsInZoneIter>;
+using RealmsIter =
+ CompartmentsOrRealmsIterT<NonAtomZonesIter, RealmsInZoneIter>;
+
+} // namespace js
+
+#endif // gc_PublicIterators_h
diff --git a/js/src/gc/RelocationOverlay.h b/js/src/gc/RelocationOverlay.h
new file mode 100644
index 0000000000..28be11a788
--- /dev/null
+++ b/js/src/gc/RelocationOverlay.h
@@ -0,0 +1,66 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal definition of relocation overlay used while moving cells.
+ */
+
+#ifndef gc_RelocationOverlay_h
+#define gc_RelocationOverlay_h
+
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "gc/Cell.h"
+
+namespace js {
+namespace gc {
+
+/*
+ * This structure overlays a Cell that has been moved and provides a way to find
+ * its new location. It's used during generational and compacting GC.
+ */
+class RelocationOverlay : public Cell {
+ public:
+ /* The location the cell has been moved to, stored in the cell header. */
+ Cell* forwardingAddress() const {
+ MOZ_ASSERT(isForwarded());
+ return reinterpret_cast<Cell*>(header_ & ~RESERVED_MASK);
+ }
+
+ protected:
+ /* A list entry to track all relocated things. */
+ RelocationOverlay* next_;
+
+ explicit RelocationOverlay(Cell* dst);
+
+ public:
+ static const RelocationOverlay* fromCell(const Cell* cell) {
+ return static_cast<const RelocationOverlay*>(cell);
+ }
+
+ static RelocationOverlay* fromCell(Cell* cell) {
+ return static_cast<RelocationOverlay*>(cell);
+ }
+
+ static RelocationOverlay* forwardCell(Cell* src, Cell* dst);
+
+ RelocationOverlay*& nextRef() {
+ MOZ_ASSERT(isForwarded());
+ return next_;
+ }
+
+ RelocationOverlay* next() const {
+ MOZ_ASSERT(isForwarded());
+ return next_;
+ }
+};
+
+} // namespace gc
+} // namespace js
+
+#endif /* gc_RelocationOverlay_h */
diff --git a/js/src/gc/RootMarking.cpp b/js/src/gc/RootMarking.cpp
new file mode 100644
index 0000000000..9403ec4c82
--- /dev/null
+++ b/js/src/gc/RootMarking.cpp
@@ -0,0 +1,649 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef MOZ_VALGRIND
+# include <valgrind/memcheck.h>
+#endif
+
+#include "jstypes.h"
+
+#include "builtin/MapObject.h"
+#include "debugger/DebugAPI.h"
+#include "frontend/BytecodeCompiler.h"
+#include "frontend/Parser.h"
+#include "gc/ClearEdgesTracer.h"
+#include "gc/GCInternals.h"
+#include "gc/Marking.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "js/HashTable.h"
+#include "js/ValueArray.h"
+#include "vm/HelperThreadState.h"
+#include "vm/JSContext.h"
+#include "vm/JSONParser.h"
+
+#include "gc/Nursery-inl.h"
+#include "gc/PrivateIterators-inl.h"
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::LinkedList;
+
+using JS::AutoGCRooter;
+
+using RootRange = RootedValueMap::Range;
+using RootEntry = RootedValueMap::Entry;
+using RootEnum = RootedValueMap::Enum;
+
+// For more detail see JS::Rooted::root and js::RootedTraceable.
+//
+// The JS::RootKind::Traceable list contains a bunch of totally disparate types,
+// but to refer to this list we need /something/ in the type field. We use the
+// following type as a compatible stand-in. No methods of the
+// ConcreteTraceable type are actually used at runtime.
+struct ConcreteTraceable {
+ ConcreteTraceable() = delete;
+ void trace(JSTracer*) = delete;
+};
+
+template <typename T>
+inline void RootedGCThingTraits<T>::trace(JSTracer* trc, T* thingp,
+ const char* name) {
+ TraceNullableRoot(trc, thingp, name);
+}
+
+template <typename T>
+inline void RootedTraceableTraits<T>::trace(JSTracer* trc,
+ VirtualTraceable* thingp,
+ const char* name) {
+ thingp->trace(trc, name);
+}
+
+template <typename T>
+inline void JS::Rooted<T>::trace(JSTracer* trc, const char* name) {
+ PtrTraits::trace(trc, &ptr, name);
+}
+
+template <typename T>
+inline void JS::PersistentRooted<T>::trace(JSTracer* trc, const char* name) {
+ PtrTraits::trace(trc, &ptr, name);
+}
+
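+// Each per-kind stack root list is a chain headed by a
+// Rooted<JS::detail::RootListEntry*>. The cast below recovers the concrete
+// Rooted<T> type; this relies on every Rooted<T> instantiation having a
+// compatible layout.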
+template <typename T>
+static inline void TraceExactStackRootList(
+ JSTracer* trc, JS::Rooted<JS::detail::RootListEntry*>* listHead,
+ const char* name) {
+ auto* typedList = reinterpret_cast<JS::Rooted<T>*>(listHead);
+ for (JS::Rooted<T>* root = typedList; root; root = root->previous()) {
+ root->trace(trc, name);
+ }
+}
+
+static inline void TraceStackRoots(JSTracer* trc,
+ JS::RootedListHeads& stackRoots) {
+#define TRACE_ROOTS(name, type, _, _1) \
+ TraceExactStackRootList<type*>(trc, stackRoots[JS::RootKind::name], \
+ "exact-" #name);
+ JS_FOR_EACH_TRACEKIND(TRACE_ROOTS)
+#undef TRACE_ROOTS
+ TraceExactStackRootList<jsid>(trc, stackRoots[JS::RootKind::Id], "exact-id");
+ TraceExactStackRootList<Value>(trc, stackRoots[JS::RootKind::Value],
+ "exact-value");
+
+ // RootedTraceable uses virtual dispatch.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ TraceExactStackRootList<ConcreteTraceable>(
+ trc, stackRoots[JS::RootKind::Traceable], "Traceable");
+}
+
+void JS::RootingContext::traceStackRoots(JSTracer* trc) {
+ TraceStackRoots(trc, stackRoots_);
+}
+
+static void TraceExactStackRoots(JSContext* cx, JSTracer* trc) {
+ cx->traceStackRoots(trc);
+}
+
+template <typename T>
+static inline void TracePersistentRootedList(
+ JSTracer* trc,
+ LinkedList<PersistentRooted<JS::detail::RootListEntry*>>& list,
+ const char* name) {
+ auto& typedList = reinterpret_cast<LinkedList<PersistentRooted<T>>&>(list);
+ for (PersistentRooted<T>* root : typedList) {
+ root->trace(trc, name);
+ }
+}
+
+void JSRuntime::tracePersistentRoots(JSTracer* trc) {
+#define TRACE_ROOTS(name, type, _, _1) \
+ TracePersistentRootedList<type*>(trc, heapRoots.ref()[JS::RootKind::name], \
+ "persistent-" #name);
+ JS_FOR_EACH_TRACEKIND(TRACE_ROOTS)
+#undef TRACE_ROOTS
+ TracePersistentRootedList<jsid>(trc, heapRoots.ref()[JS::RootKind::Id],
+ "persistent-id");
+ TracePersistentRootedList<Value>(trc, heapRoots.ref()[JS::RootKind::Value],
+ "persistent-value");
+
+ // RootedTraceable uses virtual dispatch.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ TracePersistentRootedList<ConcreteTraceable>(
+ trc, heapRoots.ref()[JS::RootKind::Traceable], "persistent-traceable");
+}
+
+static void TracePersistentRooted(JSRuntime* rt, JSTracer* trc) {
+ rt->tracePersistentRoots(trc);
+}
+
+template <typename T>
+static void FinishPersistentRootedChain(
+ LinkedList<PersistentRooted<JS::detail::RootListEntry*>>& listArg) {
+ auto& list = reinterpret_cast<LinkedList<PersistentRooted<T>>&>(listArg);
+ while (!list.isEmpty()) {
+ list.getFirst()->reset();
+ }
+}
+
+void JSRuntime::finishPersistentRoots() {
+#define FINISH_ROOT_LIST(name, type, _, _1) \
+ FinishPersistentRootedChain<type*>(heapRoots.ref()[JS::RootKind::name]);
+ JS_FOR_EACH_TRACEKIND(FINISH_ROOT_LIST)
+#undef FINISH_ROOT_LIST
+ FinishPersistentRootedChain<jsid>(heapRoots.ref()[JS::RootKind::Id]);
+ FinishPersistentRootedChain<Value>(heapRoots.ref()[JS::RootKind::Value]);
+
+ // Note that we do not finalize the Traceable list as we do not know how to
+ // safely clear members. We instead assert that none escape the RootLists.
+ // See the comment on RootLists::~RootLists for details.
+}
+
+JS_PUBLIC_API void js::TraceValueArray(JSTracer* trc, size_t length,
+ Value* elements) {
+ TraceRootRange(trc, length, elements, "JS::RootedValueArray");
+}
+
+void AutoGCRooter::trace(JSTracer* trc) {
+ switch (kind_) {
+ case Kind::Wrapper:
+ static_cast<AutoWrapperRooter*>(this)->trace(trc);
+ break;
+
+ case Kind::WrapperVector:
+ static_cast<AutoWrapperVector*>(this)->trace(trc);
+ break;
+
+ case Kind::Custom:
+ static_cast<JS::CustomAutoRooter*>(this)->trace(trc);
+ break;
+
+ default:
+ MOZ_CRASH("Bad AutoGCRooter::Kind");
+ break;
+ }
+}
+
+void AutoWrapperRooter::trace(JSTracer* trc) {
+ /*
+ * We need to use TraceManuallyBarrieredEdge here because we trace wrapper
+ * roots in every slice. This is because of some rule-breaking in
+ * RemapAllWrappersForObject; see comment there.
+ */
+ TraceManuallyBarrieredEdge(trc, &value.get(), "js::AutoWrapperRooter.value");
+}
+
+void AutoWrapperVector::trace(JSTracer* trc) {
+ /*
+ * We need to use TraceManuallyBarrieredEdge here because we trace wrapper
+ * roots in every slice. This is because of some rule-breaking in
+ * RemapAllWrappersForObject; see comment there.
+ */
+ for (WrapperValue& value : *this) {
+ TraceManuallyBarrieredEdge(trc, &value.get(),
+ "js::AutoWrapperVector.vector");
+ }
+}
+
+void JS::RootingContext::traceAllGCRooters(JSTracer* trc) {
+ for (AutoGCRooter* list : autoGCRooters_) {
+ traceGCRooterList(trc, list);
+ }
+}
+
+void JS::RootingContext::traceWrapperGCRooters(JSTracer* trc) {
+ traceGCRooterList(trc, autoGCRooters_[AutoGCRooter::Kind::Wrapper]);
+ traceGCRooterList(trc, autoGCRooters_[AutoGCRooter::Kind::WrapperVector]);
+}
+
+/* static */
+inline void JS::RootingContext::traceGCRooterList(JSTracer* trc,
+ AutoGCRooter* head) {
+ for (AutoGCRooter* rooter = head; rooter; rooter = rooter->down) {
+ rooter->trace(trc);
+ }
+}
+
+void StackShape::trace(JSTracer* trc) {
+ if (base) {
+ TraceRoot(trc, &base, "StackShape base");
+ }
+
+ TraceRoot(trc, (jsid*)&propid, "StackShape id");
+
+ if ((attrs & JSPROP_GETTER) && rawGetter) {
+ TraceRoot(trc, (JSObject**)&rawGetter, "StackShape getter");
+ }
+
+ if ((attrs & JSPROP_SETTER) && rawSetter) {
+ TraceRoot(trc, (JSObject**)&rawSetter, "StackShape setter");
+ }
+}
+
+void PropertyDescriptor::trace(JSTracer* trc) {
+ if (obj) {
+ TraceRoot(trc, &obj, "Descriptor::obj");
+ }
+ TraceRoot(trc, &value, "Descriptor::value");
+ if ((attrs & JSPROP_GETTER) && getter) {
+ JSObject* tmp = JS_FUNC_TO_DATA_PTR(JSObject*, getter);
+ TraceRoot(trc, &tmp, "Descriptor::get");
+ getter = JS_DATA_TO_FUNC_PTR(JSGetterOp, tmp);
+ }
+ if ((attrs & JSPROP_SETTER) && setter) {
+ JSObject* tmp = JS_FUNC_TO_DATA_PTR(JSObject*, setter);
+ TraceRoot(trc, &tmp, "Descriptor::set");
+ setter = JS_DATA_TO_FUNC_PTR(JSSetterOp, tmp);
+ }
+}
+
+void js::gc::GCRuntime::traceRuntimeForMajorGC(JSTracer* trc,
+ AutoGCSession& session) {
+ MOZ_ASSERT(!TlsContext.get()->suppressGC);
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
+
+ // We only need to trace atoms when we're marking; atoms are never moved by
+ // compacting GC.
+ if (atomsZone->isGCMarking()) {
+ traceRuntimeAtoms(trc, session.checkAtomsAccess());
+ }
+
+ {
+ // Trace incoming cross compartment edges from uncollected compartments,
+ // skipping gray edges which are traced later.
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_CCWS);
+ Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
+ trc, Compartment::NonGrayEdges);
+ }
+
+ markFinalizationRegistryRoots(trc);
+
+ traceRuntimeCommon(trc, MarkRuntime);
+}
+
+void js::gc::GCRuntime::traceRuntimeForMinorGC(JSTracer* trc,
+ AutoGCSession& session) {
+ MOZ_ASSERT(!TlsContext.get()->suppressGC);
+
+ // Note that we *must* trace the runtime during the SHUTDOWN_GC's minor GC
+ // despite having called FinishRoots already. This is because FinishRoots
+ // does not clear the crossCompartmentWrapper map. It cannot do this
+ // because Proxy's trace for CrossCompartmentWrappers asserts presence in
+ // the map. And we can reach its trace function despite having finished the
+ // roots via the edges stored by the pre-barrier verifier when we finish
+ // the verifier for the last time.
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
+
+ traceRuntimeCommon(trc, TraceRuntime);
+}
+
+void js::TraceRuntime(JSTracer* trc) {
+ MOZ_ASSERT(!trc->isMarkingTracer());
+
+ JSRuntime* rt = trc->runtime();
+ AutoEmptyNurseryAndPrepareForTracing prep(rt->mainContextFromOwnThread());
+ gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::TRACE_HEAP);
+ rt->gc.traceRuntime(trc, prep);
+}
+
+void js::TraceRuntimeWithoutEviction(JSTracer* trc) {
+ MOZ_ASSERT(!trc->isMarkingTracer());
+
+ JSRuntime* rt = trc->runtime();
+ AutoTraceSession session(rt);
+ gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::TRACE_HEAP);
+ rt->gc.traceRuntime(trc, session);
+}
+
+void js::gc::GCRuntime::traceRuntime(JSTracer* trc, AutoTraceSession& session) {
+ MOZ_ASSERT(!rt->isBeingDestroyed());
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
+
+ traceRuntimeAtoms(trc, session);
+ traceRuntimeCommon(trc, TraceRuntime);
+}
+
+void js::gc::GCRuntime::traceRuntimeAtoms(JSTracer* trc,
+ const AutoAccessAtomsZone& access) {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_RUNTIME_DATA);
+ rt->tracePermanentAtoms(trc);
+ TraceAtoms(trc, access);
+ TraceWellKnownSymbols(trc);
+ jit::JitRuntime::TraceAtomZoneRoots(trc, access);
+}
+
+void js::gc::GCRuntime::traceRuntimeCommon(JSTracer* trc,
+ TraceOrMarkRuntime traceOrMark) {
+ {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_STACK);
+
+ JSContext* cx = rt->mainContextFromOwnThread();
+
+ // Trace active interpreter and JIT stack roots.
+ TraceInterpreterActivations(cx, trc);
+ jit::TraceJitActivations(cx, trc);
+
+ // Trace legacy C stack roots.
+ cx->traceAllGCRooters(trc);
+
+ // Trace C stack roots.
+ TraceExactStackRoots(cx, trc);
+
+ for (RootRange r = rootsHash.ref().all(); !r.empty(); r.popFront()) {
+ const RootEntry& entry = r.front();
+ TraceRoot(trc, entry.key(), entry.value());
+ }
+ }
+
+ // Trace runtime global roots.
+ TracePersistentRooted(rt, trc);
+
+ // Trace the self-hosting global compartment.
+ rt->traceSelfHostingGlobal(trc);
+
+#ifdef JS_HAS_INTL_API
+ // Trace the shared Intl data.
+ rt->traceSharedIntlData(trc);
+#endif
+
+ // Trace the JSContext.
+ rt->mainContextFromOwnThread()->trace(trc);
+
+ // Trace all realm roots, but not the realm itself; it is traced via the
+ // parent pointer if traceRoots actually traces anything.
+ for (RealmsIter r(rt); !r.done(); r.next()) {
+ r->traceRoots(trc, traceOrMark);
+ }
+
+ // Trace zone script-table roots. See comment in
+ // Zone::traceScriptTableRoots() for justification re: calling this only
+ // during major (non-nursery) collections.
+ if (!JS::RuntimeHeapIsMinorCollecting()) {
+ for (ZonesIter zone(this, ZoneSelector::SkipAtoms); !zone.done();
+ zone.next()) {
+ zone->traceScriptTableRoots(trc);
+ }
+ }
+
+ // Trace helper thread roots.
+ HelperThreadState().trace(trc);
+
+ // Trace Debugger.Frames that have live hooks, since dropping them would be
+ // observable. In effect, they are rooted by the stack frames.
+ DebugAPI::traceFramesWithLiveHooks(trc);
+
+ // Trace the embedding's black and gray roots.
+ if (!JS::RuntimeHeapIsMinorCollecting()) {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_EMBEDDING);
+
+ /*
+ * The embedding can register additional roots here.
+ *
+ * We don't need to trace these in a minor GC because all pointers into
+ * the nursery should be in the store buffer, and we want to avoid the
+ * time taken to trace all these roots.
+ */
+ traceEmbeddingBlackRoots(trc);
+
+ /* During GC, we don't trace gray roots at this stage. */
+ if (traceOrMark == TraceRuntime) {
+ traceEmbeddingGrayRoots(trc);
+ }
+ }
+
+ traceKeptObjects(trc);
+}
+
+void GCRuntime::traceEmbeddingBlackRoots(JSTracer* trc) {
+ // The analysis doesn't like the function pointer below.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ for (size_t i = 0; i < blackRootTracers.ref().length(); i++) {
+ const Callback<JSTraceDataOp>& e = blackRootTracers.ref()[i];
+ (*e.op)(trc, e.data);
+ }
+}
+
+void GCRuntime::traceEmbeddingGrayRoots(JSTracer* trc) {
+ // The analysis doesn't like the function pointer below.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ const auto& callback = grayRootTracer.ref();
+ if (JSTraceDataOp op = callback.op) {
+ (*op)(trc, callback.data);
+ }
+}
+
+#ifdef DEBUG
+class AssertNoRootsTracer final : public JS::CallbackTracer {
+ void onChild(const JS::GCCellPtr& thing) override {
+ MOZ_CRASH("There should not be any roots during runtime shutdown");
+ }
+
+ public:
+ explicit AssertNoRootsTracer(JSRuntime* rt)
+ : JS::CallbackTracer(rt, JS::TracerKind::Callback,
+ JS::WeakMapTraceAction::TraceKeysAndValues) {}
+};
+#endif // DEBUG
+
+void js::gc::GCRuntime::finishRoots() {
+ AutoNoteSingleThreadedRegion anstr;
+
+ rt->finishParserAtoms();
+ rt->finishAtoms();
+
+ rootsHash.ref().clear();
+
+ rt->finishPersistentRoots();
+
+ rt->finishSelfHosting();
+ selfHostingZoneFrozen = false;
+
+ for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
+ zone->finishRoots();
+ }
+
+#ifdef JS_GC_ZEAL
+ clearSelectedForMarking();
+#endif
+
+ // Clear any remaining roots from the embedding (as otherwise they will be
+ // left dangling after we shut down) and remove the callbacks.
+ ClearEdgesTracer trc(rt);
+ traceEmbeddingBlackRoots(&trc);
+ traceEmbeddingGrayRoots(&trc);
+ clearBlackAndGrayRootTracers();
+}
+
+void js::gc::GCRuntime::checkNoRuntimeRoots(AutoGCSession& session) {
+#ifdef DEBUG
+ AssertNoRootsTracer trc(rt);
+ traceRuntimeForMajorGC(&trc, session);
+#endif // DEBUG
+}
+
+// Append traced things to a buffer on the zone for use later in the GC.
+// See the comment in GCRuntime.h above grayBufferState for details.
+class BufferGrayRootsTracer final : public GenericTracer {
+ // Set to false if we OOM while buffering gray roots.
+ bool bufferingGrayRootsFailed;
+
+ JSObject* onObjectEdge(JSObject* obj) override { return bufferRoot(obj); }
+ JSString* onStringEdge(JSString* string) override {
+ return bufferRoot(string);
+ }
+ js::BaseScript* onScriptEdge(js::BaseScript* script) override {
+ return bufferRoot(script);
+ }
+ JS::Symbol* onSymbolEdge(JS::Symbol* symbol) override {
+ return bufferRoot(symbol);
+ }
+ JS::BigInt* onBigIntEdge(JS::BigInt* bi) override { return bufferRoot(bi); }
+
+ js::Shape* onShapeEdge(js::Shape* shape) override {
+ unsupportedEdge();
+ return nullptr;
+ }
+ js::ObjectGroup* onObjectGroupEdge(js::ObjectGroup* group) override {
+ unsupportedEdge();
+ return nullptr;
+ }
+ js::BaseShape* onBaseShapeEdge(js::BaseShape* base) override {
+ unsupportedEdge();
+ return nullptr;
+ }
+ js::jit::JitCode* onJitCodeEdge(js::jit::JitCode* code) override {
+ unsupportedEdge();
+ return nullptr;
+ }
+ js::Scope* onScopeEdge(js::Scope* scope) override {
+ unsupportedEdge();
+ return nullptr;
+ }
+ js::RegExpShared* onRegExpSharedEdge(js::RegExpShared* shared) override {
+ unsupportedEdge();
+ return nullptr;
+ }
+
+ void unsupportedEdge() { MOZ_CRASH("Unsupported gray root edge kind"); }
+
+ template <typename T>
+ inline T* bufferRoot(T* thing);
+
+ public:
+ explicit BufferGrayRootsTracer(JSRuntime* rt)
+ : GenericTracer(rt, JS::TracerKind::GrayBuffering),
+ bufferingGrayRootsFailed(false) {}
+
+ bool failed() const { return bufferingGrayRootsFailed; }
+ void setFailed() { bufferingGrayRootsFailed = true; }
+};
+
+void js::gc::GCRuntime::bufferGrayRoots() {
+ // Precondition: the state has been reset to "unused" after the last GC
+ // and the zone's buffers have been cleared.
+ MOZ_ASSERT(grayBufferState == GrayBufferState::Unused);
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ MOZ_ASSERT(zone->gcGrayRoots().IsEmpty());
+ }
+
+ BufferGrayRootsTracer grayBufferer(rt);
+ traceEmbeddingGrayRoots(&grayBufferer);
+ Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
+ &grayBufferer, Compartment::GrayEdges);
+
+ // Propagate the failure flag from the marker to the runtime.
+ if (grayBufferer.failed()) {
+ grayBufferState = GrayBufferState::Failed;
+ resetBufferedGrayRoots();
+ } else {
+ grayBufferState = GrayBufferState::Okay;
+ }
+}
+
+template <typename T>
+inline T* BufferGrayRootsTracer::bufferRoot(T* thing) {
+ MOZ_ASSERT(JS::RuntimeHeapIsBusy());
+ MOZ_ASSERT(thing);
+ // Check if |thing| is corrupt by calling a method that touches the heap.
+ MOZ_ASSERT(thing->getTraceKind() != JS::TraceKind(0xff));
+
+ TenuredCell* tenured = &thing->asTenured();
+
+ // This is run from a helper thread while the mutator is paused so we have
+ // to use *FromAnyThread methods here.
+ Zone* zone = tenured->zoneFromAnyThread();
+ if (zone->isCollectingFromAnyThread()) {
+    // See the comment on SetMaybeAliveFlag for why we only do this for
+    // objects and scripts. We rely on gray root buffering for this to work,
+ // but we only need to worry about uncollected dead compartments during
+ // incremental GCs (when we do gray root buffering).
+ SetMaybeAliveFlag(thing);
+
+ if (!zone->gcGrayRoots().Append(tenured)) {
+ bufferingGrayRootsFailed = true;
+ }
+ }
+
+ return thing;
+}
+
+void GCRuntime::markBufferedGrayRoots(JS::Zone* zone) {
+ MOZ_ASSERT(grayBufferState == GrayBufferState::Okay);
+ MOZ_ASSERT(zone->isGCMarkingBlackAndGray() || zone->isGCCompacting());
+
+ auto& roots = zone->gcGrayRoots();
+ if (roots.IsEmpty()) {
+ return;
+ }
+
+ for (auto iter = roots.Iter(); !iter.Done(); iter.Next()) {
+ Cell* cell = iter.Get();
+
+ // Bug 1203273: Check for bad pointers on OSX and output diagnostics.
+#if defined(XP_DARWIN) && defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
+ auto addr = uintptr_t(cell);
+ if (addr < ChunkSize || addr % CellAlignBytes != 0) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "Bad GC thing pointer in gray root buffer: %p at address %p", cell,
+ &iter.Get());
+ }
+#else
+ MOZ_ASSERT(IsCellPointerValid(cell));
+#endif
+
+ TraceManuallyBarrieredGenericPointerEdge(&marker, &cell,
+ "buffered gray root");
+ }
+}
+
+void GCRuntime::resetBufferedGrayRoots() {
+ MOZ_ASSERT(
+ grayBufferState != GrayBufferState::Okay,
+ "Do not clear the gray buffers unless we are Failed or becoming Unused");
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->gcGrayRoots().Clear();
+ }
+}
+
+JS_PUBLIC_API void JS::AddPersistentRoot(
+ JS::RootingContext* cx, RootKind kind,
+ PersistentRooted<JS::detail::RootListEntry*>* root) {
+ static_cast<JSContext*>(cx)->runtime()->heapRoots.ref()[kind].insertBack(
+ root);
+}
+
+JS_PUBLIC_API void JS::AddPersistentRoot(
+ JSRuntime* rt, RootKind kind,
+ PersistentRooted<JS::detail::RootListEntry*>* root) {
+ rt->heapRoots.ref()[kind].insertBack(root);
+}
diff --git a/js/src/gc/Rooting.h b/js/src/gc/Rooting.h
new file mode 100644
index 0000000000..1564f41e3e
--- /dev/null
+++ b/js/src/gc/Rooting.h
@@ -0,0 +1,102 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Rooting_h
+#define gc_Rooting_h
+
+#include "gc/Allocator.h"
+#include "gc/Policy.h"
+#include "js/GCVector.h"
+#include "js/RootingAPI.h"
+#include "js/TypeDecls.h"
+
+class JSLinearString;
+
+namespace js {
+
+class PropertyName;
+class NativeObject;
+class ArrayObject;
+class GlobalObject;
+class PlainObject;
+class ScriptSourceObject;
+class SavedFrame;
+class Shape;
+class ObjectGroup;
+class DebuggerArguments;
+class DebuggerEnvironment;
+class DebuggerFrame;
+class DebuggerObject;
+class DebuggerScript;
+class DebuggerSource;
+class Scope;
+class ModuleObject;
+
+// These are internal counterparts to the public types such as HandleObject.
+
+using HandleNativeObject = JS::Handle<NativeObject*>;
+using HandleShape = JS::Handle<Shape*>;
+using HandleObjectGroup = JS::Handle<ObjectGroup*>;
+using HandleAtom = JS::Handle<JSAtom*>;
+using HandleLinearString = JS::Handle<JSLinearString*>;
+using HandlePropertyName = JS::Handle<PropertyName*>;
+using HandleArrayObject = JS::Handle<ArrayObject*>;
+using HandlePlainObject = JS::Handle<PlainObject*>;
+using HandleSavedFrame = JS::Handle<SavedFrame*>;
+using HandleScriptSourceObject = JS::Handle<ScriptSourceObject*>;
+using HandleDebuggerArguments = JS::Handle<DebuggerArguments*>;
+using HandleDebuggerEnvironment = JS::Handle<DebuggerEnvironment*>;
+using HandleDebuggerFrame = JS::Handle<DebuggerFrame*>;
+using HandleDebuggerObject = JS::Handle<DebuggerObject*>;
+using HandleDebuggerScript = JS::Handle<DebuggerScript*>;
+using HandleDebuggerSource = JS::Handle<DebuggerSource*>;
+using HandleScope = JS::Handle<Scope*>;
+using HandleModuleObject = JS::Handle<ModuleObject*>;
+
+using MutableHandleShape = JS::MutableHandle<Shape*>;
+using MutableHandleAtom = JS::MutableHandle<JSAtom*>;
+using MutableHandleNativeObject = JS::MutableHandle<NativeObject*>;
+using MutableHandlePlainObject = JS::MutableHandle<PlainObject*>;
+using MutableHandleSavedFrame = JS::MutableHandle<SavedFrame*>;
+using MutableHandleDebuggerArguments = JS::MutableHandle<DebuggerArguments*>;
+using MutableHandleDebuggerEnvironment =
+ JS::MutableHandle<DebuggerEnvironment*>;
+using MutableHandleDebuggerFrame = JS::MutableHandle<DebuggerFrame*>;
+using MutableHandleDebuggerObject = JS::MutableHandle<DebuggerObject*>;
+using MutableHandleDebuggerScript = JS::MutableHandle<DebuggerScript*>;
+using MutableHandleDebuggerSource = JS::MutableHandle<DebuggerSource*>;
+using MutableHandleScope = JS::MutableHandle<Scope*>;
+using MutableHandleModuleObject = JS::MutableHandle<ModuleObject*>;
+using MutableHandleArrayObject = JS::MutableHandle<ArrayObject*>;
+
+using RootedNativeObject = JS::Rooted<NativeObject*>;
+using RootedShape = JS::Rooted<Shape*>;
+using RootedObjectGroup = JS::Rooted<ObjectGroup*>;
+using RootedAtom = JS::Rooted<JSAtom*>;
+using RootedLinearString = JS::Rooted<JSLinearString*>;
+using RootedPropertyName = JS::Rooted<PropertyName*>;
+using RootedArrayObject = JS::Rooted<ArrayObject*>;
+using RootedGlobalObject = JS::Rooted<GlobalObject*>;
+using RootedPlainObject = JS::Rooted<PlainObject*>;
+using RootedSavedFrame = JS::Rooted<SavedFrame*>;
+using RootedScriptSourceObject = JS::Rooted<ScriptSourceObject*>;
+using RootedDebuggerArguments = JS::Rooted<DebuggerArguments*>;
+using RootedDebuggerEnvironment = JS::Rooted<DebuggerEnvironment*>;
+using RootedDebuggerFrame = JS::Rooted<DebuggerFrame*>;
+using RootedDebuggerObject = JS::Rooted<DebuggerObject*>;
+using RootedDebuggerScript = JS::Rooted<DebuggerScript*>;
+using RootedDebuggerSource = JS::Rooted<DebuggerSource*>;
+using RootedScope = JS::Rooted<Scope*>;
+using RootedModuleObject = JS::Rooted<ModuleObject*>;
+
+using FunctionVector = JS::GCVector<JSFunction*>;
+using PropertyNameVector = JS::GCVector<PropertyName*>;
+using ShapeVector = JS::GCVector<Shape*>;
+using StringVector = JS::GCVector<JSString*>;
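+
+// Illustrative usage (not part of the original header), assuming |cx| is a
+// JSContext* and |someShape| is a Shape*:
+//
+//   RootedShape shape(cx, someShape);   // equivalent to JS::Rooted<Shape*>
+//   HandleShape handle = shape;         // equivalent to JS::Handle<Shape*>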
+
+} /* namespace js */
+
+#endif /* gc_Rooting_h */
diff --git a/js/src/gc/Scheduling.cpp b/js/src/gc/Scheduling.cpp
new file mode 100644
index 0000000000..29613aaedb
--- /dev/null
+++ b/js/src/gc/Scheduling.cpp
@@ -0,0 +1,826 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Scheduling.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/TimeStamp.h"
+
+#include <algorithm>
+
+#include "gc/Nursery.h"
+#include "gc/RelocationOverlay.h"
+#include "gc/ZoneAllocator.h"
+#include "vm/MutexIDs.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::CheckedInt;
+using mozilla::TimeDuration;
+
+/*
+ * We may start to collect a zone before its trigger threshold is reached if
+ * GCRuntime::maybeGC() is called for that zone or we start collecting other
+ * zones. These eager threshold factors are not configurable.
+ */
+static constexpr double HighFrequencyEagerAllocTriggerFactor = 0.85;
+static constexpr double LowFrequencyEagerAllocTriggerFactor = 0.9;
+
+/*
+ * Don't allow heap growth factors to be set so low that eager collections could
+ * reduce the trigger threshold.
+ */
+static constexpr double MinHeapGrowthFactor =
+ 1.0f / std::min(HighFrequencyEagerAllocTriggerFactor,
+ LowFrequencyEagerAllocTriggerFactor);
+
+GCSchedulingTunables::GCSchedulingTunables()
+ : gcMaxBytes_(0),
+ gcMinNurseryBytes_(Nursery::roundSize(TuningDefaults::GCMinNurseryBytes)),
+ gcMaxNurseryBytes_(Nursery::roundSize(JS::DefaultNurseryMaxBytes)),
+ gcZoneAllocThresholdBase_(TuningDefaults::GCZoneAllocThresholdBase),
+ smallHeapIncrementalLimit_(TuningDefaults::SmallHeapIncrementalLimit),
+ largeHeapIncrementalLimit_(TuningDefaults::LargeHeapIncrementalLimit),
+ zoneAllocDelayBytes_(TuningDefaults::ZoneAllocDelayBytes),
+ highFrequencyThreshold_(
+ TimeDuration::FromSeconds(TuningDefaults::HighFrequencyThreshold)),
+ smallHeapSizeMaxBytes_(TuningDefaults::SmallHeapSizeMaxBytes),
+ largeHeapSizeMinBytes_(TuningDefaults::LargeHeapSizeMinBytes),
+ highFrequencySmallHeapGrowth_(
+ TuningDefaults::HighFrequencySmallHeapGrowth),
+ highFrequencyLargeHeapGrowth_(
+ TuningDefaults::HighFrequencyLargeHeapGrowth),
+ lowFrequencyHeapGrowth_(TuningDefaults::LowFrequencyHeapGrowth),
+ minEmptyChunkCount_(TuningDefaults::MinEmptyChunkCount),
+ maxEmptyChunkCount_(TuningDefaults::MaxEmptyChunkCount),
+ nurseryFreeThresholdForIdleCollection_(
+ TuningDefaults::NurseryFreeThresholdForIdleCollection),
+ nurseryFreeThresholdForIdleCollectionFraction_(
+ TuningDefaults::NurseryFreeThresholdForIdleCollectionFraction),
+ pretenureThreshold_(TuningDefaults::PretenureThreshold),
+ pretenureGroupThreshold_(TuningDefaults::PretenureGroupThreshold),
+ pretenureStringThreshold_(TuningDefaults::PretenureStringThreshold),
+ stopPretenureStringThreshold_(
+ TuningDefaults::StopPretenureStringThreshold),
+ minLastDitchGCPeriod_(
+ TimeDuration::FromSeconds(TuningDefaults::MinLastDitchGCPeriod)),
+ mallocThresholdBase_(TuningDefaults::MallocThresholdBase),
+ mallocGrowthFactor_(TuningDefaults::MallocGrowthFactor) {}
+
+bool GCSchedulingTunables::setParameter(JSGCParamKey key, uint32_t value,
+ const AutoLockGC& lock) {
+  // Limit heap growth factors so the heap can grow to at most one hundred
+  // times its current size.
+ const double MaxHeapGrowthFactor = 100;
+ const size_t MaxNurseryBytes = 128 * 1024 * 1024;
+
+ switch (key) {
+ case JSGC_MAX_BYTES:
+ gcMaxBytes_ = value;
+ break;
+ case JSGC_MIN_NURSERY_BYTES:
+ if (value < ArenaSize || value >= MaxNurseryBytes) {
+ return false;
+ }
+ value = Nursery::roundSize(value);
+ if (value > gcMaxNurseryBytes_) {
+ return false;
+ }
+ gcMinNurseryBytes_ = value;
+ break;
+ case JSGC_MAX_NURSERY_BYTES:
+ if (value < ArenaSize || value >= MaxNurseryBytes) {
+ return false;
+ }
+ value = Nursery::roundSize(value);
+ if (value < gcMinNurseryBytes_) {
+ return false;
+ }
+ gcMaxNurseryBytes_ = value;
+ break;
+ case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
+ highFrequencyThreshold_ = TimeDuration::FromMilliseconds(value);
+ break;
+ case JSGC_SMALL_HEAP_SIZE_MAX: {
+ CheckedInt<size_t> newLimit = CheckedInt<size_t>(value) * 1024 * 1024;
+ if (!newLimit.isValid()) {
+ return false;
+ }
+ setSmallHeapSizeMaxBytes(newLimit.value());
+ break;
+ }
+ case JSGC_LARGE_HEAP_SIZE_MIN: {
+ size_t newLimit = (size_t)value * 1024 * 1024;
+ if (newLimit == 0) {
+ return false;
+ }
+ setLargeHeapSizeMinBytes(newLimit);
+ break;
+ }
+ case JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH: {
+ double newGrowth = value / 100.0;
+ if (newGrowth < MinHeapGrowthFactor || newGrowth > MaxHeapGrowthFactor) {
+ return false;
+ }
+ setHighFrequencySmallHeapGrowth(newGrowth);
+ break;
+ }
+ case JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH: {
+ double newGrowth = value / 100.0;
+ if (newGrowth < MinHeapGrowthFactor || newGrowth > MaxHeapGrowthFactor) {
+ return false;
+ }
+ setHighFrequencyLargeHeapGrowth(newGrowth);
+ break;
+ }
+ case JSGC_LOW_FREQUENCY_HEAP_GROWTH: {
+ double newGrowth = value / 100.0;
+ if (newGrowth < MinHeapGrowthFactor || newGrowth > MaxHeapGrowthFactor) {
+ return false;
+ }
+ setLowFrequencyHeapGrowth(newGrowth);
+ break;
+ }
+ case JSGC_ALLOCATION_THRESHOLD:
+ gcZoneAllocThresholdBase_ = value * 1024 * 1024;
+ break;
+ case JSGC_SMALL_HEAP_INCREMENTAL_LIMIT: {
+ double newFactor = value / 100.0;
+ if (newFactor < 1.0f || newFactor > MaxHeapGrowthFactor) {
+ return false;
+ }
+ smallHeapIncrementalLimit_ = newFactor;
+ break;
+ }
+ case JSGC_LARGE_HEAP_INCREMENTAL_LIMIT: {
+ double newFactor = value / 100.0;
+ if (newFactor < 1.0f || newFactor > MaxHeapGrowthFactor) {
+ return false;
+ }
+ largeHeapIncrementalLimit_ = newFactor;
+ break;
+ }
+ case JSGC_MIN_EMPTY_CHUNK_COUNT:
+ setMinEmptyChunkCount(value);
+ break;
+ case JSGC_MAX_EMPTY_CHUNK_COUNT:
+ setMaxEmptyChunkCount(value);
+ break;
+ case JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION:
+ if (value > gcMaxNurseryBytes()) {
+ value = gcMaxNurseryBytes();
+ }
+ nurseryFreeThresholdForIdleCollection_ = value;
+ break;
+ case JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT:
+ if (value == 0 || value > 100) {
+ return false;
+ }
+ nurseryFreeThresholdForIdleCollectionFraction_ = value / 100.0;
+ break;
+ case JSGC_PRETENURE_THRESHOLD: {
+ // 100 disables pretenuring
+ if (value == 0 || value > 100) {
+ return false;
+ }
+ pretenureThreshold_ = value / 100.0;
+ break;
+ }
+ case JSGC_PRETENURE_GROUP_THRESHOLD:
+      if (value == 0) {
+ return false;
+ }
+ pretenureGroupThreshold_ = value;
+ break;
+ case JSGC_PRETENURE_STRING_THRESHOLD:
+ // 100 disables pretenuring
+ if (value == 0 || value > 100) {
+ return false;
+ }
+ pretenureStringThreshold_ = value / 100.0;
+ break;
+ case JSGC_STOP_PRETENURE_STRING_THRESHOLD:
+ if (value == 0 || value > 100) {
+ return false;
+ }
+ stopPretenureStringThreshold_ = value / 100.0;
+ break;
+ case JSGC_MIN_LAST_DITCH_GC_PERIOD:
+ minLastDitchGCPeriod_ = TimeDuration::FromSeconds(value);
+ break;
+ case JSGC_ZONE_ALLOC_DELAY_KB:
+ zoneAllocDelayBytes_ = value * 1024;
+ break;
+ case JSGC_MALLOC_THRESHOLD_BASE:
+ mallocThresholdBase_ = value * 1024 * 1024;
+ break;
+ case JSGC_MALLOC_GROWTH_FACTOR: {
+ double newGrowth = value / 100.0;
+ if (newGrowth < MinHeapGrowthFactor || newGrowth > MaxHeapGrowthFactor) {
+ return false;
+ }
+ mallocGrowthFactor_ = newGrowth;
+ break;
+ }
+ default:
+ MOZ_CRASH("Unknown GC parameter.");
+ }
+
+ return true;
+}
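+// Illustrative example (not part of the original source): growth-factor and
+// incremental-limit parameters are passed to setParameter() as percentages,
+// so JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH = 300 yields a growth factor of
+// 3.0 (the default); out-of-range values make setParameter() return false
+// rather than being clamped.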
+
+void GCSchedulingTunables::setSmallHeapSizeMaxBytes(size_t value) {
+ smallHeapSizeMaxBytes_ = value;
+ if (smallHeapSizeMaxBytes_ >= largeHeapSizeMinBytes_) {
+ largeHeapSizeMinBytes_ = smallHeapSizeMaxBytes_ + 1;
+ }
+ MOZ_ASSERT(largeHeapSizeMinBytes_ > smallHeapSizeMaxBytes_);
+}
+
+void GCSchedulingTunables::setLargeHeapSizeMinBytes(size_t value) {
+ largeHeapSizeMinBytes_ = value;
+ if (largeHeapSizeMinBytes_ <= smallHeapSizeMaxBytes_) {
+ smallHeapSizeMaxBytes_ = largeHeapSizeMinBytes_ - 1;
+ }
+ MOZ_ASSERT(largeHeapSizeMinBytes_ > smallHeapSizeMaxBytes_);
+}
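+// Example (illustrative): with the defaults (small-heap max 100 MiB,
+// large-heap min 500 MiB), calling setSmallHeapSizeMaxBytes() with 600 MiB
+// also bumps largeHeapSizeMinBytes_ to 600 MiB + 1 so the two thresholds
+// never cross; setLargeHeapSizeMinBytes() adjusts in the opposite direction.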
+
+void GCSchedulingTunables::setHighFrequencyLargeHeapGrowth(double value) {
+ highFrequencyLargeHeapGrowth_ = value;
+ if (highFrequencyLargeHeapGrowth_ > highFrequencySmallHeapGrowth_) {
+ highFrequencySmallHeapGrowth_ = highFrequencyLargeHeapGrowth_;
+ }
+ MOZ_ASSERT(highFrequencyLargeHeapGrowth_ >= MinHeapGrowthFactor);
+ MOZ_ASSERT(highFrequencyLargeHeapGrowth_ <= highFrequencySmallHeapGrowth_);
+}
+
+void GCSchedulingTunables::setHighFrequencySmallHeapGrowth(double value) {
+ highFrequencySmallHeapGrowth_ = value;
+ if (highFrequencySmallHeapGrowth_ < highFrequencyLargeHeapGrowth_) {
+ highFrequencyLargeHeapGrowth_ = highFrequencySmallHeapGrowth_;
+ }
+ MOZ_ASSERT(highFrequencyLargeHeapGrowth_ >= MinHeapGrowthFactor);
+ MOZ_ASSERT(highFrequencyLargeHeapGrowth_ <= highFrequencySmallHeapGrowth_);
+}
+
+void GCSchedulingTunables::setLowFrequencyHeapGrowth(double value) {
+ lowFrequencyHeapGrowth_ = value;
+ MOZ_ASSERT(lowFrequencyHeapGrowth_ >= MinHeapGrowthFactor);
+}
+
+void GCSchedulingTunables::setMinEmptyChunkCount(uint32_t value) {
+ minEmptyChunkCount_ = value;
+ if (minEmptyChunkCount_ > maxEmptyChunkCount_) {
+ maxEmptyChunkCount_ = minEmptyChunkCount_;
+ }
+ MOZ_ASSERT(maxEmptyChunkCount_ >= minEmptyChunkCount_);
+}
+
+void GCSchedulingTunables::setMaxEmptyChunkCount(uint32_t value) {
+ maxEmptyChunkCount_ = value;
+ if (minEmptyChunkCount_ > maxEmptyChunkCount_) {
+ minEmptyChunkCount_ = maxEmptyChunkCount_;
+ }
+ MOZ_ASSERT(maxEmptyChunkCount_ >= minEmptyChunkCount_);
+}
+
+void GCSchedulingTunables::resetParameter(JSGCParamKey key,
+ const AutoLockGC& lock) {
+ switch (key) {
+ case JSGC_MAX_BYTES:
+ gcMaxBytes_ = 0xffffffff;
+ break;
+ case JSGC_MIN_NURSERY_BYTES:
+ case JSGC_MAX_NURSERY_BYTES:
+      // Reset these together to maintain their min <= max invariant.
+ gcMinNurseryBytes_ = TuningDefaults::GCMinNurseryBytes;
+ gcMaxNurseryBytes_ = JS::DefaultNurseryMaxBytes;
+ break;
+ case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
+ highFrequencyThreshold_ =
+ TimeDuration::FromSeconds(TuningDefaults::HighFrequencyThreshold);
+ break;
+ case JSGC_SMALL_HEAP_SIZE_MAX:
+ setSmallHeapSizeMaxBytes(TuningDefaults::SmallHeapSizeMaxBytes);
+ break;
+ case JSGC_LARGE_HEAP_SIZE_MIN:
+ setLargeHeapSizeMinBytes(TuningDefaults::LargeHeapSizeMinBytes);
+ break;
+ case JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH:
+ setHighFrequencySmallHeapGrowth(
+ TuningDefaults::HighFrequencySmallHeapGrowth);
+ break;
+ case JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH:
+ setHighFrequencyLargeHeapGrowth(
+ TuningDefaults::HighFrequencyLargeHeapGrowth);
+ break;
+ case JSGC_LOW_FREQUENCY_HEAP_GROWTH:
+ setLowFrequencyHeapGrowth(TuningDefaults::LowFrequencyHeapGrowth);
+ break;
+ case JSGC_ALLOCATION_THRESHOLD:
+ gcZoneAllocThresholdBase_ = TuningDefaults::GCZoneAllocThresholdBase;
+ break;
+ case JSGC_SMALL_HEAP_INCREMENTAL_LIMIT:
+ smallHeapIncrementalLimit_ = TuningDefaults::SmallHeapIncrementalLimit;
+ break;
+ case JSGC_LARGE_HEAP_INCREMENTAL_LIMIT:
+ largeHeapIncrementalLimit_ = TuningDefaults::LargeHeapIncrementalLimit;
+ break;
+ case JSGC_MIN_EMPTY_CHUNK_COUNT:
+ setMinEmptyChunkCount(TuningDefaults::MinEmptyChunkCount);
+ break;
+ case JSGC_MAX_EMPTY_CHUNK_COUNT:
+ setMaxEmptyChunkCount(TuningDefaults::MaxEmptyChunkCount);
+ break;
+ case JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION:
+ nurseryFreeThresholdForIdleCollection_ =
+ TuningDefaults::NurseryFreeThresholdForIdleCollection;
+ break;
+ case JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT:
+ nurseryFreeThresholdForIdleCollectionFraction_ =
+ TuningDefaults::NurseryFreeThresholdForIdleCollectionFraction;
+ break;
+ case JSGC_PRETENURE_THRESHOLD:
+ pretenureThreshold_ = TuningDefaults::PretenureThreshold;
+ break;
+ case JSGC_PRETENURE_GROUP_THRESHOLD:
+ pretenureGroupThreshold_ = TuningDefaults::PretenureGroupThreshold;
+ break;
+ case JSGC_PRETENURE_STRING_THRESHOLD:
+ pretenureStringThreshold_ = TuningDefaults::PretenureStringThreshold;
+ break;
+ case JSGC_MIN_LAST_DITCH_GC_PERIOD:
+ minLastDitchGCPeriod_ =
+ TimeDuration::FromSeconds(TuningDefaults::MinLastDitchGCPeriod);
+ break;
+ case JSGC_MALLOC_THRESHOLD_BASE:
+ mallocThresholdBase_ = TuningDefaults::MallocThresholdBase;
+ break;
+ case JSGC_MALLOC_GROWTH_FACTOR:
+ mallocGrowthFactor_ = TuningDefaults::MallocGrowthFactor;
+ break;
+ default:
+ MOZ_CRASH("Unknown GC parameter.");
+ }
+}
+
+// GC thresholds may exceed the range of size_t on 32-bit platforms, so these
+// are calculated using 64-bit integers and clamped.
+static inline size_t ToClampedSize(uint64_t bytes) {
+ return std::min(bytes, uint64_t(SIZE_MAX));
+}
+
+void HeapThreshold::setIncrementalLimitFromStartBytes(
+ size_t retainedBytes, const GCSchedulingTunables& tunables) {
+ // Calculate the incremental limit for a heap based on its size and start
+ // threshold.
+ //
+ // This effectively classifies the heap size into small, medium or large, and
+  // uses the small heap incremental limit parameter, the large heap
+  // incremental limit parameter, or an interpolation between them.
+ //
+ // The incremental limit is always set greater than the start threshold by at
+ // least the maximum nursery size to reduce the chance that tenuring a full
+ // nursery will send us straight into non-incremental collection.
+
+ MOZ_ASSERT(tunables.smallHeapIncrementalLimit() >=
+ tunables.largeHeapIncrementalLimit());
+
+ double factor = LinearInterpolate(
+ retainedBytes, tunables.smallHeapSizeMaxBytes(),
+ tunables.smallHeapIncrementalLimit(), tunables.largeHeapSizeMinBytes(),
+ tunables.largeHeapIncrementalLimit());
+
+ uint64_t bytes =
+ std::max(uint64_t(double(startBytes_) * factor),
+ uint64_t(startBytes_) + tunables.gcMaxNurseryBytes());
+ incrementalLimitBytes_ = ToClampedSize(bytes);
+ MOZ_ASSERT(incrementalLimitBytes_ >= startBytes_);
+}
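+// Worked example (illustrative, using the defaults from TuningDefaults in
+// Scheduling.h and assuming LinearInterpolate clamps at its endpoints): with
+// incremental limits of 1.40 (small heap) and 1.10 (large heap), a retained
+// size at or below 100 MiB gives an incremental limit of startBytes_ * 1.40,
+// a retained size at or above 500 MiB gives startBytes_ * 1.10, and sizes in
+// between interpolate linearly; in every case the limit is at least
+// startBytes_ + gcMaxNurseryBytes().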
+
+double HeapThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
+ double eagerTriggerFactor = highFrequencyGC
+ ? HighFrequencyEagerAllocTriggerFactor
+ : LowFrequencyEagerAllocTriggerFactor;
+ return eagerTriggerFactor * startBytes();
+}
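+// Example (illustrative): a zone whose start threshold is 100 MiB begins an
+// eager collection at ~85 MiB in high-frequency mode and at ~90 MiB otherwise
+// (0.85 and 0.9 times startBytes(), per the factors at the top of this file).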
+
+void HeapThreshold::setSliceThreshold(ZoneAllocator* zone,
+ const HeapSize& heapSize,
+ const GCSchedulingTunables& tunables) {
+ sliceBytes_ = ToClampedSize(
+ std::min(uint64_t(heapSize.bytes()) + tunables.zoneAllocDelayBytes(),
+ uint64_t(incrementalLimitBytes_)));
+}
+
+/* static */
+double GCHeapThreshold::computeZoneHeapGrowthFactorForHeapSize(
+ size_t lastBytes, const GCSchedulingTunables& tunables,
+ const GCSchedulingState& state) {
+ // For small zones, our collection heuristics do not matter much: favor
+ // something simple in this case.
+ if (lastBytes < 1 * 1024 * 1024) {
+ return tunables.lowFrequencyHeapGrowth();
+ }
+
+ // The heap growth factor depends on the heap size after a GC and the GC
+  // frequency. If GCs are not triggering in rapid succession, use a lower
+ // threshold so that we will collect garbage sooner.
+ if (!state.inHighFrequencyGCMode()) {
+ return tunables.lowFrequencyHeapGrowth();
+ }
+
+ // For high frequency GCs we let the heap grow depending on whether we
+ // classify the heap as small, medium or large. There are parameters for small
+ // and large heap sizes and linear interpolation is used between them for
+ // medium sized heaps.
+
+ MOZ_ASSERT(tunables.smallHeapSizeMaxBytes() <=
+ tunables.largeHeapSizeMinBytes());
+ MOZ_ASSERT(tunables.highFrequencyLargeHeapGrowth() <=
+ tunables.highFrequencySmallHeapGrowth());
+
+ return LinearInterpolate(lastBytes, tunables.smallHeapSizeMaxBytes(),
+ tunables.highFrequencySmallHeapGrowth(),
+ tunables.largeHeapSizeMinBytes(),
+ tunables.highFrequencyLargeHeapGrowth());
+}
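+// Worked example (illustrative, using the defaults from TuningDefaults in
+// Scheduling.h and assuming LinearInterpolate clamps at its endpoints): in
+// high-frequency mode a heap of 100 MiB or less grows by the small-heap
+// factor 3.0, a heap of 500 MiB or more by the large-heap factor 1.5, and a
+// 300 MiB heap, halfway between the two boundaries, by roughly
+// (3.0 + 1.5) / 2 = 2.25.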
+
+/* static */
+size_t GCHeapThreshold::computeZoneTriggerBytes(
+ double growthFactor, size_t lastBytes, JSGCInvocationKind gckind,
+ const GCSchedulingTunables& tunables, const AutoLockGC& lock) {
+ size_t baseMin = gckind == GC_SHRINK
+ ? tunables.minEmptyChunkCount(lock) * ChunkSize
+ : tunables.gcZoneAllocThresholdBase();
+ size_t base = std::max(lastBytes, baseMin);
+ double trigger = double(base) * growthFactor;
+ double triggerMax =
+ double(tunables.gcMaxBytes()) / tunables.largeHeapIncrementalLimit();
+ return ToClampedSize(std::min(triggerMax, trigger));
+}
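+// Worked example (illustrative): for a non-shrinking GC with the default
+// 27 MiB allocation threshold base, a zone that retained 50 MiB with a growth
+// factor of 1.5 gets a trigger of 75 MiB; the result is capped at
+// gcMaxBytes / largeHeapIncrementalLimit and clamped to size_t.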
+
+void GCHeapThreshold::updateStartThreshold(size_t lastBytes,
+ JSGCInvocationKind gckind,
+ const GCSchedulingTunables& tunables,
+ const GCSchedulingState& state,
+ bool isAtomsZone,
+ const AutoLockGC& lock) {
+ double growthFactor =
+ computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);
+
+ // Discourage collection of the atoms zone during page load as this can block
+ // off-thread parsing.
+ if (isAtomsZone && state.inPageLoad) {
+ growthFactor *= 1.5;
+ }
+
+ startBytes_ =
+ computeZoneTriggerBytes(growthFactor, lastBytes, gckind, tunables, lock);
+
+ setIncrementalLimitFromStartBytes(lastBytes, tunables);
+}
+
+/* static */
+size_t MallocHeapThreshold::computeZoneTriggerBytes(double growthFactor,
+ size_t lastBytes,
+ size_t baseBytes,
+ const AutoLockGC& lock) {
+ return ToClampedSize(double(std::max(lastBytes, baseBytes)) * growthFactor);
+}
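+// Worked example (illustrative): with the default 38 MiB malloc threshold
+// base and growth factor 1.5, a zone whose last malloc heap size was 10 MiB
+// gets a start threshold of max(10 MiB, 38 MiB) * 1.5 = 57 MiB.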
+
+void MallocHeapThreshold::updateStartThreshold(
+ size_t lastBytes, const GCSchedulingTunables& tunables,
+ const AutoLockGC& lock) {
+ startBytes_ =
+ computeZoneTriggerBytes(tunables.mallocGrowthFactor(), lastBytes,
+ tunables.mallocThresholdBase(), lock);
+
+ setIncrementalLimitFromStartBytes(lastBytes, tunables);
+}
+
+#ifdef DEBUG
+
+void MemoryTracker::adopt(MemoryTracker& other) {
+ LockGuard<Mutex> lock(mutex);
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ for (auto r = other.gcMap.all(); !r.empty(); r.popFront()) {
+ if (!gcMap.put(r.front().key(), r.front().value())) {
+ oomUnsafe.crash("MemoryTracker::adopt");
+ }
+ }
+ other.gcMap.clear();
+
+  // There may still be ZoneAllocPolicies associated with the old zone since
+  // some are not destroyed until the zone itself dies. Instead, check that
+  // they have no memory associated with them, and clear their zone pointer
+  // in debug builds to catch any further memory association.
+ for (auto r = other.nonGCMap.all(); !r.empty(); r.popFront()) {
+ MOZ_ASSERT(r.front().value() == 0);
+ if (r.front().key().use() == MemoryUse::ZoneAllocPolicy) {
+ auto policy = static_cast<ZoneAllocPolicy*>(r.front().key().ptr());
+ policy->zone_ = nullptr;
+ }
+ }
+ other.nonGCMap.clear();
+}
+
+static const char* MemoryUseName(MemoryUse use) {
+ switch (use) {
+# define DEFINE_CASE(Name) \
+ case MemoryUse::Name: \
+ return #Name;
+ JS_FOR_EACH_MEMORY_USE(DEFINE_CASE)
+# undef DEFINE_CASE
+ }
+
+ MOZ_CRASH("Unknown memory use");
+}
+
+MemoryTracker::MemoryTracker() : mutex(mutexid::MemoryTracker) {}
+
+void MemoryTracker::checkEmptyOnDestroy() {
+ bool ok = true;
+
+ if (!gcMap.empty()) {
+ ok = false;
+ fprintf(stderr, "Missing calls to JS::RemoveAssociatedMemory:\n");
+ for (auto r = gcMap.all(); !r.empty(); r.popFront()) {
+ fprintf(stderr, " %p 0x%zx %s\n", r.front().key().ptr(),
+ r.front().value(), MemoryUseName(r.front().key().use()));
+ }
+ }
+
+ if (!nonGCMap.empty()) {
+ ok = false;
+ fprintf(stderr, "Missing calls to Zone::decNonGCMemory:\n");
+ for (auto r = nonGCMap.all(); !r.empty(); r.popFront()) {
+ fprintf(stderr, " %p 0x%zx\n", r.front().key().ptr(), r.front().value());
+ }
+ }
+
+ MOZ_ASSERT(ok);
+}
+
+/* static */
+inline bool MemoryTracker::isGCMemoryUse(MemoryUse use) {
+ // Most memory uses are for memory associated with GC things but some are for
+ // memory associated with non-GC thing pointers.
+ return !isNonGCMemoryUse(use);
+}
+
+/* static */
+inline bool MemoryTracker::isNonGCMemoryUse(MemoryUse use) {
+ return use == MemoryUse::ZoneAllocPolicy;
+}
+
+/* static */
+inline bool MemoryTracker::allowMultipleAssociations(MemoryUse use) {
+ // For most uses only one association is possible for each GC thing. Allow a
+ // one-to-many relationship only where necessary.
+ return isNonGCMemoryUse(use) || use == MemoryUse::RegExpSharedBytecode ||
+ use == MemoryUse::BreakpointSite || use == MemoryUse::Breakpoint ||
+ use == MemoryUse::ForOfPICStub || use == MemoryUse::ICUObject;
+}
+
+void MemoryTracker::trackGCMemory(Cell* cell, size_t nbytes, MemoryUse use) {
+ MOZ_ASSERT(cell->isTenured());
+ MOZ_ASSERT(isGCMemoryUse(use));
+
+ LockGuard<Mutex> lock(mutex);
+
+ Key<Cell> key{cell, use};
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ auto ptr = gcMap.lookupForAdd(key);
+ if (ptr) {
+ if (!allowMultipleAssociations(use)) {
+ MOZ_CRASH_UNSAFE_PRINTF("Association already present: %p 0x%zx %s", cell,
+ nbytes, MemoryUseName(use));
+ }
+ ptr->value() += nbytes;
+ return;
+ }
+
+ if (!gcMap.add(ptr, key, nbytes)) {
+ oomUnsafe.crash("MemoryTracker::trackGCMemory");
+ }
+}
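+// Note (illustrative, not part of the original source): entries in gcMap
+// correspond to memory registered against a GC thing, e.g. via
+// JS::AddAssociatedMemory; checkEmptyOnDestroy() above reports any entry that
+// was never matched by a JS::RemoveAssociatedMemory call.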
+
+void MemoryTracker::untrackGCMemory(Cell* cell, size_t nbytes, MemoryUse use) {
+ MOZ_ASSERT(cell->isTenured());
+
+ LockGuard<Mutex> lock(mutex);
+
+ Key<Cell> key{cell, use};
+ auto ptr = gcMap.lookup(key);
+ if (!ptr) {
+ MOZ_CRASH_UNSAFE_PRINTF("Association not found: %p 0x%zx %s", cell, nbytes,
+ MemoryUseName(use));
+ }
+
+ if (!allowMultipleAssociations(use) && ptr->value() != nbytes) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "Association for %p %s has different size: "
+ "expected 0x%zx but got 0x%zx",
+ cell, MemoryUseName(use), ptr->value(), nbytes);
+ }
+
+ if (nbytes > ptr->value()) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "Association for %p %s size is too large: "
+ "expected at most 0x%zx but got 0x%zx",
+ cell, MemoryUseName(use), ptr->value(), nbytes);
+ }
+
+ ptr->value() -= nbytes;
+
+ if (ptr->value() == 0) {
+ gcMap.remove(ptr);
+ }
+}
+
+void MemoryTracker::swapGCMemory(Cell* a, Cell* b, MemoryUse use) {
+ MOZ_ASSERT(a->isTenured());
+ MOZ_ASSERT(b->isTenured());
+
+ Key<Cell> ka{a, use};
+ Key<Cell> kb{b, use};
+
+ LockGuard<Mutex> lock(mutex);
+
+ size_t sa = getAndRemoveEntry(ka, lock);
+ size_t sb = getAndRemoveEntry(kb, lock);
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ if ((sa && !gcMap.put(kb, sa)) || (sb && !gcMap.put(ka, sb))) {
+ oomUnsafe.crash("MemoryTracker::swapGCMemory");
+ }
+}
+
+size_t MemoryTracker::getAndRemoveEntry(const Key<Cell>& key,
+ LockGuard<Mutex>& lock) {
+ auto ptr = gcMap.lookup(key);
+ if (!ptr) {
+ return 0;
+ }
+
+ size_t size = ptr->value();
+ gcMap.remove(ptr);
+ return size;
+}
+
+void MemoryTracker::registerNonGCMemory(void* mem, MemoryUse use) {
+ LockGuard<Mutex> lock(mutex);
+
+ Key<void> key{mem, use};
+ auto ptr = nonGCMap.lookupForAdd(key);
+ if (ptr) {
+ MOZ_CRASH_UNSAFE_PRINTF("%s assocaition %p already registered",
+ MemoryUseName(use), mem);
+ }
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!nonGCMap.add(ptr, key, 0)) {
+ oomUnsafe.crash("MemoryTracker::registerNonGCMemory");
+ }
+}
+
+void MemoryTracker::unregisterNonGCMemory(void* mem, MemoryUse use) {
+ LockGuard<Mutex> lock(mutex);
+
+ Key<void> key{mem, use};
+ auto ptr = nonGCMap.lookup(key);
+ if (!ptr) {
+ MOZ_CRASH_UNSAFE_PRINTF("%s association %p not found", MemoryUseName(use),
+ mem);
+ }
+
+ if (ptr->value() != 0) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "%s association %p still has 0x%zx bytes associated",
+ MemoryUseName(use), mem, ptr->value());
+ }
+
+ nonGCMap.remove(ptr);
+}
+
+void MemoryTracker::moveNonGCMemory(void* dst, void* src, MemoryUse use) {
+ LockGuard<Mutex> lock(mutex);
+
+ Key<void> srcKey{src, use};
+ auto srcPtr = nonGCMap.lookup(srcKey);
+ if (!srcPtr) {
+ MOZ_CRASH_UNSAFE_PRINTF("%s association %p not found", MemoryUseName(use),
+ src);
+ }
+
+ size_t nbytes = srcPtr->value();
+ nonGCMap.remove(srcPtr);
+
+ Key<void> dstKey{dst, use};
+ auto dstPtr = nonGCMap.lookupForAdd(dstKey);
+ if (dstPtr) {
+ MOZ_CRASH_UNSAFE_PRINTF("%s %p already registered", MemoryUseName(use),
+ dst);
+ }
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!nonGCMap.add(dstPtr, dstKey, nbytes)) {
+ oomUnsafe.crash("MemoryTracker::moveNonGCMemory");
+ }
+}
+
+void MemoryTracker::incNonGCMemory(void* mem, size_t nbytes, MemoryUse use) {
+ MOZ_ASSERT(isNonGCMemoryUse(use));
+
+ LockGuard<Mutex> lock(mutex);
+
+ Key<void> key{mem, use};
+ auto ptr = nonGCMap.lookup(key);
+ if (!ptr) {
+ MOZ_CRASH_UNSAFE_PRINTF("%s allocation %p not found", MemoryUseName(use),
+ mem);
+ }
+
+ ptr->value() += nbytes;
+}
+
+void MemoryTracker::decNonGCMemory(void* mem, size_t nbytes, MemoryUse use) {
+ MOZ_ASSERT(isNonGCMemoryUse(use));
+
+ LockGuard<Mutex> lock(mutex);
+
+ Key<void> key{mem, use};
+ auto ptr = nonGCMap.lookup(key);
+ if (!ptr) {
+ MOZ_CRASH_UNSAFE_PRINTF("%s allocation %p not found", MemoryUseName(use),
+ mem);
+ }
+
+ size_t& value = ptr->value();
+ if (nbytes > value) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "%s allocation %p is too large: "
+ "expected at most 0x%zx but got 0x%zx bytes",
+ MemoryUseName(use), mem, value, nbytes);
+ }
+
+ value -= nbytes;
+}
+
+void MemoryTracker::fixupAfterMovingGC() {
+ // Update the table after we move GC things. We don't use MovableCellHasher
+ // because that would create a difference between debug and release builds.
+ for (GCMap::Enum e(gcMap); !e.empty(); e.popFront()) {
+ const auto& key = e.front().key();
+ Cell* cell = key.ptr();
+ if (cell->isForwarded()) {
+ cell = gc::RelocationOverlay::fromCell(cell)->forwardingAddress();
+ e.rekeyFront(Key<Cell>{cell, key.use()});
+ }
+ }
+}
+
+template <typename Ptr>
+inline MemoryTracker::Key<Ptr>::Key(Ptr* ptr, MemoryUse use)
+ : ptr_(uint64_t(ptr)), use_(uint64_t(use)) {
+# ifdef JS_64BIT
+ static_assert(sizeof(Key) == 8,
+ "MemoryTracker::Key should be packed into 8 bytes");
+# endif
+ MOZ_ASSERT(this->ptr() == ptr);
+ MOZ_ASSERT(this->use() == use);
+}
+
+template <typename Ptr>
+inline Ptr* MemoryTracker::Key<Ptr>::ptr() const {
+ return reinterpret_cast<Ptr*>(ptr_);
+}
+template <typename Ptr>
+inline MemoryUse MemoryTracker::Key<Ptr>::use() const {
+ return static_cast<MemoryUse>(use_);
+}
+
+template <typename Ptr>
+inline HashNumber MemoryTracker::Hasher<Ptr>::hash(const Lookup& l) {
+ return mozilla::HashGeneric(DefaultHasher<Ptr*>::hash(l.ptr()),
+ DefaultHasher<unsigned>::hash(unsigned(l.use())));
+}
+
+template <typename Ptr>
+inline bool MemoryTracker::Hasher<Ptr>::match(const KeyT& k, const Lookup& l) {
+ return k.ptr() == l.ptr() && k.use() == l.use();
+}
+
+template <typename Ptr>
+inline void MemoryTracker::Hasher<Ptr>::rekey(KeyT& k, const KeyT& newKey) {
+ k = newKey;
+}
+
+#endif // DEBUG
diff --git a/js/src/gc/Scheduling.h b/js/src/gc/Scheduling.h
new file mode 100644
index 0000000000..9f6564c828
--- /dev/null
+++ b/js/src/gc/Scheduling.h
@@ -0,0 +1,974 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * [SMDOC] GC Scheduling
+ *
+ * GC Scheduling Overview
+ * ======================
+ *
+ * Scheduling GCs in SpiderMonkey/Firefox is tremendously complicated because
+ * of the large number of subtle, cross-cutting, and widely dispersed factors
+ * that must be taken into account. A summary of some of the more important
+ * factors follows.
+ *
+ * Cost factors:
+ *
+ * * GC too soon and we'll revisit an object graph almost identical to the
+ * one we just visited; since we are unlikely to find new garbage, the
+ * traversal will be largely overhead. We rely heavily on external factors
+ * to signal us that we are likely to find lots of garbage: e.g. "a tab
+ * just got closed".
+ *
+ * * GC too late and we'll run out of memory to allocate (e.g. Out-Of-Memory,
+ * hereafter simply abbreviated to OOM). If this happens inside
+ * SpiderMonkey we may be able to recover, but most embedder allocations
+ * will simply crash on OOM, even if the GC has plenty of free memory it
+ * could surrender.
+ *
+ * * Memory fragmentation: if we fill the process with GC allocations, a
+ * request for a large block of contiguous memory may fail because no
+ * contiguous block is free, despite having enough memory available to
+ * service the request.
+ *
+ * * Management overhead: if our GC heap becomes large, we create extra
+ * overhead when managing the GC's structures, even if the allocations are
+ * mostly unused.
+ *
+ * Heap Management Factors:
+ *
+ * * GC memory: The GC has its own allocator that it uses to make fixed size
+ * allocations for GC managed things. In cases where the GC thing requires
+ * larger or variable sized memory to implement itself, it is responsible
+ * for using the system heap.
+ *
+ * * C Heap Memory: Rather than allowing for large or variable allocations,
+ * the SpiderMonkey GC allows GC things to hold pointers to C heap memory.
+ * It is the responsibility of the thing to free this memory with a custom
+ * finalizer (with the sole exception of NativeObject, which knows about
+ * slots and elements for performance reasons). C heap memory has different
+ * performance and overhead tradeoffs than GC internal memory, which need
+ * to be considered with scheduling a GC.
+ *
+ * Application Factors:
+ *
+ * * Most applications allocate heavily at startup, then enter a processing
+ * stage where memory utilization remains roughly fixed with a slower
+ * allocation rate. This is not always the case, however, so while we may
+ * optimize for this pattern, we must be able to handle arbitrary
+ * allocation patterns.
+ *
+ * Other factors:
+ *
+ * * Other memory: This is memory allocated outside the purview of the GC.
+ * Data mapped by the system for code libraries, data allocated by those
+ * libraries, data in the JSRuntime that is used to manage the engine,
+ * memory used by the embedding that is not attached to a GC thing, memory
+ * used by unrelated processes running on the hardware that use space we
+ * could otherwise use for allocation, etc. While we don't have to manage
+ * it, we do have to take it into account when scheduling since it affects
+ * when we will OOM.
+ *
+ * * Physical Reality: All real machines have limits on the number of bits
+ * that they are physically able to store. While modern operating systems
+ * can generally make additional space available with swapping, but at some
+ * point there are simply no more bits to allocate. There is also the
+ * factor of address space limitations, particularly on 32-bit machines.
+ *
+ * * Platform Factors: Each OS makes use of wildly different memory
+ * management techniques. These differences result in different performance
+ * tradeoffs, different fragmentation patterns, and different hard limits
+ * on the amount of physical and/or virtual memory that we can use before
+ * OOMing.
+ *
+ *
+ * Reasons for scheduling GC
+ * -------------------------
+ *
+ * While code generally takes the above factors into account in only an ad-hoc
+ * fashion, the API forces the user to pick a "reason" for the GC. We have a
+ * bunch of JS::GCReason reasons in GCAPI.h. These fall into a few categories
+ * that generally coincide with one or more of the above factors.
+ *
+ * Embedding reasons:
+ *
+ * 1) Do a GC now because the embedding knows something useful about the
+ * zone's memory retention state. These are GCReasons like LOAD_END,
+ * PAGE_HIDE, SET_NEW_DOCUMENT, DOM_UTILS. Mostly, Gecko uses these to
+ * indicate that a significant fraction of the scheduled zone's memory is
+ * probably reclaimable.
+ *
+ * 2) Do some known amount of GC work now because the embedding knows now is
+ * a good time to do a long, unblockable operation of a known duration.
+ * These are INTER_SLICE_GC and REFRESH_FRAME.
+ *
+ * Correctness reasons:
+ *
+ * 3) Do a GC now because correctness depends on some GC property. For
+ * example, CC_FORCED is where the embedding requires the mark bits to be
+ * set correctly. Also, EVICT_NURSERY where we need to work on the tenured
+ * heap.
+ *
+ * 4) Do a GC because we are shutting down: e.g. SHUTDOWN_CC or DESTROY_*.
+ *
+ * 5) Do a GC because a compartment was accessed between GC slices when we
+ * would have otherwise discarded it. We have to do a second GC to clean
+ * it up: e.g. COMPARTMENT_REVIVED.
+ *
+ * Emergency Reasons:
+ *
+ * 6) Do an all-zones, non-incremental GC now because the embedding knows it
+ * cannot wait: e.g. MEM_PRESSURE.
+ *
+ * 7) OOM when fetching a new Chunk results in a LAST_DITCH GC.
+ *
+ * Heap Size Limitation Reasons:
+ *
+ * 8) Do an incremental, zonal GC with reason MAYBEGC when we discover that
+ * the gc's allocated size is approaching the current trigger. This is
+ * called MAYBEGC because we make this check in the MaybeGC function.
+ * MaybeGC gets called at the top of the main event loop. Normally, it is
+ * expected that this callback will keep the heap size limited. It is
+ * relatively inexpensive, because it is invoked with no JS running and
+ * thus few stack roots to scan. For this reason, the GC's "trigger" bytes
+ * is less than the GC's "max" bytes as used by the trigger below.
+ *
+ * 9) Do an incremental, zonal GC with reason MAYBEGC when we go to allocate
+ * a new GC thing and find that the GC heap size has grown beyond the
+ * configured maximum (JSGC_MAX_BYTES). We trigger this GC by returning
+ * nullptr and then calling maybeGC at the top level of the allocator.
+ * This is then guaranteed to fail the "size greater than trigger" check
+ * above, since trigger is always less than max. After performing the GC,
+ *     the allocator unconditionally returns nullptr, forcing an OOM
+ *     exception to be raised by the script.
+ *
+ * Note that this differs from a LAST_DITCH GC where we actually run out
+ * of memory (i.e., a call to a system allocator fails) when trying to
+ * allocate. Unlike above, LAST_DITCH GC only happens when we are really
+ * out of memory, not just when we cross an arbitrary trigger; despite
+ * this, it may still return an allocation at the end and allow the script
+ * to continue, if the LAST_DITCH GC was able to free up enough memory.
+ *
+ * 10) Do a GC under reason ALLOC_TRIGGER when we are over the GC heap trigger
+ * limit, but in the allocator rather than in a random call to maybeGC.
+ * This occurs if we allocate too much before returning to the event loop
+ * and calling maybeGC; this is extremely common in benchmarks and
+ * long-running Worker computations. Note that this uses a wildly
+ * different mechanism from the above in that it sets the interrupt flag
+ * and does the GC at the next loop head, before the next alloc, or
+ * maybeGC. The reason for this is that this check is made after the
+ * allocation and we cannot GC with an uninitialized thing in the heap.
+ *
+ * 11) Do an incremental, zonal GC with reason TOO_MUCH_MALLOC when the total
+ * amount of malloced memory is greater than the malloc trigger limit for the
+ * zone.
+ *
+ *
+ * Size Limitation Triggers Explanation
+ * ------------------------------------
+ *
+ * The GC internally is entirely unaware of the context of the execution of
+ * the mutator. It sees only:
+ *
+ * A) Allocated size: this is the amount of memory currently requested by the
+ * mutator. This quantity is monotonically increasing: i.e. the allocation
+ * rate is always >= 0. It is also easy for the system to track.
+ *
+ * B) Retained size: this is the amount of memory that the mutator can
+ * currently reach. Said another way, it is the size of the heap
+ * immediately after a GC (modulo background sweeping). This size is very
+ * costly to know exactly and also extremely hard to estimate with any
+ * fidelity.
+ *
+ * For reference, a common allocated vs. retained graph might look like:
+ *
+ * | ** **
+ * | ** ** * **
+ * | ** * ** * **
+ * | * ** * ** * **
+ * | ** ** * ** * **
+ * s| * * ** ** + + **
+ * i| * * * + + + + +
+ * z| * * * + + + + +
+ * e| * **+
+ * | * +
+ * | * +
+ * | * +
+ * | * +
+ * | * +
+ * |*+
+ * +--------------------------------------------------
+ * time
+ * *** = allocated
+ * +++ = retained
+ *
+ * Note that this is a bit of a simplification because in reality we track
+ * malloc and GC heap sizes separately and have a different level of
+ * granularity and accuracy on each heap.
+ *
+ * This presents some obvious implications for Mark-and-Sweep collectors.
+ * Namely:
+ * -> t[marking] ~= size[retained]
+ * -> t[sweeping] ~= size[allocated] - size[retained]
+ *
+ * In a non-incremental collector, maintaining low latency and high
+ * responsiveness requires that total GC times be as low as possible. Thus,
+ * in order to stay responsive when we did not have a fully incremental
+ * collector, our GC triggers were focused on minimizing collection time.
+ * Furthermore, since size[retained] is not under control of the GC, all the
+ * GC could do to control collection times was reduce sweep times by
+ * minimizing size[allocated], per the equation above.
+ *
+ * The result of the above is GC triggers that focus on size[allocated] to
+ * the exclusion of other important factors and default heuristics that are
+ * not optimal for a fully incremental collector. On the other hand, this is
+ * not all bad: minimizing size[allocated] also minimizes the chance of OOM
+ * and sweeping remains one of the hardest areas to further incrementalize.
+ *
+ * EAGER_ALLOC_TRIGGER
+ * -------------------
+ * Occurs when we return to the event loop and find our heap is getting
+ * largish, but before t[marking] OR t[sweeping] is too large for a
+ * responsive non-incremental GC. This is intended to be the common case
+ * in normal web applications: e.g. we just finished an event handler and
+ * the few objects we allocated when computing the new whatzitz have
+ * pushed us slightly over the limit. After this GC we rescale the new
+ * EAGER_ALLOC_TRIGGER trigger to 150% of size[retained] so that our
+ * non-incremental GC times will always be proportional to this size
+ * rather than being dominated by sweeping.
+ *
+ * As a concession to mutators that allocate heavily during their startup
+ * phase, we have a highFrequencyGCMode that ups the growth rate to 300%
+ * of the current size[retained] so that we'll do fewer longer GCs at the
+ * end of the mutator startup rather than more, smaller GCs.
+ *
+ * Assumptions:
+ * -> Responsiveness is proportional to t[marking] + t[sweeping].
+ * -> size[retained] is proportional only to GC allocations.
+ *
+ * ALLOC_TRIGGER (non-incremental)
+ * -------------------------------
+ * If we do not return to the event loop before getting all the way to our
+ * gc trigger bytes then MAYBEGC will never fire. To avoid OOMing, we
+ * succeed the current allocation and set the script interrupt so that we
+ * will (hopefully) do a GC before we overflow our max and have to raise
+ * an OOM exception for the script.
+ *
+ * Assumptions:
+ * -> Common web scripts will return to the event loop before using
+ * 10% of the current triggerBytes worth of GC memory.
+ *
+ * ALLOC_TRIGGER (incremental)
+ * ---------------------------
+ * In practice the above trigger is rough: if a website is just on the
+ * cusp, sometimes it will trigger a non-incremental GC moments before
+ * returning to the event loop, where it could have done an incremental
+ * GC. Thus, we recently added an incremental version of the above with a
+ * substantially lower threshold, so that we have a soft limit here. If
+ * IGC can collect faster than the allocator generates garbage, even if
+ * the allocator does not return to the event loop frequently, we should
+ * not have to fall back to a non-incremental GC.
+ *
+ * INCREMENTAL_TOO_SLOW
+ * --------------------
+ * Do a full, non-incremental GC if we overflow ALLOC_TRIGGER during an
+ * incremental GC. When in the middle of an incremental GC, we suppress
+ * our other triggers, so we need a way to backstop the IGC if the
+ * mutator allocates faster than the IGC can clean things up.
+ *
+ * TOO_MUCH_MALLOC
+ * ---------------
+ * Performs a GC before size[allocated] - size[retained] gets too large
+ * for non-incremental sweeping to be fast in the case that we have
+ * significantly more malloc allocation than GC allocation. This is meant
+ * to complement MAYBEGC triggers. We track this by counting malloced
+ * bytes; the counter gets reset at every GC since we do not always have a
+ * size at the time we call free. Because of this, the malloc heuristic
+ * is, unfortunately, not usefully able to augment our other GC heap
+ * triggers and is limited to this singular heuristic.
+ *
+ * Assumptions:
+ * -> EITHER size[allocated_by_malloc] ~= size[allocated_by_GC]
+ * OR time[sweeping] ~= size[allocated_by_malloc]
+ * -> size[retained] @ t0 ~= size[retained] @ t1
+ * i.e. That the mutator is in steady-state operation.
+ *
+ * LAST_DITCH_GC
+ * -------------
+ * Does a GC because we are out of memory.
+ *
+ * Assumptions:
+ * -> size[retained] < size[available_memory]
+ */
+
+#ifndef gc_Scheduling_h
+#define gc_Scheduling_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/DebugOnly.h"
+
+#include "gc/GCEnum.h"
+#include "js/AllocPolicy.h"
+#include "js/GCAPI.h"
+#include "js/HashTable.h"
+#include "js/HeapAPI.h"
+#include "js/SliceBudget.h"
+#include "threading/ProtectedData.h"
+#include "util/DifferentialTesting.h"
+
+namespace js {
+
+class AutoLockGC;
+class ZoneAllocator;
+class ZoneAllocPolicy;
+
+namespace gc {
+
+struct Cell;
+
+/*
+ * Default settings for tuning the GC. Some of these can be set at runtime.
+ * This list is not complete; some tuning parameters are not listed here.
+ *
+ * If you change the values here, please also consider changing them in
+ * modules/libpref/init/all.js where they are duplicated for the Firefox
+ * preferences.
+ */
+namespace TuningDefaults {
+
+/* JSGC_ALLOCATION_THRESHOLD */
+static const size_t GCZoneAllocThresholdBase = 27 * 1024 * 1024;
+
+/*
+ * JSGC_MIN_NURSERY_BYTES
+ *
+ * With some testing (Bug 1532838) we increased this from 192K to 256K,
+ * which improves performance. We should try to reduce this for background
+ * tabs.
+ */
+static const size_t GCMinNurseryBytes = 256 * 1024;
+
+/*
+ * JSGC_SMALL_HEAP_INCREMENTAL_LIMIT
+ *
+ * This must be greater than 1.3 to maintain performance on splay-latency.
+ */
+static const double SmallHeapIncrementalLimit = 1.40;
+
+/* JSGC_LARGE_HEAP_INCREMENTAL_LIMIT */
+static const double LargeHeapIncrementalLimit = 1.10;
+
+/* JSGC_ZONE_ALLOC_DELAY_KB */
+static const size_t ZoneAllocDelayBytes = 1024 * 1024;
+
+/* JSGC_HIGH_FREQUENCY_TIME_LIMIT */
+static const auto HighFrequencyThreshold = 1; // in seconds
+
+/* JSGC_SMALL_HEAP_SIZE_MAX */
+static const size_t SmallHeapSizeMaxBytes = 100 * 1024 * 1024;
+
+/* JSGC_LARGE_HEAP_SIZE_MIN */
+static const size_t LargeHeapSizeMinBytes = 500 * 1024 * 1024;
+
+/* JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH */
+static const double HighFrequencySmallHeapGrowth = 3.0;
+
+/* JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH */
+static const double HighFrequencyLargeHeapGrowth = 1.5;
+
+/* JSGC_LOW_FREQUENCY_HEAP_GROWTH */
+static const double LowFrequencyHeapGrowth = 1.5;
+
+/* JSGC_MIN_EMPTY_CHUNK_COUNT */
+static const uint32_t MinEmptyChunkCount = 1;
+
+/* JSGC_MAX_EMPTY_CHUNK_COUNT */
+static const uint32_t MaxEmptyChunkCount = 30;
+
+/* JSGC_SLICE_TIME_BUDGET_MS */
+static const int64_t DefaultTimeBudgetMS = SliceBudget::UnlimitedTimeBudget;
+
+/* JSGC_INCREMENTAL_ENABLED */
+static const bool IncrementalGCEnabled = false;
+
+/* JSGC_PER_ZONE_GC_ENABLED */
+static const bool PerZoneGCEnabled = false;
+
+/* JSGC_COMPACTING_ENABLED */
+static const bool CompactingEnabled = true;
+
+/* JSGC_INCREMENTAL_WEAKMAP_ENABLED */
+static const bool IncrementalWeakMapMarkingEnabled = true;
+
+/* JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION */
+static const uint32_t NurseryFreeThresholdForIdleCollection = ChunkSize / 4;
+
+/* JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT */
+static const double NurseryFreeThresholdForIdleCollectionFraction = 0.25;
+
+/* JSGC_PRETENURE_THRESHOLD */
+static const double PretenureThreshold = 0.6;
+
+/* JSGC_PRETENURE_GROUP_THRESHOLD */
+static const double PretenureGroupThreshold = 3000;
+
+/* JSGC_PRETENURE_STRING_THRESHOLD */
+static const double PretenureStringThreshold = 0.55;
+
+/* JSGC_STOP_PRETENURE_STRING_THRESHOLD */
+static const double StopPretenureStringThreshold = 0.9;
+
+/* JSGC_MIN_LAST_DITCH_GC_PERIOD */
+static const auto MinLastDitchGCPeriod = 60; // in seconds
+
+/* JSGC_MALLOC_THRESHOLD_BASE */
+static const size_t MallocThresholdBase = 38 * 1024 * 1024;
+
+/* JSGC_MALLOC_GROWTH_FACTOR */
+static const double MallocGrowthFactor = 1.5;
+
+/* JSGC_HELPER_THREAD_RATIO */
+static const double HelperThreadRatio = 0.5;
+
+/* JSGC_MAX_HELPER_THREADS */
+static const size_t MaxHelperThreads = 8;
+
+} // namespace TuningDefaults
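+
+// Worked example (illustrative only; the real computation lives in
+// Scheduling.cpp): with the defaults above, a zone whose GC start threshold
+// equals GCZoneAllocThresholdBase (27 MiB) would hit the small-heap
+// non-incremental limit at roughly 27 MiB * 1.40 = ~37.8 MiB, while the
+// large-heap limit factor of 1.10 would give ~29.7 MiB for the same base.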
+
+/*
+ * Encapsulates all of the GC tunables. These are effectively constant and
+ * should only be modified by setParameter.
+ */
+class GCSchedulingTunables {
+ /*
+ * JSGC_MAX_BYTES
+ *
+ * Maximum nominal heap before last ditch GC.
+ */
+ UnprotectedData<size_t> gcMaxBytes_;
+
+ /*
+ * JSGC_MIN_NURSERY_BYTES
+ * JSGC_MAX_NURSERY_BYTES
+ *
+ * Minimum and maximum nursery size for each runtime.
+ */
+ MainThreadData<size_t> gcMinNurseryBytes_;
+ MainThreadData<size_t> gcMaxNurseryBytes_;
+
+ /*
+ * JSGC_ALLOCATION_THRESHOLD
+ *
+ * The base value used to compute zone->threshold.bytes(). When
+ * gcHeapSize.bytes() exceeds threshold.bytes() for a zone, the zone may be
+ * scheduled for a GC, depending on the exact circumstances.
+ */
+ MainThreadOrGCTaskData<size_t> gcZoneAllocThresholdBase_;
+
+ /*
+ * JSGC_SMALL_HEAP_INCREMENTAL_LIMIT
+ *
+ * Multiple of threshold.bytes() which triggers a non-incremental GC.
+ */
+ UnprotectedData<double> smallHeapIncrementalLimit_;
+
+ /*
+ * JSGC_LARGE_HEAP_INCREMENTAL_LIMIT
+ *
+ * Multiple of threshold.bytes() which triggers a non-incremental GC.
+ */
+ UnprotectedData<double> largeHeapIncrementalLimit_;
+
+ /*
+ * Number of bytes to allocate between incremental slices in GCs triggered by
+ * the zone allocation threshold.
+ *
+ * This value does not have a JSGCParamKey parameter yet.
+ */
+ UnprotectedData<size_t> zoneAllocDelayBytes_;
+
+ /*
+ * JSGC_HIGH_FREQUENCY_TIME_LIMIT
+ *
+   * We enter high-frequency mode if we GC twice within this amount of time.
+ */
+ MainThreadOrGCTaskData<mozilla::TimeDuration> highFrequencyThreshold_;
+
+ /*
+ * JSGC_SMALL_HEAP_SIZE_MAX
+ * JSGC_LARGE_HEAP_SIZE_MIN
+ * JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH
+ * JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH
+ *
+ * When in the |highFrequencyGC| mode, these parameterize the per-zone
+ * "HeapGrowthFactor" computation.
+ */
+ MainThreadOrGCTaskData<size_t> smallHeapSizeMaxBytes_;
+ MainThreadOrGCTaskData<size_t> largeHeapSizeMinBytes_;
+ MainThreadOrGCTaskData<double> highFrequencySmallHeapGrowth_;
+ MainThreadOrGCTaskData<double> highFrequencyLargeHeapGrowth_;
+
+ /*
+ * JSGC_LOW_FREQUENCY_HEAP_GROWTH
+ *
+ * When not in |highFrequencyGC| mode, this is the global (stored per-zone)
+ * "HeapGrowthFactor".
+ */
+ MainThreadOrGCTaskData<double> lowFrequencyHeapGrowth_;
+
+ /*
+ * JSGC_MIN_EMPTY_CHUNK_COUNT
+ * JSGC_MAX_EMPTY_CHUNK_COUNT
+ *
+ * Controls the number of empty chunks reserved for future allocation.
+ */
+ UnprotectedData<uint32_t> minEmptyChunkCount_;
+ UnprotectedData<uint32_t> maxEmptyChunkCount_;
+
+ /*
+ * JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION
+ * JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_FRACTION
+ *
+   * Attempt to run a minor GC during idle time if the free space falls below
+   * this threshold. The absolute threshold is used when the nursery is large
+   * and the fraction when it is small. See Nursery::shouldCollect().
+ */
+ UnprotectedData<uint32_t> nurseryFreeThresholdForIdleCollection_;
+ UnprotectedData<double> nurseryFreeThresholdForIdleCollectionFraction_;
+
+ /*
+ * JSGC_PRETENURE_THRESHOLD
+ *
+ * Fraction of objects tenured to trigger pretenuring (between 0 and 1). If
+ * this fraction is met, the GC proceeds to calculate which objects will be
+   * tenured. If this is 1.0f or greater (i.e. not < 1.0f), pretenuring is
+   * disabled.
+ */
+ UnprotectedData<double> pretenureThreshold_;
+
+ /*
+ * JSGC_PRETENURE_GROUP_THRESHOLD
+ *
+ * During a single nursery collection, if this many objects from the same
+ * object group are tenured, then that group will be pretenured.
+ */
+ UnprotectedData<uint32_t> pretenureGroupThreshold_;
+
+ /*
+ * JSGC_PRETENURE_STRING_THRESHOLD
+ *
+   * If the fraction of strings that get tenured exceeds this threshold,
+   * strings will be allocated in the tenured heap instead. (By default they
+   * are allocated in the nursery.)
+ */
+ MainThreadData<double> pretenureStringThreshold_;
+
+ /*
+ * JSGC_STOP_PRETENURE_STRING_THRESHOLD
+ *
+ * If the finalization rate of the tenured strings exceeds this threshold,
+   * strings will be allocated in the nursery again.
+ */
+ MainThreadData<double> stopPretenureStringThreshold_;
+
+ /*
+ * JSGC_MIN_LAST_DITCH_GC_PERIOD
+ *
+   * A last-ditch GC is skipped if an allocation failure occurs less than this
+   * long after the previous last-ditch GC.
+ */
+ MainThreadData<mozilla::TimeDuration> minLastDitchGCPeriod_;
+
+ /*
+ * JSGC_MALLOC_THRESHOLD_BASE
+ *
+ * The base value used to compute the GC trigger for malloc allocated memory.
+ */
+ MainThreadOrGCTaskData<size_t> mallocThresholdBase_;
+
+ /*
+ * JSGC_MALLOC_GROWTH_FACTOR
+ *
+ * Malloc memory growth factor.
+ */
+ MainThreadOrGCTaskData<double> mallocGrowthFactor_;
+
+ public:
+ GCSchedulingTunables();
+
+ size_t gcMaxBytes() const { return gcMaxBytes_; }
+ size_t gcMinNurseryBytes() const { return gcMinNurseryBytes_; }
+ size_t gcMaxNurseryBytes() const { return gcMaxNurseryBytes_; }
+ size_t gcZoneAllocThresholdBase() const { return gcZoneAllocThresholdBase_; }
+ double smallHeapIncrementalLimit() const {
+ return smallHeapIncrementalLimit_;
+ }
+ double largeHeapIncrementalLimit() const {
+ return largeHeapIncrementalLimit_;
+ }
+ size_t zoneAllocDelayBytes() const { return zoneAllocDelayBytes_; }
+ const mozilla::TimeDuration& highFrequencyThreshold() const {
+ return highFrequencyThreshold_;
+ }
+ size_t smallHeapSizeMaxBytes() const { return smallHeapSizeMaxBytes_; }
+ size_t largeHeapSizeMinBytes() const { return largeHeapSizeMinBytes_; }
+ double highFrequencySmallHeapGrowth() const {
+ return highFrequencySmallHeapGrowth_;
+ }
+ double highFrequencyLargeHeapGrowth() const {
+ return highFrequencyLargeHeapGrowth_;
+ }
+ double lowFrequencyHeapGrowth() const { return lowFrequencyHeapGrowth_; }
+ unsigned minEmptyChunkCount(const AutoLockGC&) const {
+ return minEmptyChunkCount_;
+ }
+ unsigned maxEmptyChunkCount() const { return maxEmptyChunkCount_; }
+ uint32_t nurseryFreeThresholdForIdleCollection() const {
+ return nurseryFreeThresholdForIdleCollection_;
+ }
+ double nurseryFreeThresholdForIdleCollectionFraction() const {
+ return nurseryFreeThresholdForIdleCollectionFraction_;
+ }
+
+ bool attemptPretenuring() const { return pretenureThreshold_ < 1.0; }
+ double pretenureThreshold() const { return pretenureThreshold_; }
+ uint32_t pretenureGroupThreshold() const { return pretenureGroupThreshold_; }
+ double pretenureStringThreshold() const { return pretenureStringThreshold_; }
+ double stopPretenureStringThreshold() const {
+ return stopPretenureStringThreshold_;
+ }
+
+ mozilla::TimeDuration minLastDitchGCPeriod() const {
+ return minLastDitchGCPeriod_;
+ }
+
+ size_t mallocThresholdBase() const { return mallocThresholdBase_; }
+ double mallocGrowthFactor() const { return mallocGrowthFactor_; }
+
+ MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value,
+ const AutoLockGC& lock);
+ void resetParameter(JSGCParamKey key, const AutoLockGC& lock);
+
+ private:
+ void setSmallHeapSizeMaxBytes(size_t value);
+ void setLargeHeapSizeMinBytes(size_t value);
+ void setHighFrequencySmallHeapGrowth(double value);
+ void setHighFrequencyLargeHeapGrowth(double value);
+ void setLowFrequencyHeapGrowth(double value);
+ void setMinEmptyChunkCount(uint32_t value);
+ void setMaxEmptyChunkCount(uint32_t value);
+};
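+
+// Hypothetical usage sketch (the helper name below is illustrative, not part
+// of the API): most getters are plain reads, while minEmptyChunkCount()
+// additionally takes an AutoLockGC reference as evidence that the GC lock is
+// held.
+//
+//   void logTunables(const js::gc::GCSchedulingTunables& tunables,
+//                    const js::AutoLockGC& lock) {
+//     size_t start = tunables.gcZoneAllocThresholdBase();
+//     double limit = tunables.smallHeapIncrementalLimit();
+//     unsigned minChunks = tunables.minEmptyChunkCount(lock);
+//     // start * limit approximates the small-heap point at which an
+//     // incremental GC would be abandoned for a non-incremental one.
+//   }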
+
+class GCSchedulingState {
+ /*
+   * Influences how we schedule and run GCs in several subtle ways. The most
+ * important factor is in how it controls the "HeapGrowthFactor". The
+ * growth factor is a measure of how large (as a percentage of the last GC)
+ * the heap is allowed to grow before we try to schedule another GC.
+ */
+ MainThreadOrGCTaskData<bool> inHighFrequencyGCMode_;
+
+ public:
+ /*
+ * Influences the GC thresholds for the atoms zone to discourage collection of
+ * this zone during page load.
+ */
+ MainThreadOrGCTaskData<bool> inPageLoad;
+
+ GCSchedulingState() : inHighFrequencyGCMode_(false) {}
+
+ bool inHighFrequencyGCMode() const { return inHighFrequencyGCMode_; }
+
+ void updateHighFrequencyMode(const mozilla::TimeStamp& lastGCTime,
+ const mozilla::TimeStamp& currentTime,
+ const GCSchedulingTunables& tunables) {
+ if (js::SupportDifferentialTesting()) {
+ return;
+ }
+
+ inHighFrequencyGCMode_ =
+ !lastGCTime.IsNull() &&
+ lastGCTime + tunables.highFrequencyThreshold() > currentTime;
+ }
+};
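+
+// For example, with the default one-second HighFrequencyThreshold above, two
+// collections starting less than a second apart put the runtime into
+// high-frequency mode, which in turn selects the more aggressive
+// high-frequency heap growth factors.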
+
+struct TriggerResult {
+ bool shouldTrigger;
+ size_t usedBytes;
+ size_t thresholdBytes;
+};
+
+using AtomicByteCount = mozilla::Atomic<size_t, mozilla::ReleaseAcquire>;
+
+/*
+ * Tracks the size of allocated data. This is used for both GC and malloc data.
+ * It automatically maintains the memory usage relationship between parent and
+ * child instances, i.e. between those in a GCRuntime and its Zones.
+ */
+class HeapSize {
+ /*
+ * An instance that contains our parent's heap usage, or null if this is the
+ * top-level usage container.
+ */
+ HeapSize* const parent_;
+
+ /*
+   * The number of bytes in use. For GC heaps this is approximate, rounded to
+   * the nearest ArenaSize. It is atomic because it is updated by both the
+   * active and GC
+ * helper threads.
+ */
+ AtomicByteCount bytes_;
+
+ /*
+ * The number of bytes retained after the last collection. This is updated
+ * dynamically during incremental GC. It does not include allocations that
+ * happen during a GC.
+ */
+ AtomicByteCount retainedBytes_;
+
+ public:
+ explicit HeapSize(HeapSize* parent) : parent_(parent), bytes_(0) {}
+
+ size_t bytes() const { return bytes_; }
+ size_t retainedBytes() const { return retainedBytes_; }
+
+ void updateOnGCStart() { retainedBytes_ = size_t(bytes_); }
+
+ void addGCArena() { addBytes(ArenaSize); }
+ void removeGCArena() {
+ MOZ_ASSERT(retainedBytes_ >= ArenaSize);
+ removeBytes(ArenaSize, true /* only sweeping removes arenas */);
+ }
+
+ void addBytes(size_t nbytes) {
+ mozilla::DebugOnly<size_t> initialBytes(bytes_);
+ MOZ_ASSERT(initialBytes + nbytes > initialBytes);
+ bytes_ += nbytes;
+ if (parent_) {
+ parent_->addBytes(nbytes);
+ }
+ }
+ void removeBytes(size_t nbytes, bool wasSwept) {
+ if (wasSwept) {
+      // TODO: We would like to assert that retainedBytes_ >= nbytes here, but
+      // we can't do that yet, so clamp the result to zero.
+ retainedBytes_ = nbytes <= retainedBytes_ ? retainedBytes_ - nbytes : 0;
+ }
+ MOZ_ASSERT(bytes_ >= nbytes);
+ bytes_ -= nbytes;
+ if (parent_) {
+ parent_->removeBytes(nbytes, wasSwept);
+ }
+ }
+
+  /* Counterpart to adoptArenas: adopts the attendant usage statistics. */
+ void adopt(HeapSize& source) {
+ // Skip retainedBytes_: we never adopt zones that are currently being
+ // collected.
+ bytes_ += source.bytes_;
+ source.retainedBytes_ = 0;
+ source.bytes_ = 0;
+ }
+};
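+
+// Illustrative sketch of the parent/child chaining above: a zone-level
+// HeapSize forwards every update to the runtime-wide total.
+//
+//   js::gc::HeapSize runtimeSize(nullptr);
+//   js::gc::HeapSize zoneSize(&runtimeSize);
+//   zoneSize.addBytes(4096);   // runtimeSize.bytes() is now 4096 as well
+//   zoneSize.removeBytes(4096, /* wasSwept = */ false);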
+
+// Heap size thresholds used to trigger GC. This is an abstract base class for
+// GC heap and malloc thresholds defined below.
+class HeapThreshold {
+ protected:
+ HeapThreshold()
+ : startBytes_(SIZE_MAX),
+ incrementalLimitBytes_(SIZE_MAX),
+ sliceBytes_(SIZE_MAX) {}
+
+ // The threshold at which to start a new incremental collection.
+ //
+ // TODO: This is currently read off-thread during parsing, but at some point
+ // we should be able to make this MainThreadData<>.
+ AtomicByteCount startBytes_;
+
+  // The threshold at which to start a new non-incremental collection or finish
+  // an ongoing collection non-incrementally.
+ size_t incrementalLimitBytes_;
+
+ // The threshold at which to trigger a slice during an ongoing incremental
+ // collection.
+ size_t sliceBytes_;
+
+ public:
+ size_t startBytes() const { return startBytes_; }
+ size_t sliceBytes() const { return sliceBytes_; }
+ size_t incrementalLimitBytes() const { return incrementalLimitBytes_; }
+ double eagerAllocTrigger(bool highFrequencyGC) const;
+
+ void setSliceThreshold(ZoneAllocator* zone, const HeapSize& heapSize,
+ const GCSchedulingTunables& tunables);
+ void clearSliceThreshold() { sliceBytes_ = SIZE_MAX; }
+ bool hasSliceThreshold() const { return sliceBytes_ != SIZE_MAX; }
+
+ protected:
+ void setIncrementalLimitFromStartBytes(size_t retainedBytes,
+ const GCSchedulingTunables& tunables);
+};
+
+// A heap threshold based on a multiple of the retained size after the last
+// collection, where the multiple is adjusted according to collection frequency
+// and heap size. This is used to determine when to do a zone GC based on GC
+// heap size.
+class GCHeapThreshold : public HeapThreshold {
+ public:
+ void updateStartThreshold(size_t lastBytes, JSGCInvocationKind gckind,
+ const GCSchedulingTunables& tunables,
+ const GCSchedulingState& state, bool isAtomsZone,
+ const AutoLockGC& lock);
+
+ private:
+ static double computeZoneHeapGrowthFactorForHeapSize(
+ size_t lastBytes, const GCSchedulingTunables& tunables,
+ const GCSchedulingState& state);
+ static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
+ JSGCInvocationKind gckind,
+ const GCSchedulingTunables& tunables,
+ const AutoLockGC& lock);
+};
+
+// A heap threshold that is calculated as a constant multiple of the retained
+// size after the last collection. This is used to determine when to do a zone
+// GC based on malloc data.
+class MallocHeapThreshold : public HeapThreshold {
+ public:
+ void updateStartThreshold(size_t lastBytes,
+ const GCSchedulingTunables& tunables,
+ const AutoLockGC& lock);
+
+ private:
+ static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
+ size_t baseBytes,
+ const AutoLockGC& lock);
+};
+
+// A fixed threshold that's used to determine when we need to do a zone GC based
+// on allocated JIT code.
+class JitHeapThreshold : public HeapThreshold {
+ public:
+ explicit JitHeapThreshold(size_t bytes) { startBytes_ = bytes; }
+};
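+
+// Taken together, the thresholds above are consulted with increasing severity:
+// crossing startBytes() is grounds for starting an incremental collection,
+// crossing sliceBytes() triggers another slice of an ongoing incremental
+// collection, and crossing incrementalLimitBytes() means finishing (or
+// starting) the collection non-incrementally.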
+
+struct SharedMemoryUse {
+ explicit SharedMemoryUse(MemoryUse use) : count(0), nbytes(0) {
+#ifdef DEBUG
+ this->use = use;
+#endif
+ }
+
+ size_t count;
+ size_t nbytes;
+#ifdef DEBUG
+ MemoryUse use;
+#endif
+};
+
+// A map which tracks shared memory uses (shared in the sense that an allocation
+// can be referenced by more than one GC thing in a zone). This allows us to
+// only account for the memory once.
+using SharedMemoryMap =
+ HashMap<void*, SharedMemoryUse, DefaultHasher<void*>, SystemAllocPolicy>;
+
+#ifdef DEBUG
+
+// Counts memory associated with GC things in a zone.
+//
+// This records details of the cell (or non-cell pointer) the memory allocation
+// is associated with to check the correctness of the information provided. This
+// is not present in opt builds.
+class MemoryTracker {
+ public:
+ MemoryTracker();
+ void fixupAfterMovingGC();
+ void checkEmptyOnDestroy();
+
+ void adopt(MemoryTracker& other);
+
+ // Track memory by associated GC thing pointer.
+ void trackGCMemory(Cell* cell, size_t nbytes, MemoryUse use);
+ void untrackGCMemory(Cell* cell, size_t nbytes, MemoryUse use);
+ void swapGCMemory(Cell* a, Cell* b, MemoryUse use);
+
+ // Track memory by associated non-GC thing pointer.
+ void registerNonGCMemory(void* ptr, MemoryUse use);
+ void unregisterNonGCMemory(void* ptr, MemoryUse use);
+ void moveNonGCMemory(void* dst, void* src, MemoryUse use);
+ void incNonGCMemory(void* ptr, size_t nbytes, MemoryUse use);
+ void decNonGCMemory(void* ptr, size_t nbytes, MemoryUse use);
+
+ private:
+ template <typename Ptr>
+ struct Key {
+ Key(Ptr* ptr, MemoryUse use);
+ Ptr* ptr() const;
+ MemoryUse use() const;
+
+ private:
+# ifdef JS_64BIT
+ // Pack this into a single word on 64 bit platforms.
+ uintptr_t ptr_ : 56;
+ uintptr_t use_ : 8;
+# else
+ uintptr_t ptr_ : 32;
+ uintptr_t use_ : 8;
+# endif
+ };
+
+ template <typename Ptr>
+ struct Hasher {
+ using KeyT = Key<Ptr>;
+ using Lookup = KeyT;
+ static HashNumber hash(const Lookup& l);
+ static bool match(const KeyT& key, const Lookup& l);
+ static void rekey(KeyT& k, const KeyT& newKey);
+ };
+
+ template <typename Ptr>
+ using Map = HashMap<Key<Ptr>, size_t, Hasher<Ptr>, SystemAllocPolicy>;
+ using GCMap = Map<Cell>;
+ using NonGCMap = Map<void>;
+
+ static bool isGCMemoryUse(MemoryUse use);
+ static bool isNonGCMemoryUse(MemoryUse use);
+ static bool allowMultipleAssociations(MemoryUse use);
+
+ size_t getAndRemoveEntry(const Key<Cell>& key, LockGuard<Mutex>& lock);
+
+ Mutex mutex;
+
+ // Map containing the allocated size associated with (cell, use) pairs.
+ GCMap gcMap;
+
+  // Map containing the allocated size associated with (non-cell pointer, use)
+  // pairs.
+ NonGCMap nonGCMap;
+};
+
+#endif // DEBUG
+
+static inline double LinearInterpolate(double x, double x0, double y0,
+ double x1, double y1) {
+ MOZ_ASSERT(x0 < x1);
+
+ if (x < x0) {
+ return y0;
+ }
+
+ if (x < x1) {
+ return y0 + (y1 - y0) * ((x - x0) / (x1 - x0));
+ }
+
+ return y1;
+}
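+
+// Example (illustrative): LinearInterpolate clamps outside [x0, x1] and
+// interpolates linearly inside it, so with (x0, y0) = (0, 1.0) and
+// (x1, y1) = (10, 3.0):
+//   LinearInterpolate(-5, 0, 1.0, 10, 3.0) == 1.0  (clamped to y0)
+//   LinearInterpolate( 5, 0, 1.0, 10, 3.0) == 2.0  (midpoint)
+//   LinearInterpolate(20, 0, 1.0, 10, 3.0) == 3.0  (clamped to y1)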
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_Scheduling_h
diff --git a/js/src/gc/Statistics.cpp b/js/src/gc/Statistics.cpp
new file mode 100644
index 0000000000..50cc055d18
--- /dev/null
+++ b/js/src/gc/Statistics.cpp
@@ -0,0 +1,1633 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Statistics.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Sprintf.h"
+#include "mozilla/TimeStamp.h"
+
+#include <algorithm>
+#include <stdarg.h>
+#include <stdio.h>
+#include <type_traits>
+
+#include "debugger/DebugAPI.h"
+#include "gc/GC.h"
+#include "gc/Memory.h"
+#include "js/friend/UsageStatistics.h" // JS_TELEMETRY_*
+#include "util/Text.h"
+#include "vm/HelperThreads.h"
+#include "vm/Runtime.h"
+#include "vm/Time.h"
+
+#include "gc/PrivateIterators-inl.h"
+
+using namespace js;
+using namespace js::gc;
+using namespace js::gcstats;
+
+using mozilla::DebugOnly;
+using mozilla::EnumeratedArray;
+using mozilla::Maybe;
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
+static const size_t BYTES_PER_MB = 1024 * 1024;
+
+/*
+ * If this fails, then you can either delete this assertion and allow all
+ * larger-numbered reasons to pile up in the last telemetry bucket, or switch
+ * to GC_REASON_3 and bump the max value.
+ */
+static_assert(JS::GCReason::NUM_TELEMETRY_REASONS >= JS::GCReason::NUM_REASONS);
+
+static inline auto AllPhaseKinds() {
+ return mozilla::MakeEnumeratedRange(PhaseKind::FIRST, PhaseKind::LIMIT);
+}
+
+static inline auto MajorGCPhaseKinds() {
+ return mozilla::MakeEnumeratedRange(PhaseKind::GC_BEGIN,
+ PhaseKind(size_t(PhaseKind::GC_END) + 1));
+}
+
+const char* js::gcstats::ExplainInvocationKind(JSGCInvocationKind gckind) {
+ MOZ_ASSERT(gckind == GC_NORMAL || gckind == GC_SHRINK);
+ if (gckind == GC_NORMAL) {
+ return "Normal";
+ } else {
+ return "Shrinking";
+ }
+}
+
+JS_PUBLIC_API const char* JS::ExplainGCReason(JS::GCReason reason) {
+ switch (reason) {
+#define SWITCH_REASON(name, _) \
+ case JS::GCReason::name: \
+ return #name;
+ GCREASONS(SWITCH_REASON)
+#undef SWITCH_REASON
+
+ case JS::GCReason::NO_REASON:
+ return "NO_REASON";
+
+ default:
+ MOZ_CRASH("bad GC reason");
+ }
+}
+
+JS_PUBLIC_API bool JS::InternalGCReason(JS::GCReason reason) {
+ return reason < JS::GCReason::FIRST_FIREFOX_REASON;
+}
+
+const char* js::gcstats::ExplainAbortReason(GCAbortReason reason) {
+ switch (reason) {
+#define SWITCH_REASON(name, _) \
+ case GCAbortReason::name: \
+ return #name;
+ GC_ABORT_REASONS(SWITCH_REASON)
+
+ default:
+ MOZ_CRASH("bad GC abort reason");
+#undef SWITCH_REASON
+ }
+}
+
+static FILE* MaybeOpenFileFromEnv(const char* env) {
+ FILE* file;
+ const char* value = getenv(env);
+
+ if (!value) {
+ return nullptr;
+ }
+
+ if (strcmp(value, "none") == 0) {
+ file = nullptr;
+ } else if (strcmp(value, "stdout") == 0) {
+ file = stdout;
+ } else if (strcmp(value, "stderr") == 0) {
+ file = stderr;
+ } else {
+ char path[300];
+ if (value[0] != '/') {
+ const char* dir = getenv("MOZ_UPLOAD_DIR");
+ if (dir) {
+ SprintfLiteral(path, "%s/%s", dir, value);
+ value = path;
+ }
+ }
+
+ file = fopen(value, "a");
+ if (!file) {
+ perror("opening log file");
+ MOZ_CRASH("Failed to open log file.");
+ }
+ }
+
+ return file;
+}
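+
+// Summary of the behaviour above (used below for MOZ_GCTIMER and JS_GC_DEBUG):
+// the variable's value may be "none", "stdout", "stderr", or a file path;
+// relative paths are placed under MOZ_UPLOAD_DIR when that is set, and the
+// file is opened in append mode.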
+
+struct PhaseKindInfo {
+ Phase firstPhase;
+ uint8_t telemetryBucket;
+};
+
+// PhaseInfo objects form a tree.
+struct PhaseInfo {
+ Phase parent;
+ Phase firstChild;
+ Phase nextSibling;
+ Phase nextWithPhaseKind;
+ PhaseKind phaseKind;
+ uint8_t depth;
+ const char* name;
+ const char* path;
+};
+
+// A table of PhaseInfo indexed by Phase.
+using PhaseTable = EnumeratedArray<Phase, Phase::LIMIT, PhaseInfo>;
+
+// A table of PhaseKindInfo indexed by PhaseKind.
+using PhaseKindTable =
+ EnumeratedArray<PhaseKind, PhaseKind::LIMIT, PhaseKindInfo>;
+
+#include "gc/StatsPhasesGenerated.inc"
+
+// Iterate the phases in a phase kind.
+class PhaseIter {
+ Phase phase;
+
+ public:
+ explicit PhaseIter(PhaseKind kind) : phase(phaseKinds[kind].firstPhase) {}
+ bool done() const { return phase == Phase::NONE; }
+ void next() { phase = phases[phase].nextWithPhaseKind; }
+ Phase get() const { return phase; }
+ operator Phase() const { return phase; }
+};
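+
+// Typical usage (see SumPhase below): visit every expanded Phase that maps to
+// a given PhaseKind, regardless of which parent it appears under.
+//
+//   for (PhaseIter phase(phaseKind); !phase.done(); phase.next()) {
+//     sum += times[phase];
+//   }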
+
+static double t(TimeDuration duration) { return duration.ToMilliseconds(); }
+
+inline JSContext* Statistics::context() {
+ return gc->rt->mainContextFromOwnThread();
+}
+
+inline Phase Statistics::currentPhase() const {
+ return phaseStack.empty() ? Phase::NONE : phaseStack.back();
+}
+
+PhaseKind Statistics::currentPhaseKind() const {
+ // Public API to get the current phase kind, suppressing the synthetic
+ // PhaseKind::MUTATOR phase.
+
+ Phase phase = currentPhase();
+ MOZ_ASSERT_IF(phase == Phase::MUTATOR, phaseStack.length() == 1);
+ if (phase == Phase::NONE || phase == Phase::MUTATOR) {
+ return PhaseKind::NONE;
+ }
+
+ return phases[phase].phaseKind;
+}
+
+static Phase LookupPhaseWithParent(PhaseKind phaseKind, Phase parentPhase) {
+ for (PhaseIter phase(phaseKind); !phase.done(); phase.next()) {
+ if (phases[phase].parent == parentPhase) {
+ return phase;
+ }
+ }
+
+ return Phase::NONE;
+}
+
+Phase Statistics::lookupChildPhase(PhaseKind phaseKind) const {
+ if (phaseKind == PhaseKind::IMPLICIT_SUSPENSION) {
+ return Phase::IMPLICIT_SUSPENSION;
+ }
+ if (phaseKind == PhaseKind::EXPLICIT_SUSPENSION) {
+ return Phase::EXPLICIT_SUSPENSION;
+ }
+
+ MOZ_ASSERT(phaseKind < PhaseKind::LIMIT);
+
+ // Search all expanded phases that correspond to the required
+ // phase to find the one whose parent is the current expanded phase.
+ Phase phase = LookupPhaseWithParent(phaseKind, currentPhase());
+
+ if (phase == Phase::NONE) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "Child phase kind %u not found under current phase kind %u",
+ unsigned(phaseKind), unsigned(currentPhaseKind()));
+ }
+
+ return phase;
+}
+
+inline auto AllPhases() {
+ return mozilla::MakeEnumeratedRange(Phase::FIRST, Phase::LIMIT);
+}
+
+void Statistics::gcDuration(TimeDuration* total, TimeDuration* maxPause) const {
+ *total = *maxPause = 0;
+ for (auto& slice : slices_) {
+ *total += slice.duration();
+ if (slice.duration() > *maxPause) {
+ *maxPause = slice.duration();
+ }
+ }
+ if (*maxPause > maxPauseInInterval) {
+ maxPauseInInterval = *maxPause;
+ }
+}
+
+void Statistics::sccDurations(TimeDuration* total,
+ TimeDuration* maxPause) const {
+ *total = *maxPause = 0;
+ for (size_t i = 0; i < sccTimes.length(); i++) {
+ *total += sccTimes[i];
+ *maxPause = std::max(*maxPause, sccTimes[i]);
+ }
+}
+
+typedef Vector<UniqueChars, 8, SystemAllocPolicy> FragmentVector;
+
+static UniqueChars Join(const FragmentVector& fragments,
+ const char* separator = "") {
+ const size_t separatorLength = strlen(separator);
+ size_t length = 0;
+ for (size_t i = 0; i < fragments.length(); ++i) {
+ length += fragments[i] ? strlen(fragments[i].get()) : 0;
+ if (i < (fragments.length() - 1)) {
+ length += separatorLength;
+ }
+ }
+
+ char* joined = js_pod_malloc<char>(length + 1);
+ if (!joined) {
+ return UniqueChars();
+ }
+
+ joined[length] = '\0';
+ char* cursor = joined;
+ for (size_t i = 0; i < fragments.length(); ++i) {
+ if (fragments[i]) {
+ strcpy(cursor, fragments[i].get());
+ }
+ cursor += fragments[i] ? strlen(fragments[i].get()) : 0;
+ if (i < (fragments.length() - 1)) {
+ if (separatorLength) {
+ strcpy(cursor, separator);
+ }
+ cursor += separatorLength;
+ }
+ }
+
+ return UniqueChars(joined);
+}
+
+static TimeDuration SumChildTimes(
+ Phase phase, const Statistics::PhaseTimeTable& phaseTimes) {
+ TimeDuration total = 0;
+ for (phase = phases[phase].firstChild; phase != Phase::NONE;
+ phase = phases[phase].nextSibling) {
+ total += phaseTimes[phase];
+ }
+ return total;
+}
+
+UniqueChars Statistics::formatCompactSliceMessage() const {
+ // Skip if we OOM'ed.
+ if (slices_.length() == 0) {
+ return UniqueChars(nullptr);
+ }
+
+ const size_t index = slices_.length() - 1;
+ const SliceData& slice = slices_.back();
+
+ char budgetDescription[200];
+ slice.budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
+
+ const char* format =
+ "GC Slice %u - Pause: %.3fms of %s budget (@ %.3fms); Reason: %s; Reset: "
+ "%s%s; Times: ";
+ char buffer[1024];
+ SprintfLiteral(buffer, format, index, t(slice.duration()), budgetDescription,
+ t(slice.start - slices_[0].start),
+ ExplainGCReason(slice.reason),
+ slice.wasReset() ? "yes - " : "no",
+ slice.wasReset() ? ExplainAbortReason(slice.resetReason) : "");
+
+ FragmentVector fragments;
+ if (!fragments.append(DuplicateString(buffer)) ||
+ !fragments.append(
+ formatCompactSlicePhaseTimes(slices_[index].phaseTimes))) {
+ return UniqueChars(nullptr);
+ }
+ return Join(fragments);
+}
+
+UniqueChars Statistics::formatCompactSummaryMessage() const {
+ FragmentVector fragments;
+ if (!fragments.append(DuplicateString("Summary - "))) {
+ return UniqueChars(nullptr);
+ }
+
+ TimeDuration total, longest;
+ gcDuration(&total, &longest);
+
+ const double mmu20 = computeMMU(TimeDuration::FromMilliseconds(20));
+ const double mmu50 = computeMMU(TimeDuration::FromMilliseconds(50));
+
+ char buffer[1024];
+ if (!nonincremental()) {
+ SprintfLiteral(buffer,
+ "Max Pause: %.3fms; MMU 20ms: %.1f%%; MMU 50ms: %.1f%%; "
+ "Total: %.3fms; ",
+ t(longest), mmu20 * 100., mmu50 * 100., t(total));
+ } else {
+ SprintfLiteral(buffer, "Non-Incremental: %.3fms (%s); ", t(total),
+ ExplainAbortReason(nonincrementalReason_));
+ }
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+
+ SprintfLiteral(buffer,
+ "Zones: %d of %d (-%d); Compartments: %d of %d (-%d); "
+ "HeapSize: %.3f MiB; "
+ "HeapChange (abs): %+d (%u); ",
+ zoneStats.collectedZoneCount, zoneStats.zoneCount,
+ zoneStats.sweptZoneCount, zoneStats.collectedCompartmentCount,
+ zoneStats.compartmentCount, zoneStats.sweptCompartmentCount,
+ double(preTotalHeapBytes) / BYTES_PER_MB,
+ int32_t(counts[COUNT_NEW_CHUNK] - counts[COUNT_DESTROY_CHUNK]),
+ counts[COUNT_NEW_CHUNK] + counts[COUNT_DESTROY_CHUNK]);
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+
+ MOZ_ASSERT_IF(counts[COUNT_ARENA_RELOCATED], gckind == GC_SHRINK);
+ if (gckind == GC_SHRINK) {
+ SprintfLiteral(
+ buffer, "Kind: %s; Relocated: %.3f MiB; ",
+ ExplainInvocationKind(gckind),
+ double(ArenaSize * counts[COUNT_ARENA_RELOCATED]) / BYTES_PER_MB);
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+ }
+
+ return Join(fragments);
+}
+
+UniqueChars Statistics::formatCompactSlicePhaseTimes(
+ const PhaseTimeTable& phaseTimes) const {
+ static const TimeDuration MaxUnaccountedTime =
+ TimeDuration::FromMicroseconds(100);
+
+ FragmentVector fragments;
+ char buffer[128];
+ for (auto phase : AllPhases()) {
+ DebugOnly<uint8_t> level = phases[phase].depth;
+ MOZ_ASSERT(level < 4);
+
+ TimeDuration ownTime = phaseTimes[phase];
+ TimeDuration childTime = SumChildTimes(phase, phaseTimes);
+ if (ownTime > MaxUnaccountedTime) {
+ SprintfLiteral(buffer, "%s: %.3fms", phases[phase].name, t(ownTime));
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+
+ if (childTime && (ownTime - childTime) > MaxUnaccountedTime) {
+ MOZ_ASSERT(level < 3);
+ SprintfLiteral(buffer, "%s: %.3fms", "Other", t(ownTime - childTime));
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+ }
+ }
+ }
+ return Join(fragments, ", ");
+}
+
+UniqueChars Statistics::formatDetailedMessage() const {
+ FragmentVector fragments;
+
+ if (!fragments.append(formatDetailedDescription())) {
+ return UniqueChars(nullptr);
+ }
+
+ if (!slices_.empty()) {
+ for (unsigned i = 0; i < slices_.length(); i++) {
+ if (!fragments.append(formatDetailedSliceDescription(i, slices_[i]))) {
+ return UniqueChars(nullptr);
+ }
+ if (!fragments.append(formatDetailedPhaseTimes(slices_[i].phaseTimes))) {
+ return UniqueChars(nullptr);
+ }
+ }
+ }
+ if (!fragments.append(formatDetailedTotals())) {
+ return UniqueChars(nullptr);
+ }
+ if (!fragments.append(formatDetailedPhaseTimes(phaseTimes))) {
+ return UniqueChars(nullptr);
+ }
+
+ return Join(fragments);
+}
+
+UniqueChars Statistics::formatDetailedDescription() const {
+ TimeDuration sccTotal, sccLongest;
+ sccDurations(&sccTotal, &sccLongest);
+
+ const double mmu20 = computeMMU(TimeDuration::FromMilliseconds(20));
+ const double mmu50 = computeMMU(TimeDuration::FromMilliseconds(50));
+
+ const char* format =
+ "=================================================================\n\
+ Invocation Kind: %s\n\
+ Reason: %s\n\
+ Incremental: %s%s\n\
+ Zones Collected: %d of %d (-%d)\n\
+ Compartments Collected: %d of %d (-%d)\n\
+ MinorGCs since last GC: %d\n\
+ Store Buffer Overflows: %d\n\
+ MMU 20ms:%.1f%%; 50ms:%.1f%%\n\
+ SCC Sweep Total (MaxPause): %.3fms (%.3fms)\n\
+ HeapSize: %.3f MiB\n\
+ Chunk Delta (magnitude): %+d (%d)\n\
+ Arenas Relocated: %.3f MiB\n\
+";
+
+ char buffer[1024];
+ SprintfLiteral(
+ buffer, format, ExplainInvocationKind(gckind),
+ ExplainGCReason(slices_[0].reason), nonincremental() ? "no - " : "yes",
+ nonincremental() ? ExplainAbortReason(nonincrementalReason_) : "",
+ zoneStats.collectedZoneCount, zoneStats.zoneCount,
+ zoneStats.sweptZoneCount, zoneStats.collectedCompartmentCount,
+ zoneStats.compartmentCount, zoneStats.sweptCompartmentCount,
+ getCount(COUNT_MINOR_GC), getCount(COUNT_STOREBUFFER_OVERFLOW),
+ mmu20 * 100., mmu50 * 100., t(sccTotal), t(sccLongest),
+ double(preTotalHeapBytes) / BYTES_PER_MB,
+ getCount(COUNT_NEW_CHUNK) - getCount(COUNT_DESTROY_CHUNK),
+ getCount(COUNT_NEW_CHUNK) + getCount(COUNT_DESTROY_CHUNK),
+ double(ArenaSize * getCount(COUNT_ARENA_RELOCATED)) / BYTES_PER_MB);
+
+ return DuplicateString(buffer);
+}
+
+UniqueChars Statistics::formatDetailedSliceDescription(
+ unsigned i, const SliceData& slice) const {
+ char budgetDescription[200];
+ slice.budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
+
+ const char* format =
+ "\
+ ---- Slice %u ----\n\
+ Reason: %s\n\
+ Trigger: %s\n\
+ Reset: %s%s\n\
+ State: %s -> %s\n\
+ Page Faults: %" PRIu64
+ "\n\
+ Pause: %.3fms of %s budget (@ %.3fms)\n\
+";
+
+ char triggerBuffer[100] = "n/a";
+ if (slice.trigger) {
+ Trigger trigger = slice.trigger.value();
+ SprintfLiteral(triggerBuffer, "%.3f MiB of %.3f MiB threshold\n",
+ double(trigger.amount) / BYTES_PER_MB,
+ double(trigger.threshold) / BYTES_PER_MB);
+ }
+
+ char buffer[1024];
+ SprintfLiteral(
+ buffer, format, i, ExplainGCReason(slice.reason), triggerBuffer,
+ slice.wasReset() ? "yes - " : "no",
+ slice.wasReset() ? ExplainAbortReason(slice.resetReason) : "",
+ gc::StateName(slice.initialState), gc::StateName(slice.finalState),
+ uint64_t(slice.endFaults - slice.startFaults), t(slice.duration()),
+ budgetDescription, t(slice.start - slices_[0].start));
+ return DuplicateString(buffer);
+}
+
+static bool IncludePhase(TimeDuration duration) {
+ // Don't include durations that will print as "0.000ms".
+ return duration.ToMilliseconds() >= 0.001;
+}
+
+UniqueChars Statistics::formatDetailedPhaseTimes(
+ const PhaseTimeTable& phaseTimes) const {
+ static const TimeDuration MaxUnaccountedChildTime =
+ TimeDuration::FromMicroseconds(50);
+
+ FragmentVector fragments;
+ char buffer[128];
+ for (auto phase : AllPhases()) {
+ uint8_t level = phases[phase].depth;
+ TimeDuration ownTime = phaseTimes[phase];
+ TimeDuration childTime = SumChildTimes(phase, phaseTimes);
+ if (IncludePhase(ownTime)) {
+ SprintfLiteral(buffer, " %*s%s: %.3fms\n", level * 2, "",
+ phases[phase].name, t(ownTime));
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+
+ if (childTime && (ownTime - childTime) > MaxUnaccountedChildTime) {
+ SprintfLiteral(buffer, " %*s%s: %.3fms\n", (level + 1) * 2, "",
+ "Other", t(ownTime - childTime));
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+ }
+ }
+ }
+ return Join(fragments);
+}
+
+UniqueChars Statistics::formatDetailedTotals() const {
+ TimeDuration total, longest;
+ gcDuration(&total, &longest);
+
+ const char* format =
+ "\
+ ---- Totals ----\n\
+ Total Time: %.3fms\n\
+ Max Pause: %.3fms\n\
+";
+ char buffer[1024];
+ SprintfLiteral(buffer, format, t(total), t(longest));
+ return DuplicateString(buffer);
+}
+
+void Statistics::formatJsonSlice(size_t sliceNum, JSONPrinter& json) const {
+ /*
+ * We number each of the slice properties to keep the code in
+ * GCTelemetry.jsm in sync. See MAX_SLICE_KEYS.
+ */
+ json.beginObject();
+ formatJsonSliceDescription(sliceNum, slices_[sliceNum], json); // # 1-11
+
+ json.beginObjectProperty("times"); // # 12
+ formatJsonPhaseTimes(slices_[sliceNum].phaseTimes, json);
+ json.endObject();
+
+ json.endObject();
+}
+
+UniqueChars Statistics::renderJsonSlice(size_t sliceNum) const {
+ Sprinter printer(nullptr, false);
+ if (!printer.init()) {
+ return UniqueChars(nullptr);
+ }
+ JSONPrinter json(printer);
+
+ formatJsonSlice(sliceNum, json);
+ return printer.release();
+}
+
+UniqueChars Statistics::renderNurseryJson() const {
+ Sprinter printer(nullptr, false);
+ if (!printer.init()) {
+ return UniqueChars(nullptr);
+ }
+ JSONPrinter json(printer);
+ gc->nursery().renderProfileJSON(json);
+ return printer.release();
+}
+
+#ifdef DEBUG
+void Statistics::log(const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ if (gcDebugFile) {
+ TimeDuration sinceStart = TimeStamp::Now() - TimeStamp::ProcessCreation();
+ fprintf(gcDebugFile, "%12.3f: ", sinceStart.ToMicroseconds());
+ vfprintf(gcDebugFile, fmt, args);
+ fprintf(gcDebugFile, "\n");
+ fflush(gcDebugFile);
+ }
+ va_end(args);
+}
+#endif
+
+UniqueChars Statistics::renderJsonMessage() const {
+ /*
+ * The format of the JSON message is specified by the GCMajorMarkerPayload
+ * type in profiler.firefox.com
+ * https://github.com/firefox-devtools/profiler/blob/master/src/types/markers.js#L62
+ *
+ * All the properties listed here are created within the timings property
+ * of the GCMajor marker.
+ */
+ if (aborted) {
+ return DuplicateString("{status:\"aborted\"}"); // May return nullptr
+ }
+
+ Sprinter printer(nullptr, false);
+ if (!printer.init()) {
+ return UniqueChars(nullptr);
+ }
+ JSONPrinter json(printer);
+
+ json.beginObject();
+ json.property("status", "completed");
+ formatJsonDescription(json);
+
+ json.beginObjectProperty("totals");
+ formatJsonPhaseTimes(phaseTimes, json);
+ json.endObject();
+
+ json.endObject();
+
+ return printer.release();
+}
+
+void Statistics::formatJsonDescription(JSONPrinter& json) const {
+ // If you change JSON properties here, please update:
+ // Firefox Profiler:
+ // https://github.com/firefox-devtools/profiler
+
+ TimeDuration total, longest;
+ gcDuration(&total, &longest);
+ json.property("max_pause", longest, JSONPrinter::MILLISECONDS);
+ json.property("total_time", total, JSONPrinter::MILLISECONDS);
+  // We might be able to omit reason if profiler.firefox.com were able to
+  // retrieve it from the first slice, but it doesn't do this yet.
+ json.property("reason", ExplainGCReason(slices_[0].reason));
+ json.property("zones_collected", zoneStats.collectedZoneCount);
+ json.property("total_zones", zoneStats.zoneCount);
+ json.property("total_compartments", zoneStats.compartmentCount);
+ json.property("minor_gcs", getCount(COUNT_MINOR_GC));
+ uint32_t storebufferOverflows = getCount(COUNT_STOREBUFFER_OVERFLOW);
+ if (storebufferOverflows) {
+ json.property("store_buffer_overflows", storebufferOverflows);
+ }
+ json.property("slices", slices_.length());
+
+ const double mmu20 = computeMMU(TimeDuration::FromMilliseconds(20));
+ const double mmu50 = computeMMU(TimeDuration::FromMilliseconds(50));
+ json.property("mmu_20ms", int(mmu20 * 100));
+ json.property("mmu_50ms", int(mmu50 * 100));
+
+ TimeDuration sccTotal, sccLongest;
+ sccDurations(&sccTotal, &sccLongest);
+ json.property("scc_sweep_total", sccTotal, JSONPrinter::MILLISECONDS);
+ json.property("scc_sweep_max_pause", sccLongest, JSONPrinter::MILLISECONDS);
+
+ if (nonincrementalReason_ != GCAbortReason::None) {
+ json.property("nonincremental_reason",
+ ExplainAbortReason(nonincrementalReason_));
+ }
+ json.property("allocated_bytes", preTotalHeapBytes);
+ json.property("post_heap_size", postTotalHeapBytes);
+
+ uint32_t addedChunks = getCount(COUNT_NEW_CHUNK);
+ if (addedChunks) {
+ json.property("added_chunks", addedChunks);
+ }
+ uint32_t removedChunks = getCount(COUNT_DESTROY_CHUNK);
+ if (removedChunks) {
+ json.property("removed_chunks", removedChunks);
+ }
+ json.property("major_gc_number", startingMajorGCNumber);
+ json.property("minor_gc_number", startingMinorGCNumber);
+ json.property("slice_number", startingSliceNumber);
+}
+
+void Statistics::formatJsonSliceDescription(unsigned i, const SliceData& slice,
+ JSONPrinter& json) const {
+ // If you change JSON properties here, please update:
+ // Firefox Profiler:
+ // https://github.com/firefox-devtools/profiler
+ //
+ char budgetDescription[200];
+ slice.budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
+ TimeStamp originTime = TimeStamp::ProcessCreation();
+
+ json.property("slice", i);
+ json.property("pause", slice.duration(), JSONPrinter::MILLISECONDS);
+ json.property("reason", ExplainGCReason(slice.reason));
+ json.property("initial_state", gc::StateName(slice.initialState));
+ json.property("final_state", gc::StateName(slice.finalState));
+ json.property("budget", budgetDescription);
+ json.property("major_gc_number", startingMajorGCNumber);
+ if (slice.trigger) {
+ Trigger trigger = slice.trigger.value();
+ json.property("trigger_amount", trigger.amount);
+ json.property("trigger_threshold", trigger.threshold);
+ }
+ int64_t numFaults = slice.endFaults - slice.startFaults;
+ if (numFaults != 0) {
+ json.property("page_faults", numFaults);
+ }
+ json.property("start_timestamp", slice.start - originTime,
+ JSONPrinter::SECONDS);
+}
+
+void Statistics::formatJsonPhaseTimes(const PhaseTimeTable& phaseTimes,
+ JSONPrinter& json) const {
+ for (auto phase : AllPhases()) {
+ TimeDuration ownTime = phaseTimes[phase];
+ if (!ownTime.IsZero()) {
+ json.property(phases[phase].path, ownTime, JSONPrinter::MILLISECONDS);
+ }
+ }
+}
+
+Statistics::Statistics(GCRuntime* gc)
+ : gc(gc),
+ gcTimerFile(nullptr),
+ gcDebugFile(nullptr),
+ nonincrementalReason_(GCAbortReason::None),
+ creationTime_(ReallyNow()),
+ allocsSinceMinorGC({0, 0}),
+ preTotalHeapBytes(0),
+ postTotalHeapBytes(0),
+ preCollectedHeapBytes(0),
+ startingMinorGCNumber(0),
+ startingMajorGCNumber(0),
+ startingSliceNumber(0),
+ maxPauseInInterval(0),
+ sliceCallback(nullptr),
+ nurseryCollectionCallback(nullptr),
+ aborted(false),
+ enableProfiling_(false),
+ sliceCount_(0) {
+ for (auto& count : counts) {
+ count = 0;
+ }
+
+ for (auto& stat : stats) {
+ stat = 0;
+ }
+
+#ifdef DEBUG
+ for (const auto& duration : totalTimes_) {
+ using ElementType = std::remove_reference_t<decltype(duration)>;
+ static_assert(!std::is_trivially_constructible_v<ElementType>,
+ "Statistics::Statistics will only initialize "
+ "totalTimes_'s elements if their default constructor is "
+ "non-trivial");
+ MOZ_ASSERT(duration.IsZero(),
+ "totalTimes_ default-initialization should have "
+ "default-initialized every element of totalTimes_ to zero");
+ }
+#endif
+
+ MOZ_ALWAYS_TRUE(phaseStack.reserve(MAX_PHASE_NESTING));
+ MOZ_ALWAYS_TRUE(suspendedPhases.reserve(MAX_SUSPENDED_PHASES));
+
+ gcTimerFile = MaybeOpenFileFromEnv("MOZ_GCTIMER");
+ gcDebugFile = MaybeOpenFileFromEnv("JS_GC_DEBUG");
+
+ const char* env = getenv("JS_GC_PROFILE");
+ if (env) {
+ if (0 == strcmp(env, "help")) {
+ fprintf(stderr,
+ "JS_GC_PROFILE=N\n"
+ "\tReport major GC's taking more than N milliseconds.\n");
+ exit(0);
+ }
+ enableProfiling_ = true;
+ profileThreshold_ = TimeDuration::FromMilliseconds(atoi(env));
+ }
+}
+
+Statistics::~Statistics() {
+ if (gcTimerFile && gcTimerFile != stdout && gcTimerFile != stderr) {
+ fclose(gcTimerFile);
+ }
+ if (gcDebugFile && gcDebugFile != stdout && gcDebugFile != stderr) {
+ fclose(gcDebugFile);
+ }
+}
+
+/* static */
+bool Statistics::initialize() {
+#ifdef DEBUG
+ // Sanity check generated tables.
+ for (auto i : AllPhases()) {
+ auto parent = phases[i].parent;
+ if (parent != Phase::NONE) {
+ MOZ_ASSERT(phases[i].depth == phases[parent].depth + 1);
+ }
+ auto firstChild = phases[i].firstChild;
+ if (firstChild != Phase::NONE) {
+ MOZ_ASSERT(i == phases[firstChild].parent);
+ MOZ_ASSERT(phases[i].depth == phases[firstChild].depth - 1);
+ }
+ auto nextSibling = phases[i].nextSibling;
+ if (nextSibling != Phase::NONE) {
+ MOZ_ASSERT(parent == phases[nextSibling].parent);
+ MOZ_ASSERT(phases[i].depth == phases[nextSibling].depth);
+ }
+ auto nextWithPhaseKind = phases[i].nextWithPhaseKind;
+ if (nextWithPhaseKind != Phase::NONE) {
+ MOZ_ASSERT(phases[i].phaseKind == phases[nextWithPhaseKind].phaseKind);
+ MOZ_ASSERT(parent != phases[nextWithPhaseKind].parent);
+ }
+ }
+ for (auto i : AllPhaseKinds()) {
+ MOZ_ASSERT(phases[phaseKinds[i].firstPhase].phaseKind == i);
+ for (auto j : AllPhaseKinds()) {
+ MOZ_ASSERT_IF(i != j, phaseKinds[i].telemetryBucket !=
+ phaseKinds[j].telemetryBucket);
+ }
+ }
+#endif
+
+ return true;
+}
+
+JS::GCSliceCallback Statistics::setSliceCallback(
+ JS::GCSliceCallback newCallback) {
+ JS::GCSliceCallback oldCallback = sliceCallback;
+ sliceCallback = newCallback;
+ return oldCallback;
+}
+
+JS::GCNurseryCollectionCallback Statistics::setNurseryCollectionCallback(
+ JS::GCNurseryCollectionCallback newCallback) {
+ auto oldCallback = nurseryCollectionCallback;
+ nurseryCollectionCallback = newCallback;
+ return oldCallback;
+}
+
+TimeDuration Statistics::clearMaxGCPauseAccumulator() {
+ TimeDuration prior = maxPauseInInterval;
+ maxPauseInInterval = 0;
+ return prior;
+}
+
+TimeDuration Statistics::getMaxGCPauseSinceClear() {
+ return maxPauseInInterval;
+}
+
+// Sum up the time for a phase, including instances of the phase with different
+// parents.
+static TimeDuration SumPhase(PhaseKind phaseKind,
+ const Statistics::PhaseTimeTable& times) {
+ TimeDuration sum;
+ for (PhaseIter phase(phaseKind); !phase.done(); phase.next()) {
+ sum += times[phase];
+ }
+ return sum;
+}
+
+static bool CheckSelfTime(Phase parent, Phase child,
+ const Statistics::PhaseTimeTable& times,
+ const Statistics::PhaseTimeTable& selfTimes,
+ TimeDuration childTime) {
+ if (selfTimes[parent] < childTime) {
+ fprintf(
+ stderr,
+ "Parent %s time = %.3fms with %.3fms remaining, child %s time %.3fms\n",
+ phases[parent].name, times[parent].ToMilliseconds(),
+ selfTimes[parent].ToMilliseconds(), phases[child].name,
+ childTime.ToMilliseconds());
+ fflush(stderr);
+ return false;
+ }
+
+ return true;
+}
+
+using PhaseKindTimes =
+ EnumeratedArray<PhaseKind, PhaseKind::LIMIT, TimeDuration>;
+
+static PhaseKind FindLongestPhaseKind(const PhaseKindTimes& times) {
+ TimeDuration longestTime;
+ PhaseKind phaseKind = PhaseKind::NONE;
+ for (auto i : MajorGCPhaseKinds()) {
+ if (times[i] > longestTime) {
+ longestTime = times[i];
+ phaseKind = i;
+ }
+ }
+
+ return phaseKind;
+}
+
+static PhaseKind LongestPhaseSelfTimeInMajorGC(
+ const Statistics::PhaseTimeTable& times) {
+ // Start with total times per expanded phase, including children's times.
+ Statistics::PhaseTimeTable selfTimes(times);
+
+ // We have the total time spent in each phase, including descendant times.
+ // Loop over the children and subtract their times from their parent's self
+ // time.
+ for (auto i : AllPhases()) {
+ Phase parent = phases[i].parent;
+ if (parent != Phase::NONE) {
+ bool ok = CheckSelfTime(parent, i, times, selfTimes, times[i]);
+
+ // This happens very occasionally in release builds and frequently
+ // in Windows debug builds. Skip collecting longest phase telemetry
+ // if it does.
+#ifndef XP_WIN
+ MOZ_ASSERT(ok, "Inconsistent time data; see bug 1400153");
+#endif
+ if (!ok) {
+ return PhaseKind::NONE;
+ }
+
+ selfTimes[parent] -= times[i];
+ }
+ }
+
+ // Sum expanded phases corresponding to the same phase.
+ PhaseKindTimes phaseKindTimes;
+ for (auto i : AllPhaseKinds()) {
+ phaseKindTimes[i] = SumPhase(i, selfTimes);
+ }
+
+ return FindLongestPhaseKind(phaseKindTimes);
+}
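+
+// Worked example (illustrative): if a parent phase accumulated 10ms in total
+// and its only child accumulated 7ms, the parent's self time is 3ms; the
+// phase kind whose summed self time is largest across the major GC phases is
+// the one reported.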
+
+static TimeDuration PhaseMax(PhaseKind phaseKind,
+ const Statistics::PhaseTimeTable& times) {
+ TimeDuration max;
+ for (PhaseIter phase(phaseKind); !phase.done(); phase.next()) {
+ max = std::max(max, times[phase]);
+ }
+
+ return max;
+}
+
+static PhaseKind LongestParallelPhaseKind(
+ const Statistics::PhaseTimeTable& times) {
+ // Find longest time for each phase kind.
+ PhaseKindTimes phaseKindTimes;
+ for (auto i : AllPhaseKinds()) {
+ phaseKindTimes[i] = PhaseMax(i, times);
+ }
+
+ return FindLongestPhaseKind(phaseKindTimes);
+}
+
+void Statistics::printStats() {
+ if (aborted) {
+ fprintf(gcTimerFile,
+ "OOM during GC statistics collection. The report is unavailable "
+ "for this GC.\n");
+ } else {
+ UniqueChars msg = formatDetailedMessage();
+ if (msg) {
+ double secSinceStart =
+ (slices_[0].start - TimeStamp::ProcessCreation()).ToSeconds();
+ fprintf(gcTimerFile, "GC(T+%.3fs) %s\n", secSinceStart, msg.get());
+ }
+ }
+ fflush(gcTimerFile);
+}
+
+void Statistics::beginGC(JSGCInvocationKind kind,
+ const TimeStamp& currentTime) {
+ slices_.clearAndFree();
+ sccTimes.clearAndFree();
+ gckind = kind;
+ nonincrementalReason_ = GCAbortReason::None;
+
+ preTotalHeapBytes = gc->heapSize.bytes();
+
+ preCollectedHeapBytes = 0;
+
+ startingMajorGCNumber = gc->majorGCCount();
+ startingSliceNumber = gc->gcNumber();
+
+ if (gc->lastGCEndTime()) {
+ timeSinceLastGC = currentTime - gc->lastGCEndTime();
+ }
+}
+
+void Statistics::measureInitialHeapSize() {
+ MOZ_ASSERT(preCollectedHeapBytes == 0);
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ preCollectedHeapBytes += zone->gcHeapSize.bytes();
+ }
+}
+
+void Statistics::adoptHeapSizeDuringIncrementalGC(Zone* mergedZone) {
+  // A zone is being merged into a zone that's currently being collected, so we
+  // need to adjust our record of the total heap size of collected zones.
+ MOZ_ASSERT(gc->isIncrementalGCInProgress());
+ preCollectedHeapBytes += mergedZone->gcHeapSize.bytes();
+}
+
+void Statistics::endGC() {
+ postTotalHeapBytes = gc->heapSize.bytes();
+
+ sendGCTelemetry();
+}
+
+void Statistics::sendGCTelemetry() {
+ JSRuntime* runtime = gc->rt;
+ runtime->addTelemetry(JS_TELEMETRY_GC_IS_ZONE_GC,
+ !zoneStats.isFullCollection());
+ TimeDuration prepareTotal = SumPhase(PhaseKind::PREPARE, phaseTimes);
+ TimeDuration markTotal = SumPhase(PhaseKind::MARK, phaseTimes);
+ TimeDuration markRootsTotal = SumPhase(PhaseKind::MARK_ROOTS, phaseTimes);
+ TimeDuration markWeakTotal = phaseTimes[Phase::SWEEP_MARK_WEAK] +
+ phaseTimes[Phase::SWEEP_MARK_GRAY_WEAK];
+ TimeDuration markGrayTotal = phaseTimes[Phase::SWEEP_MARK_GRAY] +
+ phaseTimes[Phase::SWEEP_MARK_GRAY_WEAK];
+ size_t markCount = gc->marker.getMarkCount();
+ double markRate = markCount / t(markTotal);
+ runtime->addTelemetry(JS_TELEMETRY_GC_PREPARE_MS, t(prepareTotal));
+ runtime->addTelemetry(JS_TELEMETRY_GC_MARK_MS, t(markTotal));
+ runtime->addTelemetry(JS_TELEMETRY_GC_MARK_RATE_2, markRate);
+ runtime->addTelemetry(JS_TELEMETRY_GC_SWEEP_MS, t(phaseTimes[Phase::SWEEP]));
+ if (gc->didCompactZones()) {
+ runtime->addTelemetry(JS_TELEMETRY_GC_COMPACT_MS,
+ t(phaseTimes[Phase::COMPACT]));
+ }
+ runtime->addTelemetry(JS_TELEMETRY_GC_MARK_ROOTS_US,
+ markRootsTotal.ToMicroseconds());
+ runtime->addTelemetry(JS_TELEMETRY_GC_MARK_GRAY_MS_2, t(markGrayTotal));
+ runtime->addTelemetry(JS_TELEMETRY_GC_MARK_WEAK_MS, t(markWeakTotal));
+ runtime->addTelemetry(JS_TELEMETRY_GC_NON_INCREMENTAL, nonincremental());
+ if (nonincremental()) {
+ runtime->addTelemetry(JS_TELEMETRY_GC_NON_INCREMENTAL_REASON,
+ uint32_t(nonincrementalReason_));
+ }
+
+#ifdef DEBUG
+ // Reset happens non-incrementally, so only the last slice can be reset.
+ for (size_t i = 0; i < slices_.length() - 1; i++) {
+ MOZ_ASSERT(!slices_[i].wasReset());
+ }
+#endif
+ const auto& lastSlice = slices_.back();
+ runtime->addTelemetry(JS_TELEMETRY_GC_RESET, lastSlice.wasReset());
+ if (lastSlice.wasReset()) {
+ runtime->addTelemetry(JS_TELEMETRY_GC_RESET_REASON,
+ uint32_t(lastSlice.resetReason));
+ }
+
+ TimeDuration total, longest;
+ gcDuration(&total, &longest);
+
+ runtime->addTelemetry(JS_TELEMETRY_GC_MS, t(total));
+ runtime->addTelemetry(JS_TELEMETRY_GC_MAX_PAUSE_MS_2, t(longest));
+
+ const double mmu50 = computeMMU(TimeDuration::FromMilliseconds(50));
+ runtime->addTelemetry(JS_TELEMETRY_GC_MMU_50, mmu50 * 100);
+
+ // Record scheduling telemetry for the main runtime but not for workers, which
+ // are scheduled differently.
+ if (!runtime->parentRuntime && timeSinceLastGC) {
+ runtime->addTelemetry(JS_TELEMETRY_GC_TIME_BETWEEN_S,
+ timeSinceLastGC.ToSeconds());
+ if (!nonincremental()) {
+ runtime->addTelemetry(JS_TELEMETRY_GC_SLICE_COUNT, slices_.length());
+ }
+ }
+
+ if (!lastSlice.wasReset()) {
+ size_t bytesSurvived = 0;
+ for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
+ if (zone->wasCollected()) {
+ bytesSurvived += zone->gcHeapSize.retainedBytes();
+ }
+ }
+
+ MOZ_ASSERT(preCollectedHeapBytes >= bytesSurvived);
+    double survivalRate =
+        100.0 * double(bytesSurvived) / double(preCollectedHeapBytes);
+    runtime->addTelemetry(JS_TELEMETRY_GC_TENURED_SURVIVAL_RATE,
+                          uint32_t(survivalRate));
+
+ // Calculate 'effectiveness' in MB / second, on main thread only for now.
+ if (!runtime->parentRuntime) {
+ size_t bytesFreed = preCollectedHeapBytes - bytesSurvived;
+ TimeDuration clampedTotal =
+ TimeDuration::Max(total, TimeDuration::FromMilliseconds(1));
+ double effectiveness =
+ (double(bytesFreed) / BYTES_PER_MB) / clampedTotal.ToSeconds();
+ runtime->addTelemetry(JS_TELEMETRY_GC_EFFECTIVENESS,
+ uint32_t(effectiveness));
+ }
+ }
+}
+
+void Statistics::beginNurseryCollection(JS::GCReason reason) {
+ count(COUNT_MINOR_GC);
+ startingMinorGCNumber = gc->minorGCCount();
+ if (nurseryCollectionCallback) {
+ (*nurseryCollectionCallback)(
+ context(), JS::GCNurseryProgress::GC_NURSERY_COLLECTION_START, reason);
+ }
+}
+
+void Statistics::endNurseryCollection(JS::GCReason reason) {
+ if (nurseryCollectionCallback) {
+ (*nurseryCollectionCallback)(
+ context(), JS::GCNurseryProgress::GC_NURSERY_COLLECTION_END, reason);
+ }
+
+ allocsSinceMinorGC = {0, 0};
+}
+
+Statistics::SliceData::SliceData(SliceBudget budget, Maybe<Trigger> trigger,
+ JS::GCReason reason, TimeStamp start,
+ size_t startFaults, gc::State initialState)
+ : budget(budget),
+ reason(reason),
+ trigger(trigger),
+ initialState(initialState),
+ start(start),
+ startFaults(startFaults) {}
+
+void Statistics::beginSlice(const ZoneGCStats& zoneStats,
+ JSGCInvocationKind gckind, SliceBudget budget,
+ JS::GCReason reason) {
+ MOZ_ASSERT(phaseStack.empty() ||
+ (phaseStack.length() == 1 && phaseStack[0] == Phase::MUTATOR));
+
+ this->zoneStats = zoneStats;
+
+ TimeStamp currentTime = ReallyNow();
+
+ bool first = !gc->isIncrementalGCInProgress();
+ if (first) {
+ beginGC(gckind, currentTime);
+ }
+
+ JSRuntime* runtime = gc->rt;
+ if (!runtime->parentRuntime && !slices_.empty()) {
+ TimeDuration timeSinceLastSlice = currentTime - slices_.back().end;
+ runtime->addTelemetry(JS_TELEMETRY_GC_TIME_BETWEEN_SLICES_MS,
+ uint32_t(timeSinceLastSlice.ToMilliseconds()));
+ }
+
+ Maybe<Trigger> trigger = recordedTrigger;
+ recordedTrigger.reset();
+
+ if (!slices_.emplaceBack(budget, trigger, reason, currentTime,
+ GetPageFaultCount(), gc->state())) {
+ // If we are OOM, set a flag to indicate we have missing slice data.
+ aborted = true;
+ return;
+ }
+
+ runtime->addTelemetry(JS_TELEMETRY_GC_REASON, uint32_t(reason));
+
+ // Slice callbacks should only fire for the outermost level.
+ bool wasFullGC = zoneStats.isFullCollection();
+ if (sliceCallback) {
+ JSContext* cx = context();
+ JS::GCDescription desc(!wasFullGC, false, gckind, reason);
+ if (first) {
+ (*sliceCallback)(cx, JS::GC_CYCLE_BEGIN, desc);
+ }
+ (*sliceCallback)(cx, JS::GC_SLICE_BEGIN, desc);
+ }
+
+ log("begin slice");
+}
+
+void Statistics::endSlice() {
+ MOZ_ASSERT(phaseStack.empty() ||
+ (phaseStack.length() == 1 && phaseStack[0] == Phase::MUTATOR));
+
+ if (!aborted) {
+ auto& slice = slices_.back();
+ slice.end = ReallyNow();
+ slice.endFaults = GetPageFaultCount();
+ slice.finalState = gc->state();
+
+ log("end slice");
+
+ sendSliceTelemetry(slice);
+
+ sliceCount_++;
+ }
+
+ bool last = !gc->isIncrementalGCInProgress();
+ if (last) {
+ if (gcTimerFile) {
+ printStats();
+ }
+
+ if (!aborted) {
+ endGC();
+ }
+ }
+
+ if (enableProfiling_ && !aborted &&
+ slices_.back().duration() >= profileThreshold_) {
+ printSliceProfile();
+ }
+
+ // Slice callbacks should only fire for the outermost level.
+ if (!aborted) {
+ bool wasFullGC = zoneStats.isFullCollection();
+ if (sliceCallback) {
+ JSContext* cx = context();
+ JS::GCDescription desc(!wasFullGC, last, gckind, slices_.back().reason);
+ (*sliceCallback)(cx, JS::GC_SLICE_END, desc);
+ if (last) {
+ (*sliceCallback)(cx, JS::GC_CYCLE_END, desc);
+ }
+ }
+ }
+
+ // Do this after the slice callback since it uses these values.
+ if (last) {
+ for (auto& count : counts) {
+ count = 0;
+ }
+
+ // Clear the timers at the end of a GC, preserving the data for
+ // PhaseKind::MUTATOR.
+ auto mutatorStartTime = phaseStartTimes[Phase::MUTATOR];
+ auto mutatorTime = phaseTimes[Phase::MUTATOR];
+
+ for (mozilla::TimeStamp& t : phaseStartTimes) {
+ t = TimeStamp();
+ }
+#ifdef DEBUG
+ for (mozilla::TimeStamp& t : phaseEndTimes) {
+ t = TimeStamp();
+ }
+#endif
+
+ for (TimeDuration& duration : phaseTimes) {
+ duration = TimeDuration();
+ MOZ_ASSERT(duration.IsZero());
+ }
+
+ phaseStartTimes[Phase::MUTATOR] = mutatorStartTime;
+ phaseTimes[Phase::MUTATOR] = mutatorTime;
+ }
+
+ aborted = false;
+}
+
+void Statistics::sendSliceTelemetry(const SliceData& slice) {
+ JSRuntime* runtime = gc->rt;
+ TimeDuration sliceTime = slice.end - slice.start;
+ runtime->addTelemetry(JS_TELEMETRY_GC_SLICE_MS, t(sliceTime));
+
+ if (slice.budget.isTimeBudget()) {
+ int64_t budget_ms = slice.budget.timeBudget.budget;
+ runtime->addTelemetry(JS_TELEMETRY_GC_BUDGET_MS_2, budget_ms);
+ if (IsCurrentlyAnimating(runtime->lastAnimationTime, slice.end)) {
+ runtime->addTelemetry(JS_TELEMETRY_GC_ANIMATION_MS, t(sliceTime));
+ }
+
+    // If the slice ran 1.5 times or 5ms over its budget (whichever is
+    // smaller), record the phase with the longest self time.
+ double longSliceThreshold = std::min(1.5 * budget_ms, budget_ms + 5.0);
+ if (sliceTime.ToMilliseconds() > longSliceThreshold) {
+ PhaseKind longest = LongestPhaseSelfTimeInMajorGC(slice.phaseTimes);
+ reportLongestPhaseInMajorGC(longest, JS_TELEMETRY_GC_SLOW_PHASE);
+
+ // If the longest phase was waiting for parallel tasks then record the
+ // longest task.
+ if (longest == PhaseKind::JOIN_PARALLEL_TASKS) {
+ PhaseKind longestParallel =
+ LongestParallelPhaseKind(slice.maxParallelTimes);
+ reportLongestPhaseInMajorGC(longestParallel, JS_TELEMETRY_GC_SLOW_TASK);
+ }
+ }
+
+ // Record how long we went over budget.
+ int64_t overrun = int64_t(sliceTime.ToMicroseconds()) - (1000 * budget_ms);
+ if (overrun > 0) {
+ runtime->addTelemetry(JS_TELEMETRY_GC_BUDGET_OVERRUN, uint32_t(overrun));
+ }
+ }
+}
+
+void Statistics::reportLongestPhaseInMajorGC(PhaseKind longest,
+ int telemetryId) {
+ JSRuntime* runtime = gc->rt;
+ if (longest != PhaseKind::NONE) {
+ uint8_t bucket = phaseKinds[longest].telemetryBucket;
+ runtime->addTelemetry(telemetryId, bucket);
+ }
+}
+
+bool Statistics::startTimingMutator() {
+ if (phaseStack.length() != 0) {
+ // Should only be called from outside of GC.
+ MOZ_ASSERT(phaseStack.length() == 1);
+ MOZ_ASSERT(phaseStack[0] == Phase::MUTATOR);
+ return false;
+ }
+
+ MOZ_ASSERT(suspendedPhases.empty());
+
+ timedGCTime = 0;
+ phaseStartTimes[Phase::MUTATOR] = TimeStamp();
+ phaseTimes[Phase::MUTATOR] = 0;
+ timedGCStart = TimeStamp();
+
+ beginPhase(PhaseKind::MUTATOR);
+ return true;
+}
+
+bool Statistics::stopTimingMutator(double& mutator_ms, double& gc_ms) {
+ // This should only be called from outside of GC, while timing the mutator.
+ if (phaseStack.length() != 1 || phaseStack[0] != Phase::MUTATOR) {
+ return false;
+ }
+
+ endPhase(PhaseKind::MUTATOR);
+ mutator_ms = t(phaseTimes[Phase::MUTATOR]);
+ gc_ms = t(timedGCTime);
+
+ return true;
+}
+
+void Statistics::suspendPhases(PhaseKind suspension) {
+ MOZ_ASSERT(suspension == PhaseKind::EXPLICIT_SUSPENSION ||
+ suspension == PhaseKind::IMPLICIT_SUSPENSION);
+ while (!phaseStack.empty()) {
+ MOZ_ASSERT(suspendedPhases.length() < MAX_SUSPENDED_PHASES);
+ Phase parent = phaseStack.back();
+ suspendedPhases.infallibleAppend(parent);
+ recordPhaseEnd(parent);
+ }
+ suspendedPhases.infallibleAppend(lookupChildPhase(suspension));
+}
+
+void Statistics::resumePhases() {
+ MOZ_ASSERT(suspendedPhases.back() == Phase::EXPLICIT_SUSPENSION ||
+ suspendedPhases.back() == Phase::IMPLICIT_SUSPENSION);
+ suspendedPhases.popBack();
+
+ while (!suspendedPhases.empty() &&
+ suspendedPhases.back() != Phase::EXPLICIT_SUSPENSION &&
+ suspendedPhases.back() != Phase::IMPLICIT_SUSPENSION) {
+ Phase resumePhase = suspendedPhases.popCopy();
+ if (resumePhase == Phase::MUTATOR) {
+ timedGCTime += ReallyNow() - timedGCStart;
+ }
+ recordPhaseBegin(resumePhase);
+ }
+}
+
+void Statistics::beginPhase(PhaseKind phaseKind) {
+ // No longer timing these phases. We should never see these.
+ MOZ_ASSERT(phaseKind != PhaseKind::GC_BEGIN &&
+ phaseKind != PhaseKind::GC_END);
+
+ // PhaseKind::MUTATOR is suspended while performing GC.
+ if (currentPhase() == Phase::MUTATOR) {
+ suspendPhases(PhaseKind::IMPLICIT_SUSPENSION);
+ }
+
+ recordPhaseBegin(lookupChildPhase(phaseKind));
+}
+
+void Statistics::recordPhaseBegin(Phase phase) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(gc->rt));
+
+ // Guard against any other re-entry.
+ MOZ_ASSERT(!phaseStartTimes[phase]);
+
+ MOZ_ASSERT(phaseStack.length() < MAX_PHASE_NESTING);
+
+ Phase current = currentPhase();
+ MOZ_ASSERT(phases[phase].parent == current);
+
+ TimeStamp now = ReallyNow();
+
+ if (current != Phase::NONE) {
+ MOZ_ASSERT(now >= phaseStartTimes[currentPhase()],
+ "Inconsistent time data; see bug 1400153");
+ if (now < phaseStartTimes[currentPhase()]) {
+ now = phaseStartTimes[currentPhase()];
+ aborted = true;
+ }
+ }
+
+ phaseStack.infallibleAppend(phase);
+ phaseStartTimes[phase] = now;
+ log("begin: %s", phases[phase].path);
+}
+
+void Statistics::recordPhaseEnd(Phase phase) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(gc->rt));
+
+ MOZ_ASSERT(phaseStartTimes[phase]);
+
+ TimeStamp now = ReallyNow();
+
+ // Make sure this phase ends after it starts.
+ MOZ_ASSERT(now >= phaseStartTimes[phase],
+ "Inconsistent time data; see bug 1400153");
+
+#ifdef DEBUG
+ // Make sure this phase ends after all of its children. Note that some
+ // children might not have run in this instance, in which case they will
+ // have run in a previous instance of this parent or not at all.
+ for (Phase kid = phases[phase].firstChild; kid != Phase::NONE;
+ kid = phases[kid].nextSibling) {
+ if (phaseEndTimes[kid].IsNull()) {
+ continue;
+ }
+ if (phaseEndTimes[kid] > now) {
+ fprintf(stderr,
+ "Parent %s ended at %.3fms, before child %s ended at %.3fms?\n",
+ phases[phase].name, t(now - TimeStamp::ProcessCreation()),
+ phases[kid].name,
+ t(phaseEndTimes[kid] - TimeStamp::ProcessCreation()));
+ }
+ MOZ_ASSERT(phaseEndTimes[kid] <= now,
+ "Inconsistent time data; see bug 1400153");
+ }
+#endif
+
+ if (now < phaseStartTimes[phase]) {
+ now = phaseStartTimes[phase];
+ aborted = true;
+ }
+
+ if (phase == Phase::MUTATOR) {
+ timedGCStart = now;
+ }
+
+ phaseStack.popBack();
+
+ TimeDuration t = now - phaseStartTimes[phase];
+ if (!slices_.empty()) {
+ slices_.back().phaseTimes[phase] += t;
+ }
+ phaseTimes[phase] += t;
+ phaseStartTimes[phase] = TimeStamp();
+
+#ifdef DEBUG
+ phaseEndTimes[phase] = now;
+ log("end: %s", phases[phase].path);
+#endif
+}
+
+void Statistics::endPhase(PhaseKind phaseKind) {
+ Phase phase = currentPhase();
+ MOZ_ASSERT(phase != Phase::NONE);
+ MOZ_ASSERT(phases[phase].phaseKind == phaseKind);
+
+ recordPhaseEnd(phase);
+
+ // When emptying the stack, we may need to return to timing the mutator
+ // (PhaseKind::MUTATOR).
+ if (phaseStack.empty() && !suspendedPhases.empty() &&
+ suspendedPhases.back() == Phase::IMPLICIT_SUSPENSION) {
+ resumePhases();
+ }
+}
+
+void Statistics::recordParallelPhase(PhaseKind phaseKind,
+ TimeDuration duration) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(gc->rt));
+
+ if (aborted) {
+ return;
+ }
+
+ // Record the maximum task time for each phase. Don't record times for parent
+ // phases.
+ Phase phase = lookupChildPhase(phaseKind);
+ TimeDuration& time = slices_.back().maxParallelTimes[phase];
+ time = std::max(time, duration);
+}
+
+TimeStamp Statistics::beginSCC() { return ReallyNow(); }
+
+void Statistics::endSCC(unsigned scc, TimeStamp start) {
+ if (scc >= sccTimes.length() && !sccTimes.resize(scc + 1)) {
+ return;
+ }
+
+ sccTimes[scc] += ReallyNow() - start;
+}
+
+/*
+ * MMU (minimum mutator utilization) is a measure of how much garbage collection
+ * is affecting the responsiveness of the system. MMU measurements are given
+ * with respect to a certain window size. If we report MMU(50ms) = 80%, then
+ * that means that, for any 50ms window of time, at least 80% of the window is
+ * devoted to the mutator. In other words, the GC is running for at most 20% of
+ * the window, or 10ms. The GC can run multiple slices during the 50ms window
+ * as long as the total time it spends is at most 10ms.
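+ *
+ * As an illustrative example (numbers made up): if the worst 50ms window
+ * contains two GC slices of 4ms each, the GC occupies 8ms of that window,
+ * so MMU(50ms) = (50 - 8) / 50 = 84%.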
+ */
+double Statistics::computeMMU(TimeDuration window) const {
+ MOZ_ASSERT(!slices_.empty());
+
+ TimeDuration gc = slices_[0].end - slices_[0].start;
+ TimeDuration gcMax = gc;
+
+ if (gc >= window) {
+ return 0.0;
+ }
+
+ int startIndex = 0;
+ for (size_t endIndex = 1; endIndex < slices_.length(); endIndex++) {
+ auto* startSlice = &slices_[startIndex];
+ auto& endSlice = slices_[endIndex];
+ gc += endSlice.end - endSlice.start;
+
+ while (endSlice.end - startSlice->end >= window) {
+ gc -= startSlice->end - startSlice->start;
+ startSlice = &slices_[++startIndex];
+ }
+
+ TimeDuration cur = gc;
+ if (endSlice.end - startSlice->start > window) {
+ cur -= (endSlice.end - startSlice->start - window);
+ }
+ if (cur > gcMax) {
+ gcMax = cur;
+ }
+ }
+
+ return double((window - gcMax) / window);
+}
+
+void Statistics::maybePrintProfileHeaders() {
+ static int printedHeader = 0;
+ if ((printedHeader++ % 200) == 0) {
+ printProfileHeader();
+ if (gc->nursery().enableProfiling()) {
+ Nursery::printProfileHeader();
+ }
+ }
+}
+
+void Statistics::printProfileHeader() {
+ if (!enableProfiling_) {
+ return;
+ }
+
+ fprintf(stderr, "MajorGC: Timestamp Reason States FSNR ");
+ fprintf(stderr, " %6s", "budget");
+ fprintf(stderr, " %6s", "total");
+#define PRINT_PROFILE_HEADER(name, text, phase) fprintf(stderr, " %6s", text);
+ FOR_EACH_GC_PROFILE_TIME(PRINT_PROFILE_HEADER)
+#undef PRINT_PROFILE_HEADER
+ fprintf(stderr, "\n");
+}
+
+/* static */
+void Statistics::printProfileTimes(const ProfileDurations& times) {
+ for (auto time : times) {
+ fprintf(stderr, " %6" PRIi64, static_cast<int64_t>(time.ToMilliseconds()));
+ }
+ fprintf(stderr, "\n");
+}
+
+void Statistics::printSliceProfile() {
+ const SliceData& slice = slices_.back();
+
+ maybePrintProfileHeaders();
+
+ TimeDuration ts = slice.end - creationTime();
+
+ bool shrinking = gckind == GC_SHRINK;
+ bool reset = slice.resetReason != GCAbortReason::None;
+ bool nonIncremental = nonincrementalReason_ != GCAbortReason::None;
+ bool full = zoneStats.isFullCollection();
+
+ fprintf(stderr, "MajorGC: %10.6f %-20.20s %1d -> %1d %1s%1s%1s%1s ",
+ ts.ToSeconds(), ExplainGCReason(slice.reason),
+ int(slice.initialState), int(slice.finalState), full ? "F" : "",
+ shrinking ? "S" : "", nonIncremental ? "N" : "", reset ? "R" : "");
+
+ if (!nonIncremental && !slice.budget.isUnlimited() &&
+ slice.budget.isTimeBudget()) {
+ fprintf(stderr, " %6" PRIi64,
+ static_cast<int64_t>(slice.budget.timeBudget.budget));
+ } else {
+ fprintf(stderr, " ");
+ }
+
+ ProfileDurations times;
+ times[ProfileKey::Total] = slice.duration();
+ totalTimes_[ProfileKey::Total] += times[ProfileKey::Total];
+
+#define GET_PROFILE_TIME(name, text, phase) \
+ times[ProfileKey::name] = SumPhase(phase, slice.phaseTimes); \
+ totalTimes_[ProfileKey::name] += times[ProfileKey::name];
+ FOR_EACH_GC_PROFILE_TIME(GET_PROFILE_TIME)
+#undef GET_PROFILE_TIME
+
+ printProfileTimes(times);
+}
+
+void Statistics::printTotalProfileTimes() {
+ if (enableProfiling_) {
+ fprintf(stderr,
+ "MajorGC TOTALS: %7" PRIu64 " slices: ",
+ sliceCount_);
+ printProfileTimes(totalTimes_);
+ }
+}
diff --git a/js/src/gc/Statistics.h b/js/src/gc/Statistics.h
new file mode 100644
index 0000000000..9c8ccc3be3
--- /dev/null
+++ b/js/src/gc/Statistics.h
@@ -0,0 +1,578 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Statistics_h
+#define gc_Statistics_h
+
+#include "mozilla/Array.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/IntegerRange.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/TimeStamp.h"
+
+#include "jspubtd.h"
+#include "NamespaceImports.h"
+
+#include "gc/GCEnum.h"
+#include "js/AllocPolicy.h"
+#include "js/SliceBudget.h"
+#include "js/UniquePtr.h"
+#include "js/Vector.h"
+#include "vm/JSONPrinter.h"
+
+namespace js {
+namespace gcstats {
+
+// Phase data is generated by a script. If you need to add phases, edit
+// js/src/gc/GenerateStatsPhases.py
+
+#include "gc/StatsPhasesGenerated.h"
+
+// Counts can be incremented with Statistics::count(). They're reset at the end
+// of a Major GC.
+enum Count {
+ COUNT_NEW_CHUNK,
+ COUNT_DESTROY_CHUNK,
+ COUNT_MINOR_GC,
+
+ // Number of times a 'put' into a storebuffer overflowed, triggering a
+ // compaction
+ COUNT_STOREBUFFER_OVERFLOW,
+
+ // Number of arenas relocated by compacting GC.
+ COUNT_ARENA_RELOCATED,
+
+ COUNT_LIMIT
+};
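+
+// For example, the store buffer bumps COUNT_STOREBUFFER_OVERFLOW when it is
+// about to overflow (see StoreBuffer::setAboutToOverflow in StoreBuffer.cpp):
+//
+//   runtime->gc.stats().count(gcstats::COUNT_STOREBUFFER_OVERFLOW);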
+
+// Stats can be set with Statistics::setStat(). They're not reset automatically.
+enum Stat {
+ // Number of strings tenured.
+ STAT_STRINGS_TENURED,
+
+ // Number of strings deduplicated.
+ STAT_STRINGS_DEDUPLICATED,
+
+ // Number of realms that had nursery strings disabled due to large numbers
+ // being tenured.
+ STAT_NURSERY_STRING_REALMS_DISABLED,
+
+ // Number of BigInts tenured.
+ STAT_BIGINTS_TENURED,
+
+ // Number of realms that had nursery BigInts disabled due to large numbers
+ // being tenured.
+ STAT_NURSERY_BIGINT_REALMS_DISABLED,
+
+ STAT_LIMIT
+};
+
+struct ZoneGCStats {
+ /* Number of zones collected in this GC. */
+ int collectedZoneCount = 0;
+
+ /* Number of zones that could have been collected in this GC. */
+ int collectableZoneCount = 0;
+
+ /* Total number of zones in the Runtime at the start of this GC. */
+ int zoneCount = 0;
+
+ /* Number of zones swept in this GC. */
+ int sweptZoneCount = 0;
+
+ /* Total number of compartments in all zones collected. */
+ int collectedCompartmentCount = 0;
+
+ /* Total number of compartments in the Runtime at the start of this GC. */
+ int compartmentCount = 0;
+
+ /* Total number of compartments swept by this GC. */
+ int sweptCompartmentCount = 0;
+
+ bool isFullCollection() const {
+ return collectedZoneCount == collectableZoneCount;
+ }
+
+ ZoneGCStats() = default;
+};
+
+struct Trigger {
+ size_t amount = 0;
+ size_t threshold = 0;
+};
+
+#define FOR_EACH_GC_PROFILE_TIME(_) \
+ _(BeginCallback, "bgnCB", PhaseKind::GC_BEGIN) \
+ _(MinorForMajor, "evct4m", PhaseKind::EVICT_NURSERY_FOR_MAJOR_GC) \
+ _(WaitBgThread, "waitBG", PhaseKind::WAIT_BACKGROUND_THREAD) \
+ _(Prepare, "prep", PhaseKind::PREPARE) \
+ _(Mark, "mark", PhaseKind::MARK) \
+ _(Sweep, "sweep", PhaseKind::SWEEP) \
+ _(Compact, "cmpct", PhaseKind::COMPACT) \
+ _(EndCallback, "endCB", PhaseKind::GC_END) \
+ _(MinorGC, "minor", PhaseKind::MINOR_GC) \
+ _(EvictNursery, "evict", PhaseKind::EVICT_NURSERY) \
+ _(Barriers, "brrier", PhaseKind::BARRIER)
+
+const char* ExplainAbortReason(GCAbortReason reason);
+const char* ExplainInvocationKind(JSGCInvocationKind gckind);
+
+/*
+ * Struct for collecting timing statistics on a "phase tree". The tree is
+ * specified as a limited DAG, but the timings are collected for the whole tree
+ * that you would get by expanding out the DAG by duplicating subtrees rooted
+ * at nodes with multiple parents.
+ *
+ * During execution, a child phase can be activated multiple times, and the
+ * total time will be accumulated. (So for example, you can start and end
+ * PhaseKind::MARK_ROOTS multiple times before completing the parent phase.)
+ *
+ * Incremental GC is represented by recording separate timing results for each
+ * slice within the overall GC.
+ */
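+
+// Illustrative sketch (not part of the interface; phase names chosen for
+// illustration): the collector drives this struct through the AutoPhase RAII
+// helper declared at the end of this header. Given a Statistics& stats:
+//
+//   {
+//     gcstats::AutoPhase ap1(stats, gcstats::PhaseKind::MARK);
+//     {
+//       gcstats::AutoPhase ap2(stats, gcstats::PhaseKind::MARK_ROOTS);
+//       // ... time spent here accrues to MARK_ROOTS ...
+//     }
+//     // ... remaining time accrues to MARK ...
+//   }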
+struct Statistics {
+ template <typename T, size_t Length>
+ using Array = mozilla::Array<T, Length>;
+
+ template <typename IndexType, IndexType SizeAsEnumValue, typename ValueType>
+ using EnumeratedArray =
+ mozilla::EnumeratedArray<IndexType, SizeAsEnumValue, ValueType>;
+
+ using TimeDuration = mozilla::TimeDuration;
+ using TimeStamp = mozilla::TimeStamp;
+
+ // Create a convenient type for referring to tables of phase times.
+ using PhaseTimeTable = EnumeratedArray<Phase, Phase::LIMIT, TimeDuration>;
+
+ static MOZ_MUST_USE bool initialize();
+
+ explicit Statistics(gc::GCRuntime* gc);
+ ~Statistics();
+
+ Statistics(const Statistics&) = delete;
+ Statistics& operator=(const Statistics&) = delete;
+
+ void beginPhase(PhaseKind phaseKind);
+ void endPhase(PhaseKind phaseKind);
+ void recordParallelPhase(PhaseKind phaseKind, TimeDuration duration);
+
+ // Occasionally, we may be in the middle of something that is tracked by
+ // this class, and we need to do something unusual (eg evict the nursery)
+ // that doesn't normally nest within the current phase. Suspend the
+ // currently tracked phase stack, at which time the caller is free to do
+ // other tracked operations.
+ //
+ // This also happens internally with the PhaseKind::MUTATOR "phase". While in
+ // this phase, any beginPhase will automatically suspend the non-GC phase,
+ // until that inner stack is complete, at which time it will automatically
+ // resume the non-GC phase. Explicit suspensions do not get auto-resumed.
+ void suspendPhases(PhaseKind suspension = PhaseKind::EXPLICIT_SUSPENSION);
+
+ // Resume a suspended stack of phases.
+ void resumePhases();
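+
+  // Illustrative sketch (hypothetical caller): an explicit suspension around
+  // work that should not be attributed to the current phase stack:
+  //
+  //   stats.suspendPhases();  // defaults to PhaseKind::EXPLICIT_SUSPENSION
+  //   /* ... do work that is untracked or tracked separately ... */
+  //   stats.resumePhases();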
+
+ void beginSlice(const ZoneGCStats& zoneStats, JSGCInvocationKind gckind,
+ SliceBudget budget, JS::GCReason reason);
+ void endSlice();
+
+ MOZ_MUST_USE bool startTimingMutator();
+ MOZ_MUST_USE bool stopTimingMutator(double& mutator_ms, double& gc_ms);
+
+ // Note when we sweep a zone or compartment.
+ void sweptZone() { ++zoneStats.sweptZoneCount; }
+ void sweptCompartment() { ++zoneStats.sweptCompartmentCount; }
+
+ void reset(GCAbortReason reason) {
+ MOZ_ASSERT(reason != GCAbortReason::None);
+ if (!aborted) {
+ slices_.back().resetReason = reason;
+ }
+ }
+
+ void measureInitialHeapSize();
+ void adoptHeapSizeDuringIncrementalGC(Zone* mergedZone);
+
+ void nonincremental(GCAbortReason reason) {
+ MOZ_ASSERT(reason != GCAbortReason::None);
+ nonincrementalReason_ = reason;
+ log("Non-incremental reason: %s", nonincrementalReason());
+ }
+
+ bool nonincremental() const {
+ return nonincrementalReason_ != GCAbortReason::None;
+ }
+
+ const char* nonincrementalReason() const {
+ return ExplainAbortReason(nonincrementalReason_);
+ }
+
+ void count(Count s) { counts[s]++; }
+
+ uint32_t getCount(Count s) const { return uint32_t(counts[s]); }
+
+ void setStat(Stat s, uint32_t value) { stats[s] = value; }
+
+ uint32_t getStat(Stat s) const { return stats[s]; }
+
+ void recordTrigger(size_t amount, size_t threshold) {
+ recordedTrigger = mozilla::Some(Trigger{amount, threshold});
+ }
+ bool hasTrigger() const { return recordedTrigger.isSome(); }
+
+ void noteNurseryAlloc() { allocsSinceMinorGC.nursery++; }
+
+  // Tenured allocs don't include nursery evictions.
+ void setAllocsSinceMinorGCTenured(uint32_t allocs) {
+ allocsSinceMinorGC.tenured = allocs;
+ }
+
+ uint32_t allocsSinceMinorGCNursery() { return allocsSinceMinorGC.nursery; }
+
+ uint32_t allocsSinceMinorGCTenured() { return allocsSinceMinorGC.tenured; }
+
+ uint32_t* addressOfAllocsSinceMinorGCNursery() {
+ return &allocsSinceMinorGC.nursery;
+ }
+
+ void beginNurseryCollection(JS::GCReason reason);
+ void endNurseryCollection(JS::GCReason reason);
+
+ TimeStamp beginSCC();
+ void endSCC(unsigned scc, TimeStamp start);
+
+ UniqueChars formatCompactSliceMessage() const;
+ UniqueChars formatCompactSummaryMessage() const;
+ UniqueChars formatDetailedMessage() const;
+
+ JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback);
+ JS::GCNurseryCollectionCallback setNurseryCollectionCallback(
+ JS::GCNurseryCollectionCallback callback);
+
+ TimeDuration clearMaxGCPauseAccumulator();
+ TimeDuration getMaxGCPauseSinceClear();
+
+ PhaseKind currentPhaseKind() const;
+
+ static const size_t MAX_SUSPENDED_PHASES = MAX_PHASE_NESTING * 3;
+
+ struct SliceData {
+ SliceData(SliceBudget budget, mozilla::Maybe<Trigger> trigger,
+ JS::GCReason reason, TimeStamp start, size_t startFaults,
+ gc::State initialState);
+
+ SliceBudget budget;
+ JS::GCReason reason = JS::GCReason::NO_REASON;
+ mozilla::Maybe<Trigger> trigger;
+ gc::State initialState = gc::State::NotActive;
+ gc::State finalState = gc::State::NotActive;
+ GCAbortReason resetReason = GCAbortReason::None;
+ TimeStamp start;
+ TimeStamp end;
+ size_t startFaults = 0;
+ size_t endFaults = 0;
+ PhaseTimeTable phaseTimes;
+ PhaseTimeTable maxParallelTimes;
+
+ TimeDuration duration() const { return end - start; }
+ bool wasReset() const { return resetReason != GCAbortReason::None; }
+ };
+
+ typedef Vector<SliceData, 8, SystemAllocPolicy> SliceDataVector;
+
+ const SliceDataVector& slices() const { return slices_; }
+
+ TimeStamp start() const { return slices_[0].start; }
+
+ TimeStamp end() const { return slices_.back().end; }
+
+ TimeStamp creationTime() const { return creationTime_; }
+
+ // Occasionally print header lines for profiling information.
+ void maybePrintProfileHeaders();
+
+ // Print header line for profile times.
+ void printProfileHeader();
+
+ // Print total profile times on shutdown.
+ void printTotalProfileTimes();
+
+ // These JSON strings are used by the firefox profiler to display the GC
+ // markers.
+
+ // Return JSON for a whole major GC
+ UniqueChars renderJsonMessage() const;
+
+ // Return JSON for the timings of just the given slice.
+ UniqueChars renderJsonSlice(size_t sliceNum) const;
+
+ // Return JSON for the previous nursery collection.
+ UniqueChars renderNurseryJson() const;
+
+#ifdef DEBUG
+ // Print a logging message.
+ void log(const char* fmt, ...);
+#else
+  void log(const char* fmt, ...) {}
+#endif
+
+ private:
+ gc::GCRuntime* const gc;
+
+ /* File used for MOZ_GCTIMER output. */
+ FILE* gcTimerFile;
+
+ /* File used for JS_GC_DEBUG output. */
+ FILE* gcDebugFile;
+
+ ZoneGCStats zoneStats;
+
+ JSGCInvocationKind gckind;
+
+ GCAbortReason nonincrementalReason_;
+
+ SliceDataVector slices_;
+
+ /* Most recent time when the given phase started. */
+ EnumeratedArray<Phase, Phase::LIMIT, TimeStamp> phaseStartTimes;
+
+#ifdef DEBUG
+ /* Most recent time when the given phase ended. */
+ EnumeratedArray<Phase, Phase::LIMIT, TimeStamp> phaseEndTimes;
+#endif
+
+ TimeStamp creationTime_;
+
+ /* Bookkeeping for GC timings when timingMutator is true */
+ TimeStamp timedGCStart;
+ TimeDuration timedGCTime;
+
+ /* Total time in a given phase for this GC. */
+ PhaseTimeTable phaseTimes;
+
+ /* Number of events of this type for this GC. */
+ EnumeratedArray<Count, COUNT_LIMIT,
+ mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire>>
+ counts;
+
+ /* Other GC statistics. */
+ EnumeratedArray<Stat, STAT_LIMIT, uint32_t> stats;
+
+ /*
+   * These events cannot be kept in the above array because we need to take
+   * their address.
+ */
+ struct {
+ uint32_t nursery;
+ uint32_t tenured;
+ } allocsSinceMinorGC;
+
+ /* Total GC heap size before and after the GC ran. */
+ size_t preTotalHeapBytes;
+ size_t postTotalHeapBytes;
+
+ /* GC heap size for collected zones before GC ran. */
+ size_t preCollectedHeapBytes;
+
+ /*
+ * If a GC slice was triggered by exceeding some threshold, record the
+ * threshold and the value that exceeded it. This happens before the slice
+ * starts so this is recorded here first and then transferred to SliceData.
+ */
+ mozilla::Maybe<Trigger> recordedTrigger;
+
+ /* GC numbers as of the beginning of the collection. */
+ uint64_t startingMinorGCNumber;
+ uint64_t startingMajorGCNumber;
+ uint64_t startingSliceNumber;
+
+ /* Records the maximum GC pause in an API-controlled interval. */
+ mutable TimeDuration maxPauseInInterval;
+
+ /* Phases that are currently on stack. */
+ Vector<Phase, MAX_PHASE_NESTING, SystemAllocPolicy> phaseStack;
+
+ /*
+ * Certain phases can interrupt the phase stack, eg callback phases. When
+   * this happens, we move the suspended phases over to a separate list,
+ * terminated by a dummy PhaseKind::SUSPENSION phase (so that we can nest
+ * suspensions by suspending multiple stacks with a PhaseKind::SUSPENSION in
+ * between).
+ */
+ Vector<Phase, MAX_SUSPENDED_PHASES, SystemAllocPolicy> suspendedPhases;
+
+ /* Sweep times for SCCs of compartments. */
+ Vector<TimeDuration, 0, SystemAllocPolicy> sccTimes;
+
+ TimeDuration timeSinceLastGC;
+
+ JS::GCSliceCallback sliceCallback;
+ JS::GCNurseryCollectionCallback nurseryCollectionCallback;
+
+ /*
+ * True if we saw an OOM while allocating slices or we saw an impossible
+ * timestamp. The statistics for this GC will be invalid.
+ */
+ bool aborted;
+
+ /* Profiling data. */
+
+ enum class ProfileKey {
+ Total,
+#define DEFINE_TIME_KEY(name, text, phase) name,
+ FOR_EACH_GC_PROFILE_TIME(DEFINE_TIME_KEY)
+#undef DEFINE_TIME_KEY
+ KeyCount
+ };
+
+ using ProfileDurations =
+ EnumeratedArray<ProfileKey, ProfileKey::KeyCount, TimeDuration>;
+
+ TimeDuration profileThreshold_;
+ bool enableProfiling_;
+ ProfileDurations totalTimes_;
+ uint64_t sliceCount_;
+
+ JSContext* context();
+
+ Phase currentPhase() const;
+ Phase lookupChildPhase(PhaseKind phaseKind) const;
+
+ void beginGC(JSGCInvocationKind kind, const TimeStamp& currentTime);
+ void endGC();
+
+ void sendGCTelemetry();
+ void sendSliceTelemetry(const SliceData& slice);
+
+ void recordPhaseBegin(Phase phase);
+ void recordPhaseEnd(Phase phase);
+
+ void gcDuration(TimeDuration* total, TimeDuration* maxPause) const;
+ void sccDurations(TimeDuration* total, TimeDuration* maxPause) const;
+ void printStats();
+
+ void reportLongestPhaseInMajorGC(PhaseKind longest, int telemetryId);
+
+ UniqueChars formatCompactSlicePhaseTimes(
+ const PhaseTimeTable& phaseTimes) const;
+
+ UniqueChars formatDetailedDescription() const;
+ UniqueChars formatDetailedSliceDescription(unsigned i,
+ const SliceData& slice) const;
+ UniqueChars formatDetailedPhaseTimes(const PhaseTimeTable& phaseTimes) const;
+ UniqueChars formatDetailedTotals() const;
+
+ void formatJsonDescription(JSONPrinter&) const;
+ void formatJsonSliceDescription(unsigned i, const SliceData& slice,
+ JSONPrinter&) const;
+ void formatJsonPhaseTimes(const PhaseTimeTable& phaseTimes,
+ JSONPrinter&) const;
+ void formatJsonSlice(size_t sliceNum, JSONPrinter&) const;
+
+ double computeMMU(TimeDuration resolution) const;
+
+ void printSliceProfile();
+ static void printProfileTimes(const ProfileDurations& times);
+};
+
+struct MOZ_RAII AutoGCSlice {
+ AutoGCSlice(Statistics& stats, const ZoneGCStats& zoneStats,
+ JSGCInvocationKind gckind, SliceBudget budget,
+ JS::GCReason reason)
+ : stats(stats) {
+ stats.beginSlice(zoneStats, gckind, budget, reason);
+ }
+ ~AutoGCSlice() { stats.endSlice(); }
+
+ Statistics& stats;
+};
+
+struct MOZ_RAII AutoPhase {
+ AutoPhase(Statistics& stats, PhaseKind phaseKind)
+ : stats(stats), phaseKind(phaseKind), enabled(true) {
+ stats.beginPhase(phaseKind);
+ }
+
+ AutoPhase(Statistics& stats, bool condition, PhaseKind phaseKind)
+ : stats(stats), phaseKind(phaseKind), enabled(condition) {
+ if (enabled) {
+ stats.beginPhase(phaseKind);
+ }
+ }
+
+ ~AutoPhase() {
+ if (enabled) {
+ stats.endPhase(phaseKind);
+ }
+ }
+
+ Statistics& stats;
+ PhaseKind phaseKind;
+ bool enabled;
+};
+
+struct MOZ_RAII AutoSCC {
+ AutoSCC(Statistics& stats, unsigned scc) : stats(stats), scc(scc) {
+ start = stats.beginSCC();
+ }
+ ~AutoSCC() { stats.endSCC(scc, start); }
+
+ Statistics& stats;
+ unsigned scc;
+ mozilla::TimeStamp start;
+};
+
+} /* namespace gcstats */
+
+struct StringStats {
+ // number of strings that were deduplicated, and their sizes in characters
+ // and bytes
+ uint64_t deduplicatedStrings = 0;
+ uint64_t deduplicatedChars = 0;
+ uint64_t deduplicatedBytes = 0;
+
+ // number of live nursery strings at the start of a nursery collection
+ uint64_t liveNurseryStrings = 0;
+
+ // number of new strings added to the tenured heap
+ uint64_t tenuredStrings = 0;
+
+ // Currently, liveNurseryStrings = tenuredStrings + deduplicatedStrings (but
+ // in the future we may do more transformation during tenuring, eg
+ // atomizing.)
+
+ // number of malloced bytes associated with tenured strings (the actual
+ // malloc will have happened when the strings were allocated in the nursery;
+ // the ownership of the bytes will be transferred to the tenured strings)
+ uint64_t tenuredBytes = 0;
+
+ StringStats& operator+=(const StringStats& other) {
+ deduplicatedStrings += other.deduplicatedStrings;
+ deduplicatedChars += other.deduplicatedChars;
+ deduplicatedBytes += other.deduplicatedBytes;
+ liveNurseryStrings += other.liveNurseryStrings;
+ tenuredStrings += other.tenuredStrings;
+ tenuredBytes += other.tenuredBytes;
+ return *this;
+ }
+
+ void noteTenured(size_t mallocBytes) {
+ liveNurseryStrings++;
+ tenuredStrings++;
+ tenuredBytes += mallocBytes;
+ }
+
+ void noteDeduplicated(size_t numChars, size_t mallocBytes) {
+ liveNurseryStrings++;
+ deduplicatedStrings++;
+ deduplicatedChars += numChars;
+ deduplicatedBytes += mallocBytes;
+ }
+};
+
+} /* namespace js */
+
+#endif /* gc_Statistics_h */
diff --git a/js/src/gc/StoreBuffer-inl.h b/js/src/gc/StoreBuffer-inl.h
new file mode 100644
index 0000000000..5538e88509
--- /dev/null
+++ b/js/src/gc/StoreBuffer-inl.h
@@ -0,0 +1,83 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_StoreBuffer_inl_h
+#define gc_StoreBuffer_inl_h
+
+#include "gc/StoreBuffer.h"
+
+#include "gc/Cell.h"
+#include "gc/Heap.h"
+
+#include "gc/Heap-inl.h"
+
+namespace js {
+namespace gc {
+
+inline /* static */ size_t ArenaCellSet::getCellIndex(const TenuredCell* cell) {
+ uintptr_t cellOffset = uintptr_t(cell) & ArenaMask;
+ MOZ_ASSERT(cellOffset % ArenaCellIndexBytes == 0);
+ return cellOffset / ArenaCellIndexBytes;
+}
+
+inline /* static */ void ArenaCellSet::getWordIndexAndMask(size_t cellIndex,
+ size_t* wordp,
+ uint32_t* maskp) {
+ BitArray<MaxArenaCellIndex>::getIndexAndMask(cellIndex, wordp, maskp);
+}
+
+inline bool ArenaCellSet::hasCell(size_t cellIndex) const {
+ MOZ_ASSERT(cellIndex < MaxArenaCellIndex);
+ return bits.get(cellIndex);
+}
+
+inline void ArenaCellSet::putCell(size_t cellIndex) {
+ MOZ_ASSERT(cellIndex < MaxArenaCellIndex);
+ MOZ_ASSERT(arena);
+
+ bits.set(cellIndex);
+ check();
+}
+
+inline void ArenaCellSet::check() const {
+#ifdef DEBUG
+ bool bitsZero = bits.isAllClear();
+ MOZ_ASSERT(isEmpty() == bitsZero);
+ MOZ_ASSERT(isEmpty() == !arena);
+ if (!isEmpty()) {
+ MOZ_ASSERT(IsCellPointerValid(arena));
+ MOZ_ASSERT(arena->bufferedCells() == this);
+ JSRuntime* runtime = arena->zone->runtimeFromMainThread();
+ MOZ_ASSERT(runtime->gc.minorGCCount() == minorGCNumberAtCreation);
+ }
+#endif
+}
+
+inline void StoreBuffer::WholeCellBuffer::put(const Cell* cell) {
+ MOZ_ASSERT(cell->isTenured());
+
+ // BigInts don't have any children, so shouldn't show up here.
+ MOZ_ASSERT(cell->getTraceKind() != JS::TraceKind::BigInt);
+
+ Arena* arena = cell->asTenured().arena();
+ ArenaCellSet* cells = arena->bufferedCells();
+ if (cells->isEmpty()) {
+ cells = allocateCellSet(arena);
+ if (!cells) {
+ return;
+ }
+ }
+
+ cells->putCell(&cell->asTenured());
+ cells->check();
+}
+
+inline void StoreBuffer::putWholeCell(Cell* cell) { bufferWholeCell.put(cell); }
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_StoreBuffer_inl_h
diff --git a/js/src/gc/StoreBuffer.cpp b/js/src/gc/StoreBuffer.cpp
new file mode 100644
index 0000000000..d9333a86f8
--- /dev/null
+++ b/js/src/gc/StoreBuffer.cpp
@@ -0,0 +1,238 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/StoreBuffer-inl.h"
+
+#include "mozilla/Assertions.h"
+
+#include "gc/Statistics.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/JSContext.h"
+#include "vm/MutexIDs.h"
+#include "vm/Runtime.h"
+
+#include "gc/GC-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+JS_PUBLIC_API void js::gc::LockStoreBuffer(StoreBuffer* sb) {
+ MOZ_ASSERT(sb);
+ sb->lock();
+}
+
+JS_PUBLIC_API void js::gc::UnlockStoreBuffer(StoreBuffer* sb) {
+ MOZ_ASSERT(sb);
+ sb->unlock();
+}
+
+bool StoreBuffer::WholeCellBuffer::init() {
+ MOZ_ASSERT(!stringHead_);
+ MOZ_ASSERT(!nonStringHead_);
+ if (!storage_) {
+ storage_ = MakeUnique<LifoAlloc>(LifoAllocBlockSize);
+ // This prevents LifoAlloc::Enum from crashing with a release
+ // assertion if we ever allocate one entry larger than
+ // LifoAllocBlockSize.
+ if (storage_) {
+ storage_->disableOversize();
+ }
+ }
+ clear();
+ return bool(storage_);
+}
+
+bool StoreBuffer::GenericBuffer::init() {
+ if (!storage_) {
+ storage_ = MakeUnique<LifoAlloc>(LifoAllocBlockSize);
+ }
+ clear();
+ return bool(storage_);
+}
+
+void StoreBuffer::GenericBuffer::trace(JSTracer* trc) {
+ mozilla::ReentrancyGuard g(*owner_);
+ MOZ_ASSERT(owner_->isEnabled());
+ if (!storage_) {
+ return;
+ }
+
+ for (LifoAlloc::Enum e(*storage_); !e.empty();) {
+ unsigned size = *e.read<unsigned>();
+ BufferableRef* edge = e.read<BufferableRef>(size);
+ edge->trace(trc);
+ }
+}
+
+StoreBuffer::StoreBuffer(JSRuntime* rt, const Nursery& nursery)
+ : lock_(mutexid::StoreBuffer),
+ bufferVal(this, JS::GCReason::FULL_VALUE_BUFFER),
+ bufStrCell(this, JS::GCReason::FULL_CELL_PTR_STR_BUFFER),
+ bufBigIntCell(this, JS::GCReason::FULL_CELL_PTR_BIGINT_BUFFER),
+ bufObjCell(this, JS::GCReason::FULL_CELL_PTR_OBJ_BUFFER),
+ bufferSlot(this, JS::GCReason::FULL_SLOT_BUFFER),
+ bufferWholeCell(this),
+ bufferGeneric(this),
+ runtime_(rt),
+ nursery_(nursery),
+ aboutToOverflow_(false),
+ enabled_(false),
+ mayHavePointersToDeadCells_(false)
+#ifdef DEBUG
+ ,
+ mEntered(false),
+ markingNondeduplicatable(false)
+#endif
+{
+}
+
+void StoreBuffer::checkEmpty() const { MOZ_ASSERT(isEmpty()); }
+
+bool StoreBuffer::isEmpty() const {
+ return bufferVal.isEmpty() && bufStrCell.isEmpty() &&
+ bufBigIntCell.isEmpty() && bufObjCell.isEmpty() &&
+ bufferSlot.isEmpty() && bufferWholeCell.isEmpty() &&
+ bufferGeneric.isEmpty();
+}
+
+bool StoreBuffer::enable() {
+ if (enabled_) {
+ return true;
+ }
+
+ checkEmpty();
+
+ if (!bufferWholeCell.init() || !bufferGeneric.init()) {
+ return false;
+ }
+
+ enabled_ = true;
+ return true;
+}
+
+void StoreBuffer::disable() {
+ checkEmpty();
+
+ if (!enabled_) {
+ return;
+ }
+
+ aboutToOverflow_ = false;
+
+ enabled_ = false;
+}
+
+void StoreBuffer::clear() {
+ if (!enabled_) {
+ return;
+ }
+
+ aboutToOverflow_ = false;
+ mayHavePointersToDeadCells_ = false;
+
+ bufferVal.clear();
+ bufStrCell.clear();
+ bufBigIntCell.clear();
+ bufObjCell.clear();
+ bufferSlot.clear();
+ bufferWholeCell.clear();
+ bufferGeneric.clear();
+}
+
+void StoreBuffer::setAboutToOverflow(JS::GCReason reason) {
+ if (!aboutToOverflow_) {
+ aboutToOverflow_ = true;
+ runtime_->gc.stats().count(gcstats::COUNT_STOREBUFFER_OVERFLOW);
+ }
+ nursery_.requestMinorGC(reason);
+}
+
+void StoreBuffer::addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::GCSizes* sizes) {
+ sizes->storeBufferVals += bufferVal.sizeOfExcludingThis(mallocSizeOf);
+ sizes->storeBufferCells += bufStrCell.sizeOfExcludingThis(mallocSizeOf) +
+ bufBigIntCell.sizeOfExcludingThis(mallocSizeOf) +
+ bufObjCell.sizeOfExcludingThis(mallocSizeOf);
+ sizes->storeBufferSlots += bufferSlot.sizeOfExcludingThis(mallocSizeOf);
+ sizes->storeBufferWholeCells +=
+ bufferWholeCell.sizeOfExcludingThis(mallocSizeOf);
+ sizes->storeBufferGenerics += bufferGeneric.sizeOfExcludingThis(mallocSizeOf);
+}
+
+ArenaCellSet ArenaCellSet::Empty;
+
+ArenaCellSet::ArenaCellSet(Arena* arena, ArenaCellSet* next)
+ : arena(arena),
+ next(next)
+#ifdef DEBUG
+ ,
+ minorGCNumberAtCreation(
+ arena->zone->runtimeFromMainThread()->gc.minorGCCount())
+#endif
+{
+ MOZ_ASSERT(arena);
+ MOZ_ASSERT(bits.isAllClear());
+}
+
+ArenaCellSet* StoreBuffer::WholeCellBuffer::allocateCellSet(Arena* arena) {
+ Zone* zone = arena->zone;
+ JSRuntime* rt = zone->runtimeFromMainThread();
+ if (!rt->gc.nursery().isEnabled()) {
+ return nullptr;
+ }
+
+ // Maintain separate lists for strings and non-strings, so that all buffered
+ // string whole cells will be processed before anything else (to prevent them
+ // from being deduplicated when their chars are used by a tenured string.)
+ bool isString =
+ MapAllocToTraceKind(arena->getAllocKind()) == JS::TraceKind::String;
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ ArenaCellSet*& head = isString ? stringHead_ : nonStringHead_;
+ auto cells = storage_->new_<ArenaCellSet>(arena, head);
+ if (!cells) {
+ oomUnsafe.crash("Failed to allocate ArenaCellSet");
+ }
+
+ arena->bufferedCells() = cells;
+ head = cells;
+
+ if (isAboutToOverflow()) {
+ rt->gc.storeBuffer().setAboutToOverflow(
+ JS::GCReason::FULL_WHOLE_CELL_BUFFER);
+ }
+
+ return cells;
+}
+
+void StoreBuffer::WholeCellBuffer::clear() {
+ for (auto** headPtr : {&stringHead_, &nonStringHead_}) {
+ for (auto* set = *headPtr; set; set = set->next) {
+ set->arena->bufferedCells() = &ArenaCellSet::Empty;
+ }
+ *headPtr = nullptr;
+ }
+
+ if (storage_) {
+ storage_->used() ? storage_->releaseAll() : storage_->freeAll();
+ }
+}
+
+template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::ValueEdge>;
+template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::SlotsEdge>;
+
+void js::gc::PostWriteBarrierCell(Cell* cell, Cell* prev, Cell* next) {
+ if (!next || !cell->isTenured()) {
+ return;
+ }
+
+ StoreBuffer* buffer = next->storeBuffer();
+ if (!buffer || (prev && prev->storeBuffer())) {
+ return;
+ }
+
+ buffer->putWholeCell(cell);
+}
diff --git a/js/src/gc/StoreBuffer.h b/js/src/gc/StoreBuffer.h
new file mode 100644
index 0000000000..95a13e4c6f
--- /dev/null
+++ b/js/src/gc/StoreBuffer.h
@@ -0,0 +1,679 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_StoreBuffer_h
+#define gc_StoreBuffer_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/ReentrancyGuard.h"
+
+#include <algorithm>
+
+#include "ds/BitArray.h"
+#include "ds/LifoAlloc.h"
+#include "gc/Nursery.h"
+#include "js/AllocPolicy.h"
+#include "js/MemoryMetrics.h"
+#include "js/UniquePtr.h"
+#include "threading/Mutex.h"
+
+namespace js {
+
+#ifdef DEBUG
+extern bool CurrentThreadIsGCMarking();
+#endif
+
+namespace gc {
+
+// Map from all trace kinds to the base GC type.
+template <JS::TraceKind kind>
+struct MapTraceKindToType {};
+
+#define DEFINE_TRACE_KIND_MAP(name, type, _, _1) \
+ template <> \
+ struct MapTraceKindToType<JS::TraceKind::name> { \
+ using Type = type; \
+ };
+JS_FOR_EACH_TRACEKIND(DEFINE_TRACE_KIND_MAP);
+#undef DEFINE_TRACE_KIND_MAP
+
+// Map from a possibly-derived type to the base GC type.
+template <typename T>
+struct BaseGCType {
+ using type =
+ typename MapTraceKindToType<JS::MapTypeToTraceKind<T>::kind>::Type;
+ static_assert(std::is_base_of_v<type, T>, "Failed to find base type");
+};
+
+class Arena;
+class ArenaCellSet;
+
+#ifdef DEBUG
+extern bool CurrentThreadHasLockedGC();
+#endif
+
+/*
+ * BufferableRef represents an abstract reference for use in the generational
+ * GC's remembered set. Entries in the store buffer that cannot be represented
+ * with the simple pointer-to-a-pointer scheme must derive from this class and
+ * use the generic store buffer interface.
+ *
+ * A single BufferableRef entry in the generic buffer can represent many entries
+ * in the remembered set. For example, js::OrderedHashTableRef represents all
+ * the incoming edges corresponding to keys in an ordered hash table.
+ */
+class BufferableRef {
+ public:
+ virtual void trace(JSTracer* trc) = 0;
+ bool maybeInRememberedSet(const Nursery&) const { return true; }
+};
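+
+// Illustrative sketch (hypothetical type, not part of this header): a
+// BufferableRef subclass that stands in for many edges at once and is added
+// to the remembered set via StoreBuffer::putGeneric():
+//
+//   class MyTableRef : public gc::BufferableRef {
+//     MyTable* table_;  // hypothetical container holding nursery-pointing keys
+//
+//    public:
+//     explicit MyTableRef(MyTable* table) : table_(table) {}
+//     void trace(JSTracer* trc) override { table_->traceNurseryEdges(trc); }
+//   };
+//
+//   // runtime->gc.storeBuffer().putGeneric(MyTableRef(table));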
+
+typedef HashSet<void*, PointerHasher<void*>, SystemAllocPolicy> EdgeSet;
+
+/* The size of a single block of store buffer storage space. */
+static const size_t LifoAllocBlockSize = 8 * 1024;
+
+/*
+ * The StoreBuffer observes all writes that occur in the system and performs
+ * efficient filtering of them to derive a remembered set for nursery GC.
+ */
+class StoreBuffer {
+ friend class mozilla::ReentrancyGuard;
+
+ /* The size at which a block is about to overflow for the generic buffer. */
+ static const size_t GenericBufferLowAvailableThreshold =
+ LifoAllocBlockSize / 2;
+
+ /* The size at which the whole cell buffer is about to overflow. */
+ static const size_t WholeCellBufferOverflowThresholdBytes = 128 * 1024;
+
+ /*
+ * This buffer holds only a single type of edge. Using this buffer is more
+ * efficient than the generic buffer when many writes will be to the same
+ * type of edge: e.g. Value or Cell*.
+ */
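+  //
+  // Illustrative flow (see put(), unput() and sinkStore() below): put(e1)
+  // caches e1 in last_; a later put(e2) first sinks e1 into stores_ and then
+  // caches e2; unput(e2) simply clears the cache without touching the set.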
+ template <typename T>
+ struct MonoTypeBuffer {
+ /* The canonical set of stores. */
+ typedef HashSet<T, typename T::Hasher, SystemAllocPolicy> StoreSet;
+ StoreSet stores_;
+
+ /*
+ * A one element cache in front of the canonical set to speed up
+ * temporary instances of HeapPtr.
+ */
+ T last_;
+
+ StoreBuffer* owner_;
+
+ JS::GCReason gcReason_;
+
+ /* Maximum number of entries before we request a minor GC. */
+ const static size_t MaxEntries = 48 * 1024 / sizeof(T);
+
+ explicit MonoTypeBuffer(StoreBuffer* owner, JS::GCReason reason)
+ : last_(T()), owner_(owner), gcReason_(reason) {}
+
+ void clear() {
+ last_ = T();
+ stores_.clear();
+ }
+
+ /* Add one item to the buffer. */
+ void put(const T& t) {
+ sinkStore();
+ last_ = t;
+ }
+
+ /* Remove an item from the store buffer. */
+ void unput(const T& v) {
+ // Fast, hashless remove of last put.
+ if (last_ == v) {
+ last_ = T();
+ return;
+ }
+ stores_.remove(v);
+ }
+
+ /* Move any buffered stores to the canonical store set. */
+ void sinkStore() {
+ if (last_) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!stores_.put(last_)) {
+ oomUnsafe.crash("Failed to allocate for MonoTypeBuffer::put.");
+ }
+ }
+ last_ = T();
+
+ if (MOZ_UNLIKELY(stores_.count() > MaxEntries)) {
+ owner_->setAboutToOverflow(gcReason_);
+ }
+ }
+
+ /* Trace the source of all edges in the store buffer. */
+ void trace(TenuringTracer& mover);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return stores_.shallowSizeOfExcludingThis(mallocSizeOf);
+ }
+
+ bool isEmpty() const { return last_ == T() && stores_.empty(); }
+
+ private:
+ MonoTypeBuffer(const MonoTypeBuffer& other) = delete;
+ MonoTypeBuffer& operator=(const MonoTypeBuffer& other) = delete;
+ };
+
+ struct WholeCellBuffer {
+ UniquePtr<LifoAlloc> storage_;
+ ArenaCellSet* stringHead_;
+ ArenaCellSet* nonStringHead_;
+ StoreBuffer* owner_;
+
+ explicit WholeCellBuffer(StoreBuffer* owner)
+ : storage_(nullptr),
+ stringHead_(nullptr),
+ nonStringHead_(nullptr),
+ owner_(owner) {}
+
+ MOZ_MUST_USE bool init();
+
+ void clear();
+
+ bool isAboutToOverflow() const {
+ return !storage_->isEmpty() &&
+ storage_->used() > WholeCellBufferOverflowThresholdBytes;
+ }
+
+ void trace(TenuringTracer& mover);
+
+ inline void put(const Cell* cell);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return storage_ ? storage_->sizeOfIncludingThis(mallocSizeOf) : 0;
+ }
+
+ bool isEmpty() const {
+ MOZ_ASSERT_IF(!stringHead_ && !nonStringHead_,
+ !storage_ || storage_->isEmpty());
+ return !stringHead_ && !nonStringHead_;
+ }
+
+ private:
+ ArenaCellSet* allocateCellSet(Arena* arena);
+
+ WholeCellBuffer(const WholeCellBuffer& other) = delete;
+ WholeCellBuffer& operator=(const WholeCellBuffer& other) = delete;
+ };
+
+ struct GenericBuffer {
+ UniquePtr<LifoAlloc> storage_;
+ StoreBuffer* owner_;
+
+ explicit GenericBuffer(StoreBuffer* owner)
+ : storage_(nullptr), owner_(owner) {}
+
+ MOZ_MUST_USE bool init();
+
+ void clear() {
+ if (storage_) {
+ storage_->used() ? storage_->releaseAll() : storage_->freeAll();
+ }
+ }
+
+ bool isAboutToOverflow() const {
+ return !storage_->isEmpty() && storage_->availableInCurrentChunk() <
+ GenericBufferLowAvailableThreshold;
+ }
+
+ /* Trace all generic edges. */
+ void trace(JSTracer* trc);
+
+ template <typename T>
+ void put(const T& t) {
+ MOZ_ASSERT(storage_);
+
+ /* Ensure T is derived from BufferableRef. */
+ (void)static_cast<const BufferableRef*>(&t);
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ unsigned size = sizeof(T);
+ unsigned* sizep = storage_->pod_malloc<unsigned>();
+ if (!sizep) {
+ oomUnsafe.crash("Failed to allocate for GenericBuffer::put.");
+ }
+ *sizep = size;
+
+ T* tp = storage_->new_<T>(t);
+ if (!tp) {
+ oomUnsafe.crash("Failed to allocate for GenericBuffer::put.");
+ }
+
+ if (isAboutToOverflow()) {
+ owner_->setAboutToOverflow(JS::GCReason::FULL_GENERIC_BUFFER);
+ }
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return storage_ ? storage_->sizeOfIncludingThis(mallocSizeOf) : 0;
+ }
+
+ bool isEmpty() const { return !storage_ || storage_->isEmpty(); }
+
+ private:
+ GenericBuffer(const GenericBuffer& other) = delete;
+ GenericBuffer& operator=(const GenericBuffer& other) = delete;
+ };
+
+ template <typename Edge>
+ struct PointerEdgeHasher {
+ using Lookup = Edge;
+ static HashNumber hash(const Lookup& l) {
+ return mozilla::HashGeneric(l.edge);
+ }
+ static bool match(const Edge& k, const Lookup& l) { return k == l; }
+ };
+
+ template <typename T>
+ struct CellPtrEdge {
+ T** edge = nullptr;
+
+ CellPtrEdge() = default;
+ explicit CellPtrEdge(T** v) : edge(v) {}
+ bool operator==(const CellPtrEdge& other) const {
+ return edge == other.edge;
+ }
+ bool operator!=(const CellPtrEdge& other) const {
+ return edge != other.edge;
+ }
+
+ bool maybeInRememberedSet(const Nursery& nursery) const {
+ MOZ_ASSERT(IsInsideNursery(*edge));
+ return !nursery.isInside(edge);
+ }
+
+ void trace(TenuringTracer& mover) const;
+
+ explicit operator bool() const { return edge != nullptr; }
+
+ using Hasher = PointerEdgeHasher<CellPtrEdge<T>>;
+ };
+
+ using ObjectPtrEdge = CellPtrEdge<JSObject>;
+ using StringPtrEdge = CellPtrEdge<JSString>;
+ using BigIntPtrEdge = CellPtrEdge<JS::BigInt>;
+
+ struct ValueEdge {
+ JS::Value* edge;
+
+ ValueEdge() : edge(nullptr) {}
+ explicit ValueEdge(JS::Value* v) : edge(v) {}
+ bool operator==(const ValueEdge& other) const { return edge == other.edge; }
+ bool operator!=(const ValueEdge& other) const { return edge != other.edge; }
+
+ Cell* deref() const {
+ return edge->isGCThing() ? static_cast<Cell*>(edge->toGCThing())
+ : nullptr;
+ }
+
+ bool maybeInRememberedSet(const Nursery& nursery) const {
+ MOZ_ASSERT(IsInsideNursery(deref()));
+ return !nursery.isInside(edge);
+ }
+
+ void trace(TenuringTracer& mover) const;
+
+ explicit operator bool() const { return edge != nullptr; }
+
+ using Hasher = PointerEdgeHasher<ValueEdge>;
+ };
+
+ struct SlotsEdge {
+ // These definitions must match those in HeapSlot::Kind.
+ const static int SlotKind = 0;
+ const static int ElementKind = 1;
+
+ uintptr_t objectAndKind_; // NativeObject* | Kind
+ uint32_t start_;
+ uint32_t count_;
+
+ SlotsEdge() : objectAndKind_(0), start_(0), count_(0) {}
+ SlotsEdge(NativeObject* object, int kind, uint32_t start, uint32_t count)
+ : objectAndKind_(uintptr_t(object) | kind),
+ start_(start),
+ count_(count) {
+ MOZ_ASSERT((uintptr_t(object) & 1) == 0);
+ MOZ_ASSERT(kind <= 1);
+ MOZ_ASSERT(count > 0);
+ MOZ_ASSERT(start + count > start);
+ }
+
+ NativeObject* object() const {
+ return reinterpret_cast<NativeObject*>(objectAndKind_ & ~1);
+ }
+ int kind() const { return (int)(objectAndKind_ & 1); }
+
+ bool operator==(const SlotsEdge& other) const {
+ return objectAndKind_ == other.objectAndKind_ && start_ == other.start_ &&
+ count_ == other.count_;
+ }
+
+ bool operator!=(const SlotsEdge& other) const { return !(*this == other); }
+
+ // True if this SlotsEdge range overlaps with the other SlotsEdge range,
+ // false if they do not overlap.
+ bool overlaps(const SlotsEdge& other) const {
+ if (objectAndKind_ != other.objectAndKind_) {
+ return false;
+ }
+
+ // Widen our range by one on each side so that we consider
+ // adjacent-but-not-actually-overlapping ranges as overlapping. This
+ // is particularly useful for coalescing a series of increasing or
+ // decreasing single index writes 0, 1, 2, ..., N into a SlotsEdge
+ // range of elements [0, N].
+ uint32_t end = start_ + count_ + 1;
+ uint32_t start = start_ > 0 ? start_ - 1 : 0;
+ MOZ_ASSERT(start < end);
+
+ uint32_t otherEnd = other.start_ + other.count_;
+ MOZ_ASSERT(other.start_ <= otherEnd);
+ return (start <= other.start_ && other.start_ <= end) ||
+ (start <= otherEnd && otherEnd <= end);
+ }
+
+ // Destructively make this SlotsEdge range the union of the other
+ // SlotsEdge range and this one. A precondition is that the ranges must
+ // overlap.
+ void merge(const SlotsEdge& other) {
+ MOZ_ASSERT(overlaps(other));
+ uint32_t end = std::max(start_ + count_, other.start_ + other.count_);
+ start_ = std::min(start_, other.start_);
+ count_ = end - start_;
+ }
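+
+    // Worked example (values illustrative): merging (start=0, count=3) with
+    // (start=2, count=4) yields start = min(0, 2) = 0 and
+    // count = max(0 + 3, 2 + 4) - 0 = 6, i.e. the covered range is [0, 6).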
+
+ bool maybeInRememberedSet(const Nursery& n) const {
+ return !IsInsideNursery(reinterpret_cast<Cell*>(object()));
+ }
+
+ void trace(TenuringTracer& mover) const;
+
+ explicit operator bool() const { return objectAndKind_ != 0; }
+
+ typedef struct Hasher {
+ using Lookup = SlotsEdge;
+ static HashNumber hash(const Lookup& l) {
+ return mozilla::HashGeneric(l.objectAndKind_, l.start_, l.count_);
+ }
+ static bool match(const SlotsEdge& k, const Lookup& l) { return k == l; }
+ } Hasher;
+ };
+
+ // The GC runs tasks that may access the storebuffer in parallel and so must
+ // take a lock. The mutator may only access the storebuffer from the main
+ // thread.
+ inline void CheckAccess() const {
+#ifdef DEBUG
+ if (JS::RuntimeHeapIsBusy()) {
+ MOZ_ASSERT(!CurrentThreadIsGCMarking());
+ lock_.assertOwnedByCurrentThread();
+ } else {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+ }
+#endif
+ }
+
+ template <typename Buffer, typename Edge>
+ void unput(Buffer& buffer, const Edge& edge) {
+ CheckAccess();
+ if (!isEnabled()) {
+ return;
+ }
+ mozilla::ReentrancyGuard g(*this);
+ buffer.unput(edge);
+ }
+
+ template <typename Buffer, typename Edge>
+ void put(Buffer& buffer, const Edge& edge) {
+ CheckAccess();
+ if (!isEnabled()) {
+ return;
+ }
+ mozilla::ReentrancyGuard g(*this);
+ if (edge.maybeInRememberedSet(nursery_)) {
+ buffer.put(edge);
+ }
+ }
+
+ Mutex lock_;
+
+ MonoTypeBuffer<ValueEdge> bufferVal;
+ MonoTypeBuffer<StringPtrEdge> bufStrCell;
+ MonoTypeBuffer<BigIntPtrEdge> bufBigIntCell;
+ MonoTypeBuffer<ObjectPtrEdge> bufObjCell;
+ MonoTypeBuffer<SlotsEdge> bufferSlot;
+ WholeCellBuffer bufferWholeCell;
+ GenericBuffer bufferGeneric;
+
+ JSRuntime* runtime_;
+ const Nursery& nursery_;
+
+ bool aboutToOverflow_;
+ bool enabled_;
+ bool mayHavePointersToDeadCells_;
+#ifdef DEBUG
+ bool mEntered; /* For ReentrancyGuard. */
+#endif
+
+ public:
+#ifdef DEBUG
+ bool markingNondeduplicatable;
+#endif
+
+ explicit StoreBuffer(JSRuntime* rt, const Nursery& nursery);
+ MOZ_MUST_USE bool enable();
+
+ void disable();
+ bool isEnabled() const { return enabled_; }
+
+ bool isEmpty() const;
+ void clear();
+
+ const Nursery& nursery() const { return nursery_; }
+
+ /* Get the overflowed status. */
+ bool isAboutToOverflow() const { return aboutToOverflow_; }
+
+ /*
+   * Brain transplants may add whole cell buffer entries for dead cells. We must
+ * evict the nursery prior to sweeping arenas if any such entries are present.
+ */
+ bool mayHavePointersToDeadCells() const {
+ return mayHavePointersToDeadCells_;
+ }
+
+ /* Insert a single edge into the buffer/remembered set. */
+ void putValue(JS::Value* vp) { put(bufferVal, ValueEdge(vp)); }
+ void unputValue(JS::Value* vp) { unput(bufferVal, ValueEdge(vp)); }
+
+ void putCell(JSString** strp) { put(bufStrCell, StringPtrEdge(strp)); }
+ void unputCell(JSString** strp) { unput(bufStrCell, StringPtrEdge(strp)); }
+
+ void putCell(JS::BigInt** bip) { put(bufBigIntCell, BigIntPtrEdge(bip)); }
+ void unputCell(JS::BigInt** bip) { unput(bufBigIntCell, BigIntPtrEdge(bip)); }
+
+ void putCell(JSObject** strp) { put(bufObjCell, ObjectPtrEdge(strp)); }
+ void unputCell(JSObject** strp) { unput(bufObjCell, ObjectPtrEdge(strp)); }
+
+ void putSlot(NativeObject* obj, int kind, uint32_t start, uint32_t count) {
+ SlotsEdge edge(obj, kind, start, count);
+ if (bufferSlot.last_.overlaps(edge)) {
+ bufferSlot.last_.merge(edge);
+ } else {
+ put(bufferSlot, edge);
+ }
+ }
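+
+  // For example (illustrative), three consecutive putSlot calls for element
+  // indices 0, 1 and 2 on the same object coalesce in the buffer's one-entry
+  // cache into a single SlotsEdge covering [0, 3).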
+
+ inline void putWholeCell(Cell* cell);
+
+ /* Insert an entry into the generic buffer. */
+ template <typename T>
+ void putGeneric(const T& t) {
+ put(bufferGeneric, t);
+ }
+
+ void setMayHavePointersToDeadCells() { mayHavePointersToDeadCells_ = true; }
+
+ /* Methods to trace the source of all edges in the store buffer. */
+ void traceValues(TenuringTracer& mover) { bufferVal.trace(mover); }
+ void traceCells(TenuringTracer& mover) {
+ bufStrCell.trace(mover);
+ bufBigIntCell.trace(mover);
+ bufObjCell.trace(mover);
+ }
+ void traceSlots(TenuringTracer& mover) { bufferSlot.trace(mover); }
+ void traceWholeCells(TenuringTracer& mover) { bufferWholeCell.trace(mover); }
+ void traceGenericEntries(JSTracer* trc) { bufferGeneric.trace(trc); }
+
+ /* For use by our owned buffers and for testing. */
+ void setAboutToOverflow(JS::GCReason);
+
+ void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::GCSizes* sizes);
+
+ void checkEmpty() const;
+
+ // For use by the GC only.
+ void lock() { lock_.lock(); }
+ void unlock() { lock_.unlock(); }
+};
+
+// A set of cells in an arena used to implement the whole cell store buffer.
+class ArenaCellSet {
+ friend class StoreBuffer;
+
+ using ArenaCellBits = BitArray<MaxArenaCellIndex>;
+
+ // The arena this relates to.
+ Arena* arena;
+
+ // Pointer to next set forming a linked list.
+ ArenaCellSet* next;
+
+ // Bit vector for each possible cell start position.
+ ArenaCellBits bits;
+
+#ifdef DEBUG
+ // The minor GC number when this was created. This object should not survive
+ // past the next minor collection.
+ const uint64_t minorGCNumberAtCreation;
+#endif
+
+ // Construct the empty sentinel object.
+ constexpr ArenaCellSet()
+ : arena(nullptr),
+ next(nullptr)
+#ifdef DEBUG
+ ,
+ minorGCNumberAtCreation(0)
+#endif
+ {
+ }
+
+ public:
+ using WordT = ArenaCellBits::WordT;
+ const size_t BitsPerWord = ArenaCellBits::bitsPerElement;
+ const size_t NumWords = ArenaCellBits::numSlots;
+
+ ArenaCellSet(Arena* arena, ArenaCellSet* next);
+
+ bool hasCell(const TenuredCell* cell) const {
+ return hasCell(getCellIndex(cell));
+ }
+
+ void putCell(const TenuredCell* cell) { putCell(getCellIndex(cell)); }
+
+ bool isEmpty() const { return this == &Empty; }
+
+ bool hasCell(size_t cellIndex) const;
+
+ void putCell(size_t cellIndex);
+
+ void check() const;
+
+ WordT getWord(size_t wordIndex) const { return bits.getWord(wordIndex); }
+
+ void trace(TenuringTracer& mover);
+
+ // Sentinel object used for all empty sets.
+ //
+ // We use a sentinel because it simplifies the JIT code slightly as we can
+ // assume all arenas have a cell set.
+ static ArenaCellSet Empty;
+
+ static size_t getCellIndex(const TenuredCell* cell);
+ static void getWordIndexAndMask(size_t cellIndex, size_t* wordp,
+ uint32_t* maskp);
+
+ // Attempt to trigger a minor GC if free space in the nursery (where these
+ // objects are allocated) falls below this threshold.
+ static const size_t NurseryFreeThresholdBytes = 64 * 1024;
+
+ static size_t offsetOfArena() { return offsetof(ArenaCellSet, arena); }
+ static size_t offsetOfBits() { return offsetof(ArenaCellSet, bits); }
+};
+
+// Post-write barrier implementation for GC cells.
+
+// Implement the post-write barrier for nursery-allocatable cell type |T|. Call
+// this from |T::postWriteBarrier|.
+template <typename T>
+MOZ_ALWAYS_INLINE void PostWriteBarrierImpl(void* cellp, T* prev, T* next) {
+ MOZ_ASSERT(cellp);
+
+ // If the target needs an entry, add it.
+ StoreBuffer* buffer;
+ if (next && (buffer = next->storeBuffer())) {
+ // If we know that the prev has already inserted an entry, we can skip
+ // doing the lookup to add the new entry. Note that we cannot safely
+ // assert the presence of the entry because it may have been added
+ // via a different store buffer.
+ if (prev && prev->storeBuffer()) {
+ return;
+ }
+ buffer->putCell(static_cast<T**>(cellp));
+ return;
+ }
+
+ // Remove the prev entry if the new value does not need it. There will only
+ // be a prev entry if the prev value was in the nursery.
+ if (prev && (buffer = prev->storeBuffer())) {
+ buffer->unputCell(static_cast<T**>(cellp));
+ }
+}
+
+template <typename T>
+MOZ_ALWAYS_INLINE void PostWriteBarrier(T** vp, T* prev, T* next) {
+ static_assert(std::is_base_of_v<Cell, T>);
+ static_assert(!std::is_same_v<Cell, T> && !std::is_same_v<TenuredCell, T>);
+
+ if constexpr (!std::is_base_of_v<TenuredCell, T>) {
+ using BaseT = typename BaseGCType<T>::type;
+ PostWriteBarrierImpl<BaseT>(vp, prev, next);
+ return;
+ }
+
+ MOZ_ASSERT(!IsInsideNursery(next));
+}
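+
+// Illustrative sketch (hypothetical caller; target_ and setTarget are made-up
+// names): a tenured cell updating a field that may point into the nursery
+// would typically route the write through this barrier:
+//
+//   void setTarget(JSObject* obj) {
+//     JSObject* prev = target_;
+//     target_ = obj;
+//     gc::PostWriteBarrier(&target_, prev, obj);
+//   }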
+
+// Used when we don't have a specific edge to put in the store buffer.
+void PostWriteBarrierCell(Cell* cell, Cell* prev, Cell* next);
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_StoreBuffer_h */
diff --git a/js/src/gc/Tracer.cpp b/js/src/gc/Tracer.cpp
new file mode 100644
index 0000000000..00d4ff0ff7
--- /dev/null
+++ b/js/src/gc/Tracer.cpp
@@ -0,0 +1,377 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Tracer.h"
+
+#include "mozilla/DebugOnly.h"
+
+#include "NamespaceImports.h"
+
+#include "gc/GCInternals.h"
+#include "gc/Marking.h"
+#include "gc/PublicIterators.h"
+#include "gc/Zone.h"
+#include "util/Memory.h"
+#include "util/Text.h"
+#include "vm/BigIntType.h"
+#include "vm/JSFunction.h"
+#include "vm/JSScript.h"
+#include "vm/Shape.h"
+#include "vm/SymbolType.h"
+
+#include "gc/GC-inl.h"
+#include "gc/Marking-inl.h"
+#include "vm/Realm-inl.h"
+
+using namespace js;
+using namespace js::gc;
+using mozilla::DebugOnly;
+
+/*** Callback Tracer Dispatch ***********************************************/
+
+template <typename T>
+bool DoCallback(GenericTracer* trc, T** thingp, const char* name) {
+ CheckTracedThing(trc, *thingp);
+ JS::AutoTracingName ctx(trc, name);
+
+ T* thing = *thingp;
+ T* post = DispatchToOnEdge(trc, thing);
+ if (post != thing) {
+ *thingp = post;
+ }
+
+ return post;
+}
+#define INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS(name, type, _, _1) \
+ template bool DoCallback<type>(GenericTracer*, type**, const char*);
+JS_FOR_EACH_TRACEKIND(INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS);
+#undef INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS
+
+template <typename T>
+bool DoCallback(GenericTracer* trc, T* thingp, const char* name) {
+ JS::AutoTracingName ctx(trc, name);
+
+ // Return true by default. For some types the lambda below won't be called.
+ bool ret = true;
+ auto thing = MapGCThingTyped(*thingp, [trc, &ret](auto thing) {
+ CheckTracedThing(trc, thing);
+
+ auto* post = DispatchToOnEdge(trc, thing);
+ if (!post) {
+ ret = false;
+ return TaggedPtr<T>::empty();
+ }
+
+ return TaggedPtr<T>::wrap(post);
+ });
+
+  // Only update *thingp if the value changed, to avoid TSan false positives
+  // for template objects when using DumpHeapTracer or UbiNode tracers while
+  // Ion is compiling off-thread.
+ if (thing.isSome() && thing.value() != *thingp) {
+ *thingp = thing.value();
+ }
+
+ return ret;
+}
+template bool DoCallback<JS::Value>(GenericTracer*, JS::Value*, const char*);
+template bool DoCallback<JS::PropertyKey>(GenericTracer*, JS::PropertyKey*,
+ const char*);
+template bool DoCallback<TaggedProto>(GenericTracer*, TaggedProto*,
+ const char*);
+
+void JS::TracingContext::getEdgeName(char* buffer, size_t bufferSize) {
+ MOZ_ASSERT(bufferSize > 0);
+ if (functor_) {
+ (*functor_)(this, buffer, bufferSize);
+ return;
+ }
+ if (index_ != InvalidIndex) {
+ snprintf(buffer, bufferSize, "%s[%zu]", name_, index_);
+ return;
+ }
+ snprintf(buffer, bufferSize, "%s", name_);
+}
+
+/*** Public Tracing API *****************************************************/
+
+JS_PUBLIC_API void JS::TraceChildren(JSTracer* trc, GCCellPtr thing) {
+ ApplyGCThingTyped(thing.asCell(), thing.kind(), [trc](auto t) {
+ MOZ_ASSERT_IF(t->runtimeFromAnyThread() != trc->runtime(),
+ t->isPermanentAndMayBeShared() ||
+ t->zoneFromAnyThread()->isSelfHostingZone());
+ t->traceChildren(trc);
+ });
+}
+
+void js::gc::TraceIncomingCCWs(JSTracer* trc,
+ const JS::CompartmentSet& compartments) {
+ for (CompartmentsIter source(trc->runtime()); !source.done(); source.next()) {
+ if (compartments.has(source)) {
+ continue;
+ }
+ // Iterate over all compartments that |source| has wrappers for.
+ for (Compartment::WrappedObjectCompartmentEnum dest(source); !dest.empty();
+ dest.popFront()) {
+ if (!compartments.has(dest)) {
+ continue;
+ }
+ // Iterate over all wrappers from |source| to |dest| compartments.
+ for (Compartment::ObjectWrapperEnum e(source, dest); !e.empty();
+ e.popFront()) {
+ JSObject* obj = e.front().key();
+ MOZ_ASSERT(compartments.has(obj->compartment()));
+ mozilla::DebugOnly<JSObject*> prior = obj;
+ TraceManuallyBarrieredEdge(trc, &obj,
+ "cross-compartment wrapper target");
+ MOZ_ASSERT(obj == prior);
+ }
+ }
+ }
+}
+
+/*** Cycle Collector Helpers ************************************************/
+
+// This function is used by the Cycle Collector (CC) to trace through -- or in
+// CC parlance, traverse -- a Shape tree. The CC does not care about Shapes or
+// BaseShapes, only the JSObjects held live by them. Thus, we walk the Shape
+// lineage, but only report non-Shape things. This effectively makes the entire
+// shape lineage into a single node in the CC, saving tremendous amounts of
+// space and time in its algorithms.
+//
+// The algorithm implemented here uses only bounded stack space. This would be
+// possible to implement outside the engine, but would require much extra
+// infrastructure and many, many more slow GOT lookups. We have implemented it
+// inside SpiderMonkey, despite the lack of general applicability, for the
+// simplicity and performance of Firefox's embedding of this engine.
+void gc::TraceCycleCollectorChildren(JS::CallbackTracer* trc, Shape* shape) {
+ do {
+ MOZ_ASSERT(shape->base());
+ shape->base()->assertConsistency();
+
+ // Don't trace the propid because the CC doesn't care about jsid.
+
+ if (shape->hasGetterObject()) {
+ JSObject* tmp = shape->getterObject();
+ DoCallback(trc, &tmp, "getter");
+ MOZ_ASSERT(tmp == shape->getterObject());
+ }
+
+ if (shape->hasSetterObject()) {
+ JSObject* tmp = shape->setterObject();
+ DoCallback(trc, &tmp, "setter");
+ MOZ_ASSERT(tmp == shape->setterObject());
+ }
+
+ shape = shape->previous();
+ } while (shape);
+}
+
+void gc::TraceCycleCollectorChildren(JS::CallbackTracer* trc,
+ ObjectGroup* group) {
+ MOZ_ASSERT(trc->isCallbackTracer());
+
+ group->traceChildren(trc);
+}
+
+/*** Traced Edge Printer ****************************************************/
+
+static size_t CountDecimalDigits(size_t num) {
+ size_t numDigits = 0;
+ do {
+ num /= 10;
+ numDigits++;
+ } while (num > 0);
+
+ return numDigits;
+}
+
+static const char* StringKindHeader(JSString* str) {
+ MOZ_ASSERT(str->isLinear());
+
+ if (str->isAtom()) {
+ if (str->isPermanentAtom()) {
+ return "permanent atom: ";
+ }
+ return "atom: ";
+ }
+
+ if (str->isExtensible()) {
+ return "extensible: ";
+ }
+
+ if (str->isInline()) {
+ if (str->isFatInline()) {
+ return "fat inline: ";
+ }
+ return "inline: ";
+ }
+
+ if (str->isDependent()) {
+ return "dependent: ";
+ }
+
+ if (str->isExternal()) {
+ return "external: ";
+ }
+
+ return "linear: ";
+}
+
+void js::gc::GetTraceThingInfo(char* buf, size_t bufsize, void* thing,
+ JS::TraceKind kind, bool details) {
+ const char* name = nullptr; /* silence uninitialized warning */
+ size_t n;
+
+ if (bufsize == 0) {
+ return;
+ }
+
+ switch (kind) {
+ case JS::TraceKind::BaseShape:
+ name = "base_shape";
+ break;
+
+ case JS::TraceKind::JitCode:
+ name = "jitcode";
+ break;
+
+ case JS::TraceKind::Null:
+ name = "null_pointer";
+ break;
+
+ case JS::TraceKind::Object: {
+ name = static_cast<JSObject*>(thing)->getClass()->name;
+ break;
+ }
+
+ case JS::TraceKind::ObjectGroup:
+ name = "object_group";
+ break;
+
+ case JS::TraceKind::RegExpShared:
+ name = "reg_exp_shared";
+ break;
+
+ case JS::TraceKind::Scope:
+ name = "scope";
+ break;
+
+ case JS::TraceKind::Script:
+ name = "script";
+ break;
+
+ case JS::TraceKind::Shape:
+ name = "shape";
+ break;
+
+ case JS::TraceKind::String:
+ name = ((JSString*)thing)->isDependent() ? "substring" : "string";
+ break;
+
+ case JS::TraceKind::Symbol:
+ name = "symbol";
+ break;
+
+ case JS::TraceKind::BigInt:
+ name = "BigInt";
+ break;
+
+ default:
+ name = "INVALID";
+ break;
+ }
+
+ n = strlen(name);
+ if (n > bufsize - 1) {
+ n = bufsize - 1;
+ }
+ js_memcpy(buf, name, n + 1);
+ buf += n;
+ bufsize -= n;
+ *buf = '\0';
+
+ if (details && bufsize > 2) {
+ switch (kind) {
+ case JS::TraceKind::Object: {
+ JSObject* obj = (JSObject*)thing;
+ if (obj->is<JSFunction>()) {
+ JSFunction* fun = &obj->as<JSFunction>();
+ if (fun->displayAtom()) {
+ *buf++ = ' ';
+ bufsize--;
+ PutEscapedString(buf, bufsize, fun->displayAtom(), 0);
+ }
+ } else if (obj->getClass()->flags & JSCLASS_HAS_PRIVATE) {
+ snprintf(buf, bufsize, " %p", obj->as<NativeObject>().getPrivate());
+ } else {
+ snprintf(buf, bufsize, " <no private>");
+ }
+ break;
+ }
+
+ case JS::TraceKind::Script: {
+ auto* script = static_cast<js::BaseScript*>(thing);
+ snprintf(buf, bufsize, " %s:%u", script->filename(), script->lineno());
+ break;
+ }
+
+ case JS::TraceKind::String: {
+ *buf++ = ' ';
+ bufsize--;
+ JSString* str = (JSString*)thing;
+
+ if (str->isLinear()) {
+ const char* header = StringKindHeader(str);
+ bool willFit = str->length() + strlen("<length > ") + strlen(header) +
+ CountDecimalDigits(str->length()) <
+ bufsize;
+
+ n = snprintf(buf, bufsize, "<%slength %zu%s> ", header, str->length(),
+ willFit ? "" : " (truncated)");
+ buf += n;
+ bufsize -= n;
+
+ PutEscapedString(buf, bufsize, &str->asLinear(), 0);
+ } else {
+ snprintf(buf, bufsize, "<rope: length %zu>", str->length());
+ }
+ break;
+ }
+
+ case JS::TraceKind::Symbol: {
+ auto* sym = static_cast<JS::Symbol*>(thing);
+ if (JSAtom* desc = sym->description()) {
+ *buf++ = ' ';
+ bufsize--;
+ PutEscapedString(buf, bufsize, desc, 0);
+ } else {
+ snprintf(buf, bufsize, "<null>");
+ }
+ break;
+ }
+
+ case JS::TraceKind::Scope: {
+ auto* scope = static_cast<js::Scope*>(thing);
+ snprintf(buf, bufsize, " %s", js::ScopeKindString(scope->kind()));
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ buf[bufsize - 1] = '\0';
+}
+
+JS::CallbackTracer::CallbackTracer(JSContext* cx, JS::TracerKind kind,
+ JS::TraceOptions options)
+ : CallbackTracer(cx->runtime(), kind, options) {}
+
+uint32_t JSTracer::gcNumberForMarking() const {
+ MOZ_ASSERT(isMarkingTracer());
+ return runtime()->gc.gcNumber();
+}
diff --git a/js/src/gc/Tracer.h b/js/src/gc/Tracer.h
new file mode 100644
index 0000000000..dbe1682fa6
--- /dev/null
+++ b/js/src/gc/Tracer.h
@@ -0,0 +1,347 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_Tracer_h
+#define js_Tracer_h
+
+#include "gc/Barrier.h"
+#include "js/HashTable.h"
+#include "js/TracingAPI.h"
+
+namespace JS {
+using CompartmentSet =
+ js::HashSet<Compartment*, js::DefaultHasher<Compartment*>,
+ js::SystemAllocPolicy>;
+} // namespace JS
+
+namespace js {
+
+// Internal Tracing API
+//
+// Tracing is an abstract visitation of each edge in a JS heap graph.[1] The
+// most common (and performance sensitive) use of this infrastructure is for GC
+// "marking" as part of the mark-and-sweep collector; however, this
+// infrastructure is much more general than that and is used for many other
+// purposes as well.
+//
+// One commonly misunderstood subtlety of the tracing architecture is the role
+// of graph vertices versus graph edges. Graph vertices are the heap
+// allocations -- GC things -- that are returned by Allocate. Graph edges are
+// pointers -- including tagged pointers like Value and jsid -- that link the
+// allocations into a complex heap. The tracing API deals *only* with edges.
+// Any action taken on the target of a graph edge is independent of the tracing
+// itself.
+//
+// Another common misunderstanding relates to the role of the JSTracer. The
+// JSTracer instance determines what tracing does when visiting an edge; it
+// does not itself participate in the tracing process, other than to be passed
+// through as opaque data. It works like a closure in that respect.
+//
+// Tracing implementations internal to SpiderMonkey should use these interfaces
+// instead of the public interfaces in js/TracingAPI.h. Unlike the public
+// tracing methods, these work on internal types and avoid an external call.
+//
+// Note that the implementations for these methods are, surprisingly, in
+// js/src/gc/Marking.cpp. This is so that the compiler can inline as much as
+// possible in the common, marking pathways. Conceptually, however, they remain
+// as part of the generic "tracing" architecture, rather than the more specific
+// marking implementation of tracing.
+//
+// 1 - In SpiderMonkey, we call this concept tracing rather than visiting
+// because "visiting" is already used by the compiler. Also, it's been
+// called "tracing" forever and changing it would be extremely difficult at
+// this point.
+
+namespace gc {
+
+// Our barrier templates are parameterized on the pointer types so that we can
+// share the definitions with Value and jsid. Thus, we need to strip the
+// pointer before sending the type to BaseGCType and re-add it on the other
+// side. As such:
+template <typename T>
+struct PtrBaseGCType {
+ using type = T;
+};
+template <typename T>
+struct PtrBaseGCType<T*> {
+ using type = typename BaseGCType<T>::type*;
+};
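+
+// For example, PtrBaseGCType<JSFunction*>::type is JSObject* (the base GC
+// type shared by all object subclasses), while tagged types such as Value and
+// jsid pass through the primary template unchanged.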
+
+// Cast a possibly-derived T** pointer to a base class pointer.
+template <typename T>
+typename PtrBaseGCType<T>::type* ConvertToBase(T* thingp) {
+ return reinterpret_cast<typename PtrBaseGCType<T>::type*>(thingp);
+}
+
+// Internal methods to trace edges.
+template <typename T>
+bool TraceEdgeInternal(JSTracer* trc, T* thingp, const char* name);
+template <typename T>
+void TraceRangeInternal(JSTracer* trc, size_t len, T* vec, const char* name);
+template <typename T>
+bool TraceWeakMapKeyInternal(JSTracer* trc, Zone* zone, T* thingp,
+ const char* name);
+
+#ifdef DEBUG
+void AssertRootMarkingPhase(JSTracer* trc);
+#else
+inline void AssertRootMarkingPhase(JSTracer* trc) {}
+#endif
+
+} // namespace gc
+
+// Trace through a strong edge in the live object graph on behalf of
+// tracing. The effect of tracing the edge depends on the JSTracer being
+// used. For pointer types, |*thingp| must not be null.
+//
+// Note that weak edges are handled separately. GC things with weak edges must
+// not trace those edges during marking tracing (which would keep the referent
+// alive) but instead arrange for the edge to be swept by calling
+// js::gc::IsAboutToBeFinalized or TraceWeakEdge during sweeping.
+//
+// GC things that are weakly held in containers can use WeakMap or a container
+// wrapped in the WeakCache<> template to perform the appropriate sweeping.
+
+template <typename T>
+inline void TraceEdge(JSTracer* trc, const WriteBarriered<T>* thingp,
+ const char* name) {
+ gc::TraceEdgeInternal(trc, gc::ConvertToBase(thingp->unbarrieredAddress()),
+ name);
+}
+
+template <typename T>
+inline void TraceEdge(JSTracer* trc, WeakHeapPtr<T>* thingp, const char* name) {
+ gc::TraceEdgeInternal(trc, gc::ConvertToBase(thingp->unbarrieredAddress()),
+ name);
+}
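+
+// Example (an illustrative sketch; |MyThing| and its fields are hypothetical).
+// A GC thing's traceChildren implementation typically visits each strong edge
+// with one of these helpers:
+//
+//   void MyThing::traceChildren(JSTracer* trc) {
+//     TraceEdge(trc, &shape_, "MyThing shape");        // HeapPtr<Shape*>
+//     TraceNullableEdge(trc, &maybeProto_, "proto");   // may be null
+//   }
+//
+// What happens to each edge depends on the tracer: a marking tracer marks the
+// referent, while a moving-GC tracer updates the stored pointer in place.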
+
+template <class BC, class T>
+inline void TraceCellHeaderEdge(JSTracer* trc,
+ gc::CellWithTenuredGCPointer<BC, T>* thingp,
+ const char* name) {
+ T* thing = thingp->headerPtr();
+ gc::TraceEdgeInternal(trc, gc::ConvertToBase(&thing), name);
+ if (thing != thingp->headerPtr()) {
+ thingp->unbarrieredSetHeaderPtr(thing);
+ }
+}
+
+// Trace through a possibly-null edge in the live object graph on behalf of
+// tracing.
+
+template <typename T>
+inline void TraceNullableEdge(JSTracer* trc, const WriteBarriered<T>* thingp,
+ const char* name) {
+ if (InternalBarrierMethods<T>::isMarkable(thingp->get())) {
+ TraceEdge(trc, thingp, name);
+ }
+}
+
+template <typename T>
+inline void TraceNullableEdge(JSTracer* trc, WeakHeapPtr<T>* thingp,
+ const char* name) {
+ if (InternalBarrierMethods<T>::isMarkable(thingp->unbarrieredGet())) {
+ TraceEdge(trc, thingp, name);
+ }
+}
+
+template <class BC, class T>
+inline void TraceNullableCellHeaderEdge(
+ JSTracer* trc, gc::CellWithTenuredGCPointer<BC, T>* thingp,
+ const char* name) {
+ T* thing = thingp->headerPtr();
+ if (thing) {
+ gc::TraceEdgeInternal(trc, gc::ConvertToBase(&thing), name);
+ if (thing != thingp->headerPtr()) {
+ thingp->unbarrieredSetHeaderPtr(thing);
+ }
+ }
+}
+
+// Trace through a "root" edge. These edges are the initial edges in the object
+// graph traversal. Root edges are asserted to only be traversed in the initial
+// phase of a GC.
+
+template <typename T>
+inline void TraceRoot(JSTracer* trc, T* thingp, const char* name) {
+ gc::AssertRootMarkingPhase(trc);
+ gc::TraceEdgeInternal(trc, gc::ConvertToBase(thingp), name);
+}
+
+template <typename T>
+inline void TraceRoot(JSTracer* trc, WeakHeapPtr<T>* thingp, const char* name) {
+ TraceRoot(trc, thingp->unbarrieredAddress(), name);
+}
+
+// Identical to TraceRoot, except that this variant will not crash if |*thingp|
+// is null.
+
+template <typename T>
+inline void TraceNullableRoot(JSTracer* trc, T* thingp, const char* name) {
+ gc::AssertRootMarkingPhase(trc);
+ if (InternalBarrierMethods<T>::isMarkable(*thingp)) {
+ gc::TraceEdgeInternal(trc, gc::ConvertToBase(thingp), name);
+ }
+}
+
+template <typename T>
+inline void TraceNullableRoot(JSTracer* trc, WeakHeapPtr<T>* thingp,
+ const char* name) {
+ TraceNullableRoot(trc, thingp->unbarrieredAddress(), name);
+}
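+
+// Example (an illustrative sketch; the root provider shown is hypothetical).
+// Roots are reported from the root marking phase, e.g.:
+//
+//   void MyRootProvider::trace(JSTracer* trc) {
+//     TraceRoot(trc, &script_, "my script root");            // never null
+//     TraceNullableRoot(trc, &maybeObject_, "object root");  // may be null
+//   }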
+
+// Like TraceEdge, but for edges that do not use one of the automatic barrier
+// classes and, thus, must be treated specially for moving GC. This method is
+// separate from TraceEdge to make accidental use of such edges more obvious.
+
+template <typename T>
+inline void TraceManuallyBarrieredEdge(JSTracer* trc, T* thingp,
+ const char* name) {
+ gc::TraceEdgeInternal(trc, gc::ConvertToBase(thingp), name);
+}
+
+// Trace through a weak edge. If *thingp is not marked at the end of marking,
+// it is replaced by nullptr, and this method will return false to indicate that
+// the edge no longer exists.
+template <typename T>
+inline bool TraceManuallyBarrieredWeakEdge(JSTracer* trc, T* thingp,
+ const char* name) {
+ return gc::TraceEdgeInternal(trc, gc::ConvertToBase(thingp), name);
+}
+
+template <typename T>
+inline bool TraceWeakEdge(JSTracer* trc, BarrieredBase<T>* thingp,
+ const char* name) {
+ return gc::TraceEdgeInternal(
+ trc, gc::ConvertToBase(thingp->unbarrieredAddress()), name);
+}
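+
+// Example (an illustrative sketch; |MyWrapper| and |target_| are
+// hypothetical). A weak, manually barriered edge is swept like this:
+//
+//   bool MyWrapper::sweepTarget(JSTracer* trc) {
+//     JSObject* obj = target_;
+//     if (!TraceManuallyBarrieredWeakEdge(trc, &obj, "wrapper target")) {
+//       target_ = nullptr;  // referent was not marked; the edge is dropped
+//       return false;
+//     }
+//     target_ = obj;  // referent survived and may have moved
+//     return true;
+//   }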
+
+// Trace all edges contained in the given array.
+
+template <typename T>
+void TraceRange(JSTracer* trc, size_t len, BarrieredBase<T>* vec,
+ const char* name) {
+ gc::TraceRangeInternal(trc, len,
+ gc::ConvertToBase(vec[0].unbarrieredAddress()), name);
+}
+
+// Trace all root edges in the given array.
+
+template <typename T>
+void TraceRootRange(JSTracer* trc, size_t len, T* vec, const char* name) {
+ gc::AssertRootMarkingPhase(trc);
+ gc::TraceRangeInternal(trc, len, gc::ConvertToBase(vec), name);
+}
+
+// As below but with manual barriers.
+template <typename T>
+void TraceManuallyBarrieredCrossCompartmentEdge(JSTracer* trc, JSObject* src,
+ T* dst, const char* name);
+
+// Trace an edge that crosses compartment boundaries. If the compartment of the
+// destination thing is not being GC'd, then the edge will not be traced.
+template <typename T>
+void TraceCrossCompartmentEdge(JSTracer* trc, JSObject* src,
+ const WriteBarriered<T>* dst, const char* name) {
+ TraceManuallyBarrieredCrossCompartmentEdge(
+ trc, src, gc::ConvertToBase(dst->unbarrieredAddress()), name);
+}
+
+// Trace a weak map key. For debugger weak maps these may be cross compartment,
+// but the compartment must always be within the current sweep group.
+template <typename T>
+void TraceWeakMapKeyEdgeInternal(JSTracer* trc, Zone* weakMapZone, T** thingp,
+ const char* name);
+
+template <typename T>
+inline void TraceWeakMapKeyEdge(JSTracer* trc, Zone* weakMapZone,
+ const WriteBarriered<T>* thingp,
+ const char* name) {
+ TraceWeakMapKeyEdgeInternal(
+ trc, weakMapZone, gc::ConvertToBase(thingp->unbarrieredAddress()), name);
+}
+
+// Permanent atoms and well-known symbols are shared between runtimes and must
+// use a separate marking path so that we can filter them out of normal heap
+// tracing.
+template <typename T>
+void TraceProcessGlobalRoot(JSTracer* trc, T* thing, const char* name);
+
+// Trace a root edge that uses the base GC thing type, instead of a more
+// specific type.
+void TraceGenericPointerRoot(JSTracer* trc, gc::Cell** thingp,
+ const char* name);
+
+// Trace a non-root edge that uses the base GC thing type, instead of a more
+// specific type.
+void TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, gc::Cell** thingp,
+ const char* name);
+
+void TraceGCCellPtrRoot(JSTracer* trc, JS::GCCellPtr* thingp, const char* name);
+
+namespace gc {
+
+// Trace through a shape or group iteratively during cycle collection to avoid
+// deep or infinite recursion.
+void TraceCycleCollectorChildren(JS::CallbackTracer* trc, Shape* shape);
+void TraceCycleCollectorChildren(JS::CallbackTracer* trc, ObjectGroup* group);
+
+/**
+ * Trace every value within |compartments| that is wrapped by a
+ * cross-compartment wrapper from a compartment that is not an element of
+ * |compartments|.
+ */
+void TraceIncomingCCWs(JSTracer* trc, const JS::CompartmentSet& compartments);
+
+/* Get information about a GC thing. Used when dumping the heap. */
+void GetTraceThingInfo(char* buf, size_t bufsize, void* thing,
+ JS::TraceKind kind, bool includeDetails);
+
+// Overloaded function to call the correct GenericTracer method based on the
+// argument type.
+inline JSObject* DispatchToOnEdge(GenericTracer* trc, JSObject* obj) {
+ return trc->onObjectEdge(obj);
+}
+inline JSString* DispatchToOnEdge(GenericTracer* trc, JSString* str) {
+ return trc->onStringEdge(str);
+}
+inline JS::Symbol* DispatchToOnEdge(GenericTracer* trc, JS::Symbol* sym) {
+ return trc->onSymbolEdge(sym);
+}
+inline JS::BigInt* DispatchToOnEdge(GenericTracer* trc, JS::BigInt* bi) {
+ return trc->onBigIntEdge(bi);
+}
+inline js::BaseScript* DispatchToOnEdge(GenericTracer* trc,
+ js::BaseScript* script) {
+ return trc->onScriptEdge(script);
+}
+inline js::Shape* DispatchToOnEdge(GenericTracer* trc, js::Shape* shape) {
+ return trc->onShapeEdge(shape);
+}
+inline js::ObjectGroup* DispatchToOnEdge(GenericTracer* trc,
+ js::ObjectGroup* group) {
+ return trc->onObjectGroupEdge(group);
+}
+inline js::BaseShape* DispatchToOnEdge(GenericTracer* trc,
+ js::BaseShape* base) {
+ return trc->onBaseShapeEdge(base);
+}
+inline js::jit::JitCode* DispatchToOnEdge(GenericTracer* trc,
+ js::jit::JitCode* code) {
+ return trc->onJitCodeEdge(code);
+}
+inline js::Scope* DispatchToOnEdge(GenericTracer* trc, js::Scope* scope) {
+ return trc->onScopeEdge(scope);
+}
+inline js::RegExpShared* DispatchToOnEdge(GenericTracer* trc,
+ js::RegExpShared* shared) {
+ return trc->onRegExpSharedEdge(shared);
+}
+
+} // namespace gc
+} // namespace js
+
+#endif /* js_Tracer_h */
diff --git a/js/src/gc/Verifier.cpp b/js/src/gc/Verifier.cpp
new file mode 100644
index 0000000000..f48d4ab4fd
--- /dev/null
+++ b/js/src/gc/Verifier.cpp
@@ -0,0 +1,1110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/Sprintf.h"
+
+#include <algorithm>
+#include <utility>
+
+#ifdef MOZ_VALGRIND
+# include <valgrind/memcheck.h>
+#endif
+
+#include "gc/GCInternals.h"
+#include "gc/GCLock.h"
+#include "gc/PublicIterators.h"
+#include "gc/WeakMap.h"
+#include "gc/Zone.h"
+#include "js/friend/DumpFunctions.h" // js::DumpObject
+#include "js/HashTable.h"
+#include "vm/JSContext.h"
+
+#include "gc/ArenaList-inl.h"
+#include "gc/GC-inl.h"
+#include "gc/Marking-inl.h"
+#include "gc/PrivateIterators-inl.h"
+#include "vm/JSContext-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::DebugOnly;
+
+#ifdef DEBUG
+bool js::RuntimeIsVerifyingPreBarriers(JSRuntime* runtime) {
+# ifdef JS_GC_ZEAL
+ return runtime->gc.isVerifyPreBarriersEnabled();
+# else
+ return false;
+# endif
+}
+#endif
+
+#ifdef JS_GC_ZEAL
+
+/*
+ * Write barrier verification
+ *
+ * The next few functions are for write barrier verification.
+ *
+ * The VerifyBarriers function is a shorthand. It checks if a verification phase
+ * is currently running. If not, it starts one. Otherwise, it ends the current
+ * phase and starts a new one.
+ *
+ * The user can adjust the frequency of verifications, which causes
+ * VerifyBarriers to be a no-op for all but one out of every N calls. However,
+ * if the |always| parameter is true, it starts a new phase no matter what.
+ *
+ * Pre-Barrier Verifier:
+ * When StartVerifyBarriers is called, a snapshot is taken of all objects in
+ * the GC heap and saved in an explicit graph data structure. Later,
+ * EndVerifyBarriers traverses the heap again. Any pointer values that were in
+ * the snapshot and are no longer found must be marked; otherwise an assertion
+ * triggers. Note that we must not GC in between starting and finishing a
+ * verification phase.
+ */
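+
+/*
+ * Illustrative timeline of one verification phase:
+ *
+ *   StartVerifyBarriers()  // snapshot every edge in the heap graph
+ *   ... mutator runs; overwriting a snapshotted edge must fire the pre-write
+ *       barrier, which marks the edge's old target ...
+ *   EndVerifyBarriers()    // re-trace: any snapshot edge no longer present
+ *                          // must point at a marked thing, else we assert
+ */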
+
+struct EdgeValue {
+ JS::GCCellPtr thing;
+ const char* label;
+};
+
+struct VerifyNode {
+ JS::GCCellPtr thing;
+ uint32_t count;
+ EdgeValue edges[1];
+};
+
+typedef HashMap<Cell*, VerifyNode*, DefaultHasher<Cell*>, SystemAllocPolicy>
+ NodeMap;
+
+/*
+ * The verifier data structures are simple. The entire graph is stored in a
+ * single block of memory. At the beginning is a VerifyNode for the root
+ * node. It is followed by a sequence of EdgeValues--the exact number is given
+ * in the node. After the edges come more nodes and their edges.
+ *
+ * The edgeptr and term fields are used to allocate out of the block of memory
+ * for the graph. If we run out of memory (i.e., if edgeptr goes beyond term),
+ * we just abandon the verification.
+ *
+ * The nodemap field is a hashtable that maps from the address of the GC thing
+ * to the VerifyNode that represents it.
+ */
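+/*
+ * Illustrative layout of the snapshot block, which grows from |root| toward
+ * |term|:
+ *
+ *   [VerifyNode][EdgeValue]...[VerifyNode][EdgeValue]...<free space to term>
+ *
+ * |edgeptr| marks the end of the used portion.
+ */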
+class js::VerifyPreTracer final : public JS::CallbackTracer {
+ JS::AutoDisableGenerationalGC noggc;
+
+ void onChild(const JS::GCCellPtr& thing) override;
+
+ public:
+ /* The gcNumber when the verification began. */
+ uint64_t number;
+
+ /* This counts up to gcZealFrequency to decide whether to verify. */
+ int count;
+
+ /* This graph represents the initial GC "snapshot". */
+ VerifyNode* curnode;
+ VerifyNode* root;
+ char* edgeptr;
+ char* term;
+ NodeMap nodemap;
+
+ explicit VerifyPreTracer(JSRuntime* rt)
+ : JS::CallbackTracer(rt, JS::TracerKind::Callback,
+ JS::WeakEdgeTraceAction::Skip),
+ noggc(rt->mainContextFromOwnThread()),
+ number(rt->gc.gcNumber()),
+ count(0),
+ curnode(nullptr),
+ root(nullptr),
+ edgeptr(nullptr),
+ term(nullptr) {
+ // We don't care about weak edges here. Since they are not marked they
+ // cannot cause the problem that the pre-write barrier protects against.
+ }
+
+ ~VerifyPreTracer() { js_free(root); }
+};
+
+/*
+ * This function builds up the heap snapshot by adding edges to the current
+ * node.
+ */
+void VerifyPreTracer::onChild(const JS::GCCellPtr& thing) {
+ MOZ_ASSERT(!IsInsideNursery(thing.asCell()));
+
+ // Skip things in other runtimes.
+ if (thing.asCell()->asTenured().runtimeFromAnyThread() != runtime()) {
+ return;
+ }
+
+ edgeptr += sizeof(EdgeValue);
+ if (edgeptr >= term) {
+ edgeptr = term;
+ return;
+ }
+
+ VerifyNode* node = curnode;
+ uint32_t i = node->count;
+
+ node->edges[i].thing = thing;
+ node->edges[i].label = context().name();
+ node->count++;
+}
+
+static VerifyNode* MakeNode(VerifyPreTracer* trc, JS::GCCellPtr thing) {
+ NodeMap::AddPtr p = trc->nodemap.lookupForAdd(thing.asCell());
+ if (!p) {
+ VerifyNode* node = (VerifyNode*)trc->edgeptr;
+ trc->edgeptr += sizeof(VerifyNode) - sizeof(EdgeValue);
+ if (trc->edgeptr >= trc->term) {
+ trc->edgeptr = trc->term;
+ return nullptr;
+ }
+
+ node->thing = thing;
+ node->count = 0;
+ if (!trc->nodemap.add(p, thing.asCell(), node)) {
+ trc->edgeptr = trc->term;
+ return nullptr;
+ }
+
+ return node;
+ }
+ return nullptr;
+}
+
+static VerifyNode* NextNode(VerifyNode* node) {
+ if (node->count == 0) {
+ return (VerifyNode*)((char*)node + sizeof(VerifyNode) - sizeof(EdgeValue));
+ } else {
+ return (VerifyNode*)((char*)node + sizeof(VerifyNode) +
+ sizeof(EdgeValue) * (node->count - 1));
+ }
+}
+
+void gc::GCRuntime::startVerifyPreBarriers() {
+ if (verifyPreData || isIncrementalGCInProgress()) {
+ return;
+ }
+
+ JSContext* cx = rt->mainContextFromOwnThread();
+
+ if (IsIncrementalGCUnsafe(rt) != GCAbortReason::None ||
+ rt->hasHelperThreadZones()) {
+ return;
+ }
+
+ number++;
+
+ VerifyPreTracer* trc = js_new<VerifyPreTracer>(rt);
+ if (!trc) {
+ return;
+ }
+
+ AutoPrepareForTracing prep(cx);
+
+ {
+ AutoLockGC lock(this);
+ for (auto chunk = allNonEmptyChunks(lock); !chunk.done(); chunk.next()) {
+ chunk->markBits.clear();
+ }
+ }
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::TRACE_HEAP);
+
+ const size_t size = 64 * 1024 * 1024;
+ trc->root = (VerifyNode*)js_malloc(size);
+ if (!trc->root) {
+ goto oom;
+ }
+ trc->edgeptr = (char*)trc->root;
+ trc->term = trc->edgeptr + size;
+
+ /* Create the root node. */
+ trc->curnode = MakeNode(trc, JS::GCCellPtr());
+
+ MOZ_ASSERT(incrementalState == State::NotActive);
+ incrementalState = State::MarkRoots;
+
+ /* Make all the roots be edges emanating from the root node. */
+ traceRuntime(trc, prep);
+
+ VerifyNode* node;
+ node = trc->curnode;
+ if (trc->edgeptr == trc->term) {
+ goto oom;
+ }
+
+ /* For each edge, make a node for it if one doesn't already exist. */
+ while ((char*)node < trc->edgeptr) {
+ for (uint32_t i = 0; i < node->count; i++) {
+ EdgeValue& e = node->edges[i];
+ VerifyNode* child = MakeNode(trc, e.thing);
+ if (child) {
+ trc->curnode = child;
+ JS::TraceChildren(trc, e.thing);
+ }
+ if (trc->edgeptr == trc->term) {
+ goto oom;
+ }
+ }
+
+ node = NextNode(node);
+ }
+
+ verifyPreData = trc;
+ incrementalState = State::Mark;
+ marker.start();
+
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ MOZ_ASSERT(!zone->usedByHelperThread());
+ zone->setNeedsIncrementalBarrier(true);
+ zone->arenas.clearFreeLists();
+ }
+
+ return;
+
+oom:
+ incrementalState = State::NotActive;
+ js_delete(trc);
+ verifyPreData = nullptr;
+}
+
+static bool IsMarkedOrAllocated(TenuredCell* cell) {
+ return cell->isMarkedAny();
+}
+
+struct CheckEdgeTracer final : public JS::CallbackTracer {
+ VerifyNode* node;
+ explicit CheckEdgeTracer(JSRuntime* rt)
+ : JS::CallbackTracer(rt), node(nullptr) {}
+ void onChild(const JS::GCCellPtr& thing) override;
+};
+
+static const uint32_t MAX_VERIFIER_EDGES = 1000;
+
+/*
+ * This function is called by EndVerifyBarriers for every heap edge. If the edge
+ * already existed in the original snapshot, we "cancel it out" by overwriting
+ * it with nullptr. EndVerifyBarriers later asserts that the remaining
+ * non-nullptr edges (i.e., the ones from the original snapshot that must have
+ * been modified) must point to marked objects.
+ */
+void CheckEdgeTracer::onChild(const JS::GCCellPtr& thing) {
+ // Skip things in other runtimes.
+ if (thing.asCell()->asTenured().runtimeFromAnyThread() != runtime()) {
+ return;
+ }
+
+ /* Avoid n^2 behavior. */
+ if (node->count > MAX_VERIFIER_EDGES) {
+ return;
+ }
+
+ for (uint32_t i = 0; i < node->count; i++) {
+ if (node->edges[i].thing == thing) {
+ node->edges[i].thing = JS::GCCellPtr();
+ return;
+ }
+ }
+}
+
+static bool IsMarkedOrAllocated(const EdgeValue& edge) {
+ if (!edge.thing || IsMarkedOrAllocated(&edge.thing.asCell()->asTenured())) {
+ return true;
+ }
+
+ // Permanent atoms and well-known symbols aren't marked during graph
+ // traversal.
+ if (edge.thing.is<JSString>() &&
+ edge.thing.as<JSString>().isPermanentAtom()) {
+ return true;
+ }
+ if (edge.thing.is<JS::Symbol>() &&
+ edge.thing.as<JS::Symbol>().isWellKnownSymbol()) {
+ return true;
+ }
+
+ return false;
+}
+
+void gc::GCRuntime::endVerifyPreBarriers() {
+ VerifyPreTracer* trc = verifyPreData;
+
+ if (!trc) {
+ return;
+ }
+
+ MOZ_ASSERT(!JS::IsGenerationalGCEnabled(rt));
+
+ AutoPrepareForTracing prep(rt->mainContextFromOwnThread());
+
+ bool compartmentCreated = false;
+
+ /* We need to disable barriers before tracing, which may invoke barriers. */
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ if (!zone->needsIncrementalBarrier()) {
+ compartmentCreated = true;
+ }
+ zone->setNeedsIncrementalBarrier(false);
+ }
+
+ verifyPreData = nullptr;
+ MOZ_ASSERT(incrementalState == State::Mark);
+ incrementalState = State::NotActive;
+
+ if (!compartmentCreated && IsIncrementalGCUnsafe(rt) == GCAbortReason::None &&
+ !rt->hasHelperThreadZones()) {
+ CheckEdgeTracer cetrc(rt);
+
+ /* Start after the roots. */
+ VerifyNode* node = NextNode(trc->root);
+ while ((char*)node < trc->edgeptr) {
+ cetrc.node = node;
+ JS::TraceChildren(&cetrc, node->thing);
+
+ if (node->count <= MAX_VERIFIER_EDGES) {
+ for (uint32_t i = 0; i < node->count; i++) {
+ EdgeValue& edge = node->edges[i];
+ if (!IsMarkedOrAllocated(edge)) {
+ char msgbuf[1024];
+ SprintfLiteral(
+ msgbuf,
+ "[barrier verifier] Unmarked edge: %s %p '%s' edge to %s %p",
+ JS::GCTraceKindToAscii(node->thing.kind()),
+ node->thing.asCell(), edge.label,
+ JS::GCTraceKindToAscii(edge.thing.kind()), edge.thing.asCell());
+ MOZ_ReportAssertionFailure(msgbuf, __FILE__, __LINE__);
+ MOZ_CRASH();
+ }
+ }
+ }
+
+ node = NextNode(node);
+ }
+ }
+
+ marker.reset();
+ marker.stop();
+
+ js_delete(trc);
+}
+
+/*** Barrier Verifier Scheduling ***/
+
+void gc::GCRuntime::verifyPreBarriers() {
+ if (verifyPreData) {
+ endVerifyPreBarriers();
+ } else {
+ startVerifyPreBarriers();
+ }
+}
+
+void gc::VerifyBarriers(JSRuntime* rt, VerifierType type) {
+ if (type == PreBarrierVerifier) {
+ rt->gc.verifyPreBarriers();
+ }
+}
+
+void gc::GCRuntime::maybeVerifyPreBarriers(bool always) {
+ if (!hasZealMode(ZealMode::VerifierPre)) {
+ return;
+ }
+
+ if (rt->mainContextFromOwnThread()->suppressGC) {
+ return;
+ }
+
+ if (verifyPreData) {
+ if (++verifyPreData->count < zealFrequency && !always) {
+ return;
+ }
+
+ endVerifyPreBarriers();
+ }
+
+ startVerifyPreBarriers();
+}
+
+void js::gc::MaybeVerifyBarriers(JSContext* cx, bool always) {
+ GCRuntime* gc = &cx->runtime()->gc;
+ gc->maybeVerifyPreBarriers(always);
+}
+
+void js::gc::GCRuntime::finishVerifier() {
+ if (verifyPreData) {
+ js_delete(verifyPreData.ref());
+ verifyPreData = nullptr;
+ }
+}
+
+struct GCChunkHasher {
+ typedef gc::TenuredChunk* Lookup;
+
+ /*
+ * Strip zeros for better distribution after multiplying by the golden
+ * ratio.
+ */
+ static HashNumber hash(gc::TenuredChunk* chunk) {
+ MOZ_ASSERT(!(uintptr_t(chunk) & gc::ChunkMask));
+ return HashNumber(uintptr_t(chunk) >> gc::ChunkShift);
+ }
+
+ static bool match(gc::TenuredChunk* k, gc::TenuredChunk* l) {
+ MOZ_ASSERT(!(uintptr_t(k) & gc::ChunkMask));
+ MOZ_ASSERT(!(uintptr_t(l) & gc::ChunkMask));
+ return k == l;
+ }
+};
+
+class js::gc::MarkingValidator {
+ public:
+ explicit MarkingValidator(GCRuntime* gc);
+ void nonIncrementalMark(AutoGCSession& session);
+ void validate();
+
+ private:
+ GCRuntime* gc;
+ bool initialized;
+
+ using BitmapMap = HashMap<TenuredChunk*, UniquePtr<MarkBitmap>, GCChunkHasher,
+ SystemAllocPolicy>;
+ BitmapMap map;
+};
+
+js::gc::MarkingValidator::MarkingValidator(GCRuntime* gc)
+ : gc(gc), initialized(false) {}
+
+void js::gc::MarkingValidator::nonIncrementalMark(AutoGCSession& session) {
+ /*
+ * Perform a non-incremental mark for all collecting zones and record
+ * the results for later comparison.
+ *
+ * Currently this does not validate gray marking.
+ */
+
+ JSRuntime* runtime = gc->rt;
+ GCMarker* gcmarker = &gc->marker;
+
+ MOZ_ASSERT(!gcmarker->isWeakMarking());
+
+ /* Wait for off-thread parsing which can allocate. */
+ WaitForAllHelperThreads();
+
+ gc->waitBackgroundAllocEnd();
+ gc->waitBackgroundSweepEnd();
+
+ /* Save existing mark bits. */
+ {
+ AutoLockGC lock(gc);
+ for (auto chunk = gc->allNonEmptyChunks(lock); !chunk.done();
+ chunk.next()) {
+ MarkBitmap* bitmap = &chunk->markBits;
+ auto entry = MakeUnique<MarkBitmap>();
+ if (!entry) {
+ return;
+ }
+
+ memcpy((void*)entry->bitmap, (void*)bitmap->bitmap,
+ sizeof(bitmap->bitmap));
+
+ if (!map.putNew(chunk, std::move(entry))) {
+ return;
+ }
+ }
+ }
+
+ /*
+ * Temporarily clear the weakmaps' mark flags for the compartments we are
+ * collecting.
+ */
+
+ WeakMapColors markedWeakMaps;
+
+ /*
+ * For saving, smush all of the keys into one big table and split them back
+ * up into per-zone tables when restoring.
+ */
+ gc::WeakKeyTable savedWeakKeys(SystemAllocPolicy(),
+ runtime->randomHashCodeScrambler());
+ if (!savedWeakKeys.init()) {
+ return;
+ }
+
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ if (!WeakMapBase::saveZoneMarkedWeakMaps(zone, markedWeakMaps)) {
+ return;
+ }
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ for (gc::WeakKeyTable::Range r = zone->gcWeakKeys().all(); !r.empty();
+ r.popFront()) {
+ MOZ_ASSERT(r.front().key->asTenured().zone() == zone);
+ if (!savedWeakKeys.put(r.front().key, std::move(r.front().value))) {
+ oomUnsafe.crash("saving weak keys table for validator");
+ }
+ }
+
+ if (!zone->gcWeakKeys().clear()) {
+ oomUnsafe.crash("clearing weak keys table for validator");
+ }
+ }
+
+ /*
+ * After this point, the function should run to completion, so we shouldn't
+ * do anything fallible.
+ */
+ initialized = true;
+
+ /* Re-do all the marking, but non-incrementally. */
+ js::gc::State state = gc->incrementalState;
+ gc->incrementalState = State::MarkRoots;
+
+ {
+ gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::PREPARE);
+
+ {
+ gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::UNMARK);
+
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ WeakMapBase::unmarkZone(zone);
+ }
+
+ MOZ_ASSERT(gcmarker->isDrained());
+ gcmarker->reset();
+
+ AutoLockGC lock(gc);
+ for (auto chunk = gc->allNonEmptyChunks(lock); !chunk.done();
+ chunk.next()) {
+ chunk->markBits.clear();
+ }
+ }
+ }
+
+ {
+ gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::MARK);
+
+ gc->traceRuntimeForMajorGC(gcmarker, session);
+
+ gc->incrementalState = State::Mark;
+ gc->drainMarkStack();
+ }
+
+ gc->incrementalState = State::Sweep;
+ {
+ gcstats::AutoPhase ap1(gc->stats(), gcstats::PhaseKind::SWEEP);
+ gcstats::AutoPhase ap2(gc->stats(), gcstats::PhaseKind::SWEEP_MARK);
+
+ gc->markAllWeakReferences();
+
+ /* Update zone state for gray marking. */
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ zone->changeGCState(Zone::MarkBlackOnly, Zone::MarkBlackAndGray);
+ }
+
+ AutoSetMarkColor setColorGray(*gcmarker, MarkColor::Gray);
+ gcmarker->setMainStackColor(MarkColor::Gray);
+
+ gc->markAllGrayReferences(gcstats::PhaseKind::SWEEP_MARK_GRAY);
+ gc->markAllWeakReferences();
+ gc->marker.setMainStackColor(MarkColor::Black);
+
+ /* Restore zone state. */
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ zone->changeGCState(Zone::MarkBlackAndGray, Zone::MarkBlackOnly);
+ }
+ MOZ_ASSERT(gc->marker.isDrained());
+ }
+
+ /* Take a copy of the non-incremental mark state and restore the original. */
+ {
+ AutoLockGC lock(gc);
+ for (auto chunk = gc->allNonEmptyChunks(lock); !chunk.done();
+ chunk.next()) {
+ MarkBitmap* bitmap = &chunk->markBits;
+ auto ptr = map.lookup(chunk);
+ MOZ_RELEASE_ASSERT(ptr, "Chunk not found in map");
+ MarkBitmap* entry = ptr->value().get();
+ for (size_t i = 0; i < MarkBitmap::WordCount; i++) {
+ uintptr_t v = entry->bitmap[i];
+ entry->bitmap[i] = uintptr_t(bitmap->bitmap[i]);
+ bitmap->bitmap[i] = v;
+ }
+ }
+ }
+
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ WeakMapBase::unmarkZone(zone);
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!zone->gcWeakKeys().clear()) {
+ oomUnsafe.crash("clearing weak keys table for validator");
+ }
+ }
+
+ WeakMapBase::restoreMarkedWeakMaps(markedWeakMaps);
+
+ for (gc::WeakKeyTable::Range r = savedWeakKeys.all(); !r.empty();
+ r.popFront()) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ Zone* zone = r.front().key->asTenured().zone();
+ if (!zone->gcWeakKeys().put(r.front().key, std::move(r.front().value))) {
+ oomUnsafe.crash("restoring weak keys table for validator");
+ }
+ }
+
+ gc->incrementalState = state;
+}
+
+void js::gc::MarkingValidator::validate() {
+ /*
+ * Validates the incremental marking for a single compartment by comparing
+ * the mark bits to those previously recorded for a non-incremental mark.
+ */
+
+ if (!initialized) {
+ return;
+ }
+
+ MOZ_ASSERT(!gc->marker.isWeakMarking());
+
+ gc->waitBackgroundSweepEnd();
+
+ AutoLockGC lock(gc->rt);
+ for (auto chunk = gc->allNonEmptyChunks(lock); !chunk.done(); chunk.next()) {
+ BitmapMap::Ptr ptr = map.lookup(chunk);
+ if (!ptr) {
+ continue; /* Allocated after we did the non-incremental mark. */
+ }
+
+ MarkBitmap* bitmap = ptr->value().get();
+ MarkBitmap* incBitmap = &chunk->markBits;
+
+ for (size_t i = 0; i < ArenasPerChunk; i++) {
+ if (chunk->decommittedArenas[i]) {
+ continue;
+ }
+ Arena* arena = &chunk->arenas[i];
+ if (!arena->allocated()) {
+ continue;
+ }
+ if (!arena->zone->isGCSweeping()) {
+ continue;
+ }
+
+ AllocKind kind = arena->getAllocKind();
+ uintptr_t thing = arena->thingsStart();
+ uintptr_t end = arena->thingsEnd();
+ while (thing < end) {
+ auto* cell = reinterpret_cast<TenuredCell*>(thing);
+
+ /*
+ * If a non-incremental GC wouldn't have collected a cell, then
+ * an incremental GC won't collect it.
+ */
+ if (bitmap->isMarkedAny(cell)) {
+ MOZ_RELEASE_ASSERT(incBitmap->isMarkedAny(cell));
+ }
+
+ /*
+ * If the cycle collector isn't allowed to collect an object
+ * after a non-incremental GC has run, then it isn't allowed to
+         * collect it after an incremental GC.
+ */
+ if (!bitmap->isMarkedGray(cell)) {
+ MOZ_RELEASE_ASSERT(!incBitmap->isMarkedGray(cell));
+ }
+
+ thing += Arena::thingSize(kind);
+ }
+ }
+ }
+}
+
+void GCRuntime::computeNonIncrementalMarkingForValidation(
+ AutoGCSession& session) {
+ MOZ_ASSERT(!markingValidator);
+ if (isIncremental && hasZealMode(ZealMode::IncrementalMarkingValidator)) {
+ markingValidator = js_new<MarkingValidator>(this);
+ }
+ if (markingValidator) {
+ markingValidator->nonIncrementalMark(session);
+ }
+}
+
+void GCRuntime::validateIncrementalMarking() {
+ if (markingValidator) {
+ markingValidator->validate();
+ }
+}
+
+void GCRuntime::finishMarkingValidation() {
+ js_delete(markingValidator.ref());
+ markingValidator = nullptr;
+}
+
+#endif /* JS_GC_ZEAL */
+
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+
+class HeapCheckTracerBase : public JS::CallbackTracer {
+ public:
+ explicit HeapCheckTracerBase(JSRuntime* rt, JS::TraceOptions options);
+ bool traceHeap(AutoTraceSession& session);
+ virtual void checkCell(Cell* cell) = 0;
+
+ protected:
+ void dumpCellInfo(Cell* cell);
+ void dumpCellPath();
+
+ Cell* parentCell() {
+ return parentIndex == -1 ? nullptr : stack[parentIndex].thing.asCell();
+ }
+
+ size_t failures;
+
+ private:
+ void onChild(const JS::GCCellPtr& thing) override;
+
+ struct WorkItem {
+ WorkItem(JS::GCCellPtr thing, const char* name, int parentIndex)
+ : thing(thing),
+ name(name),
+ parentIndex(parentIndex),
+ processed(false) {}
+
+ JS::GCCellPtr thing;
+ const char* name;
+ int parentIndex;
+ bool processed;
+ };
+
+ JSRuntime* rt;
+ bool oom;
+ HashSet<Cell*, DefaultHasher<Cell*>, SystemAllocPolicy> visited;
+ Vector<WorkItem, 0, SystemAllocPolicy> stack;
+ int parentIndex;
+};
+
+HeapCheckTracerBase::HeapCheckTracerBase(JSRuntime* rt,
+ JS::TraceOptions options)
+ : CallbackTracer(rt, JS::TracerKind::Callback, options),
+ failures(0),
+ rt(rt),
+ oom(false),
+ parentIndex(-1) {}
+
+void HeapCheckTracerBase::onChild(const JS::GCCellPtr& thing) {
+ Cell* cell = thing.asCell();
+ checkCell(cell);
+
+ if (visited.lookup(cell)) {
+ return;
+ }
+
+ if (!visited.put(cell)) {
+ oom = true;
+ return;
+ }
+
+ // Don't trace into GC things owned by another runtime.
+ if (cell->runtimeFromAnyThread() != rt) {
+ return;
+ }
+
+  // Don't trace into GC things in zones being used by helper threads.
+ Zone* zone = thing.asCell()->zone();
+ if (zone->usedByHelperThread()) {
+ return;
+ }
+
+ WorkItem item(thing, context().name(), parentIndex);
+ if (!stack.append(item)) {
+ oom = true;
+ }
+}
+
+bool HeapCheckTracerBase::traceHeap(AutoTraceSession& session) {
+ // The analysis thinks that traceRuntime might GC by calling a GC callback.
+ JS::AutoSuppressGCAnalysis nogc;
+ if (!rt->isBeingDestroyed()) {
+ rt->gc.traceRuntime(this, session);
+ }
+
+ while (!stack.empty() && !oom) {
+ WorkItem item = stack.back();
+ if (item.processed) {
+ stack.popBack();
+ } else {
+ parentIndex = stack.length() - 1;
+ stack.back().processed = true;
+ TraceChildren(this, item.thing);
+ }
+ }
+
+ return !oom;
+}
+
+void HeapCheckTracerBase::dumpCellInfo(Cell* cell) {
+ auto kind = cell->getTraceKind();
+ JSObject* obj =
+ kind == JS::TraceKind::Object ? static_cast<JSObject*>(cell) : nullptr;
+
+ fprintf(stderr, "%s %s", cell->color().name(), GCTraceKindToAscii(kind));
+ if (obj) {
+ fprintf(stderr, " %s", obj->getClass()->name);
+ }
+ fprintf(stderr, " %p", cell);
+ if (obj) {
+ fprintf(stderr, " (compartment %p)", obj->compartment());
+ }
+}
+
+void HeapCheckTracerBase::dumpCellPath() {
+ const char* name = context().name();
+ for (int index = parentIndex; index != -1; index = stack[index].parentIndex) {
+ const WorkItem& parent = stack[index];
+ Cell* cell = parent.thing.asCell();
+ fprintf(stderr, " from ");
+ dumpCellInfo(cell);
+ fprintf(stderr, " %s edge\n", name);
+ name = parent.name;
+ }
+ fprintf(stderr, " from root %s\n", name);
+}
+
+class CheckHeapTracer final : public HeapCheckTracerBase {
+ public:
+ enum GCType { Moving, NonMoving };
+
+ explicit CheckHeapTracer(JSRuntime* rt, GCType type);
+ void check(AutoTraceSession& session);
+
+ private:
+ void checkCell(Cell* cell) override;
+ GCType gcType;
+};
+
+CheckHeapTracer::CheckHeapTracer(JSRuntime* rt, GCType type)
+ : HeapCheckTracerBase(rt, JS::WeakMapTraceAction::TraceKeysAndValues),
+ gcType(type) {}
+
+inline static bool IsValidGCThingPointer(Cell* cell) {
+ return (uintptr_t(cell) & CellAlignMask) == 0;
+}
+
+void CheckHeapTracer::checkCell(Cell* cell) {
+  // The pointer must be a valid, cell-aligned address. After a moving GC the
+  // cell must still be valid; after a non-moving GC it must not have been
+  // forwarded.
+ if (!IsValidGCThingPointer(cell) ||
+ ((gcType == GCType::Moving) && !IsGCThingValidAfterMovingGC(cell)) ||
+ ((gcType == GCType::NonMoving) && cell->isForwarded())) {
+ failures++;
+ fprintf(stderr, "Bad pointer %p\n", cell);
+ dumpCellPath();
+ }
+}
+
+void CheckHeapTracer::check(AutoTraceSession& session) {
+ if (!traceHeap(session)) {
+ return;
+ }
+
+ if (failures) {
+ fprintf(stderr, "Heap check: %zu failure(s)\n", failures);
+ }
+ MOZ_RELEASE_ASSERT(failures == 0);
+}
+
+void js::gc::CheckHeapAfterGC(JSRuntime* rt) {
+ AutoTraceSession session(rt);
+ CheckHeapTracer::GCType gcType;
+
+ if (rt->gc.nursery().isEmpty()) {
+ gcType = CheckHeapTracer::GCType::Moving;
+ } else {
+ gcType = CheckHeapTracer::GCType::NonMoving;
+ }
+
+ CheckHeapTracer tracer(rt, gcType);
+ tracer.check(session);
+}
+
+class CheckGrayMarkingTracer final : public HeapCheckTracerBase {
+ public:
+ explicit CheckGrayMarkingTracer(JSRuntime* rt);
+ bool check(AutoTraceSession& session);
+
+ private:
+ void checkCell(Cell* cell) override;
+};
+
+CheckGrayMarkingTracer::CheckGrayMarkingTracer(JSRuntime* rt)
+ : HeapCheckTracerBase(rt, JS::TraceOptions(JS::WeakMapTraceAction::Skip,
+ JS::WeakEdgeTraceAction::Skip)) {
+ // Weak gray->black edges are allowed.
+}
+
+void CheckGrayMarkingTracer::checkCell(Cell* cell) {
+ Cell* parent = parentCell();
+ if (!parent) {
+ return;
+ }
+
+ if (parent->isMarkedBlack() && cell->isMarkedGray()) {
+ failures++;
+
+ fprintf(stderr, "Found black to gray edge to ");
+ dumpCellInfo(cell);
+ fprintf(stderr, "\n");
+ dumpCellPath();
+
+# ifdef DEBUG
+ if (parent->is<JSObject>()) {
+ fprintf(stderr, "\nSource: ");
+ DumpObject(parent->as<JSObject>(), stderr);
+ }
+ if (cell->is<JSObject>()) {
+ fprintf(stderr, "\nTarget: ");
+ DumpObject(cell->as<JSObject>(), stderr);
+ }
+# endif
+ }
+}
+
+bool CheckGrayMarkingTracer::check(AutoTraceSession& session) {
+ if (!traceHeap(session)) {
+ return true; // Ignore failure.
+ }
+
+ return failures == 0;
+}
+
+JS_FRIEND_API bool js::CheckGrayMarkingState(JSRuntime* rt) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
+ MOZ_ASSERT(!rt->gc.isIncrementalGCInProgress());
+ if (!rt->gc.areGrayBitsValid()) {
+ return true;
+ }
+
+ gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::TRACE_HEAP);
+ AutoTraceSession session(rt);
+ CheckGrayMarkingTracer tracer(rt);
+
+ return tracer.check(session);
+}
+
+static JSObject* MaybeGetDelegate(Cell* cell) {
+ if (!cell->is<JSObject>()) {
+ return nullptr;
+ }
+
+ JSObject* object = cell->as<JSObject>();
+ return js::UncheckedUnwrapWithoutExpose(object);
+}
+
+bool js::gc::CheckWeakMapEntryMarking(const WeakMapBase* map, Cell* key,
+ Cell* value) {
+ bool ok = true;
+
+ Zone* zone = map->zone();
+ MOZ_ASSERT(CurrentThreadCanAccessZone(zone));
+ MOZ_ASSERT(zone->isGCMarking());
+
+ JSObject* object = map->memberOf;
+ MOZ_ASSERT_IF(object, object->zone() == zone);
+
+ // Debugger weak maps can have keys in different zones.
+ Zone* keyZone = key->zoneFromAnyThread();
+ MOZ_ASSERT_IF(!map->allowKeysInOtherZones(),
+ keyZone == zone || keyZone->isAtomsZone());
+
+ Zone* valueZone = value->zoneFromAnyThread();
+ MOZ_ASSERT(valueZone == zone || valueZone->isAtomsZone());
+
+ if (object && object->color() != map->mapColor) {
+ fprintf(stderr, "WeakMap object is marked differently to the map\n");
+ fprintf(stderr, "(map %p is %s, object %p is %s)\n", map,
+ map->mapColor.name(), object, object->color().name());
+ ok = false;
+ }
+
+ // Values belonging to other runtimes or in uncollected zones are treated as
+ // black.
+ JSRuntime* mapRuntime = zone->runtimeFromAnyThread();
+ auto effectiveColor = [=](Cell* cell, Zone* cellZone) -> CellColor {
+ if (cell->runtimeFromAnyThread() != mapRuntime) {
+ return CellColor::Black;
+ }
+ if (cellZone->isGCMarkingOrSweeping()) {
+ return cell->color();
+ }
+ return CellColor::Black;
+ };
+
+ CellColor valueColor = effectiveColor(value, valueZone);
+ CellColor keyColor = effectiveColor(key, keyZone);
+
+ if (valueColor < std::min(map->mapColor, keyColor)) {
+ fprintf(stderr, "WeakMap value is less marked than map and key\n");
+ fprintf(stderr, "(map %p is %s, key %p is %s, value %p is %s)\n", map,
+ map->mapColor.name(), key, keyColor.name(), value,
+ valueColor.name());
+# ifdef DEBUG
+ fprintf(stderr, "Key:\n");
+ key->dump();
+ if (auto delegate = MaybeGetDelegate(key); delegate) {
+ fprintf(stderr, "Delegate:\n");
+ delegate->dump();
+ }
+ fprintf(stderr, "Value:\n");
+ value->dump();
+# endif
+
+ ok = false;
+ }
+
+ JSObject* delegate = MaybeGetDelegate(key);
+ if (!delegate) {
+ return ok;
+ }
+
+ CellColor delegateColor = effectiveColor(delegate, delegate->zone());
+ if (keyColor < std::min(map->mapColor, delegateColor)) {
+ fprintf(stderr, "WeakMap key is less marked than map or delegate\n");
+ fprintf(stderr, "(map %p is %s, delegate %p is %s, key %p is %s)\n", map,
+ map->mapColor.name(), delegate, delegateColor.name(), key,
+ keyColor.name());
+ ok = false;
+ }
+
+ return ok;
+}
+
+#endif // defined(JS_GC_ZEAL) || defined(DEBUG)
+
+#ifdef DEBUG
+// Return whether an arbitrary pointer is within a cell with the given
+// traceKind. Only for assertions.
+bool GCRuntime::isPointerWithinTenuredCell(void* ptr, JS::TraceKind traceKind) {
+ AutoLockGC lock(this);
+ for (auto chunk = allNonEmptyChunks(lock); !chunk.done(); chunk.next()) {
+ MOZ_ASSERT(!chunk->isNurseryChunk());
+ if (ptr >= &chunk->arenas[0] && ptr < &chunk->arenas[ArenasPerChunk]) {
+ auto* arena = reinterpret_cast<Arena*>(uintptr_t(ptr) & ~ArenaMask);
+ if (!arena->allocated()) {
+ return false;
+ }
+
+ return MapAllocToTraceKind(arena->getAllocKind()) == traceKind;
+ }
+ }
+
+ return false;
+}
+#endif // DEBUG
diff --git a/js/src/gc/WeakMap-inl.h b/js/src/gc/WeakMap-inl.h
new file mode 100644
index 0000000000..3e3754f878
--- /dev/null
+++ b/js/src/gc/WeakMap-inl.h
@@ -0,0 +1,439 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_WeakMap_inl_h
+#define gc_WeakMap_inl_h
+
+#include "gc/WeakMap.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Unused.h"
+
+#include <algorithm>
+#include <type_traits>
+
+#include "gc/Zone.h"
+#include "js/TraceKind.h"
+#include "vm/JSContext.h"
+
+namespace js {
+namespace gc {
+
+// Specializations for barriered types.
+template <typename T>
+inline Cell* ToMarkable(WriteBarriered<T>* thingp) {
+ return ToMarkable(thingp->get());
+}
+
+namespace detail {
+
+template <typename T>
+static T ExtractUnbarriered(const WriteBarriered<T>& v) {
+ return v.get();
+}
+
+template <typename T>
+static T* ExtractUnbarriered(T* v) {
+ return v;
+}
+
+// Return the effective cell color given the current marking state.
+// This must be kept in sync with ShouldMark in Marking.cpp.
+template <typename T>
+static CellColor GetEffectiveColor(JSRuntime* rt, const T& item) {
+ Cell* cell = ToMarkable(item);
+ if (!cell->isTenured()) {
+ return CellColor::Black;
+ }
+ const TenuredCell& t = cell->asTenured();
+ if (rt != t.runtimeFromAnyThread()) {
+ return CellColor::Black;
+ }
+ if (!t.zoneFromAnyThread()->shouldMarkInZone()) {
+ return CellColor::Black;
+ }
+ return cell->color();
+}
+
+// Only objects have delegates, so default to returning nullptr. Note that some
+// compilation units will only ever use the object version.
+static MOZ_MAYBE_UNUSED JSObject* GetDelegateInternal(gc::Cell* key) {
+ return nullptr;
+}
+
+static JSObject* GetDelegateInternal(JSObject* key) {
+ JSObject* delegate = UncheckedUnwrapWithoutExpose(key);
+ return (key == delegate) ? nullptr : delegate;
+}
+
+// Use a helper function to do overload resolution to handle cases like
+// Heap<ObjectSubclass*>: find everything that is convertible to JSObject* (and
+// avoid calling barriers).
+template <typename T>
+static inline JSObject* GetDelegate(const T& key) {
+ return GetDelegateInternal(ExtractUnbarriered(key));
+}
+
+template <>
+inline JSObject* GetDelegate(gc::Cell* const&) = delete;
+
+} /* namespace detail */
+} /* namespace gc */
+
+// Weakmap entry -> value edges are only visible if the map is traced, which
+// only happens if the map zone is being collected. If the map and the value
+// were in different zones, then we could have a case where the map zone is not
+// collecting but the value zone is, and incorrectly free a value that is
+// reachable solely through weakmaps.
+template <class K, class V>
+void WeakMap<K, V>::assertMapIsSameZoneWithValue(const V& v) {
+#ifdef DEBUG
+ gc::Cell* cell = gc::ToMarkable(v);
+ if (cell) {
+ Zone* cellZone = cell->zoneFromAnyThread();
+ MOZ_ASSERT(zone() == cellZone || cellZone->isAtomsZone());
+ }
+#endif
+}
+
+template <class K, class V>
+WeakMap<K, V>::WeakMap(JSContext* cx, JSObject* memOf)
+ : Base(cx->zone()), WeakMapBase(memOf, cx->zone()) {
+ using ElemType = typename K::ElementType;
+ using NonPtrType = std::remove_pointer_t<ElemType>;
+
+ // The object's TraceKind needs to be added to CC graph if this object is
+ // used as a WeakMap key, otherwise the key is considered to be pointed from
+ // somewhere unknown, and results in leaking the subgraph which contains the
+ // key. See the comments in NoteWeakMapsTracer::trace for more details.
+ static_assert(JS::IsCCTraceKind(NonPtrType::TraceKind),
+ "Object's TraceKind should be added to CC graph.");
+
+ zone()->gcWeakMapList().insertFront(this);
+ if (zone()->gcState() > Zone::Prepare) {
+ mapColor = CellColor::Black;
+ }
+}
+
+// Trace a WeakMap entry based on 'markedCell' getting marked, where 'origKey'
+// is the key in the weakmap. In the absence of delegates, these will be the
+// same, but when a delegate is marked then origKey will be its wrapper.
+// `markedCell` is only used for an assertion.
+template <class K, class V>
+void WeakMap<K, V>::markKey(GCMarker* marker, gc::Cell* markedCell,
+ gc::Cell* origKey) {
+#ifdef DEBUG
+ if (!mapColor) {
+ fprintf(stderr, "markKey called on an unmarked map %p", this);
+ Zone* zone = markedCell->asTenured().zoneFromAnyThread();
+ fprintf(stderr, " markedCell=%p from zone %p state %d mark %d\n",
+ markedCell, zone, zone->gcState(),
+ int(debug::GetMarkInfo(markedCell)));
+ zone = origKey->asTenured().zoneFromAnyThread();
+ fprintf(stderr, " origKey=%p from zone %p state %d mark %d\n", origKey,
+            zone, zone->gcState(), int(debug::GetMarkInfo(origKey)));
+ if (memberOf) {
+ zone = memberOf->asTenured().zoneFromAnyThread();
+ fprintf(stderr, " memberOf=%p from zone %p state %d mark %d\n",
+ memberOf.get(), zone, zone->gcState(),
+ int(debug::GetMarkInfo(memberOf.get())));
+ }
+ }
+#endif
+ MOZ_ASSERT(mapColor);
+
+ Ptr p = Base::lookup(static_cast<Lookup>(origKey));
+ // We should only be processing <weakmap,key> pairs where the key exists in
+ // the weakmap. Such pairs are inserted when a weakmap is marked, and are
+ // removed by barriers if the key is removed from the weakmap. Failure here
+ // probably means gcWeakKeys is not being properly traced during a minor GC,
+ // or the weakmap keys are not being updated when tenured.
+ MOZ_ASSERT(p.found());
+
+ mozilla::DebugOnly<gc::Cell*> oldKey = gc::ToMarkable(p->key());
+ MOZ_ASSERT((markedCell == oldKey) ||
+ (markedCell == gc::detail::GetDelegate(p->key())));
+
+ markEntry(marker, p->mutableKey(), p->value());
+ MOZ_ASSERT(oldKey == gc::ToMarkable(p->key()), "no moving GC");
+}
+
+// If the entry is live, ensure its key and value are marked. Also make sure
+// the key is at least as marked as the delegate, so it cannot get discarded
+// and then recreated by rewrapping the delegate.
+template <class K, class V>
+bool WeakMap<K, V>::markEntry(GCMarker* marker, K& key, V& value) {
+ bool marked = false;
+ JSRuntime* rt = zone()->runtimeFromAnyThread();
+ CellColor keyColor = gc::detail::GetEffectiveColor(rt, key);
+ JSObject* delegate = gc::detail::GetDelegate(key);
+
+ if (delegate) {
+ CellColor delegateColor = gc::detail::GetEffectiveColor(rt, delegate);
+ // The key needs to stay alive while both the delegate and map are live.
+ CellColor proxyPreserveColor = std::min(delegateColor, mapColor);
+ if (keyColor < proxyPreserveColor) {
+ gc::AutoSetMarkColor autoColor(*marker, proxyPreserveColor);
+ TraceWeakMapKeyEdge(marker, zone(), &key,
+ "proxy-preserved WeakMap entry key");
+ MOZ_ASSERT(key->color() >= proxyPreserveColor);
+ marked = true;
+ keyColor = proxyPreserveColor;
+ }
+ }
+
+ if (keyColor) {
+ gc::Cell* cellValue = gc::ToMarkable(&value);
+ if (cellValue) {
+ gc::AutoSetMarkColor autoColor(*marker, std::min(mapColor, keyColor));
+ CellColor valueColor = gc::detail::GetEffectiveColor(rt, cellValue);
+ if (valueColor < marker->markColor()) {
+ TraceEdge(marker, &value, "WeakMap entry value");
+ MOZ_ASSERT(cellValue->color() >= std::min(mapColor, keyColor));
+ marked = true;
+ }
+ }
+ }
+
+ return marked;
+}
+
+template <class K, class V>
+void WeakMap<K, V>::trace(JSTracer* trc) {
+ MOZ_ASSERT_IF(JS::RuntimeHeapIsBusy(), isInList());
+
+ TraceNullableEdge(trc, &memberOf, "WeakMap owner");
+
+ if (trc->isMarkingTracer()) {
+ MOZ_ASSERT(trc->weakMapAction() == JS::WeakMapTraceAction::Expand);
+ auto marker = GCMarker::fromTracer(trc);
+
+ // Don't downgrade the map color from black to gray. This can happen when a
+ // barrier pushes the map object onto the black mark stack when it's
+ // already present on the gray mark stack, which is marked later.
+ if (mapColor < marker->markColor()) {
+ mapColor = marker->markColor();
+ mozilla::Unused << markEntries(marker);
+ }
+ return;
+ }
+
+ if (trc->weakMapAction() == JS::WeakMapTraceAction::Skip) {
+ return;
+ }
+
+ // Trace keys only if weakMapAction() says to.
+ if (trc->weakMapAction() == JS::WeakMapTraceAction::TraceKeysAndValues) {
+ for (Enum e(*this); !e.empty(); e.popFront()) {
+ TraceWeakMapKeyEdge(trc, zone(), &e.front().mutableKey(),
+ "WeakMap entry key");
+ }
+ }
+
+ // Always trace all values (unless weakMapAction() is Skip).
+ for (Range r = Base::all(); !r.empty(); r.popFront()) {
+ TraceEdge(trc, &r.front().value(), "WeakMap entry value");
+ }
+}
+
+template <class K, class V>
+void WeakMap<K, V>::forgetKey(UnbarrieredKey key) {
+ // Remove the key or its delegate from weakKeys.
+ if (zone()->needsIncrementalBarrier()) {
+ JSRuntime* rt = zone()->runtimeFromMainThread();
+ if (JSObject* delegate = js::gc::detail::GetDelegate(key)) {
+ js::gc::WeakKeyTable& weakKeys = delegate->zone()->gcWeakKeys(delegate);
+ rt->gc.marker.forgetWeakKey(weakKeys, this, delegate, key);
+ } else {
+ js::gc::WeakKeyTable& weakKeys = key->zone()->gcWeakKeys(key);
+ rt->gc.marker.forgetWeakKey(weakKeys, this, key, key);
+ }
+ }
+}
+
+template <class K, class V>
+void WeakMap<K, V>::clear() {
+ Base::clear();
+ JSRuntime* rt = zone()->runtimeFromMainThread();
+ if (zone()->needsIncrementalBarrier()) {
+ rt->gc.marker.forgetWeakMap(this, zone());
+ }
+}
+
+/* static */ inline void WeakMapBase::addWeakEntry(
+ GCMarker* marker, gc::Cell* key, const gc::WeakMarkable& markable) {
+ auto& weakKeys = key->zone()->gcWeakKeys(key);
+ auto p = weakKeys.get(key);
+ if (p) {
+ gc::WeakEntryVector& weakEntries = p->value;
+ if (!weakEntries.append(markable)) {
+ marker->abortLinearWeakMarking();
+ }
+ } else {
+ gc::WeakEntryVector weakEntries;
+ MOZ_ALWAYS_TRUE(weakEntries.append(markable));
+ if (!weakKeys.put(key, std::move(weakEntries))) {
+ marker->abortLinearWeakMarking();
+ }
+ }
+}
+
+template <class K, class V>
+bool WeakMap<K, V>::markEntries(GCMarker* marker) {
+ MOZ_ASSERT(mapColor);
+ bool markedAny = false;
+
+ for (Enum e(*this); !e.empty(); e.popFront()) {
+ if (markEntry(marker, e.front().mutableKey(), e.front().value())) {
+ markedAny = true;
+ }
+ if (!marker->incrementalWeakMapMarkingEnabled && !marker->isWeakMarking()) {
+      // The weak keys table is populated when we enter weak marking mode.
+ continue;
+ }
+
+ JSRuntime* rt = zone()->runtimeFromAnyThread();
+ CellColor keyColor =
+ gc::detail::GetEffectiveColor(rt, e.front().key().get());
+
+ // Changes in the map's mark color will be handled in this code, but
+ // changes in the key's mark color are handled through the weak keys table.
+ // So we only need to populate the table if the key is less marked than the
+ // map, to catch later updates in the key's mark color.
+ if (keyColor < mapColor) {
+ MOZ_ASSERT(marker->weakMapAction() == JS::WeakMapTraceAction::Expand);
+ // The final color of the key is not yet known. Record this weakmap and
+ // the lookup key in the list of weak keys. If the key has a delegate,
+ // then the lookup key is the delegate (because marking the key will end
+ // up marking the delegate and thereby mark the entry.)
+ gc::Cell* weakKey = gc::detail::ExtractUnbarriered(e.front().key());
+ gc::WeakMarkable markable(this, weakKey);
+ if (JSObject* delegate = gc::detail::GetDelegate(e.front().key())) {
+ addWeakEntry(marker, delegate, markable);
+ } else {
+ addWeakEntry(marker, weakKey, markable);
+ }
+ }
+ }
+
+ return markedAny;
+}
+
+template <class K, class V>
+void WeakMap<K, V>::postSeverDelegate(GCMarker* marker, JSObject* key) {
+ if (mapColor) {
+ // We only stored the delegate, not the key, and we're severing the
+ // delegate from the key. So store the key.
+ gc::WeakMarkable markable(this, key);
+ addWeakEntry(marker, key, markable);
+ }
+}
+
+template <class K, class V>
+void WeakMap<K, V>::postRestoreDelegate(GCMarker* marker, JSObject* key,
+ JSObject* delegate) {
+ if (mapColor) {
+ // We had the key stored, but are removing it. Store the delegate instead.
+ gc::WeakMarkable markable(this, key);
+ addWeakEntry(marker, delegate, markable);
+ }
+}
+
+template <class K, class V>
+void WeakMap<K, V>::sweep() {
+ /* Remove all entries whose keys remain unmarked. */
+ for (Enum e(*this); !e.empty(); e.popFront()) {
+ if (gc::IsAboutToBeFinalized(&e.front().mutableKey())) {
+ e.removeFront();
+ }
+ }
+
+#if DEBUG
+ // Once we've swept, all remaining edges should stay within the known-live
+ // part of the graph.
+ assertEntriesNotAboutToBeFinalized();
+#endif
+}
+
+// memberOf can be nullptr, which means that the map is not part of a JSObject.
+template <class K, class V>
+void WeakMap<K, V>::traceMappings(WeakMapTracer* tracer) {
+ for (Range r = Base::all(); !r.empty(); r.popFront()) {
+ gc::Cell* key = gc::ToMarkable(r.front().key());
+ gc::Cell* value = gc::ToMarkable(r.front().value());
+ if (key && value) {
+ tracer->trace(memberOf, JS::GCCellPtr(r.front().key().get()),
+ JS::GCCellPtr(r.front().value().get()));
+ }
+ }
+}
+
+template <class K, class V>
+bool WeakMap<K, V>::findSweepGroupEdges() {
+ // For weakmap keys with delegates in a different zone, add a zone edge to
+ // ensure that the delegate zone finishes marking before the key zone.
+ JS::AutoSuppressGCAnalysis nogc;
+ for (Range r = all(); !r.empty(); r.popFront()) {
+ const K& key = r.front().key();
+
+ // If the key type doesn't have delegates, then this will always return
+ // nullptr and the optimizer can remove the entire body of this function.
+ JSObject* delegate = gc::detail::GetDelegate(key);
+ if (!delegate) {
+ continue;
+ }
+
+ // Marking a WeakMap key's delegate will mark the key, so process the
+ // delegate zone no later than the key zone.
+ Zone* delegateZone = delegate->zone();
+ Zone* keyZone = key->zone();
+ if (delegateZone != keyZone && delegateZone->isGCMarking() &&
+ keyZone->isGCMarking()) {
+ if (!delegateZone->addSweepGroupEdgeTo(keyZone)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+#if DEBUG
+template <class K, class V>
+void WeakMap<K, V>::assertEntriesNotAboutToBeFinalized() {
+ for (Range r = Base::all(); !r.empty(); r.popFront()) {
+ auto k = gc::detail::ExtractUnbarriered(r.front().key());
+ MOZ_ASSERT(!gc::IsAboutToBeFinalizedUnbarriered(&k));
+ JSObject* delegate = gc::detail::GetDelegate(k);
+ if (delegate) {
+ MOZ_ASSERT(!gc::IsAboutToBeFinalizedUnbarriered(&delegate),
+ "weakmap marking depends on a key tracing its delegate");
+ }
+ MOZ_ASSERT(!gc::IsAboutToBeFinalized(&r.front().value()));
+ MOZ_ASSERT(k == r.front().key());
+ }
+}
+#endif
+
+#ifdef JS_GC_ZEAL
+template <class K, class V>
+bool WeakMap<K, V>::checkMarking() const {
+ bool ok = true;
+ for (Range r = Base::all(); !r.empty(); r.popFront()) {
+ gc::Cell* key = gc::ToMarkable(r.front().key());
+ gc::Cell* value = gc::ToMarkable(r.front().value());
+ if (key && value) {
+ if (!gc::CheckWeakMapEntryMarking(this, key, value)) {
+ ok = false;
+ }
+ }
+ }
+ return ok;
+}
+#endif
+
+} /* namespace js */
+
+#endif /* gc_WeakMap_inl_h */
diff --git a/js/src/gc/WeakMap.cpp b/js/src/gc/WeakMap.cpp
new file mode 100644
index 0000000000..162cdbd6b6
--- /dev/null
+++ b/js/src/gc/WeakMap.cpp
@@ -0,0 +1,186 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/WeakMap-inl.h"
+
+#include <string.h>
+
+#include "jsapi.h"
+#include "jsfriendapi.h"
+
+#include "gc/PublicIterators.h"
+#include "js/Wrapper.h"
+#include "vm/GlobalObject.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+WeakMapBase::WeakMapBase(JSObject* memOf, Zone* zone)
+ : memberOf(memOf), zone_(zone), mapColor(CellColor::White) {
+ MOZ_ASSERT_IF(memberOf, memberOf->compartment()->zone() == zone);
+}
+
+WeakMapBase::~WeakMapBase() {
+ MOZ_ASSERT(CurrentThreadIsGCFinalizing() ||
+ CurrentThreadCanAccessZone(zone_));
+}
+
+void WeakMapBase::unmarkZone(JS::Zone* zone) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!zone->gcWeakKeys().clear()) {
+ oomUnsafe.crash("clearing weak keys table");
+ }
+ MOZ_ASSERT(zone->gcNurseryWeakKeys().count() == 0);
+
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ m->mapColor = CellColor::White;
+ }
+}
+
+void WeakMapBase::traceZone(JS::Zone* zone, JSTracer* tracer) {
+ MOZ_ASSERT(tracer->weakMapAction() != JS::WeakMapTraceAction::Skip);
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ m->trace(tracer);
+ TraceNullableEdge(tracer, &m->memberOf, "memberOf");
+ }
+}
+
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+bool WeakMapBase::checkMarkingForZone(JS::Zone* zone) {
+ // This is called at the end of marking.
+ MOZ_ASSERT(zone->isGCMarking());
+
+ bool ok = true;
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ if (m->mapColor && !m->checkMarking()) {
+ ok = false;
+ }
+ }
+
+ return ok;
+}
+#endif
+
+bool WeakMapBase::markZoneIteratively(JS::Zone* zone, GCMarker* marker) {
+ bool markedAny = false;
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ if (m->mapColor && m->markEntries(marker)) {
+ markedAny = true;
+ }
+ }
+ return markedAny;
+}
+
+bool WeakMapBase::findSweepGroupEdgesForZone(JS::Zone* zone) {
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ if (!m->findSweepGroupEdges()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void WeakMapBase::sweepZone(JS::Zone* zone) {
+ for (WeakMapBase* m = zone->gcWeakMapList().getFirst(); m;) {
+ WeakMapBase* next = m->getNext();
+ if (m->mapColor) {
+ m->sweep();
+ } else {
+ m->clearAndCompact();
+ m->removeFrom(zone->gcWeakMapList());
+ }
+ m = next;
+ }
+
+#ifdef DEBUG
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ MOZ_ASSERT(m->isInList() && m->mapColor);
+ }
+#endif
+}
+
+void WeakMapBase::traceAllMappings(WeakMapTracer* tracer) {
+ JSRuntime* rt = tracer->runtime;
+ for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ // The WeakMapTracer callback is not allowed to GC.
+ JS::AutoSuppressGCAnalysis nogc;
+ m->traceMappings(tracer);
+ }
+ }
+}
+
+bool WeakMapBase::saveZoneMarkedWeakMaps(JS::Zone* zone,
+ WeakMapColors& markedWeakMaps) {
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ if (m->mapColor && !markedWeakMaps.put(m, m->mapColor)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void WeakMapBase::restoreMarkedWeakMaps(WeakMapColors& markedWeakMaps) {
+ for (WeakMapColors::Range r = markedWeakMaps.all(); !r.empty();
+ r.popFront()) {
+ WeakMapBase* map = r.front().key();
+ MOZ_ASSERT(map->zone()->isGCMarking());
+ MOZ_ASSERT(map->mapColor == CellColor::White);
+ map->mapColor = r.front().value();
+ }
+}
+
+size_t ObjectValueWeakMap::sizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) {
+ return mallocSizeOf(this) + shallowSizeOfExcludingThis(mallocSizeOf);
+}
+
+ObjectWeakMap::ObjectWeakMap(JSContext* cx) : map(cx, nullptr) {}
+
+JSObject* ObjectWeakMap::lookup(const JSObject* obj) {
+ if (ObjectValueWeakMap::Ptr p = map.lookup(const_cast<JSObject*>(obj))) {
+ return &p->value().toObject();
+ }
+ return nullptr;
+}
+
+bool ObjectWeakMap::add(JSContext* cx, JSObject* obj, JSObject* target) {
+ MOZ_ASSERT(obj && target);
+
+ Value targetVal(ObjectValue(*target));
+ if (!map.putNew(obj, targetVal)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+void ObjectWeakMap::remove(JSObject* key) {
+ MOZ_ASSERT(key);
+ map.remove(key);
+}
+
+void ObjectWeakMap::clear() { map.clear(); }
+
+void ObjectWeakMap::trace(JSTracer* trc) { map.trace(trc); }
+
+size_t ObjectWeakMap::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return map.shallowSizeOfExcludingThis(mallocSizeOf);
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void ObjectWeakMap::checkAfterMovingGC() {
+ for (ObjectValueWeakMap::Range r = map.all(); !r.empty(); r.popFront()) {
+ CheckGCThingAfterMovingGC(r.front().key().get());
+ CheckGCThingAfterMovingGC(&r.front().value().toObject());
+ }
+}
+#endif // JSGC_HASH_TABLE_CHECKS
diff --git a/js/src/gc/WeakMap.h b/js/src/gc/WeakMap.h
new file mode 100644
index 0000000000..f95cc155aa
--- /dev/null
+++ b/js/src/gc/WeakMap.h
@@ -0,0 +1,442 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_WeakMap_h
+#define gc_WeakMap_h
+
+#include "mozilla/LinkedList.h"
+
+#include "gc/Barrier.h"
+#include "gc/Tracer.h"
+#include "gc/ZoneAllocator.h"
+#include "js/HashTable.h"
+#include "js/HeapAPI.h"
+#include "js/shadow/Zone.h" // JS::shadow::Zone
+
+namespace js {
+
+class GCMarker;
+class WeakMapBase;
+struct WeakMapTracer;
+
+extern void DumpWeakMapLog(JSRuntime* rt);
+
+namespace gc {
+
+struct WeakMarkable;
+
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+// Check whether a weak map entry is marked correctly.
+bool CheckWeakMapEntryMarking(const WeakMapBase* map, Cell* key, Cell* value);
+#endif
+
+} // namespace gc
+
+// A subclass template of js::HashMap whose keys and values may be
+// garbage-collected. When a key is collected, the table entry disappears,
+// dropping its reference to the value.
+//
+// More precisely:
+//
+// A WeakMap entry is live if and only if both the WeakMap and the entry's
+// key are live. An entry holds a strong reference to its value.
+//
+// You must call this table's 'trace' method when its owning object is reached
+// by the garbage collection tracer. Once a table is known to be live, the
+// implementation takes care of the special weak marking (ie, marking through
+// the implicit edges stored in the map) and of removing (sweeping) table
+// entries when collection is complete.
+
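+// As an illustrative sketch only (the owner class and member names below are
+// invented for this example and are not part of this header), an embedding
+// object typically owns a WeakMap and forwards tracing to it:
+//
+//   class ObjectValueCache {
+//     js::WeakMap<js::HeapPtr<JSObject*>, js::HeapPtr<JS::Value>> map_;
+//
+//    public:
+//     explicit ObjectValueCache(JSContext* cx) : map_(cx) {}
+//
+//     // Called from the owner's trace hook so the GC sees the map as live
+//     // and can perform the weak marking and sweeping described above.
+//     void trace(JSTracer* trc) { map_.trace(trc); }
+//
+//     bool put(JSObject* key, const JS::Value& v) {
+//       return map_.put(key, v);
+//     }
+//   };
+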
+// WeakMaps are marked with an incremental linear-time algorithm that handles
+// all orderings of map and key marking. The basic algorithm is:
+//
+// At first while marking, do nothing special when marking WeakMap keys (there
+// is no straightforward way to know whether a particular object is being used
+// as a key in some weakmap.) When a WeakMap is marked, scan through it to mark
+// all entries with live keys, and collect all unmarked keys into a "weak keys"
+// table.
+//
+// At some point, everything reachable has been marked. At this point, enter
+// "weak marking mode". In this mode, whenever any object is marked, look it up
+// in the weak keys table to see if it is the key for any WeakMap entry and if
+// so, mark the value. When entering weak marking mode, scan the weak key table
+// to find all keys that have been marked since we added them to the table, and
+// mark those entries.
+//
+// In addition, we want weakmap marking to work incrementally. So WeakMap
+// mutations are barriered to keep the weak keys table up to date: entries are
+// removed if their key is removed from the table, etc.
+//
+// You can break down various ways that WeakMap values get marked based on the
+// order that the map and key are marked. All of these assume the map and key
+// get marked at some point:
+//
+// key marked, then map marked:
+// - value was marked with map in `markEntries()`
+// map marked, key already in map, key marked before weak marking mode:
+// - key added to weakKeys when map marked in `markEntries()`
+// - value marked during `enterWeakMarkingMode`
+// map marked, key already in map, key marked after weak marking mode:
+// - when key is marked, weakKeys[key] triggers marking of value in
+// `markImplicitEdges()`
+// map marked, key inserted into map, key marked:
+// - value marked by insert barrier in `barrierForInsert`
+//
+
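+// A compressed sketch of the weak marking mode interaction described above.
+// The function and container names here are illustrative only; they are not
+// the real GCMarker interface:
+//
+//   // Conceptually a GCMarker method; names are simplified.
+//   void markCell(Cell* cell) {
+//     setMarkBit(cell);
+//     if (inWeakMarkingMode) {
+//       // weakKeys maps a key (or its delegate) to the <map, key> entries
+//       // that become markable once that cell is marked.
+//       for (WeakMarkable& markable : weakKeys.entriesFor(cell)) {
+//         markable.weakmap->markKey(this, cell, markable.key);
+//       }
+//     }
+//   }
+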
+using WeakMapColors = HashMap<WeakMapBase*, js::gc::CellColor,
+ DefaultHasher<WeakMapBase*>, SystemAllocPolicy>;
+
+// Common base class for all WeakMap specializations, used for calling
+// subclasses' GC-related methods.
+class WeakMapBase : public mozilla::LinkedListElement<WeakMapBase> {
+ friend class js::GCMarker;
+
+ public:
+ using CellColor = js::gc::CellColor;
+
+ WeakMapBase(JSObject* memOf, JS::Zone* zone);
+ virtual ~WeakMapBase();
+
+ JS::Zone* zone() const { return zone_; }
+
+ // Garbage collector entry points.
+
+ // Unmark all weak maps in a zone.
+ static void unmarkZone(JS::Zone* zone);
+
+ // Trace all the weakmaps in a zone.
+ static void traceZone(JS::Zone* zone, JSTracer* tracer);
+
+ // Check all weak maps in a zone that have been marked as live in this garbage
+ // collection, and mark the values of all entries that have become strong
+ // references to them. Return true if we marked any new values, indicating
+ // that we need to make another pass. In other words, mark my marked maps'
+  // marked members mid-collection.
+ static bool markZoneIteratively(JS::Zone* zone, GCMarker* marker);
+
+ // Add zone edges for weakmaps with key delegates in a different zone.
+ static MOZ_MUST_USE bool findSweepGroupEdgesForZone(JS::Zone* zone);
+
+ // Sweep the weak maps in a zone, removing dead weak maps and removing
+ // entries of live weak maps whose keys are dead.
+ static void sweepZone(JS::Zone* zone);
+
+ // Sweep the marked weak maps in a zone, updating moved keys.
+ static void sweepZoneAfterMinorGC(JS::Zone* zone);
+
+ // Trace all weak map bindings. Used by the cycle collector.
+ static void traceAllMappings(WeakMapTracer* tracer);
+
+ // Save information about which weak maps are marked for a zone.
+ static bool saveZoneMarkedWeakMaps(JS::Zone* zone,
+ WeakMapColors& markedWeakMaps);
+
+ // Restore information about which weak maps are marked for many zones.
+ static void restoreMarkedWeakMaps(WeakMapColors& markedWeakMaps);
+
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+ static bool checkMarkingForZone(JS::Zone* zone);
+#endif
+
+ protected:
+ // Instance member functions called by the above. Instantiations of WeakMap
+ // override these with definitions appropriate for their Key and Value types.
+ virtual void trace(JSTracer* tracer) = 0;
+ virtual bool findSweepGroupEdges() = 0;
+ virtual void sweep() = 0;
+ virtual void traceMappings(WeakMapTracer* tracer) = 0;
+ virtual void clearAndCompact() = 0;
+
+ // We have a key that, if it or its delegate is marked, may lead to a WeakMap
+ // value getting marked. Insert it or its delegate (if any) into the
+ // appropriate zone's gcWeakKeys or gcNurseryWeakKeys.
+ static inline void addWeakEntry(GCMarker* marker, gc::Cell* key,
+ const gc::WeakMarkable& markable);
+
+ // Any weakmap key types that want to participate in the non-iterative
+ // ephemeron marking must override this method.
+ virtual void markKey(GCMarker* marker, gc::Cell* markedCell, gc::Cell* l) = 0;
+
+ // An unmarked CCW with a delegate will add a weakKeys entry for the
+ // delegate. If the delegate is removed with NukeCrossCompartmentWrapper,
+ // then the (former) CCW needs to be added to weakKeys instead.
+ virtual void postSeverDelegate(GCMarker* marker, JSObject* key) = 0;
+
+ // When a wrapper is remapped, it will have its delegate removed then
+ // re-added. Update the delegate zone's gcWeakKeys accordingly.
+ virtual void postRestoreDelegate(GCMarker* marker, JSObject* key,
+ JSObject* delegate) = 0;
+
+ virtual bool markEntries(GCMarker* marker) = 0;
+
+#ifdef JS_GC_ZEAL
+ virtual bool checkMarking() const = 0;
+ virtual bool allowKeysInOtherZones() const { return false; }
+ friend bool gc::CheckWeakMapEntryMarking(const WeakMapBase*, gc::Cell*,
+ gc::Cell*);
+#endif
+
+ // Object that this weak map is part of, if any.
+ HeapPtrObject memberOf;
+
+ // Zone containing this weak map.
+ JS::Zone* zone_;
+
+  // Whether this object has been marked during garbage collection and, if so,
+  // which color it was marked.
+ gc::CellColor mapColor;
+
+ friend class JS::Zone;
+};
+
+namespace detail {
+
+template <typename T>
+struct RemoveBarrier {};
+
+template <typename T>
+struct RemoveBarrier<js::HeapPtr<T>> {
+ using Type = T;
+};
+
+} // namespace detail
+
+template <class Key, class Value>
+class WeakMap
+ : private HashMap<Key, Value, MovableCellHasher<Key>, ZoneAllocPolicy>,
+ public WeakMapBase {
+ public:
+ using Base = HashMap<Key, Value, MovableCellHasher<Key>, ZoneAllocPolicy>;
+
+ using Lookup = typename Base::Lookup;
+ using Entry = typename Base::Entry;
+ using Range = typename Base::Range;
+ using Ptr = typename Base::Ptr;
+ using AddPtr = typename Base::AddPtr;
+
+ struct Enum : public Base::Enum {
+ explicit Enum(WeakMap& map) : Base::Enum(static_cast<Base&>(map)) {}
+ };
+
+ using Base::all;
+ using Base::clear;
+ using Base::has;
+ using Base::shallowSizeOfExcludingThis;
+
+ // Resolve ambiguity with LinkedListElement<>::remove.
+ using Base::remove;
+
+ using UnbarrieredKey = typename detail::RemoveBarrier<Key>::Type;
+
+ explicit WeakMap(JSContext* cx, JSObject* memOf = nullptr);
+
+ // Add a read barrier to prevent an incorrectly gray value from escaping the
+ // weak map. See the UnmarkGrayTracer::onChild comment in gc/Marking.cpp.
+ Ptr lookup(const Lookup& l) const {
+ Ptr p = Base::lookup(l);
+ if (p) {
+ exposeGCThingToActiveJS(p->value());
+ }
+ return p;
+ }
+
+ Ptr lookupUnbarriered(const Lookup& l) const { return Base::lookup(l); }
+
+ AddPtr lookupForAdd(const Lookup& l) {
+ AddPtr p = Base::lookupForAdd(l);
+ if (p) {
+ exposeGCThingToActiveJS(p->value());
+ }
+ return p;
+ }
+
+ void remove(Ptr p) {
+ MOZ_ASSERT(p.found());
+ if (mapColor) {
+ forgetKey(p->key());
+ }
+ Base::remove(p);
+ }
+
+ void remove(const Lookup& l) {
+ if (Ptr p = lookup(l)) {
+ remove(p);
+ }
+ }
+
+ void clear();
+
+ template <typename KeyInput, typename ValueInput>
+ MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+ MOZ_ASSERT(k);
+ if (!Base::add(p, std::forward<KeyInput>(k), std::forward<ValueInput>(v))) {
+ return false;
+ }
+ barrierForInsert(p->key(), p->value());
+ return true;
+ }
+
+ template <typename KeyInput, typename ValueInput>
+ MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+ MOZ_ASSERT(k);
+ if (!Base::relookupOrAdd(p, std::forward<KeyInput>(k),
+ std::forward<ValueInput>(v))) {
+ return false;
+ }
+ barrierForInsert(p->key(), p->value());
+ return true;
+ }
+
+ template <typename KeyInput, typename ValueInput>
+ MOZ_MUST_USE bool put(KeyInput&& k, ValueInput&& v) {
+ MOZ_ASSERT(k);
+ AddPtr p = lookupForAdd(k);
+ if (p) {
+ p->value() = std::forward<ValueInput>(v);
+ return true;
+ }
+ return add(p, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ }
+
+ template <typename KeyInput, typename ValueInput>
+ MOZ_MUST_USE bool putNew(KeyInput&& k, ValueInput&& v) {
+ MOZ_ASSERT(k);
+ barrierForInsert(k, v);
+ return Base::putNew(std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ }
+
+ template <typename KeyInput, typename ValueInput>
+ void putNewInfallible(KeyInput&& k, ValueInput&& v) {
+ MOZ_ASSERT(k);
+ barrierForInsert(k, v);
+    Base::putNewInfallible(std::forward<KeyInput>(k),
+                           std::forward<ValueInput>(v));
+ }
+
+#ifdef DEBUG
+ template <typename KeyInput, typename ValueInput>
+ bool hasEntry(KeyInput&& key, ValueInput&& value) {
+ Ptr p = Base::lookup(std::forward<KeyInput>(key));
+ return p && p->value() == value;
+ }
+#endif
+
+ void markKey(GCMarker* marker, gc::Cell* markedCell,
+ gc::Cell* origKey) override;
+
+ bool markEntry(GCMarker* marker, Key& key, Value& value);
+
+ // 'key' has lost its delegate, update our weak key state.
+ void postSeverDelegate(GCMarker* marker, JSObject* key) override;
+
+ // 'key' regained its delegate, update our weak key state.
+ void postRestoreDelegate(GCMarker* marker, JSObject* key,
+ JSObject* delegate) override;
+
+ void trace(JSTracer* trc) override;
+
+ protected:
+ inline void forgetKey(UnbarrieredKey key);
+
+ void barrierForInsert(Key k, const Value& v) {
+ assertMapIsSameZoneWithValue(v);
+ if (!mapColor) {
+ return;
+ }
+ auto mapZone = JS::shadow::Zone::from(zone());
+ if (!mapZone->needsIncrementalBarrier()) {
+ return;
+ }
+
+ JSTracer* trc = mapZone->barrierTracer();
+ Value tmp = v;
+ TraceEdge(trc, &tmp, "weakmap inserted value");
+ MOZ_ASSERT(tmp == v);
+ }
+
+ inline void assertMapIsSameZoneWithValue(const Value& v);
+
+ bool markEntries(GCMarker* marker) override;
+
+ protected:
+ // Find sweep group edges for delegates, if the key type has delegates. (If
+ // not, the optimizer should make this a nop.)
+ bool findSweepGroupEdges() override;
+
+ /**
+ * If a wrapper is used as a key in a weakmap, the garbage collector should
+ * keep that object around longer than it otherwise would. We want to avoid
+ * collecting the wrapper (and removing the weakmap entry) as long as the
+ * wrapped object is alive (because the object can be rewrapped and looked up
+ * again). As long as the wrapper is used as a weakmap key, it will not be
+ * collected (and remain in the weakmap) until the wrapped object is
+ * collected.
+ */
+ private:
+ void exposeGCThingToActiveJS(const JS::Value& v) const {
+ JS::ExposeValueToActiveJS(v);
+ }
+ void exposeGCThingToActiveJS(JSObject* obj) const {
+ JS::ExposeObjectToActiveJS(obj);
+ }
+
+ void sweep() override;
+
+ void clearAndCompact() override {
+ Base::clear();
+ Base::compact();
+ }
+
+ // memberOf can be nullptr, which means that the map is not part of a
+ // JSObject.
+ void traceMappings(WeakMapTracer* tracer) override;
+
+ protected:
+#if DEBUG
+ void assertEntriesNotAboutToBeFinalized();
+#endif
+
+#ifdef JS_GC_ZEAL
+ bool checkMarking() const override;
+#endif
+};
+
+class ObjectValueWeakMap : public WeakMap<HeapPtr<JSObject*>, HeapPtr<Value>> {
+ public:
+ ObjectValueWeakMap(JSContext* cx, JSObject* obj) : WeakMap(cx, obj) {}
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf);
+};
+
+// Generic weak map for mapping objects to other objects.
+class ObjectWeakMap {
+ ObjectValueWeakMap map;
+
+ public:
+ explicit ObjectWeakMap(JSContext* cx);
+
+ JS::Zone* zone() const { return map.zone(); }
+
+ JSObject* lookup(const JSObject* obj);
+ bool add(JSContext* cx, JSObject* obj, JSObject* target);
+ void remove(JSObject* key);
+ void clear();
+
+ void trace(JSTracer* trc);
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ ObjectValueWeakMap& valueMap() { return map; }
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkAfterMovingGC();
+#endif
+};
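+
+// Usage sketch (illustrative only; `cx`, `obj` and `target` stand in for
+// caller-provided values). The owner of an ObjectWeakMap must also forward
+// its trace hook to ObjectWeakMap::trace so entries are handled correctly:
+//
+//   js::ObjectWeakMap cache(cx);
+//   if (!cache.add(cx, obj, target)) {
+//     return false;  // add() has already reported the OOM.
+//   }
+//   JSObject* found = cache.lookup(obj);  // target, while `obj` remains live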
+
+} /* namespace js */
+
+#endif /* gc_WeakMap_h */
diff --git a/js/src/gc/WeakMapPtr.cpp b/js/src/gc/WeakMapPtr.cpp
new file mode 100644
index 0000000000..89eab42a9d
--- /dev/null
+++ b/js/src/gc/WeakMapPtr.cpp
@@ -0,0 +1,114 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/WeakMapPtr.h"
+
+#include "gc/WeakMap-inl.h"
+
+//
+// Machinery for the externally-linkable JS::WeakMapPtr, which wraps js::WeakMap
+// for a few public data types.
+//
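+// A usage sketch (illustrative only; `cx`, `key` and `value` stand in for
+// caller-provided values, and the embedder is expected to call trace() from
+// its own tracing hook while the map is in use):
+//
+//   JS::WeakMapPtr<JSObject*, JSObject*> map;
+//   if (!map.init(cx)) {
+//     return false;
+//   }
+//   if (!map.put(cx, key, value)) {
+//     return false;
+//   }
+//   JSObject* found = map.lookup(key);  // nullptr once `key` is collected
+//   map.destroy();
+//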
+
+using namespace js;
+
+namespace WeakMapDetails {
+
+template <typename T>
+struct DataType {};
+
+template <>
+struct DataType<JSObject*> {
+ using BarrieredType = HeapPtr<JSObject*>;
+ using HasherType = MovableCellHasher<BarrieredType>;
+ static JSObject* NullValue() { return nullptr; }
+};
+
+template <>
+struct DataType<JS::Value> {
+ using BarrieredType = HeapPtr<Value>;
+ static JS::Value NullValue() { return JS::UndefinedValue(); }
+};
+
+template <typename K, typename V>
+struct Utils {
+ using KeyType = typename DataType<K>::BarrieredType;
+ using ValueType = typename DataType<V>::BarrieredType;
+  using Type = WeakMap<KeyType, ValueType>;
+ using PtrType = Type*;
+ static PtrType cast(void* ptr) { return static_cast<PtrType>(ptr); }
+};
+
+} // namespace WeakMapDetails
+
+template <typename K, typename V>
+void JS::WeakMapPtr<K, V>::destroy() {
+ MOZ_ASSERT(initialized());
+ js_delete(WeakMapDetails::Utils<K, V>::cast(ptr));
+ ptr = nullptr;
+}
+
+template <typename K, typename V>
+bool JS::WeakMapPtr<K, V>::init(JSContext* cx) {
+ MOZ_ASSERT(!initialized());
+ typename WeakMapDetails::Utils<K, V>::PtrType map =
+ cx->new_<typename WeakMapDetails::Utils<K, V>::Type>(cx);
+ if (!map) {
+ return false;
+ }
+ ptr = map;
+ return true;
+}
+
+template <typename K, typename V>
+void JS::WeakMapPtr<K, V>::trace(JSTracer* trc) {
+ MOZ_ASSERT(initialized());
+ return WeakMapDetails::Utils<K, V>::cast(ptr)->trace(trc);
+}
+
+template <typename K, typename V>
+V JS::WeakMapPtr<K, V>::lookup(const K& key) {
+ MOZ_ASSERT(initialized());
+ typename WeakMapDetails::Utils<K, V>::Type::Ptr result =
+ WeakMapDetails::Utils<K, V>::cast(ptr)->lookup(key);
+ if (!result) {
+ return WeakMapDetails::DataType<V>::NullValue();
+ }
+ return result->value();
+}
+
+template <typename K, typename V>
+bool JS::WeakMapPtr<K, V>::put(JSContext* cx, const K& key, const V& value) {
+ MOZ_ASSERT(initialized());
+ return WeakMapDetails::Utils<K, V>::cast(ptr)->put(key, value);
+}
+
+template <typename K, typename V>
+V JS::WeakMapPtr<K, V>::removeValue(const K& key) {
+  using Map = typename WeakMapDetails::Utils<K, V>::Type;
+ using Ptr = typename Map::Ptr;
+
+ MOZ_ASSERT(initialized());
+
+ Map* map = WeakMapDetails::Utils<K, V>::cast(ptr);
+ if (Ptr result = map->lookup(key)) {
+ V value = result->value();
+ map->remove(result);
+ return value;
+ }
+ return WeakMapDetails::DataType<V>::NullValue();
+}
+
+//
+// Supported specializations of JS::WeakMap:
+//
+
+template class JS_PUBLIC_API JS::WeakMapPtr<JSObject*, JSObject*>;
+
+#ifdef DEBUG
+// Nobody's using this at the moment, but we want to make sure it compiles.
+template class JS_PUBLIC_API JS::WeakMapPtr<JSObject*, JS::Value>;
+#endif
diff --git a/js/src/gc/Zone-inl.h b/js/src/gc/Zone-inl.h
new file mode 100644
index 0000000000..736b5f7d1f
--- /dev/null
+++ b/js/src/gc/Zone-inl.h
@@ -0,0 +1,117 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Zone_inl_h
+#define gc_Zone_inl_h
+
+#include "gc/Zone.h"
+
+#include "vm/Runtime.h"
+
+/* static */ inline js::HashNumber JS::Zone::UniqueIdToHash(uint64_t uid) {
+ return mozilla::HashGeneric(uid);
+}
+
+inline bool JS::Zone::getHashCode(js::gc::Cell* cell, js::HashNumber* hashp) {
+ uint64_t uid;
+ if (!getOrCreateUniqueId(cell, &uid)) {
+ return false;
+ }
+ *hashp = UniqueIdToHash(uid);
+ return true;
+}
+
+inline bool JS::Zone::maybeGetUniqueId(js::gc::Cell* cell, uint64_t* uidp) {
+ MOZ_ASSERT(uidp);
+ MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
+
+ // Get an existing uid, if one has been set.
+ auto p = uniqueIds().lookup(cell);
+ if (p) {
+ *uidp = p->value();
+ }
+
+ return p.found();
+}
+
+inline bool JS::Zone::getOrCreateUniqueId(js::gc::Cell* cell, uint64_t* uidp) {
+ MOZ_ASSERT(uidp);
+ MOZ_ASSERT(js::CurrentThreadCanAccessZone(this) ||
+ js::CurrentThreadIsPerformingGC());
+
+ // Get an existing uid, if one has been set.
+ auto p = uniqueIds().lookupForAdd(cell);
+ if (p) {
+ *uidp = p->value();
+ return true;
+ }
+
+ MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
+
+ // Set a new uid on the cell.
+ *uidp = js::gc::NextCellUniqueId(runtimeFromAnyThread());
+ if (!uniqueIds().add(p, cell, *uidp)) {
+ return false;
+ }
+
+  // If the cell is in the nursery (hopefully unlikely), we need to tell the
+  // nursery about it so that it can sweep the uid if the cell does not get
+  // tenured.
+ if (IsInsideNursery(cell) &&
+ !runtimeFromMainThread()->gc.nursery().addedUniqueIdToCell(cell)) {
+ uniqueIds().remove(cell);
+ return false;
+ }
+
+ return true;
+}
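+
+// Illustrative sketch (not part of this header; `zone` and `cell` stand in
+// for caller-provided values): the hash is derived from a per-cell unique id
+// rather than from the cell's address, so it stays stable across moving GC,
+// which is what MovableCellHasher relies on. Roughly:
+//
+//   js::HashNumber hash;
+//   if (!zone->getHashCode(cell, &hash)) {
+//     return false;  // OOM while creating the unique id.
+//   }
+//   // `hash` remains valid even if a later GC moves `cell`.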
+
+inline js::HashNumber JS::Zone::getHashCodeInfallible(js::gc::Cell* cell) {
+ return UniqueIdToHash(getUniqueIdInfallible(cell));
+}
+
+inline uint64_t JS::Zone::getUniqueIdInfallible(js::gc::Cell* cell) {
+ uint64_t uid;
+ js::AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!getOrCreateUniqueId(cell, &uid)) {
+ oomUnsafe.crash("failed to allocate uid");
+ }
+ return uid;
+}
+
+inline bool JS::Zone::hasUniqueId(js::gc::Cell* cell) {
+ MOZ_ASSERT(js::CurrentThreadCanAccessZone(this) ||
+ js::CurrentThreadIsPerformingGC());
+ return uniqueIds().has(cell);
+}
+
+inline void JS::Zone::transferUniqueId(js::gc::Cell* tgt, js::gc::Cell* src) {
+ MOZ_ASSERT(src != tgt);
+ MOZ_ASSERT(!IsInsideNursery(tgt));
+ MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
+ MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
+ MOZ_ASSERT(!uniqueIds().has(tgt));
+ uniqueIds().rekeyIfMoved(src, tgt);
+}
+
+inline void JS::Zone::removeUniqueId(js::gc::Cell* cell) {
+ MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
+ uniqueIds().remove(cell);
+}
+
+inline void JS::Zone::adoptUniqueIds(JS::Zone* source) {
+ js::AutoEnterOOMUnsafeRegion oomUnsafe;
+ for (js::gc::UniqueIdMap::Enum e(source->uniqueIds()); !e.empty();
+ e.popFront()) {
+ MOZ_ASSERT(!uniqueIds().has(e.front().key()));
+ if (!uniqueIds().put(e.front().key(), e.front().value())) {
+ oomUnsafe.crash("failed to transfer unique ids from off-thread");
+ }
+ }
+ source->uniqueIds().clear();
+}
+
+#endif // gc_Zone_inl_h
diff --git a/js/src/gc/Zone.cpp b/js/src/gc/Zone.cpp
new file mode 100644
index 0000000000..70f260a5bc
--- /dev/null
+++ b/js/src/gc/Zone.cpp
@@ -0,0 +1,979 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Zone-inl.h"
+#include "js/shadow/Zone.h" // JS::shadow::Zone
+
+#include <type_traits>
+
+#include "gc/FreeOp.h"
+#include "gc/GCLock.h"
+#include "gc/Policy.h"
+#include "gc/PublicIterators.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Invalidation.h"
+#include "jit/Ion.h"
+#include "jit/JitZone.h"
+#include "vm/Runtime.h"
+#include "wasm/WasmInstance.h"
+
+#include "debugger/DebugAPI-inl.h"
+#include "gc/GC-inl.h"
+#include "gc/Marking-inl.h"
+#include "gc/Nursery-inl.h"
+#include "gc/WeakMap-inl.h"
+#include "vm/JSScript-inl.h"
+#include "vm/Realm-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+Zone* const Zone::NotOnList = reinterpret_cast<Zone*>(1);
+
+ZoneAllocator::ZoneAllocator(JSRuntime* rt, Kind kind)
+ : JS::shadow::Zone(rt, &rt->gc.marker, kind),
+ gcHeapSize(&rt->gc.heapSize),
+ mallocHeapSize(nullptr),
+ jitHeapSize(nullptr),
+ jitHeapThreshold(jit::MaxCodeBytesPerProcess * 0.8) {}
+
+ZoneAllocator::~ZoneAllocator() {
+#ifdef DEBUG
+ mallocTracker.checkEmptyOnDestroy();
+ MOZ_ASSERT(gcHeapSize.bytes() == 0);
+ MOZ_ASSERT(mallocHeapSize.bytes() == 0);
+ MOZ_ASSERT(jitHeapSize.bytes() == 0);
+#endif
+}
+
+void ZoneAllocator::fixupAfterMovingGC() {
+#ifdef DEBUG
+ mallocTracker.fixupAfterMovingGC();
+#endif
+}
+
+void js::ZoneAllocator::updateMemoryCountersOnGCStart() {
+ gcHeapSize.updateOnGCStart();
+ mallocHeapSize.updateOnGCStart();
+}
+
+void js::ZoneAllocator::updateGCStartThresholds(
+ GCRuntime& gc, JSGCInvocationKind invocationKind,
+ const js::AutoLockGC& lock) {
+ bool isAtomsZone = JS::Zone::from(this)->isAtomsZone();
+ gcHeapThreshold.updateStartThreshold(gcHeapSize.retainedBytes(),
+ invocationKind, gc.tunables,
+ gc.schedulingState, isAtomsZone, lock);
+ mallocHeapThreshold.updateStartThreshold(mallocHeapSize.retainedBytes(),
+ gc.tunables, lock);
+}
+
+void js::ZoneAllocator::setGCSliceThresholds(GCRuntime& gc) {
+ gcHeapThreshold.setSliceThreshold(this, gcHeapSize, gc.tunables);
+ mallocHeapThreshold.setSliceThreshold(this, mallocHeapSize, gc.tunables);
+ jitHeapThreshold.setSliceThreshold(this, jitHeapSize, gc.tunables);
+}
+
+void js::ZoneAllocator::clearGCSliceThresholds() {
+ gcHeapThreshold.clearSliceThreshold();
+ mallocHeapThreshold.clearSliceThreshold();
+ jitHeapThreshold.clearSliceThreshold();
+}
+
+bool ZoneAllocator::addSharedMemory(void* mem, size_t nbytes, MemoryUse use) {
+ // nbytes can be zero here for SharedArrayBuffers.
+
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+
+ auto ptr = sharedMemoryUseCounts.lookupForAdd(mem);
+ MOZ_ASSERT_IF(ptr, ptr->value().use == use);
+
+ if (!ptr && !sharedMemoryUseCounts.add(ptr, mem, gc::SharedMemoryUse(use))) {
+ return false;
+ }
+
+ ptr->value().count++;
+
+ // Allocations can grow, so add any increase over the previous size and record
+ // the new size.
+ if (nbytes > ptr->value().nbytes) {
+ mallocHeapSize.addBytes(nbytes - ptr->value().nbytes);
+ ptr->value().nbytes = nbytes;
+ }
+
+ maybeTriggerGCOnMalloc();
+
+ return true;
+}
+
+void ZoneAllocator::removeSharedMemory(void* mem, size_t nbytes,
+ MemoryUse use) {
+ // nbytes can be zero here for SharedArrayBuffers.
+
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+ MOZ_ASSERT(CurrentThreadIsGCFinalizing());
+
+ auto ptr = sharedMemoryUseCounts.lookup(mem);
+
+ MOZ_ASSERT(ptr);
+ MOZ_ASSERT(ptr->value().count != 0);
+ MOZ_ASSERT(ptr->value().use == use);
+ MOZ_ASSERT(ptr->value().nbytes >= nbytes);
+
+ ptr->value().count--;
+ if (ptr->value().count == 0) {
+ mallocHeapSize.removeBytes(ptr->value().nbytes, true);
+ sharedMemoryUseCounts.remove(ptr);
+ }
+}
+
+void ZoneAllocPolicy::decMemory(size_t nbytes) {
+ // Unfortunately we don't have enough context here to know whether we're being
+ // called on behalf of the collector so we have to do a TLS lookup to find
+ // out.
+ JSContext* cx = TlsContext.get();
+ zone_->decNonGCMemory(this, nbytes, MemoryUse::ZoneAllocPolicy,
+ cx->defaultFreeOp()->isCollecting());
+}
+
+JS::Zone::Zone(JSRuntime* rt, Kind kind)
+ : ZoneAllocator(rt, kind),
+ // Note: don't use |this| before initializing helperThreadUse_!
+ // ProtectedData checks in CheckZone::check may read this field.
+ helperThreadUse_(HelperThreadUse::None),
+ helperThreadOwnerContext_(nullptr),
+ arenas(this),
+ data(this, nullptr),
+ tenuredBigInts(this, 0),
+ nurseryAllocatedStrings(this, 0),
+ markedStrings(this, 0),
+ finalizedStrings(this, 0),
+ allocNurseryStrings(this, true),
+ allocNurseryBigInts(this, true),
+ suppressAllocationMetadataBuilder(this, false),
+ previousGCStringStats(this),
+ stringStats(this),
+ uniqueIds_(this),
+ tenuredAllocsSinceMinorGC_(0),
+ gcWeakMapList_(this),
+ compartments_(),
+ crossZoneStringWrappers_(this),
+ gcGrayRoots_(this),
+ weakCaches_(this),
+ gcWeakKeys_(this, SystemAllocPolicy(), rt->randomHashCodeScrambler()),
+ gcNurseryWeakKeys_(this, SystemAllocPolicy(),
+ rt->randomHashCodeScrambler()),
+ typeDescrObjects_(this, this),
+ markedAtoms_(this),
+ atomCache_(this),
+ externalStringCache_(this),
+ functionToStringCache_(this),
+ propertyTree_(this, this),
+ baseShapes_(this, this),
+ initialShapes_(this, this),
+ nurseryShapes_(this),
+ finalizationRegistries_(this, this),
+ finalizationRecordMap_(this, this),
+ jitZone_(this, nullptr),
+ gcScheduled_(false),
+ gcScheduledSaved_(false),
+ gcPreserveCode_(false),
+ keepShapeCaches_(this, false),
+ wasCollected_(false),
+ listNext_(NotOnList),
+ weakRefMap_(this, this),
+ keptObjects(this, this) {
+ /* Ensure that there are no vtables to mess us up here. */
+ MOZ_ASSERT(reinterpret_cast<JS::shadow::Zone*>(this) ==
+ static_cast<JS::shadow::Zone*>(this));
+ MOZ_ASSERT_IF(isAtomsZone(), !rt->unsafeAtomsZone());
+ MOZ_ASSERT_IF(isSelfHostingZone(), !rt->hasInitializedSelfHosting());
+
+ // We can't call updateGCStartThresholds until the Zone has been constructed.
+ AutoLockGC lock(rt);
+ updateGCStartThresholds(rt->gc, GC_NORMAL, lock);
+}
+
+Zone::~Zone() {
+ MOZ_ASSERT(helperThreadUse_ == HelperThreadUse::None);
+ MOZ_ASSERT(gcWeakMapList().isEmpty());
+ MOZ_ASSERT_IF(regExps_.ref(), regExps().empty());
+
+ JSRuntime* rt = runtimeFromAnyThread();
+ if (this == rt->gc.systemZone) {
+ MOZ_ASSERT(isSystemZone());
+ rt->gc.systemZone = nullptr;
+ }
+
+ js_delete(jitZone_.ref());
+}
+
+bool Zone::init() {
+  regExps_.ref() = MakeUnique<RegExpZone>(this);
+ return regExps_.ref() && gcWeakKeys().init() && gcNurseryWeakKeys().init();
+}
+
+void Zone::setNeedsIncrementalBarrier(bool needs) {
+ needsIncrementalBarrier_ = needs;
+}
+
+void Zone::changeGCState(GCState prev, GCState next) {
+ MOZ_ASSERT(RuntimeHeapIsBusy());
+ MOZ_ASSERT(canCollect());
+ MOZ_ASSERT(gcState() == prev);
+
+ // This can be called when barriers have been temporarily disabled by
+ // AutoDisableBarriers. In that case, don't update needsIncrementalBarrier_
+ // and barriers will be re-enabled by ~AutoDisableBarriers() if necessary.
+ bool barriersDisabled = isGCMarking() && !needsIncrementalBarrier();
+
+ gcState_ = next;
+
+ // Update the barriers state when we transition between marking and
+ // non-marking states, unless barriers have been disabled.
+ if (!barriersDisabled) {
+ needsIncrementalBarrier_ = isGCMarking();
+ }
+}
+
+template <class Pred>
+static void EraseIf(js::gc::WeakEntryVector& entries, Pred pred) {
+ auto* begin = entries.begin();
+ auto* const end = entries.end();
+
+ auto* newEnd = begin;
+ for (auto* p = begin; p != end; p++) {
+ if (!pred(*p)) {
+ *newEnd++ = *p;
+ }
+ }
+
+ size_t removed = end - newEnd;
+ entries.shrinkBy(removed);
+}
+
+static void SweepWeakEntryVectorWhileMinorSweeping(
+ js::gc::WeakEntryVector& entries) {
+ EraseIf(entries, [](js::gc::WeakMarkable& markable) -> bool {
+ return IsAboutToBeFinalizedDuringMinorSweep(&markable.key);
+ });
+}
+
+void Zone::sweepAfterMinorGC(JSTracer* trc) {
+ sweepWeakKeysAfterMinorGC();
+ crossZoneStringWrappers().sweepAfterMinorGC(trc);
+}
+
+void Zone::sweepWeakKeysAfterMinorGC() {
+ for (WeakKeyTable::Range r = gcNurseryWeakKeys().all(); !r.empty();
+ r.popFront()) {
+ // Sweep gcNurseryWeakKeys to move live (forwarded) keys to gcWeakKeys,
+ // scanning through all the entries for such keys to update them.
+ //
+ // Forwarded and dead keys may also appear in their delegates' entries,
+ // so sweep those too (see below.)
+
+ // The tricky case is when the key has a delegate that was already
+ // tenured. Then it will be in its compartment's gcWeakKeys, but we
+ // still need to update the key (which will be in the entries
+ // associated with it.)
+ gc::Cell* key = r.front().key;
+ MOZ_ASSERT(!key->isTenured());
+ if (!Nursery::getForwardedPointer(&key)) {
+ // Dead nursery cell => discard.
+ continue;
+ }
+
+    // The key has been moved. The value is an array of <map,key> pairs;
+    // update all keys in that array.
+ WeakEntryVector& entries = r.front().value;
+ SweepWeakEntryVectorWhileMinorSweeping(entries);
+
+ // Live (moved) nursery cell. Append entries to gcWeakKeys.
+ auto entry = gcWeakKeys().get(key);
+ if (!entry) {
+ if (!gcWeakKeys().put(key, gc::WeakEntryVector())) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ oomUnsafe.crash("Failed to tenure weak keys entry");
+ }
+ entry = gcWeakKeys().get(key);
+ }
+
+ for (auto& markable : entries) {
+ if (!entry->value.append(markable)) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ oomUnsafe.crash("Failed to tenure weak keys entry");
+ }
+ }
+
+ // If the key has a delegate, then it will map to a WeakKeyEntryVector
+ // containing the key that needs to be updated.
+
+ JSObject* delegate = gc::detail::GetDelegate(key->as<JSObject>());
+ if (!delegate) {
+ continue;
+ }
+ MOZ_ASSERT(delegate->isTenured());
+
+ // If delegate was formerly nursery-allocated, we will sweep its
+ // entries when we visit its gcNurseryWeakKeys (if we haven't already).
+ // Note that we don't know the nursery address of the delegate, since
+ // the location it was stored in has already been updated.
+ //
+ // Otherwise, it will be in gcWeakKeys and we sweep it here.
+ auto p = delegate->zone()->gcWeakKeys().get(delegate);
+ if (p) {
+ SweepWeakEntryVectorWhileMinorSweeping(p->value);
+ }
+ }
+
+ if (!gcNurseryWeakKeys().clear()) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ oomUnsafe.crash("OOM while clearing gcNurseryWeakKeys.");
+ }
+}
+
+void Zone::sweepAllCrossCompartmentWrappers() {
+ crossZoneStringWrappers().sweep();
+ for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
+ comp->sweepCrossCompartmentObjectWrappers();
+ }
+}
+
+/* static */
+void Zone::fixupAllCrossCompartmentWrappersAfterMovingGC(JSTracer* trc) {
+ MOZ_ASSERT(trc->runtime()->gc.isHeapCompacting());
+
+ for (ZonesIter zone(trc->runtime(), WithAtoms); !zone.done(); zone.next()) {
+ // Sweep the wrapper map to update keys (wrapped values) in other
+ // compartments that may have been moved.
+ zone->crossZoneStringWrappers().sweep();
+
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+ comp->fixupCrossCompartmentObjectWrappersAfterMovingGC(trc);
+ }
+ }
+}
+
+void Zone::dropStringWrappersOnGC() {
+ MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
+ crossZoneStringWrappers().clear();
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+
+void Zone::checkAllCrossCompartmentWrappersAfterMovingGC() {
+ checkStringWrappersAfterMovingGC();
+ for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
+ comp->checkObjectWrappersAfterMovingGC();
+ }
+}
+
+void Zone::checkStringWrappersAfterMovingGC() {
+ for (StringWrapperMap::Enum e(crossZoneStringWrappers()); !e.empty();
+ e.popFront()) {
+ // Assert that the postbarriers have worked and that nothing is left in the
+ // wrapper map that points into the nursery, and that the hash table entries
+ // are discoverable.
+ auto key = e.front().key();
+ CheckGCThingAfterMovingGC(key);
+
+ auto ptr = crossZoneStringWrappers().lookup(key);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &e.front());
+ }
+}
+#endif
+
+void Zone::sweepWeakMaps() {
+ /* Finalize unreachable (key,value) pairs in all weak maps. */
+ WeakMapBase::sweepZone(this);
+}
+
+void Zone::discardJitCode(JSFreeOp* fop,
+ ShouldDiscardBaselineCode discardBaselineCode,
+ ShouldDiscardJitScripts discardJitScripts) {
+ if (!jitZone()) {
+ return;
+ }
+
+ if (isPreservingCode()) {
+ return;
+ }
+
+ if (discardBaselineCode || discardJitScripts) {
+#ifdef DEBUG
+ // Assert no JitScripts are marked as active.
+ for (auto iter = cellIter<BaseScript>(); !iter.done(); iter.next()) {
+ BaseScript* base = iter.unbarrieredGet();
+ if (jit::JitScript* jitScript = base->maybeJitScript()) {
+ MOZ_ASSERT(!jitScript->active());
+ }
+ }
+#endif
+
+ // Mark JitScripts on the stack as active.
+ jit::MarkActiveJitScripts(this);
+ }
+
+ // Invalidate all Ion code in this zone.
+ jit::InvalidateAll(fop, this);
+
+ for (auto base = cellIterUnsafe<BaseScript>(); !base.done(); base.next()) {
+ jit::JitScript* jitScript = base->maybeJitScript();
+ if (!jitScript) {
+ continue;
+ }
+
+ JSScript* script = base->asJSScript();
+ jit::FinishInvalidation(fop, script);
+
+ // Discard baseline script if it's not marked as active.
+ if (discardBaselineCode) {
+ if (jitScript->hasBaselineScript() && !jitScript->active()) {
+ jit::FinishDiscardBaselineScript(fop, script);
+ }
+ }
+
+#ifdef JS_CACHEIR_SPEW
+ maybeUpdateWarmUpCount(script);
+#endif
+
+    // Warm-up counters for scripts are reset on GC. After discarding code we
+    // need to let them warm back up to get information such as which
+ // opcodes are setting array holes or accessing getter properties.
+ script->resetWarmUpCounterForGC();
+
+ // Try to release the script's JitScript. This should happen after
+ // releasing JIT code because we can't do this when the script still has
+ // JIT code.
+ if (discardJitScripts) {
+ script->maybeReleaseJitScript(fop);
+ jitScript = script->maybeJitScript();
+ if (!jitScript) {
+ // Try to discard the ScriptCounts too.
+ if (!script->realm()->collectCoverageForDebug() &&
+ !fop->runtime()->profilingScripts) {
+ script->destroyScriptCounts();
+ }
+ continue;
+ }
+ }
+
+ // If we did not release the JitScript, we need to purge optimized IC
+ // stubs because the optimizedStubSpace will be purged below.
+ if (discardBaselineCode) {
+ jitScript->purgeOptimizedStubs(script);
+ }
+
+ // Finally, reset the active flag.
+ jitScript->resetActive();
+ }
+
+ /*
+   * When scripts contain pointers to nursery things, the store buffer
+ * can contain entries that point into the optimized stub space. Since
+ * this method can be called outside the context of a GC, this situation
+ * could result in us trying to mark invalid store buffer entries.
+ *
+ * Defer freeing any allocated blocks until after the next minor GC.
+ */
+ if (discardBaselineCode) {
+ jitZone()->optimizedStubSpace()->freeAllAfterMinorGC(this);
+ jitZone()->purgeIonCacheIRStubInfo();
+ }
+}
+
+void JS::Zone::beforeClearDelegateInternal(JSObject* wrapper,
+ JSObject* delegate) {
+ MOZ_ASSERT(js::gc::detail::GetDelegate(wrapper) == delegate);
+ MOZ_ASSERT(needsIncrementalBarrier());
+ GCMarker::fromTracer(barrierTracer())->severWeakDelegate(wrapper, delegate);
+}
+
+void JS::Zone::afterAddDelegateInternal(JSObject* wrapper) {
+ JSObject* delegate = js::gc::detail::GetDelegate(wrapper);
+ if (delegate) {
+ GCMarker::fromTracer(barrierTracer())
+ ->restoreWeakDelegate(wrapper, delegate);
+ }
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void JS::Zone::checkUniqueIdTableAfterMovingGC() {
+ for (auto r = uniqueIds().all(); !r.empty(); r.popFront()) {
+ js::gc::CheckGCThingAfterMovingGC(r.front().key());
+ }
+}
+#endif
+
+uint64_t Zone::gcNumber() {
+ // Zones in use by exclusive threads are not collected, and threads using
+ // them cannot access the main runtime's gcNumber without racing.
+ return usedByHelperThread() ? 0 : runtimeFromMainThread()->gc.gcNumber();
+}
+
+js::jit::JitZone* Zone::createJitZone(JSContext* cx) {
+ MOZ_ASSERT(!jitZone_);
+ MOZ_ASSERT(cx->runtime()->hasJitRuntime());
+
+ UniquePtr<jit::JitZone> jitZone(cx->new_<js::jit::JitZone>());
+ if (!jitZone) {
+ return nullptr;
+ }
+
+ jitZone_ = jitZone.release();
+ return jitZone_;
+}
+
+bool Zone::hasMarkedRealms() {
+ for (RealmsInZoneIter realm(this); !realm.done(); realm.next()) {
+ if (realm->marked()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Zone::canCollect() {
+ // The atoms zone cannot be collected while off-thread parsing is taking
+ // place.
+ if (isAtomsZone()) {
+ return !runtimeFromAnyThread()->hasHelperThreadZones();
+ }
+
+ // We don't collect the self hosting zone after it has been initialized.
+ if (isSelfHostingZone()) {
+ return !runtimeFromAnyThread()->gc.isSelfHostingZoneFrozen();
+ }
+
+ // Zones that will be or are currently used by other threads cannot be
+ // collected.
+ return !createdForHelperThread();
+}
+
+void Zone::notifyObservingDebuggers() {
+ AutoAssertNoGC nogc;
+ MOZ_ASSERT(JS::RuntimeHeapIsCollecting(),
+ "This method should be called during GC.");
+
+ JSRuntime* rt = runtimeFromMainThread();
+
+ for (RealmsInZoneIter realms(this); !realms.done(); realms.next()) {
+ GlobalObject* global = realms->unsafeUnbarrieredMaybeGlobal();
+ if (!global) {
+ continue;
+ }
+
+ DebugAPI::notifyParticipatesInGC(global, rt->gc.majorGCCount());
+ }
+}
+
+bool Zone::isOnList() const { return listNext_ != NotOnList; }
+
+Zone* Zone::nextZone() const {
+ MOZ_ASSERT(isOnList());
+ return listNext_;
+}
+
+void Zone::clearTables() {
+ MOZ_ASSERT(regExps().empty());
+
+ baseShapes().clear();
+ initialShapes().clear();
+}
+
+void Zone::fixupAfterMovingGC() {
+ ZoneAllocator::fixupAfterMovingGC();
+ fixupInitialShapeTable();
+}
+
+bool Zone::addTypeDescrObject(JSContext* cx, HandleObject obj) {
+ // Type descriptor objects are always tenured so we don't need post barriers
+ // on the set.
+ MOZ_ASSERT(!IsInsideNursery(obj));
+
+ if (!typeDescrObjects().put(obj)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+void Zone::deleteEmptyCompartment(JS::Compartment* comp) {
+ MOZ_ASSERT(comp->zone() == this);
+ arenas.checkEmptyArenaLists();
+
+ MOZ_ASSERT(compartments().length() == 1);
+ MOZ_ASSERT(compartments()[0] == comp);
+ MOZ_ASSERT(comp->realms().length() == 1);
+
+ Realm* realm = comp->realms()[0];
+ JSFreeOp* fop = runtimeFromMainThread()->defaultFreeOp();
+ realm->destroy(fop);
+ comp->destroy(fop);
+
+ compartments().clear();
+}
+
+void Zone::setHelperThreadOwnerContext(JSContext* cx) {
+ MOZ_ASSERT_IF(cx, TlsContext.get() == cx);
+ helperThreadOwnerContext_ = cx;
+}
+
+bool Zone::ownedByCurrentHelperThread() {
+ MOZ_ASSERT(usedByHelperThread());
+ MOZ_ASSERT(TlsContext.get());
+ return helperThreadOwnerContext_ == TlsContext.get();
+}
+
+void Zone::purgeAtomCache() {
+ atomCache().clearAndCompact();
+
+  // Also purge the dtoa caches so that subsequent lookups populate the atom
+ // cache too.
+ for (RealmsInZoneIter r(this); !r.done(); r.next()) {
+ r->dtoaCache.purge();
+ }
+}
+
+void Zone::addSizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf, JS::CodeSizes* code, size_t* regexpZone,
+ size_t* jitZone, size_t* baselineStubsOptimized, size_t* uniqueIdMap,
+ size_t* shapeCaches, size_t* atomsMarkBitmaps, size_t* compartmentObjects,
+ size_t* crossCompartmentWrappersTables, size_t* compartmentsPrivateData,
+ size_t* scriptCountsMapArg) {
+ *regexpZone += regExps().sizeOfExcludingThis(mallocSizeOf);
+ if (jitZone_) {
+ jitZone_->addSizeOfIncludingThis(mallocSizeOf, code, jitZone,
+ baselineStubsOptimized);
+ }
+ *uniqueIdMap += uniqueIds().shallowSizeOfExcludingThis(mallocSizeOf);
+ *shapeCaches += baseShapes().sizeOfExcludingThis(mallocSizeOf) +
+ initialShapes().sizeOfExcludingThis(mallocSizeOf);
+ *atomsMarkBitmaps += markedAtoms().sizeOfExcludingThis(mallocSizeOf);
+ *crossCompartmentWrappersTables +=
+ crossZoneStringWrappers().sizeOfExcludingThis(mallocSizeOf);
+
+ for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
+ comp->addSizeOfIncludingThis(mallocSizeOf, compartmentObjects,
+ crossCompartmentWrappersTables,
+ compartmentsPrivateData);
+ }
+
+ if (scriptCountsMap) {
+ *scriptCountsMapArg +=
+ scriptCountsMap->shallowSizeOfIncludingThis(mallocSizeOf);
+ for (auto r = scriptCountsMap->all(); !r.empty(); r.popFront()) {
+ *scriptCountsMapArg +=
+ r.front().value()->sizeOfIncludingThis(mallocSizeOf);
+ }
+ }
+}
+
+void* ZoneAllocator::onOutOfMemory(js::AllocFunction allocFunc,
+ arena_id_t arena, size_t nbytes,
+ void* reallocPtr) {
+ if (!js::CurrentThreadCanAccessRuntime(runtime_)) {
+ return nullptr;
+ }
+ // The analysis sees that JSRuntime::onOutOfMemory could report an error,
+ // which with a JSErrorInterceptor could GC. But we're passing a null cx (to
+ // a default parameter) so the error will not be reported.
+ JS::AutoSuppressGCAnalysis suppress;
+ return runtimeFromMainThread()->onOutOfMemory(allocFunc, arena, nbytes,
+ reallocPtr);
+}
+
+void ZoneAllocator::reportAllocationOverflow() const {
+ js::ReportAllocationOverflow(nullptr);
+}
+
+ZoneList::ZoneList() : head(nullptr), tail(nullptr) {}
+
+ZoneList::ZoneList(Zone* zone) : head(zone), tail(zone) {
+ MOZ_RELEASE_ASSERT(!zone->isOnList());
+ zone->listNext_ = nullptr;
+}
+
+ZoneList::~ZoneList() { MOZ_ASSERT(isEmpty()); }
+
+void ZoneList::check() const {
+#ifdef DEBUG
+ MOZ_ASSERT((head == nullptr) == (tail == nullptr));
+ if (!head) {
+ return;
+ }
+
+ Zone* zone = head;
+ for (;;) {
+ MOZ_ASSERT(zone && zone->isOnList());
+ if (zone == tail) break;
+ zone = zone->listNext_;
+ }
+ MOZ_ASSERT(!zone->listNext_);
+#endif
+}
+
+bool ZoneList::isEmpty() const { return head == nullptr; }
+
+Zone* ZoneList::front() const {
+ MOZ_ASSERT(!isEmpty());
+ MOZ_ASSERT(head->isOnList());
+ return head;
+}
+
+void ZoneList::append(Zone* zone) {
+ ZoneList singleZone(zone);
+ transferFrom(singleZone);
+}
+
+void ZoneList::transferFrom(ZoneList& other) {
+ check();
+ other.check();
+ if (!other.head) {
+ return;
+ }
+
+ MOZ_ASSERT(tail != other.tail);
+
+ if (tail) {
+ tail->listNext_ = other.head;
+ } else {
+ head = other.head;
+ }
+ tail = other.tail;
+
+ other.head = nullptr;
+ other.tail = nullptr;
+}
+
+Zone* ZoneList::removeFront() {
+ MOZ_ASSERT(!isEmpty());
+ check();
+
+ Zone* front = head;
+ head = head->listNext_;
+ if (!head) {
+ tail = nullptr;
+ }
+
+ front->listNext_ = Zone::NotOnList;
+
+ return front;
+}
+
+void ZoneList::clear() {
+ while (!isEmpty()) {
+ removeFront();
+ }
+}
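+
+// Illustrative sketch (hypothetical zones, not part of the upstream file):
+// ZoneList is a FIFO threaded through each Zone's listNext_ field, and it
+// must be drained before it is destroyed (see the assertion in ~ZoneList).
+//
+//   js::gc::ZoneList pending;
+//   pending.append(zoneA);  // zoneA must not already be on another list
+//   pending.append(zoneB);
+//   while (!pending.isEmpty()) {
+//     JS::Zone* zone = pending.removeFront();  // zoneA first, then zoneB
+//     // ... process |zone| ...
+//   }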
+
+JS_PUBLIC_API void JS::shadow::RegisterWeakCache(
+ JS::Zone* zone, detail::WeakCacheBase* cachep) {
+ zone->registerWeakCache(cachep);
+}
+
+void Zone::traceScriptTableRoots(JSTracer* trc) {
+ static_assert(std::is_convertible_v<BaseScript*, gc::TenuredCell*>,
+ "BaseScript must not be nursery-allocated for script-table "
+ "tracing to work");
+
+ // Performance optimization: the script-table keys are JSScripts, which
+ // cannot be in the nursery, so we can skip this tracing if we are only in a
+ // minor collection. We static-assert this fact above.
+ if (JS::RuntimeHeapIsMinorCollecting()) {
+ return;
+ }
+
+ // N.B.: the script-table keys are weak *except* in an exceptional case: when
+  // the --dump-bytecode command line option or the PCCount JSFriend API is
+ // used, then the scripts for all counts must remain alive. We only trace
+ // when the `trc->runtime()->profilingScripts` flag is set. This flag is
+ // cleared in JSRuntime::destroyRuntime() during shutdown to ensure that
+ // scripts are collected before the runtime goes away completely.
+ if (scriptCountsMap && trc->runtime()->profilingScripts) {
+ for (ScriptCountsMap::Range r = scriptCountsMap->all(); !r.empty();
+ r.popFront()) {
+ BaseScript* script = const_cast<BaseScript*>(r.front().key());
+ MOZ_ASSERT(script->hasScriptCounts());
+ TraceRoot(trc, &script, "profilingScripts");
+ MOZ_ASSERT(script == r.front().key(), "const_cast is only a work-around");
+ }
+ }
+}
+
+void Zone::fixupScriptMapsAfterMovingGC(JSTracer* trc) {
+ // Map entries are removed by BaseScript::finalize, but we need to update the
+ // script pointers here in case they are moved by the GC.
+
+ if (scriptCountsMap) {
+ for (ScriptCountsMap::Enum e(*scriptCountsMap); !e.empty(); e.popFront()) {
+ BaseScript* script = e.front().key();
+ TraceManuallyBarrieredEdge(trc, &script, "Realm::scriptCountsMap::key");
+ if (script != e.front().key()) {
+ e.rekeyFront(script);
+ }
+ }
+ }
+
+ if (scriptLCovMap) {
+ for (ScriptLCovMap::Enum e(*scriptLCovMap); !e.empty(); e.popFront()) {
+ BaseScript* script = e.front().key();
+ if (!IsAboutToBeFinalizedUnbarriered(&script) &&
+ script != e.front().key()) {
+ e.rekeyFront(script);
+ }
+ }
+ }
+
+ if (debugScriptMap) {
+ for (DebugScriptMap::Enum e(*debugScriptMap); !e.empty(); e.popFront()) {
+ BaseScript* script = e.front().key();
+ if (!IsAboutToBeFinalizedUnbarriered(&script) &&
+ script != e.front().key()) {
+ e.rekeyFront(script);
+ }
+ }
+ }
+
+#ifdef MOZ_VTUNE
+ if (scriptVTuneIdMap) {
+ for (ScriptVTuneIdMap::Enum e(*scriptVTuneIdMap); !e.empty();
+ e.popFront()) {
+ BaseScript* script = e.front().key();
+ if (!IsAboutToBeFinalizedUnbarriered(&script) &&
+ script != e.front().key()) {
+ e.rekeyFront(script);
+ }
+ }
+ }
+#endif
+
+#ifdef JS_CACHEIR_SPEW
+ if (scriptFinalWarmUpCountMap) {
+ for (ScriptFinalWarmUpCountMap::Enum e(*scriptFinalWarmUpCountMap);
+ !e.empty(); e.popFront()) {
+ BaseScript* script = e.front().key();
+ if (!IsAboutToBeFinalizedUnbarriered(&script) &&
+ script != e.front().key()) {
+ e.rekeyFront(script);
+ }
+ }
+ }
+#endif
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void Zone::checkScriptMapsAfterMovingGC() {
+ if (scriptCountsMap) {
+ for (auto r = scriptCountsMap->all(); !r.empty(); r.popFront()) {
+ BaseScript* script = r.front().key();
+ MOZ_ASSERT(script->zone() == this);
+ CheckGCThingAfterMovingGC(script);
+ auto ptr = scriptCountsMap->lookup(script);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
+ }
+ }
+
+ if (scriptLCovMap) {
+ for (auto r = scriptLCovMap->all(); !r.empty(); r.popFront()) {
+ BaseScript* script = r.front().key();
+ MOZ_ASSERT(script->zone() == this);
+ CheckGCThingAfterMovingGC(script);
+ auto ptr = scriptLCovMap->lookup(script);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
+ }
+ }
+
+ if (debugScriptMap) {
+ for (auto r = debugScriptMap->all(); !r.empty(); r.popFront()) {
+ BaseScript* script = r.front().key();
+ MOZ_ASSERT(script->zone() == this);
+ CheckGCThingAfterMovingGC(script);
+ DebugScript* ds = r.front().value().get();
+ DebugAPI::checkDebugScriptAfterMovingGC(ds);
+ auto ptr = debugScriptMap->lookup(script);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
+ }
+ }
+
+# ifdef MOZ_VTUNE
+ if (scriptVTuneIdMap) {
+ for (auto r = scriptVTuneIdMap->all(); !r.empty(); r.popFront()) {
+ BaseScript* script = r.front().key();
+ MOZ_ASSERT(script->zone() == this);
+ CheckGCThingAfterMovingGC(script);
+ auto ptr = scriptVTuneIdMap->lookup(script);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
+ }
+ }
+# endif // MOZ_VTUNE
+
+# ifdef JS_CACHEIR_SPEW
+ if (scriptFinalWarmUpCountMap) {
+ for (auto r = scriptFinalWarmUpCountMap->all(); !r.empty(); r.popFront()) {
+ BaseScript* script = r.front().key();
+ MOZ_ASSERT(script->zone() == this);
+ CheckGCThingAfterMovingGC(script);
+ auto ptr = scriptFinalWarmUpCountMap->lookup(script);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
+ }
+ }
+# endif // JS_CACHEIR_SPEW
+}
+#endif
+
+void Zone::clearScriptCounts(Realm* realm) {
+ if (!scriptCountsMap) {
+ return;
+ }
+
+ // Clear all hasScriptCounts_ flags of BaseScript, in order to release all
+ // ScriptCounts entries of the given realm.
+ for (auto i = scriptCountsMap->modIter(); !i.done(); i.next()) {
+ BaseScript* script = i.get().key();
+ if (script->realm() == realm) {
+ script->clearHasScriptCounts();
+ i.remove();
+ }
+ }
+}
+
+void Zone::clearScriptLCov(Realm* realm) {
+ if (!scriptLCovMap) {
+ return;
+ }
+
+ for (auto i = scriptLCovMap->modIter(); !i.done(); i.next()) {
+ BaseScript* script = i.get().key();
+ if (script->realm() == realm) {
+ i.remove();
+ }
+ }
+}
+
+void Zone::clearRootsForShutdownGC() {
+ // Finalization callbacks are not called if we're shutting down.
+ finalizationRecordMap().clear();
+
+ clearKeptObjects();
+}
+
+void Zone::finishRoots() {
+ for (RealmsInZoneIter r(this); !r.done(); r.next()) {
+ r->finishRoots();
+ }
+}
+
+void Zone::traceKeptObjects(JSTracer* trc) { keptObjects.ref().trace(trc); }
+
+bool Zone::keepDuringJob(HandleObject target) {
+ return keptObjects.ref().put(target);
+}
+
+void Zone::clearKeptObjects() { keptObjects.ref().clear(); }
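+
+// Illustrative sketch (hypothetical call sites, not part of the upstream
+// file): the kept-alive set above backs the WeakRef KeepDuringJob semantics,
+// so a target exposed via WeakRef.prototype.deref() stays alive until the
+// set is cleared at the end of the job (or by a shutdown GC, see
+// clearRootsForShutdownGC above).
+//
+//   // When a WeakRef's target is handed out to script:
+//   if (!target->zone()->keepDuringJob(target)) {
+//     // OOM while recording the target; report and propagate failure.
+//   }
+//   // Later, once the current job has finished running:
+//   target->zone()->clearKeptObjects();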
diff --git a/js/src/gc/Zone.h b/js/src/gc/Zone.h
new file mode 100644
index 0000000000..5c93270a14
--- /dev/null
+++ b/js/src/gc/Zone.h
@@ -0,0 +1,711 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Zone_h
+#define gc_Zone_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/SegmentedVector.h"
+
+#include "ds/Bitmap.h"
+#include "gc/ArenaList.h"
+#include "gc/Barrier.h"
+#include "gc/FindSCCs.h"
+#include "gc/GCMarker.h"
+#include "gc/NurseryAwareHashMap.h"
+#include "gc/Statistics.h"
+#include "gc/ZoneAllocator.h"
+#include "js/GCHashTable.h"
+#include "vm/AtomsTable.h"
+#include "vm/JSFunction.h"
+
+namespace js {
+
+class RegExpZone;
+class WeakRefObject;
+
+namespace jit {
+class JitZone;
+} // namespace jit
+
+namespace gc {
+
+class ZoneList;
+
+using ZoneComponentFinder = ComponentFinder<JS::Zone>;
+
+struct UniqueIdGCPolicy {
+ static bool needsSweep(Cell** cell, uint64_t* value);
+};
+
+// Maps a Cell* to a unique, 64bit id.
+using UniqueIdMap = GCHashMap<Cell*, uint64_t, PointerHasher<Cell*>,
+ SystemAllocPolicy, UniqueIdGCPolicy>;
+
+extern uint64_t NextCellUniqueId(JSRuntime* rt);
+
+template <typename T>
+class ZoneAllCellIter;
+
+template <typename T>
+class ZoneCellIter;
+
+// A vector of FinalizationRecord objects, or CCWs to them.
+using FinalizationRecordVector = GCVector<HeapPtrObject, 1, ZoneAllocPolicy>;
+
+} // namespace gc
+
+// If two different nursery strings with the same contents are wrapped into
+// the same zone, then deduplication may turn them into duplicates.
+// `DuplicatesPossible` allows this and maps both wrappers to the same (now
+// tenured) source string.
+using StringWrapperMap =
+ NurseryAwareHashMap<JSString*, JSString*, DefaultHasher<JSString*>,
+ ZoneAllocPolicy, DuplicatesPossible>;
+
+class MOZ_NON_TEMPORARY_CLASS ExternalStringCache {
+ static const size_t NumEntries = 4;
+ mozilla::Array<JSString*, NumEntries> entries_;
+
+ ExternalStringCache(const ExternalStringCache&) = delete;
+ void operator=(const ExternalStringCache&) = delete;
+
+ public:
+ ExternalStringCache() { purge(); }
+ void purge() { mozilla::PodArrayZero(entries_); }
+
+ MOZ_ALWAYS_INLINE JSString* lookup(const char16_t* chars, size_t len) const;
+ MOZ_ALWAYS_INLINE void put(JSString* s);
+};
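+
+// Illustrative sketch (hypothetical caller, not part of the upstream file):
+// the cache above is a small fixed-size table, so external-string creation
+// can consult it before allocating and record the new string afterwards.
+//
+//   if (JSString* cached = zone->externalStringCache().lookup(chars, len)) {
+//     return cached;
+//   }
+//   JSString* str = /* ... create the external string ... */;
+//   zone->externalStringCache().put(str);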
+
+class MOZ_NON_TEMPORARY_CLASS FunctionToStringCache {
+ struct Entry {
+ BaseScript* script;
+ JSString* string;
+
+ void set(BaseScript* scriptArg, JSString* stringArg) {
+ script = scriptArg;
+ string = stringArg;
+ }
+ };
+ static const size_t NumEntries = 2;
+ mozilla::Array<Entry, NumEntries> entries_;
+
+ FunctionToStringCache(const FunctionToStringCache&) = delete;
+ void operator=(const FunctionToStringCache&) = delete;
+
+ public:
+ FunctionToStringCache() { purge(); }
+ void purge() { mozilla::PodArrayZero(entries_); }
+
+ MOZ_ALWAYS_INLINE JSString* lookup(BaseScript* script) const;
+ MOZ_ALWAYS_INLINE void put(BaseScript* script, JSString* string);
+};
+
+// WeakRefHeapPtrVector is a GCVector of WeakRefObjects.
+class WeakRefHeapPtrVector
+ : public GCVector<js::HeapPtrObject, 1, js::ZoneAllocPolicy> {
+ public:
+ using GCVector::GCVector;
+
+  // Called during compacting GC to update the target in each WeakRefObject.
+ void sweep(js::HeapPtrObject& target);
+};
+
+// WeakRefMap is a per-zone GCHashMap, which maps from the target of the JS
+// WeakRef to the list of JS WeakRefs.
+class WeakRefMap
+ : public GCHashMap<HeapPtrObject, WeakRefHeapPtrVector,
+ MovableCellHasher<HeapPtrObject>, ZoneAllocPolicy> {
+ public:
+ using GCHashMap::GCHashMap;
+ using Base = GCHashMap<HeapPtrObject, WeakRefHeapPtrVector,
+ MovableCellHasher<HeapPtrObject>, ZoneAllocPolicy>;
+ void sweep(gc::StoreBuffer* sbToLock);
+};
+
+} // namespace js
+
+namespace JS {
+
+// [SMDOC] GC Zones
+//
+// A zone is a collection of compartments. Every compartment belongs to exactly
+// one zone. In Firefox, there is roughly one zone per tab along with a system
+// zone for everything else. Zones mainly serve as boundaries for garbage
+// collection. Unlike compartments, they have no special security properties.
+//
+// Every GC thing belongs to exactly one zone. GC things from the same zone but
+// different compartments can share an arena (4k page). GC things from different
+// zones cannot be stored in the same arena. The garbage collector is capable of
+// collecting one zone at a time; it cannot collect at the granularity of
+// compartments.
+//
+// GC things are tied to zones and compartments as follows:
+//
+// - JSObjects belong to a compartment and cannot be shared between
+// compartments. If an object needs to point to a JSObject in a different
+// compartment, regardless of zone, it must go through a cross-compartment
+// wrapper. Each compartment keeps track of its outgoing wrappers in a table.
+// JSObjects find their compartment via their ObjectGroup.
+//
+// - JSStrings do not belong to any particular compartment, but they do belong
+// to a zone. Thus, two different compartments in the same zone can point to a
+// JSString. When a string needs to be wrapped, we copy it if it's in a
+// different zone and do nothing if it's in the same zone. Thus, transferring
+// strings within a zone is very efficient.
+//
+// - Shapes and base shapes belong to a zone and are shared between compartments
+// in that zone where possible. Accessor shapes store getter and setter
+// JSObjects which belong to a single compartment, so these shapes and all
+// their descendants can't be shared with other compartments.
+//
+// - Scripts are also compartment-local and cannot be shared. A script points to
+// its compartment.
+//
+// - ObjectGroup and JitCode objects belong to a compartment and cannot be
+// shared. There is no mechanism to obtain the compartment from a JitCode
+// object.
+//
+// A zone remains alive as long as any GC things in the zone are alive. A
+// compartment remains alive as long as any JSObjects, scripts, shapes, or base
+// shapes within it are alive.
+//
+// We always guarantee that a zone has at least one live compartment by refusing
+// to delete the last compartment in a live zone.
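+//
+// Illustrative example (hypothetical names, summarizing the rules above):
+//
+//   JS::Compartment* c1;  // lives in zone A
+//   JS::Compartment* c2;  // also lives in zone A
+//   JSString* s;          // owned by c2: code in c1 may point at s directly,
+//                         // because strings are zone-local.
+//   JSObject* o;          // owned by c2: code in c1 must reach o through a
+//                         // cross-compartment wrapper recorded in c1's
+//                         // wrapper table, even though the zone is shared.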
+class Zone : public js::ZoneAllocator, public js::gc::GraphNodeBase<JS::Zone> {
+ private:
+ enum class HelperThreadUse : uint32_t { None, Pending, Active };
+ mozilla::Atomic<HelperThreadUse, mozilla::SequentiallyConsistent>
+ helperThreadUse_;
+
+ // The helper thread context with exclusive access to this zone, if
+ // usedByHelperThread(), or nullptr when on the main thread.
+ js::UnprotectedData<JSContext*> helperThreadOwnerContext_;
+
+ public:
+ js::gc::ArenaLists arenas;
+
+ // Per-zone data for use by an embedder.
+ js::ZoneData<void*> data;
+
+ js::ZoneData<uint32_t> tenuredBigInts;
+
+ js::ZoneOrIonCompileData<uint64_t> nurseryAllocatedStrings;
+
+  // Number of marked/finalized JSString/JSFatInlineString during major GC.
+ js::ZoneOrGCTaskData<size_t> markedStrings;
+ js::ZoneOrGCTaskData<size_t> finalizedStrings;
+
+ js::ZoneData<bool> allocNurseryStrings;
+ js::ZoneData<bool> allocNurseryBigInts;
+
+ // When true, skip calling the metadata callback. We use this:
+ // - to avoid invoking the callback recursively;
+ // - to avoid observing lazy prototype setup (which confuses callbacks that
+ // want to use the types being set up!);
+ // - to avoid attaching allocation stacks to allocation stack nodes, which
+ // is silly
+ // And so on.
+ js::ZoneData<bool> suppressAllocationMetadataBuilder;
+
+ // Script side-tables. These used to be held by Realm, but are now placed
+ // here in order to allow JSScript to access them during finalize (see bug
+ // 1568245; this change in 1575350). The tables are initialized lazily by
+ // JSScript.
+ js::UniquePtr<js::ScriptCountsMap> scriptCountsMap;
+ js::UniquePtr<js::ScriptLCovMap> scriptLCovMap;
+ js::UniquePtr<js::DebugScriptMap> debugScriptMap;
+#ifdef MOZ_VTUNE
+ js::UniquePtr<js::ScriptVTuneIdMap> scriptVTuneIdMap;
+#endif
+#ifdef JS_CACHEIR_SPEW
+ js::UniquePtr<js::ScriptFinalWarmUpCountMap> scriptFinalWarmUpCountMap;
+#endif
+
+ js::ZoneData<js::StringStats> previousGCStringStats;
+ js::ZoneData<js::StringStats> stringStats;
+
+#ifdef DEBUG
+ js::MainThreadData<unsigned> gcSweepGroupIndex;
+#endif
+
+ private:
+ // Side map for storing unique ids for cells, independent of address.
+ js::ZoneOrGCTaskData<js::gc::UniqueIdMap> uniqueIds_;
+
+ // Number of allocations since the most recent minor GC for this thread.
+ mozilla::Atomic<uint32_t, mozilla::Relaxed> tenuredAllocsSinceMinorGC_;
+
+ // Live weakmaps in this zone.
+ js::ZoneOrGCTaskData<mozilla::LinkedList<js::WeakMapBase>> gcWeakMapList_;
+
+ // The set of compartments in this zone.
+ using CompartmentVector =
+ js::Vector<JS::Compartment*, 1, js::SystemAllocPolicy>;
+ js::MainThreadOrGCTaskData<CompartmentVector> compartments_;
+
+ // All cross-zone string wrappers in the zone.
+ js::MainThreadOrGCTaskData<js::StringWrapperMap> crossZoneStringWrappers_;
+
+ // This zone's gray roots.
+ using GrayRootVector =
+ mozilla::SegmentedVector<js::gc::Cell*, 1024 * sizeof(js::gc::Cell*),
+ js::SystemAllocPolicy>;
+ js::ZoneOrGCTaskData<GrayRootVector> gcGrayRoots_;
+
+ // List of non-ephemeron weak containers to sweep during
+ // beginSweepingSweepGroup.
+ js::ZoneOrGCTaskData<mozilla::LinkedList<detail::WeakCacheBase>> weakCaches_;
+
+ // Mapping from not yet marked keys to a vector of all values that the key
+ // maps to in any live weak map. Separate tables for nursery and tenured
+ // keys.
+ js::ZoneOrGCTaskData<js::gc::WeakKeyTable> gcWeakKeys_;
+ js::ZoneOrGCTaskData<js::gc::WeakKeyTable> gcNurseryWeakKeys_;
+
+  // Keep track of all TypeDescr and related objects in this zone.
+ // This is used by the GC to trace them all first when compacting, since the
+ // TypedObject trace hook may access these objects.
+ //
+ // There are no barriers here - the set contains only tenured objects so no
+ // post-barrier is required, and these are weak references so no pre-barrier
+ // is required.
+ using TypeDescrObjectSet =
+ js::GCHashSet<JSObject*, js::MovableCellHasher<JSObject*>,
+ js::SystemAllocPolicy>;
+
+ js::ZoneData<JS::WeakCache<TypeDescrObjectSet>> typeDescrObjects_;
+
+ js::MainThreadData<js::UniquePtr<js::RegExpZone>> regExps_;
+
+ // Bitmap of atoms marked by this zone.
+ js::ZoneOrGCTaskData<js::SparseBitmap> markedAtoms_;
+
+ // Set of atoms recently used by this Zone. Purged on GC.
+ js::ZoneOrGCTaskData<js::AtomSet> atomCache_;
+
+ // Cache storing allocated external strings. Purged on GC.
+ js::ZoneOrGCTaskData<js::ExternalStringCache> externalStringCache_;
+
+ // Cache for Function.prototype.toString. Purged on GC.
+ js::ZoneOrGCTaskData<js::FunctionToStringCache> functionToStringCache_;
+
+ // Shared Shape property tree.
+ js::ZoneData<js::PropertyTree> propertyTree_;
+
+ // Set of all unowned base shapes in the Zone.
+ js::ZoneData<js::BaseShapeSet> baseShapes_;
+
+ // Set of initial shapes in the Zone. For certain prototypes -- namely,
+ // those of various builtin classes -- there are two entries: one for a
+ // lookup via TaggedProto, and one for a lookup via JSProtoKey. See
+ // InitialShapeProto.
+ js::ZoneData<js::InitialShapeSet> initialShapes_;
+
+ // List of shapes that may contain nursery pointers.
+ using NurseryShapeVector =
+ js::Vector<js::AccessorShape*, 0, js::SystemAllocPolicy>;
+ js::ZoneData<NurseryShapeVector> nurseryShapes_;
+
+ // The set of all finalization registries in this zone.
+ using FinalizationRegistrySet =
+ GCHashSet<js::HeapPtrObject, js::MovableCellHasher<js::HeapPtrObject>,
+ js::ZoneAllocPolicy>;
+ js::ZoneOrGCTaskData<FinalizationRegistrySet> finalizationRegistries_;
+
+ // A map from finalization registry targets to a list of finalization records
+ // representing registries that the target is registered with and their
+ // associated held values.
+ using FinalizationRecordMap =
+ GCHashMap<js::HeapPtrObject, js::gc::FinalizationRecordVector,
+ js::MovableCellHasher<js::HeapPtrObject>, js::ZoneAllocPolicy>;
+ js::ZoneOrGCTaskData<FinalizationRecordMap> finalizationRecordMap_;
+
+ js::ZoneOrGCTaskData<js::jit::JitZone*> jitZone_;
+
+ js::MainThreadData<bool> gcScheduled_;
+ js::MainThreadData<bool> gcScheduledSaved_;
+ js::MainThreadData<bool> gcPreserveCode_;
+ js::ZoneData<bool> keepShapeCaches_;
+ js::MainThreadData<bool> wasCollected_;
+
+ // Allow zones to be linked into a list
+ js::MainThreadOrGCTaskData<Zone*> listNext_;
+ static Zone* const NotOnList;
+ friend class js::gc::ZoneList;
+
+ js::ZoneOrGCTaskData<js::WeakRefMap> weakRefMap_;
+
+ using KeptAliveSet =
+ JS::GCHashSet<js::HeapPtrObject, js::MovableCellHasher<js::HeapPtrObject>,
+ js::ZoneAllocPolicy>;
+ friend class js::WeakRefObject;
+ js::ZoneOrGCTaskData<KeptAliveSet> keptObjects;
+
+ public:
+ static JS::Zone* from(ZoneAllocator* zoneAlloc) {
+ return static_cast<Zone*>(zoneAlloc);
+ }
+
+ explicit Zone(JSRuntime* rt, Kind kind = NormalZone);
+ ~Zone();
+
+ MOZ_MUST_USE bool init();
+
+ void destroy(JSFreeOp* fop);
+
+ bool ownedByCurrentHelperThread();
+ void setHelperThreadOwnerContext(JSContext* cx);
+
+ // Whether this zone was created for use by a helper thread.
+ bool createdForHelperThread() const {
+ return helperThreadUse_ != HelperThreadUse::None;
+ }
+ // Whether this zone is currently in use by a helper thread.
+ bool usedByHelperThread() {
+ MOZ_ASSERT_IF(isAtomsZone(), helperThreadUse_ == HelperThreadUse::None);
+ return helperThreadUse_ == HelperThreadUse::Active;
+ }
+ void setCreatedForHelperThread() {
+ MOZ_ASSERT(helperThreadUse_ == HelperThreadUse::None);
+ helperThreadUse_ = HelperThreadUse::Pending;
+ }
+ void setUsedByHelperThread() {
+ MOZ_ASSERT(helperThreadUse_ == HelperThreadUse::Pending);
+ helperThreadUse_ = HelperThreadUse::Active;
+ }
+ void clearUsedByHelperThread() {
+ MOZ_ASSERT(helperThreadUse_ != HelperThreadUse::None);
+ helperThreadUse_ = HelperThreadUse::None;
+ }
+
+ MOZ_MUST_USE bool findSweepGroupEdges(Zone* atomsZone);
+
+ enum ShouldDiscardBaselineCode : bool {
+ KeepBaselineCode = false,
+ DiscardBaselineCode
+ };
+
+ enum ShouldDiscardJitScripts : bool {
+ KeepJitScripts = false,
+ DiscardJitScripts
+ };
+
+ void discardJitCode(
+ JSFreeOp* fop,
+ ShouldDiscardBaselineCode discardBaselineCode = DiscardBaselineCode,
+ ShouldDiscardJitScripts discardJitScripts = KeepJitScripts);
+
+ void addSizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf, JS::CodeSizes* code,
+ size_t* regexpZone, size_t* jitZone, size_t* baselineStubsOptimized,
+ size_t* uniqueIdMap, size_t* shapeCaches, size_t* atomsMarkBitmaps,
+ size_t* compartmentObjects, size_t* crossCompartmentWrappersTables,
+ size_t* compartmentsPrivateData, size_t* scriptCountsMapArg);
+
+ // Iterate over all cells in the zone. See the definition of ZoneCellIter
+ // in gc/GC-inl.h for the possible arguments and documentation.
+ template <typename T, typename... Args>
+ js::gc::ZoneCellIter<T> cellIter(Args&&... args) {
+ return js::gc::ZoneCellIter<T>(const_cast<Zone*>(this),
+ std::forward<Args>(args)...);
+ }
+
+  // As above, but can return about-to-be-finalized things.
+ template <typename T, typename... Args>
+ js::gc::ZoneAllCellIter<T> cellIterUnsafe(Args&&... args) {
+ return js::gc::ZoneAllCellIter<T>(const_cast<Zone*>(this),
+ std::forward<Args>(args)...);
+ }
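+
+  // Illustrative sketch (hypothetical loop, not part of the upstream file):
+  // a typical use walks every live cell of one type in this zone; see
+  // gc/GC-inl.h for the iterator's exact interface.
+  //
+  //   for (auto iter = zone->cellIter<js::BaseScript>(); !iter.done();
+  //        iter.next()) {
+  //     js::BaseScript* script = iter;
+  //     // ... inspect |script| ...
+  //   }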
+
+ bool hasMarkedRealms();
+
+ void scheduleGC() {
+ MOZ_ASSERT(!RuntimeHeapIsBusy());
+ gcScheduled_ = true;
+ }
+ void unscheduleGC() { gcScheduled_ = false; }
+ bool isGCScheduled() { return gcScheduled_; }
+
+ void setPreservingCode(bool preserving) { gcPreserveCode_ = preserving; }
+ bool isPreservingCode() const { return gcPreserveCode_; }
+
+ // Whether this zone can currently be collected.
+ bool canCollect();
+
+ void changeGCState(GCState prev, GCState next);
+
+ bool isCollecting() const {
+ MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
+ return isCollectingFromAnyThread();
+ }
+
+ bool isCollectingFromAnyThread() const {
+ if (RuntimeHeapIsCollecting()) {
+ return gcState_ != NoGC;
+ } else {
+ return needsIncrementalBarrier();
+ }
+ }
+
+ bool shouldMarkInZone() const {
+ // We only need to check needsIncrementalBarrier() for the pre-barrier
+ // verifier. During marking isGCMarking() will always be true.
+ return needsIncrementalBarrier() || isGCMarking();
+ }
+
+  // Was this zone collected in the last GC?
+ bool wasCollected() const { return wasCollected_; }
+ void setWasCollected(bool v) { wasCollected_ = v; }
+
+ // Get a number that is incremented whenever this zone is collected, and
+ // possibly at other times too.
+ uint64_t gcNumber();
+
+ void setNeedsIncrementalBarrier(bool needs);
+ const uint32_t* addressOfNeedsIncrementalBarrier() const {
+ return &needsIncrementalBarrier_;
+ }
+
+ static constexpr size_t offsetOfNeedsIncrementalBarrier() {
+ return offsetof(Zone, needsIncrementalBarrier_);
+ }
+
+ js::jit::JitZone* getJitZone(JSContext* cx) {
+ return jitZone_ ? jitZone_ : createJitZone(cx);
+ }
+ js::jit::JitZone* jitZone() { return jitZone_; }
+
+ void prepareForCompacting();
+
+ void sweepAfterMinorGC(JSTracer* trc);
+ void sweepUniqueIds();
+ void sweepWeakMaps();
+ void sweepCompartments(JSFreeOp* fop, bool keepAtleastOne, bool lastGC);
+
+ js::gc::UniqueIdMap& uniqueIds() { return uniqueIds_.ref(); }
+
+ void notifyObservingDebuggers();
+
+ void clearTables();
+
+ void addTenuredAllocsSinceMinorGC(uint32_t allocs) {
+ tenuredAllocsSinceMinorGC_ += allocs;
+ }
+
+ uint32_t getAndResetTenuredAllocsSinceMinorGC() {
+ return tenuredAllocsSinceMinorGC_.exchange(0);
+ }
+
+ mozilla::LinkedList<js::WeakMapBase>& gcWeakMapList() {
+ return gcWeakMapList_.ref();
+ }
+
+ CompartmentVector& compartments() { return compartments_.ref(); }
+
+ js::StringWrapperMap& crossZoneStringWrappers() {
+ return crossZoneStringWrappers_.ref();
+ }
+ const js::StringWrapperMap& crossZoneStringWrappers() const {
+ return crossZoneStringWrappers_.ref();
+ }
+
+ void dropStringWrappersOnGC();
+
+ void sweepAllCrossCompartmentWrappers();
+ static void fixupAllCrossCompartmentWrappersAfterMovingGC(JSTracer* trc);
+
+ GrayRootVector& gcGrayRoots() { return gcGrayRoots_.ref(); }
+
+ mozilla::LinkedList<detail::WeakCacheBase>& weakCaches() {
+ return weakCaches_.ref();
+ }
+ void registerWeakCache(detail::WeakCacheBase* cachep) {
+ weakCaches().insertBack(cachep);
+ }
+
+ void beforeClearDelegate(JSObject* wrapper, JSObject* delegate) {
+ if (needsIncrementalBarrier()) {
+ beforeClearDelegateInternal(wrapper, delegate);
+ }
+ }
+
+ void afterAddDelegate(JSObject* wrapper) {
+ if (needsIncrementalBarrier()) {
+ afterAddDelegateInternal(wrapper);
+ }
+ }
+
+ void beforeClearDelegateInternal(JSObject* wrapper, JSObject* delegate);
+ void afterAddDelegateInternal(JSObject* wrapper);
+ js::gc::WeakKeyTable& gcWeakKeys() { return gcWeakKeys_.ref(); }
+ js::gc::WeakKeyTable& gcNurseryWeakKeys() { return gcNurseryWeakKeys_.ref(); }
+
+ js::gc::WeakKeyTable& gcWeakKeys(const js::gc::Cell* cell) {
+ return cell->isTenured() ? gcWeakKeys() : gcNurseryWeakKeys();
+ }
+
+ // Perform all pending weakmap entry marking for this zone after
+ // transitioning to weak marking mode.
+ js::gc::IncrementalProgress enterWeakMarkingMode(js::GCMarker* marker,
+ js::SliceBudget& budget);
+ void checkWeakMarkingMode();
+
+ // A set of edges from this zone to other zones used during GC to calculate
+ // sweep groups.
+ NodeSet& gcSweepGroupEdges() {
+ return gcGraphEdges; // Defined in GraphNodeBase base class.
+ }
+ bool hasSweepGroupEdgeTo(Zone* otherZone) const {
+ return gcGraphEdges.has(otherZone);
+ }
+ MOZ_MUST_USE bool addSweepGroupEdgeTo(Zone* otherZone) {
+ MOZ_ASSERT(otherZone->isGCMarking());
+ return gcSweepGroupEdges().put(otherZone);
+ }
+ void clearSweepGroupEdges() { gcSweepGroupEdges().clear(); }
+
+ js::RegExpZone& regExps() { return *regExps_.ref(); }
+
+ JS::WeakCache<TypeDescrObjectSet>& typeDescrObjects() {
+ return typeDescrObjects_.ref();
+ }
+
+ bool addTypeDescrObject(JSContext* cx, HandleObject obj);
+
+ js::SparseBitmap& markedAtoms() { return markedAtoms_.ref(); }
+
+ js::AtomSet& atomCache() { return atomCache_.ref(); }
+
+ void purgeAtomCache();
+
+ js::ExternalStringCache& externalStringCache() {
+ return externalStringCache_.ref();
+  }
+
+ js::FunctionToStringCache& functionToStringCache() {
+ return functionToStringCache_.ref();
+ }
+
+ js::PropertyTree& propertyTree() { return propertyTree_.ref(); }
+
+ js::BaseShapeSet& baseShapes() { return baseShapes_.ref(); }
+
+ js::InitialShapeSet& initialShapes() { return initialShapes_.ref(); }
+
+ NurseryShapeVector& nurseryShapes() { return nurseryShapes_.ref(); }
+
+ void fixupInitialShapeTable();
+ void fixupAfterMovingGC();
+ void fixupScriptMapsAfterMovingGC(JSTracer* trc);
+
+ static js::HashNumber UniqueIdToHash(uint64_t uid);
+
+ // Creates a HashNumber based on getUniqueId. Returns false on OOM.
+ MOZ_MUST_USE bool getHashCode(js::gc::Cell* cell, js::HashNumber* hashp);
+
+ // Gets an existing UID in |uidp| if one exists.
+ MOZ_MUST_USE bool maybeGetUniqueId(js::gc::Cell* cell, uint64_t* uidp);
+
+ // Puts an existing UID in |uidp|, or creates a new UID for this Cell and
+ // puts that into |uidp|. Returns false on OOM.
+ MOZ_MUST_USE bool getOrCreateUniqueId(js::gc::Cell* cell, uint64_t* uidp);
+
+ js::HashNumber getHashCodeInfallible(js::gc::Cell* cell);
+ uint64_t getUniqueIdInfallible(js::gc::Cell* cell);
+
+ // Return true if this cell has a UID associated with it.
+ MOZ_MUST_USE bool hasUniqueId(js::gc::Cell* cell);
+
+ // Transfer an id from another cell. This must only be called on behalf of a
+ // moving GC. This method is infallible.
+ void transferUniqueId(js::gc::Cell* tgt, js::gc::Cell* src);
+
+ // Remove any unique id associated with this Cell.
+ void removeUniqueId(js::gc::Cell* cell);
+
+ // When finished parsing off-thread, transfer any UIDs we created in the
+ // off-thread zone into the target zone.
+ void adoptUniqueIds(JS::Zone* source);
+
+ bool keepShapeCaches() const { return keepShapeCaches_; }
+ void setKeepShapeCaches(bool b) { keepShapeCaches_ = b; }
+
+ // Delete an empty compartment after its contents have been merged.
+ void deleteEmptyCompartment(JS::Compartment* comp);
+
+ void clearRootsForShutdownGC();
+ void finishRoots();
+
+ void traceScriptTableRoots(JSTracer* trc);
+
+ void clearScriptCounts(Realm* realm);
+ void clearScriptLCov(Realm* realm);
+
+ // Add the target of JS WeakRef to a kept-alive set maintained by GC.
+ // See: https://tc39.es/proposal-weakrefs/#sec-keepduringjob
+ bool keepDuringJob(HandleObject target);
+
+ void traceKeptObjects(JSTracer* trc);
+
+ // Clear the kept-alive set.
+ // See: https://tc39.es/proposal-weakrefs/#sec-clear-kept-objects
+ void clearKeptObjects();
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkAllCrossCompartmentWrappersAfterMovingGC();
+ void checkStringWrappersAfterMovingGC();
+
+ void checkInitialShapesTableAfterMovingGC();
+ void checkBaseShapeTableAfterMovingGC();
+
+ // Assert that the UniqueId table has been redirected successfully.
+ void checkUniqueIdTableAfterMovingGC();
+
+ void checkScriptMapsAfterMovingGC();
+#endif
+
+#ifdef DEBUG
+  // For testing purposes, return the index of the sweep group that this zone
+  // was swept in during the last GC.
+ unsigned lastSweepGroupIndex() { return gcSweepGroupIndex; }
+#endif
+
+ private:
+ js::jit::JitZone* createJitZone(JSContext* cx);
+
+ bool isQueuedForBackgroundSweep() { return isOnList(); }
+
+ void sweepWeakKeysAfterMinorGC();
+
+ FinalizationRegistrySet& finalizationRegistries() {
+ return finalizationRegistries_.ref();
+ }
+
+ FinalizationRecordMap& finalizationRecordMap() {
+ return finalizationRecordMap_.ref();
+ }
+
+ bool isOnList() const;
+ Zone* nextZone() const;
+
+ js::WeakRefMap& weakRefMap() { return weakRefMap_.ref(); }
+
+ friend bool js::CurrentThreadCanAccessZone(Zone* zone);
+ friend class js::gc::GCRuntime;
+};
+
+} // namespace JS
+
+namespace js {
+namespace gc {
+const char* StateName(JS::Zone::GCState state);
+} // namespace gc
+} // namespace js
+
+#endif // gc_Zone_h
diff --git a/js/src/gc/ZoneAllocator.h b/js/src/gc/ZoneAllocator.h
new file mode 100644
index 0000000000..45ee2ff98e
--- /dev/null
+++ b/js/src/gc/ZoneAllocator.h
@@ -0,0 +1,334 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Public header for allocating memory associated with GC things.
+ */
+
+#ifndef gc_ZoneAllocator_h
+#define gc_ZoneAllocator_h
+
+#include "jstypes.h"
+#include "gc/Cell.h"
+#include "gc/Scheduling.h"
+#include "js/GCAPI.h"
+#include "js/HeapAPI.h"
+#include "js/shadow/Zone.h" // JS::shadow::Zone
+#include "vm/MallocProvider.h"
+
+namespace JS {
+class JS_PUBLIC_API Zone;
+} // namespace JS
+
+namespace js {
+
+class ZoneAllocator;
+
+#ifdef DEBUG
+bool CurrentThreadIsGCFinalizing();
+#endif
+
+namespace gc {
+void MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
+ const HeapSize& heap,
+ const HeapThreshold& threshold,
+ JS::GCReason reason);
+}
+
+// Base class of JS::Zone that provides malloc memory allocation and accounting.
+class ZoneAllocator : public JS::shadow::Zone,
+ public js::MallocProvider<JS::Zone> {
+ protected:
+ explicit ZoneAllocator(JSRuntime* rt, Kind kind);
+ ~ZoneAllocator();
+ void fixupAfterMovingGC();
+
+ public:
+ static ZoneAllocator* from(JS::Zone* zone) {
+ // This is a safe upcast, but the compiler hasn't seen the definition yet.
+ return reinterpret_cast<ZoneAllocator*>(zone);
+ }
+
+ MOZ_MUST_USE void* onOutOfMemory(js::AllocFunction allocFunc,
+ arena_id_t arena, size_t nbytes,
+ void* reallocPtr = nullptr);
+ void reportAllocationOverflow() const;
+
+ void adoptMallocBytes(ZoneAllocator* other) {
+ mallocHeapSize.adopt(other->mallocHeapSize);
+ jitHeapSize.adopt(other->jitHeapSize);
+#ifdef DEBUG
+ mallocTracker.adopt(other->mallocTracker);
+#endif
+ }
+
+ void updateMemoryCountersOnGCStart();
+ void updateGCStartThresholds(gc::GCRuntime& gc,
+ JSGCInvocationKind invocationKind,
+ const js::AutoLockGC& lock);
+ void setGCSliceThresholds(gc::GCRuntime& gc);
+ void clearGCSliceThresholds();
+
+ // Memory accounting APIs for malloc memory owned by GC cells.
+
+ void addCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use) {
+ MOZ_ASSERT(cell);
+ MOZ_ASSERT(nbytes);
+
+ mallocHeapSize.addBytes(nbytes);
+
+#ifdef DEBUG
+ mallocTracker.trackGCMemory(cell, nbytes, use);
+#endif
+
+ maybeTriggerGCOnMalloc();
+ }
+
+ void removeCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use,
+ bool wasSwept = false) {
+ MOZ_ASSERT(cell);
+ MOZ_ASSERT(nbytes);
+ MOZ_ASSERT_IF(CurrentThreadIsGCFinalizing(), wasSwept);
+
+ mallocHeapSize.removeBytes(nbytes, wasSwept);
+
+#ifdef DEBUG
+ mallocTracker.untrackGCMemory(cell, nbytes, use);
+#endif
+ }
+
+ void swapCellMemory(js::gc::Cell* a, js::gc::Cell* b, js::MemoryUse use) {
+#ifdef DEBUG
+ mallocTracker.swapGCMemory(a, b, use);
+#endif
+ }
+
+ void registerNonGCMemory(void* mem, MemoryUse use) {
+#ifdef DEBUG
+ return mallocTracker.registerNonGCMemory(mem, use);
+#endif
+ }
+ void unregisterNonGCMemory(void* mem, MemoryUse use) {
+#ifdef DEBUG
+ return mallocTracker.unregisterNonGCMemory(mem, use);
+#endif
+ }
+ void moveOtherMemory(void* dst, void* src, MemoryUse use) {
+#ifdef DEBUG
+ return mallocTracker.moveNonGCMemory(dst, src, use);
+#endif
+ }
+
+ void incNonGCMemory(void* mem, size_t nbytes, MemoryUse use) {
+ MOZ_ASSERT(nbytes);
+ mallocHeapSize.addBytes(nbytes);
+
+#ifdef DEBUG
+ mallocTracker.incNonGCMemory(mem, nbytes, use);
+#endif
+
+ maybeTriggerGCOnMalloc();
+ }
+ void decNonGCMemory(void* mem, size_t nbytes, MemoryUse use, bool wasSwept) {
+ MOZ_ASSERT(nbytes);
+ MOZ_ASSERT_IF(CurrentThreadIsGCFinalizing(), wasSwept);
+
+ mallocHeapSize.removeBytes(nbytes, wasSwept);
+
+#ifdef DEBUG
+ mallocTracker.decNonGCMemory(mem, nbytes, use);
+#endif
+ }
+
+ // Account for allocations that may be referenced by more than one GC thing.
+ bool addSharedMemory(void* mem, size_t nbytes, MemoryUse use);
+ void removeSharedMemory(void* mem, size_t nbytes, MemoryUse use);
+
+ void incJitMemory(size_t nbytes) {
+ MOZ_ASSERT(nbytes);
+ jitHeapSize.addBytes(nbytes);
+ maybeTriggerZoneGC(jitHeapSize, jitHeapThreshold,
+ JS::GCReason::TOO_MUCH_JIT_CODE);
+ }
+ void decJitMemory(size_t nbytes) {
+ MOZ_ASSERT(nbytes);
+ jitHeapSize.removeBytes(nbytes, true);
+ }
+
+ // Check malloc allocation threshold and trigger a zone GC if necessary.
+ void maybeTriggerGCOnMalloc() {
+ maybeTriggerZoneGC(mallocHeapSize, mallocHeapThreshold,
+ JS::GCReason::TOO_MUCH_MALLOC);
+ }
+
+ private:
+ void maybeTriggerZoneGC(const js::gc::HeapSize& heap,
+ const js::gc::HeapThreshold& threshold,
+ JS::GCReason reason) {
+ if (heap.bytes() >= threshold.startBytes()) {
+ gc::MaybeMallocTriggerZoneGC(runtimeFromAnyThread(), this, heap,
+ threshold, reason);
+ }
+ }
+
+ public:
+ // The size of allocated GC arenas in this zone.
+ gc::HeapSize gcHeapSize;
+
+ // Threshold used to trigger GC based on GC heap size.
+ gc::GCHeapThreshold gcHeapThreshold;
+
+ // Amount of malloc data owned by tenured GC things in this zone, including
+ // external allocations supplied by JS::AddAssociatedMemory.
+ gc::HeapSize mallocHeapSize;
+
+ // Threshold used to trigger GC based on malloc allocations.
+ gc::MallocHeapThreshold mallocHeapThreshold;
+
+  // Amount of executable JIT code owned by GC things in this zone.
+ gc::HeapSize jitHeapSize;
+
+ // Threshold used to trigger GC based on JIT allocations.
+ gc::JitHeapThreshold jitHeapThreshold;
+
+ // Use counts for memory that can be referenced by more than one GC thing.
+ gc::SharedMemoryMap sharedMemoryUseCounts;
+
+ private:
+#ifdef DEBUG
+ // In debug builds, malloc allocations can be tracked to make debugging easier
+ // (possible?) if allocation and free sizes don't balance.
+ gc::MemoryTracker mallocTracker;
+#endif
+
+ friend class gc::GCRuntime;
+};
+
+/*
+ * Allocation policy that performs precise memory tracking on the zone. This
+ * should be used for all containers associated with a GC thing or a zone.
+ *
+ * Since it doesn't hold a JSContext (those may not live long enough), it can't
+ * report out-of-memory conditions itself; the caller must check for OOM and
+ * take the appropriate action.
+ *
+ * FIXME bug 647103 - replace these *AllocPolicy names.
+ */
+class ZoneAllocPolicy : public MallocProvider<ZoneAllocPolicy> {
+ ZoneAllocator* zone_;
+
+#ifdef DEBUG
+ friend class js::gc::MemoryTracker; // Can clear |zone_| on merge.
+#endif
+
+ public:
+ MOZ_IMPLICIT ZoneAllocPolicy(ZoneAllocator* z) : zone_(z) {
+ zone()->registerNonGCMemory(this, MemoryUse::ZoneAllocPolicy);
+ }
+ MOZ_IMPLICIT ZoneAllocPolicy(JS::Zone* z)
+ : ZoneAllocPolicy(ZoneAllocator::from(z)) {}
+ ZoneAllocPolicy(ZoneAllocPolicy& other) : ZoneAllocPolicy(other.zone_) {}
+ ZoneAllocPolicy(ZoneAllocPolicy&& other) : zone_(other.zone_) {
+ zone()->moveOtherMemory(this, &other, MemoryUse::ZoneAllocPolicy);
+ other.zone_ = nullptr;
+ }
+ ~ZoneAllocPolicy() {
+ if (zone_) {
+ zone_->unregisterNonGCMemory(this, MemoryUse::ZoneAllocPolicy);
+ }
+ }
+
+ ZoneAllocPolicy& operator=(const ZoneAllocPolicy& other) {
+ zone()->unregisterNonGCMemory(this, MemoryUse::ZoneAllocPolicy);
+ zone_ = other.zone();
+ zone()->registerNonGCMemory(this, MemoryUse::ZoneAllocPolicy);
+ return *this;
+ }
+ ZoneAllocPolicy& operator=(ZoneAllocPolicy&& other) {
+ MOZ_ASSERT(this != &other);
+ zone()->unregisterNonGCMemory(this, MemoryUse::ZoneAllocPolicy);
+ zone_ = other.zone();
+ zone()->moveOtherMemory(this, &other, MemoryUse::ZoneAllocPolicy);
+ other.zone_ = nullptr;
+ return *this;
+ }
+
+ // Public methods required to fulfill the AllocPolicy interface.
+
+ template <typename T>
+ void free_(T* p, size_t numElems) {
+ if (p) {
+ decMemory(numElems * sizeof(T));
+ js_free(p);
+ }
+ }
+
+ MOZ_MUST_USE bool checkSimulatedOOM() const {
+ return !js::oom::ShouldFailWithOOM();
+ }
+
+ void reportAllocOverflow() const { reportAllocationOverflow(); }
+
+ // Internal methods called by the MallocProvider implementation.
+
+ MOZ_MUST_USE void* onOutOfMemory(js::AllocFunction allocFunc,
+ arena_id_t arena, size_t nbytes,
+ void* reallocPtr = nullptr) {
+ return zone()->onOutOfMemory(allocFunc, arena, nbytes, reallocPtr);
+ }
+ void reportAllocationOverflow() const { zone()->reportAllocationOverflow(); }
+ void updateMallocCounter(size_t nbytes) {
+ zone()->incNonGCMemory(this, nbytes, MemoryUse::ZoneAllocPolicy);
+ }
+
+ private:
+ ZoneAllocator* zone() const {
+ MOZ_ASSERT(zone_);
+ return zone_;
+ }
+ void decMemory(size_t nbytes);
+};
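+
+// Illustrative sketch (hypothetical container, not part of the upstream file):
+// a zone-owned container picks up this policy so its allocations are charged
+// to the zone, and the caller handles OOM itself as described above.
+//
+//   js::Vector<uint32_t, 0, js::ZoneAllocPolicy> ids(
+//       js::ZoneAllocPolicy(zone));
+//   if (!ids.append(42)) {
+//     // Allocation failed; report OOM through a JSContext if one is at hand.
+//   }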
+
+// Functions for memory accounting on the zone.
+
+// Associate malloc memory with a GC thing. This call should be matched by a
+// following call to RemoveCellMemory with the same size and use. The total
+// amount of malloc memory associated with a zone is used to trigger GC.
+//
+// You should use InitReservedSlot / InitObjectPrivate in preference to this
+// where possible.
+
+inline void AddCellMemory(gc::TenuredCell* cell, size_t nbytes, MemoryUse use) {
+ if (nbytes) {
+ ZoneAllocator::from(cell->zone())->addCellMemory(cell, nbytes, use);
+ }
+}
+inline void AddCellMemory(gc::Cell* cell, size_t nbytes, MemoryUse use) {
+ if (cell->isTenured()) {
+ AddCellMemory(&cell->asTenured(), nbytes, use);
+ }
+}
+
+// Remove association between malloc memory and a GC thing. This call should
+// follow a call to AddCellMemory with the same size and use.
+
+inline void RemoveCellMemory(gc::TenuredCell* cell, size_t nbytes,
+ MemoryUse use, bool wasSwept = false) {
+ if (nbytes) {
+ auto zoneBase = ZoneAllocator::from(cell->zoneFromAnyThread());
+ zoneBase->removeCellMemory(cell, nbytes, use, wasSwept);
+ }
+}
+inline void RemoveCellMemory(gc::Cell* cell, size_t nbytes, MemoryUse use,
+ bool wasSwept = false) {
+ if (cell->isTenured()) {
+ RemoveCellMemory(&cell->asTenured(), nbytes, use, wasSwept);
+ }
+}
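+
+// Illustrative sketch (hypothetical cell and buffer, not part of the upstream
+// file): the two calls above are expected to pair up over the lifetime of the
+// associated allocation.
+//
+//   // When attaching an out-of-line buffer of |nbytes| to |cell|, where
+//   // |use| is the js::MemoryUse value describing that buffer:
+//   AddCellMemory(cell, nbytes, use);
+//   // ... and when the buffer is released, e.g. during finalization:
+//   RemoveCellMemory(cell, nbytes, use, /* wasSwept = */ true);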
+
+} // namespace js
+
+#endif // gc_ZoneAllocator_h
diff --git a/js/src/gc/moz.build b/js/src/gc/moz.build
new file mode 100644
index 0000000000..88f5da0339
--- /dev/null
+++ b/js/src/gc/moz.build
@@ -0,0 +1,54 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+FINAL_LIBRARY = "js"
+
+# Includes should be relative to parent path
+LOCAL_INCLUDES += ["!..", ".."]
+
+include("../js-config.mozbuild")
+include("../js-cxxflags.mozbuild")
+
+
+# Generate GC statistics phase data.
+GeneratedFile(
+ "StatsPhasesGenerated.h",
+ script="GenerateStatsPhases.py",
+ entry_point="generateHeader",
+)
+GeneratedFile(
+ "StatsPhasesGenerated.inc",
+ script="GenerateStatsPhases.py",
+ entry_point="generateCpp",
+)
+
+UNIFIED_SOURCES += [
+ "Allocator.cpp",
+ "AtomMarking.cpp",
+ "Barrier.cpp",
+ "FinalizationRegistry.cpp",
+ "GC.cpp",
+ "GCParallelTask.cpp",
+ "Marking.cpp",
+ "Memory.cpp",
+ "Nursery.cpp",
+ "PublicIterators.cpp",
+ "RootMarking.cpp",
+ "Scheduling.cpp",
+ "Statistics.cpp",
+ "Tracer.cpp",
+ "Verifier.cpp",
+ "WeakMap.cpp",
+ "WeakMapPtr.cpp",
+ "Zone.cpp",
+]
+
+# StoreBuffer.cpp cannot be built in unified mode because its template
+# instantiations may or may not be needed depending on what it gets bundled
+# with.
+SOURCES += [
+ "StoreBuffer.cpp",
+]