summaryrefslogtreecommitdiffstats
path: root/js/src/gc
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 19:33:14 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 19:33:14 +0000
commit36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree105e8c98ddea1c1e4784a60a5a6410fa416be2de /js/src/gc
parentInitial commit. (diff)
downloadfirefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.tar.xz
firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.zip
Adding upstream version 115.7.0esr.upstream/115.7.0esrupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js/src/gc')
-rw-r--r--js/src/gc/AllocKind.h326
-rw-r--r--js/src/gc/Allocator.cpp672
-rw-r--r--js/src/gc/Allocator.h173
-rw-r--r--js/src/gc/ArenaList-inl.h327
-rw-r--r--js/src/gc/ArenaList.h386
-rw-r--r--js/src/gc/AtomMarking-inl.h96
-rw-r--r--js/src/gc/AtomMarking.cpp294
-rw-r--r--js/src/gc/AtomMarking.h86
-rw-r--r--js/src/gc/Barrier.cpp144
-rw-r--r--js/src/gc/Barrier.h1257
-rw-r--r--js/src/gc/Cell.h930
-rw-r--r--js/src/gc/ClearEdgesTracer.h27
-rw-r--r--js/src/gc/Compacting.cpp967
-rw-r--r--js/src/gc/FinalizationObservers.cpp509
-rw-r--r--js/src/gc/FinalizationObservers.h129
-rw-r--r--js/src/gc/FindSCCs.h207
-rw-r--r--js/src/gc/GC-inl.h344
-rw-r--r--js/src/gc/GC.cpp5101
-rw-r--r--js/src/gc/GC.h243
-rw-r--r--js/src/gc/GCAPI.cpp798
-rw-r--r--js/src/gc/GCContext-inl.h40
-rw-r--r--js/src/gc/GCContext.h257
-rw-r--r--js/src/gc/GCEnum.h160
-rw-r--r--js/src/gc/GCInternals.h344
-rw-r--r--js/src/gc/GCLock.h110
-rw-r--r--js/src/gc/GCMarker.h598
-rw-r--r--js/src/gc/GCParallelTask.cpp231
-rw-r--r--js/src/gc/GCParallelTask.h246
-rw-r--r--js/src/gc/GCProbes.h49
-rw-r--r--js/src/gc/GCRuntime.h1444
-rw-r--r--js/src/gc/GenerateStatsPhases.py404
-rw-r--r--js/src/gc/HashUtil.h84
-rw-r--r--js/src/gc/Heap-inl.h70
-rw-r--r--js/src/gc/Heap.cpp635
-rw-r--r--js/src/gc/Heap.h846
-rw-r--r--js/src/gc/IteratorUtils.h121
-rw-r--r--js/src/gc/MallocedBlockCache.cpp144
-rw-r--r--js/src/gc/MallocedBlockCache.h91
-rw-r--r--js/src/gc/Marking-inl.h196
-rw-r--r--js/src/gc/Marking.cpp2774
-rw-r--r--js/src/gc/Marking.h150
-rw-r--r--js/src/gc/MaybeRooted.h141
-rw-r--r--js/src/gc/Memory.cpp1050
-rw-r--r--js/src/gc/Memory.h84
-rw-r--r--js/src/gc/Nursery-inl.h172
-rw-r--r--js/src/gc/Nursery.cpp2083
-rw-r--r--js/src/gc/Nursery.h645
-rw-r--r--js/src/gc/NurseryAwareHashMap.h209
-rw-r--r--js/src/gc/ObjectKind-inl.h162
-rw-r--r--js/src/gc/ParallelMarking.cpp359
-rw-r--r--js/src/gc/ParallelMarking.h123
-rw-r--r--js/src/gc/ParallelWork.h136
-rw-r--r--js/src/gc/Policy.h101
-rw-r--r--js/src/gc/Pretenuring.cpp459
-rw-r--r--js/src/gc/Pretenuring.h348
-rw-r--r--js/src/gc/PrivateIterators-inl.h164
-rw-r--r--js/src/gc/PublicIterators.cpp272
-rw-r--r--js/src/gc/PublicIterators.h158
-rw-r--r--js/src/gc/RelocationOverlay.h66
-rw-r--r--js/src/gc/RootMarking.cpp467
-rw-r--r--js/src/gc/Scheduling.cpp873
-rw-r--r--js/src/gc/Scheduling.h917
-rw-r--r--js/src/gc/StableCellHasher-inl.h245
-rw-r--r--js/src/gc/StableCellHasher.h46
-rw-r--r--js/src/gc/Statistics.cpp1811
-rw-r--r--js/src/gc/Statistics.h606
-rw-r--r--js/src/gc/StoreBuffer-inl.h97
-rw-r--r--js/src/gc/StoreBuffer.cpp255
-rw-r--r--js/src/gc/StoreBuffer.h662
-rw-r--r--js/src/gc/Sweeping.cpp2383
-rw-r--r--js/src/gc/Tenuring.cpp1016
-rw-r--r--js/src/gc/Tenuring.h169
-rw-r--r--js/src/gc/TraceKind.h59
-rw-r--r--js/src/gc/TraceMethods-inl.h384
-rw-r--r--js/src/gc/Tracer.cpp297
-rw-r--r--js/src/gc/Tracer.h401
-rw-r--r--js/src/gc/Verifier.cpp1135
-rw-r--r--js/src/gc/WeakMap-inl.h413
-rw-r--r--js/src/gc/WeakMap.cpp175
-rw-r--r--js/src/gc/WeakMap.h354
-rw-r--r--js/src/gc/WeakMapPtr.cpp114
-rw-r--r--js/src/gc/Zone.cpp979
-rw-r--r--js/src/gc/Zone.h653
-rw-r--r--js/src/gc/ZoneAllocator.h354
-rw-r--r--js/src/gc/moz.build61
85 files changed, 43668 insertions, 0 deletions
diff --git a/js/src/gc/AllocKind.h b/js/src/gc/AllocKind.h
new file mode 100644
index 0000000000..3427d0aba6
--- /dev/null
+++ b/js/src/gc/AllocKind.h
@@ -0,0 +1,326 @@
+
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal definition of GC cell kinds.
+ */
+
+#ifndef gc_AllocKind_h
+#define gc_AllocKind_h
+
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/EnumeratedRange.h"
+
+#include <iterator>
+#include <stdint.h>
+
+#include "js/TraceKind.h"
+
+class JSDependentString;
+class JSExternalString;
+class JSFatInlineString;
+class JSLinearString;
+class JSRope;
+class JSThinInlineString;
+
+namespace js {
+
+class CompactPropMap;
+class FatInlineAtom;
+class NormalAtom;
+class NormalPropMap;
+class DictionaryPropMap;
+class DictionaryShape;
+class SharedShape;
+class ProxyShape;
+class WasmGCShape;
+
+namespace gc {
+
+// The GC allocation kinds.
+//
+// These are defined by macros which enumerate the different allocation kinds
+// and supply the following information:
+//
+// - the corresponding AllocKind
+// - their JS::TraceKind
+// - their C++ base type
+// - a C++ type of the correct size
+// - whether they can be finalized on the background thread
+// - whether they can be allocated in the nursery (this is true for foreground
+// finalized objects but these can only actually be allocated in the
+// nursery if JSCLASS_SKIP_NURSERY_FINALIZE is set)
+// - whether they can be compacted
+
+// clang-format off
+#define FOR_EACH_OBJECT_ALLOCKIND(D) \
+ /* AllocKind TraceKind TypeName SizedType BGFinal Nursery Compact */ \
+ D(FUNCTION, Object, JSObject, JSObject_Slots4, true, true, true) \
+ D(FUNCTION_EXTENDED, Object, JSObject, JSObject_Slots6, true, true, true) \
+ D(OBJECT0, Object, JSObject, JSObject_Slots0, false, true, true) \
+ D(OBJECT0_BACKGROUND, Object, JSObject, JSObject_Slots0, true, true, true) \
+ D(OBJECT2, Object, JSObject, JSObject_Slots2, false, true, true) \
+ D(OBJECT2_BACKGROUND, Object, JSObject, JSObject_Slots2, true, true, true) \
+ D(ARRAYBUFFER4, Object, JSObject, JSObject_Slots4, true, true, true) \
+ D(OBJECT4, Object, JSObject, JSObject_Slots4, false, true, true) \
+ D(OBJECT4_BACKGROUND, Object, JSObject, JSObject_Slots4, true, true, true) \
+ D(ARRAYBUFFER8, Object, JSObject, JSObject_Slots8, true, true, true) \
+ D(OBJECT8, Object, JSObject, JSObject_Slots8, false, true, true) \
+ D(OBJECT8_BACKGROUND, Object, JSObject, JSObject_Slots8, true, true, true) \
+ D(ARRAYBUFFER12, Object, JSObject, JSObject_Slots12, true, true, true) \
+ D(OBJECT12, Object, JSObject, JSObject_Slots12, false, true, true) \
+ D(OBJECT12_BACKGROUND, Object, JSObject, JSObject_Slots12, true, true, true) \
+ D(ARRAYBUFFER16, Object, JSObject, JSObject_Slots16, true, true, true) \
+ D(OBJECT16, Object, JSObject, JSObject_Slots16, false, true, true) \
+ D(OBJECT16_BACKGROUND, Object, JSObject, JSObject_Slots16, true, true, true)
+
+#define FOR_EACH_NONOBJECT_NONNURSERY_ALLOCKIND(D) \
+ /* AllocKind TraceKind TypeName SizedType BGFinal Nursery Compact */ \
+ D(SCRIPT, Script, js::BaseScript, js::BaseScript, false, false, true) \
+ D(SHAPE, Shape, js::Shape, js::SizedShape, true, false, true) \
+ D(BASE_SHAPE, BaseShape, js::BaseShape, js::BaseShape, true, false, true) \
+ D(GETTER_SETTER, GetterSetter, js::GetterSetter, js::GetterSetter, true, false, true) \
+ D(COMPACT_PROP_MAP, PropMap, js::CompactPropMap, js::CompactPropMap, true, false, true) \
+ D(NORMAL_PROP_MAP, PropMap, js::NormalPropMap, js::NormalPropMap, true, false, true) \
+ D(DICT_PROP_MAP, PropMap, js::DictionaryPropMap, js::DictionaryPropMap, true, false, true) \
+ D(EXTERNAL_STRING, String, JSExternalString, JSExternalString, true, false, true) \
+ D(FAT_INLINE_ATOM, String, js::FatInlineAtom, js::FatInlineAtom, true, false, false) \
+ D(ATOM, String, js::NormalAtom, js::NormalAtom, true, false, false) \
+ D(SYMBOL, Symbol, JS::Symbol, JS::Symbol, true, false, false) \
+ D(JITCODE, JitCode, js::jit::JitCode, js::jit::JitCode, false, false, false) \
+ D(SCOPE, Scope, js::Scope, js::Scope, true, false, true) \
+ D(REGEXP_SHARED, RegExpShared, js::RegExpShared, js::RegExpShared, true, false, true)
+
+#define FOR_EACH_NONOBJECT_NURSERY_ALLOCKIND(D) \
+ /* AllocKind TraceKind TypeName SizedType BGFinal Nursery Compact */ \
+ D(BIGINT, BigInt, JS::BigInt, JS::BigInt, true, true, true)
+
+#define FOR_EACH_NURSERY_STRING_ALLOCKIND(D) \
+ D(FAT_INLINE_STRING, String, JSFatInlineString, JSFatInlineString, true, true, true) \
+ D(STRING, String, JSString, JSString, true, true, true)
+// clang-format on
+
+#define FOR_EACH_NONOBJECT_ALLOCKIND(D) \
+ FOR_EACH_NONOBJECT_NONNURSERY_ALLOCKIND(D) \
+ FOR_EACH_NONOBJECT_NURSERY_ALLOCKIND(D) \
+ FOR_EACH_NURSERY_STRING_ALLOCKIND(D)
+
+#define FOR_EACH_ALLOCKIND(D) \
+ FOR_EACH_OBJECT_ALLOCKIND(D) \
+ FOR_EACH_NONOBJECT_ALLOCKIND(D)
+
+#define DEFINE_ALLOC_KIND(allocKind, _1, _2, _3, _4, _5, _6) allocKind,
+enum class AllocKind : uint8_t {
+ // clang-format off
+ FOR_EACH_OBJECT_ALLOCKIND(DEFINE_ALLOC_KIND)
+
+ OBJECT_LIMIT,
+ OBJECT_LAST = OBJECT_LIMIT - 1,
+
+ FOR_EACH_NONOBJECT_ALLOCKIND(DEFINE_ALLOC_KIND)
+
+ LIMIT,
+ LAST = LIMIT - 1,
+
+ FIRST = 0,
+ OBJECT_FIRST = FUNCTION // Hardcoded to first object kind.
+ // clang-format on
+};
+#undef DEFINE_ALLOC_KIND
+
+static_assert(int(AllocKind::FIRST) == 0,
+ "Various places depend on AllocKind starting at 0");
+static_assert(int(AllocKind::OBJECT_FIRST) == 0,
+ "OBJECT_FIRST must be defined as the first object kind");
+
+constexpr size_t AllocKindCount = size_t(AllocKind::LIMIT);
+
+/*
+ * A flag specifying either the tenured heap or a default heap (which may be
+ * either the nursery or the tenured heap).
+ *
+ * This allows an allocation site to request a heap based upon the estimated
+ * lifetime or lifetime requirements of objects allocated from that site.
+ *
+ * Order is important as these are numerically compared.
+ */
+enum class Heap : uint8_t { Default = 0, Tenured = 1 };
+
+inline bool IsAllocKind(AllocKind kind) {
+ return kind >= AllocKind::FIRST && kind <= AllocKind::LIMIT;
+}
+
+inline bool IsValidAllocKind(AllocKind kind) {
+ return kind >= AllocKind::FIRST && kind <= AllocKind::LAST;
+}
+
+const char* AllocKindName(AllocKind kind);
+
+inline bool IsObjectAllocKind(AllocKind kind) {
+ return kind >= AllocKind::OBJECT_FIRST && kind <= AllocKind::OBJECT_LAST;
+}
+
+inline bool IsShapeAllocKind(AllocKind kind) {
+ return kind == AllocKind::SHAPE;
+}
+
+// Returns a sequence for use in a range-based for loop,
+// to iterate over all alloc kinds.
+inline auto AllAllocKinds() {
+ return mozilla::MakeEnumeratedRange(AllocKind::FIRST, AllocKind::LIMIT);
+}
+
+// Returns a sequence for use in a range-based for loop,
+// to iterate over all object alloc kinds.
+inline auto ObjectAllocKinds() {
+ return mozilla::MakeEnumeratedRange(AllocKind::OBJECT_FIRST,
+ AllocKind::OBJECT_LIMIT);
+}
+
+// Returns a sequence for use in a range-based for loop,
+// to iterate over alloc kinds from |first| to |limit|, exclusive.
+inline auto SomeAllocKinds(AllocKind first = AllocKind::FIRST,
+ AllocKind limit = AllocKind::LIMIT) {
+ MOZ_ASSERT(IsAllocKind(first), "|first| is not a valid AllocKind!");
+ MOZ_ASSERT(IsAllocKind(limit), "|limit| is not a valid AllocKind!");
+ return mozilla::MakeEnumeratedRange(first, limit);
+}
+
+// AllAllocKindArray<ValueType> gives an enumerated array of ValueTypes,
+// with each index corresponding to a particular alloc kind.
+template <typename ValueType>
+using AllAllocKindArray =
+ mozilla::EnumeratedArray<AllocKind, AllocKind::LIMIT, ValueType>;
+
+// ObjectAllocKindArray<ValueType> gives an enumerated array of ValueTypes,
+// with each index corresponding to a particular object alloc kind.
+template <typename ValueType>
+using ObjectAllocKindArray =
+ mozilla::EnumeratedArray<AllocKind, AllocKind::OBJECT_LIMIT, ValueType>;
+
+/*
+ * Map from C++ type to alloc kind for non-object types. JSObject does not have
+ * a 1:1 mapping, so must use Arena::thingSize.
+ *
+ * The AllocKind is available as MapTypeToAllocKind<SomeType>::kind.
+ *
+ * There are specializations for strings and shapes since more than one derived
+ * type shares the same alloc kind.
+ */
+template <typename T>
+struct MapTypeToAllocKind {};
+#define EXPAND_MAPTYPETOALLOCKIND(allocKind, traceKind, type, sizedType, \
+ bgFinal, nursery, compact) \
+ template <> \
+ struct MapTypeToAllocKind<type> { \
+ static const AllocKind kind = AllocKind::allocKind; \
+ };
+FOR_EACH_NONOBJECT_ALLOCKIND(EXPAND_MAPTYPETOALLOCKIND)
+#undef EXPAND_MAPTYPETOALLOCKIND
+
+template <>
+struct MapTypeToAllocKind<JSDependentString> {
+ static const AllocKind kind = AllocKind::STRING;
+};
+template <>
+struct MapTypeToAllocKind<JSRope> {
+ static const AllocKind kind = AllocKind::STRING;
+};
+template <>
+struct MapTypeToAllocKind<JSLinearString> {
+ static const AllocKind kind = AllocKind::STRING;
+};
+template <>
+struct MapTypeToAllocKind<JSThinInlineString> {
+ static const AllocKind kind = AllocKind::STRING;
+};
+
+template <>
+struct MapTypeToAllocKind<js::SharedShape> {
+ static const AllocKind kind = AllocKind::SHAPE;
+};
+template <>
+struct MapTypeToAllocKind<js::DictionaryShape> {
+ static const AllocKind kind = AllocKind::SHAPE;
+};
+template <>
+struct MapTypeToAllocKind<js::ProxyShape> {
+ static const AllocKind kind = AllocKind::SHAPE;
+};
+template <>
+struct MapTypeToAllocKind<js::WasmGCShape> {
+ static const AllocKind kind = AllocKind::SHAPE;
+};
+
+static inline JS::TraceKind MapAllocToTraceKind(AllocKind kind) {
+ static const JS::TraceKind map[] = {
+#define EXPAND_ELEMENT(allocKind, traceKind, type, sizedType, bgFinal, \
+ nursery, compact) \
+ JS::TraceKind::traceKind,
+ FOR_EACH_ALLOCKIND(EXPAND_ELEMENT)
+#undef EXPAND_ELEMENT
+ };
+
+ static_assert(std::size(map) == AllocKindCount,
+ "AllocKind-to-TraceKind mapping must be in sync");
+ return map[size_t(kind)];
+}
+
+static inline bool IsNurseryAllocable(AllocKind kind) {
+ MOZ_ASSERT(IsValidAllocKind(kind));
+
+ static const bool map[] = {
+#define DEFINE_NURSERY_ALLOCABLE(_1, _2, _3, _4, _5, nursery, _6) nursery,
+ FOR_EACH_ALLOCKIND(DEFINE_NURSERY_ALLOCABLE)
+#undef DEFINE_NURSERY_ALLOCABLE
+ };
+
+ static_assert(std::size(map) == AllocKindCount,
+ "IsNurseryAllocable sanity check");
+ return map[size_t(kind)];
+}
+
+static inline bool IsBackgroundFinalized(AllocKind kind) {
+ MOZ_ASSERT(IsValidAllocKind(kind));
+
+ static const bool map[] = {
+#define DEFINE_BACKGROUND_FINALIZED(_1, _2, _3, _4, bgFinal, _5, _6) bgFinal,
+ FOR_EACH_ALLOCKIND(DEFINE_BACKGROUND_FINALIZED)
+#undef DEFINE_BACKGROUND_FINALIZED
+ };
+
+ static_assert(std::size(map) == AllocKindCount,
+ "IsBackgroundFinalized sanity check");
+ return map[size_t(kind)];
+}
+
+static inline bool IsForegroundFinalized(AllocKind kind) {
+ return !IsBackgroundFinalized(kind);
+}
+
+static inline bool IsCompactingKind(AllocKind kind) {
+ MOZ_ASSERT(IsValidAllocKind(kind));
+
+ static const bool map[] = {
+#define DEFINE_COMPACTING_KIND(_1, _2, _3, _4, _5, _6, compact) compact,
+ FOR_EACH_ALLOCKIND(DEFINE_COMPACTING_KIND)
+#undef DEFINE_COMPACTING_KIND
+ };
+
+ static_assert(std::size(map) == AllocKindCount,
+ "IsCompactingKind sanity check");
+ return map[size_t(kind)];
+}
+
+static inline bool IsMovableKind(AllocKind kind) {
+ return IsNurseryAllocable(kind) || IsCompactingKind(kind);
+}
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_AllocKind_h */
diff --git a/js/src/gc/Allocator.cpp b/js/src/gc/Allocator.cpp
new file mode 100644
index 0000000000..92d4653961
--- /dev/null
+++ b/js/src/gc/Allocator.cpp
@@ -0,0 +1,672 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Allocator.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/OperatorNewExtensions.h"
+#include "mozilla/TimeStamp.h"
+
+#include "gc/GCInternals.h"
+#include "gc/GCLock.h"
+#include "gc/GCProbes.h"
+#include "gc/Nursery.h"
+#include "threading/CpuCount.h"
+#include "util/Poison.h"
+#include "vm/BigIntType.h"
+#include "vm/JSContext.h"
+#include "vm/Runtime.h"
+#include "vm/StringType.h"
+
+#include "gc/ArenaList-inl.h"
+#include "gc/Heap-inl.h"
+#include "gc/PrivateIterators-inl.h"
+#include "vm/JSContext-inl.h"
+
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
+using namespace js;
+using namespace js::gc;
+
+// Return a Heap value that can be compared numerically with an
+// allocation's requested heap to determine whether to allocate in the nursery
+// or the tenured heap.
+//
+// If nursery allocation is allowed this returns Heap::Tenured, meaning only
+// Heap::Tenured allocations will be tenured. If nursery allocation is not
+// allowed this returns Heap::Default, meaning all allocations are tenured.
+static Heap MinHeapToTenure(bool allowNurseryAlloc) {
+ static_assert(Heap::Tenured > Heap::Default);
+ return allowNurseryAlloc ? Heap::Tenured : Heap::Default;
+}
+
+void Zone::setNurseryAllocFlags(bool allocObjects, bool allocStrings,
+ bool allocBigInts) {
+ allocNurseryObjects_ = allocObjects;
+ allocNurseryStrings_ = allocStrings;
+ allocNurseryBigInts_ = allocBigInts;
+
+ minObjectHeapToTenure_ = MinHeapToTenure(allocNurseryObjects());
+ minStringHeapToTenure_ = MinHeapToTenure(allocNurseryStrings());
+ minBigintHeapToTenure_ = MinHeapToTenure(allocNurseryBigInts());
+}
+
+template <JS::TraceKind traceKind, AllowGC allowGC /* = CanGC */>
+void* gc::CellAllocator::AllocNurseryOrTenuredCell(JSContext* cx,
+ AllocKind allocKind,
+ gc::Heap heap,
+ AllocSite* site) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ MOZ_ASSERT_IF(heap != gc::Heap::Tenured, IsNurseryAllocable(allocKind));
+ MOZ_ASSERT(MapAllocToTraceKind(allocKind) == traceKind);
+ MOZ_ASSERT_IF(site && site->initialHeap() == Heap::Tenured,
+ heap == Heap::Tenured);
+
+ size_t thingSize = Arena::thingSize(allocKind);
+
+ JSRuntime* rt = cx->runtime();
+ if (!rt->gc.checkAllocatorState<allowGC>(cx, allocKind)) {
+ return nullptr;
+ }
+
+ if (heap < cx->zone()->minHeapToTenure(traceKind)) {
+ if (!site) {
+ site = cx->zone()->unknownAllocSite(traceKind);
+ }
+
+ void* obj =
+ rt->gc.tryNewNurseryCell<traceKind, allowGC>(cx, thingSize, site);
+ if (obj) {
+ return obj;
+ }
+
+ // Our most common non-jit allocation path is NoGC; thus, if we fail the
+ // alloc and cannot GC, we *must* return nullptr here so that the caller
+ // will do a CanGC allocation to clear the nursery. Failing to do so will
+ // cause all allocations on this path to land in Tenured, and we will not
+ // get the benefit of the nursery.
+ if (!allowGC) {
+ return nullptr;
+ }
+ }
+
+ return GCRuntime::tryNewTenuredThing<allowGC>(cx, allocKind, thingSize);
+}
+
+#define INSTANTIATE_ALLOC_NURSERY_CELL(traceKind, allowGc) \
+ template void* \
+ gc::CellAllocator::AllocNurseryOrTenuredCell<traceKind, allowGc>( \
+ JSContext*, AllocKind, gc::Heap, AllocSite*);
+INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::Object, NoGC)
+INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::Object, CanGC)
+INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::String, NoGC)
+INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::String, CanGC)
+INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::BigInt, NoGC)
+INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::BigInt, CanGC)
+#undef INSTANTIATE_ALLOC_NURSERY_CELL
+
+// Attempt to allocate a new cell in the nursery. If there is not enough room in
+// the nursery or there is an OOM, this method will return nullptr.
+template <JS::TraceKind kind, AllowGC allowGC>
+void* GCRuntime::tryNewNurseryCell(JSContext* cx, size_t thingSize,
+ AllocSite* site) {
+ MOZ_ASSERT(cx->isNurseryAllocAllowed());
+ MOZ_ASSERT(cx->zone() == site->zone());
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ MOZ_ASSERT(cx->zone()->allocKindInNursery(kind));
+
+ void* ptr = cx->nursery().allocateCell(site, thingSize, kind);
+ if (ptr) {
+ return ptr;
+ }
+
+ if constexpr (allowGC) {
+ if (!cx->suppressGC) {
+ cx->runtime()->gc.minorGC(JS::GCReason::OUT_OF_NURSERY);
+
+ // Exceeding gcMaxBytes while tenuring can disable the Nursery.
+ if (cx->zone()->allocKindInNursery(kind)) {
+ return cx->nursery().allocateCell(site, thingSize, kind);
+ }
+ }
+ }
+
+ return nullptr;
+}
+
+template <AllowGC allowGC /* = CanGC */>
+void* gc::CellAllocator::AllocTenuredCell(JSContext* cx, gc::AllocKind kind,
+ size_t size) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ MOZ_ASSERT(!IsNurseryAllocable(kind));
+ MOZ_ASSERT(size == Arena::thingSize(kind));
+ MOZ_ASSERT(
+ size >= gc::MinCellSize,
+ "All allocations must be at least the allocator-imposed minimum size.");
+
+ if (!cx->runtime()->gc.checkAllocatorState<allowGC>(cx, kind)) {
+ return nullptr;
+ }
+
+ return GCRuntime::tryNewTenuredThing<allowGC>(cx, kind, size);
+}
+template void* gc::CellAllocator::AllocTenuredCell<NoGC>(JSContext*, AllocKind,
+ size_t);
+template void* gc::CellAllocator::AllocTenuredCell<CanGC>(JSContext*, AllocKind,
+ size_t);
+
+template <AllowGC allowGC>
+/* static */
+void* GCRuntime::tryNewTenuredThing(JSContext* cx, AllocKind kind,
+ size_t thingSize) {
+ // Bump allocate in the arena's current free-list span.
+ Zone* zone = cx->zone();
+ void* ptr = zone->arenas.freeLists().allocate(kind);
+ if (MOZ_UNLIKELY(!ptr)) {
+ // Get the next available free list and allocate out of it. This may
+ // acquire a new arena, which will lock the chunk list. If there are no
+ // chunks available it may also allocate new memory directly.
+ ptr = refillFreeList(cx, kind);
+
+ if (MOZ_UNLIKELY(!ptr)) {
+ if constexpr (allowGC) {
+ cx->runtime()->gc.attemptLastDitchGC(cx);
+ ptr = tryNewTenuredThing<NoGC>(cx, kind, thingSize);
+ if (ptr) {
+ return ptr;
+ }
+ ReportOutOfMemory(cx);
+ }
+
+ return nullptr;
+ }
+ }
+
+#ifdef DEBUG
+ checkIncrementalZoneState(cx, ptr);
+#endif
+
+ gcprobes::TenuredAlloc(ptr, kind);
+
+ // We count this regardless of the profiler's state, assuming that it costs
+ // just as much to count it, as to check the profiler's state and decide not
+ // to count it.
+ zone->noteTenuredAlloc();
+
+ return ptr;
+}
+
+void GCRuntime::attemptLastDitchGC(JSContext* cx) {
+ // Either there was no memory available for a new chunk or the heap hit its
+ // size limit. Try to perform an all-compartments, non-incremental, shrinking
+ // GC and wait for it to finish.
+
+ if (!lastLastDitchTime.IsNull() &&
+ TimeStamp::Now() - lastLastDitchTime <= tunables.minLastDitchGCPeriod()) {
+ return;
+ }
+
+ JS::PrepareForFullGC(cx);
+ gc(JS::GCOptions::Shrink, JS::GCReason::LAST_DITCH);
+ waitBackgroundAllocEnd();
+ waitBackgroundFreeEnd();
+
+ lastLastDitchTime = mozilla::TimeStamp::Now();
+}
+
+template <AllowGC allowGC>
+bool GCRuntime::checkAllocatorState(JSContext* cx, AllocKind kind) {
+ MOZ_ASSERT_IF(cx->zone()->isAtomsZone(),
+ kind == AllocKind::ATOM || kind == AllocKind::FAT_INLINE_ATOM ||
+ kind == AllocKind::SYMBOL || kind == AllocKind::JITCODE ||
+ kind == AllocKind::SCOPE);
+ MOZ_ASSERT_IF(!cx->zone()->isAtomsZone(),
+ kind != AllocKind::ATOM && kind != AllocKind::FAT_INLINE_ATOM);
+ MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
+
+ if constexpr (allowGC) {
+ // Crash if we could perform a GC action when it is not safe.
+ if (!cx->suppressGC) {
+ cx->verifyIsSafeToGC();
+ }
+
+ gcIfNeededAtAllocation(cx);
+ }
+
+ // For testing out of memory conditions.
+ if (js::oom::ShouldFailWithOOM()) {
+ // If we are doing a fallible allocation, percolate up the OOM instead of
+ // reporting it.
+ if constexpr (allowGC) {
+ ReportOutOfMemory(cx);
+ }
+ return false;
+ }
+
+ return true;
+}
+
+inline void GCRuntime::gcIfNeededAtAllocation(JSContext* cx) {
+#ifdef JS_GC_ZEAL
+ if (needZealousGC()) {
+ runDebugGC();
+ }
+#endif
+
+ // Invoking the interrupt callback can fail and we can't usefully
+ // handle that here. Just check in case we need to collect instead.
+ if (cx->hasAnyPendingInterrupt()) {
+ gcIfRequested();
+ }
+}
+
+#ifdef DEBUG
+void GCRuntime::checkIncrementalZoneState(JSContext* cx, void* ptr) {
+ MOZ_ASSERT(ptr);
+ TenuredCell* cell = reinterpret_cast<TenuredCell*>(ptr);
+ TenuredChunkBase* chunk = detail::GetCellChunkBase(cell);
+ if (cx->zone()->isGCMarkingOrSweeping()) {
+ MOZ_ASSERT(chunk->markBits.isMarkedBlack(cell));
+ } else {
+ MOZ_ASSERT(!chunk->markBits.isMarkedAny(cell));
+ }
+}
+#endif
+
+void* js::gc::AllocateCellInGC(Zone* zone, AllocKind thingKind) {
+ void* ptr = zone->arenas.allocateFromFreeList(thingKind);
+ if (!ptr) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ ptr = GCRuntime::refillFreeListInGC(zone, thingKind);
+ if (!ptr) {
+ oomUnsafe.crash(ChunkSize, "Failed to allocate new chunk during GC");
+ }
+ }
+ return ptr;
+}
+
+// /////////// Arena -> Thing Allocator //////////////////////////////////////
+
+void GCRuntime::startBackgroundAllocTaskIfIdle() {
+ AutoLockHelperThreadState lock;
+ if (!allocTask.wasStarted(lock)) {
+ // Join the previous invocation of the task. This will return immediately
+ // if the thread has never been started.
+ allocTask.joinWithLockHeld(lock);
+ allocTask.startWithLockHeld(lock);
+ }
+}
+
+/* static */
+void* GCRuntime::refillFreeList(JSContext* cx, AllocKind thingKind) {
+ MOZ_ASSERT(cx->zone()->arenas.freeLists().isEmpty(thingKind));
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+
+ // It should not be possible to allocate on the main thread while we are
+ // inside a GC.
+ MOZ_ASSERT(!JS::RuntimeHeapIsBusy(), "allocating while under GC");
+
+ return cx->zone()->arenas.refillFreeListAndAllocate(
+ thingKind, ShouldCheckThresholds::CheckThresholds);
+}
+
+/* static */
+void* GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind) {
+ // Called by compacting GC to refill a free list while we are in a GC.
+ MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
+ MOZ_ASSERT_IF(!JS::RuntimeHeapIsMinorCollecting(),
+ !zone->runtimeFromMainThread()->gc.isBackgroundSweeping());
+
+ return zone->arenas.refillFreeListAndAllocate(
+ thingKind, ShouldCheckThresholds::DontCheckThresholds);
+}
+
+void* ArenaLists::refillFreeListAndAllocate(
+ AllocKind thingKind, ShouldCheckThresholds checkThresholds) {
+ MOZ_ASSERT(freeLists().isEmpty(thingKind));
+
+ JSRuntime* rt = runtimeFromAnyThread();
+
+ mozilla::Maybe<AutoLockGCBgAlloc> maybeLock;
+
+ // See if we can proceed without taking the GC lock.
+ if (concurrentUse(thingKind) != ConcurrentUse::None) {
+ maybeLock.emplace(rt);
+ }
+
+ Arena* arena = arenaList(thingKind).takeNextArena();
+ if (arena) {
+ // Empty arenas should be immediately freed.
+ MOZ_ASSERT(!arena->isEmpty());
+
+ return freeLists().setArenaAndAllocate(arena, thingKind);
+ }
+
+ // Parallel threads have their own ArenaLists, but chunks are shared;
+ // if we haven't already, take the GC lock now to avoid racing.
+ if (maybeLock.isNothing()) {
+ maybeLock.emplace(rt);
+ }
+
+ TenuredChunk* chunk = rt->gc.pickChunk(maybeLock.ref());
+ if (!chunk) {
+ return nullptr;
+ }
+
+ // Although our chunk should definitely have enough space for another arena,
+ // there are other valid reasons why TenuredChunk::allocateArena() may fail.
+ arena = rt->gc.allocateArena(chunk, zone_, thingKind, checkThresholds,
+ maybeLock.ref());
+ if (!arena) {
+ return nullptr;
+ }
+
+ ArenaList& al = arenaList(thingKind);
+ MOZ_ASSERT(al.isCursorAtEnd());
+ al.insertBeforeCursor(arena);
+
+ return freeLists().setArenaAndAllocate(arena, thingKind);
+}
+
+inline void* FreeLists::setArenaAndAllocate(Arena* arena, AllocKind kind) {
+#ifdef DEBUG
+ auto old = freeLists_[kind];
+ if (!old->isEmpty()) {
+ old->getArena()->checkNoMarkedFreeCells();
+ }
+#endif
+
+ FreeSpan* span = arena->getFirstFreeSpan();
+ freeLists_[kind] = span;
+
+ Zone* zone = arena->zone;
+ if (MOZ_UNLIKELY(zone->isGCMarkingOrSweeping())) {
+ arena->arenaAllocatedDuringGC();
+ }
+
+ TenuredCell* thing = span->allocate(Arena::thingSize(kind));
+ MOZ_ASSERT(thing); // This allocation is infallible.
+
+ return thing;
+}
+
+void Arena::arenaAllocatedDuringGC() {
+ // Ensure that anything allocated during the mark or sweep phases of an
+ // incremental GC will be marked black by pre-marking all free cells in the
+ // arena we are about to allocate from.
+
+ MOZ_ASSERT(zone->isGCMarkingOrSweeping());
+ for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
+ MOZ_ASSERT(!cell->isMarkedAny());
+ cell->markBlack();
+ }
+}
+
+// /////////// TenuredChunk -> Arena Allocator ///////////////////////////////
+
+bool GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const {
+ // To minimize memory waste, we do not want to run the background chunk
+ // allocation if we already have some empty chunks or when the runtime has
+ // a small heap size (and therefore likely has a small growth rate).
+ return allocTask.enabled() &&
+ emptyChunks(lock).count() < minEmptyChunkCount(lock) &&
+ (fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
+}
+
+Arena* GCRuntime::allocateArena(TenuredChunk* chunk, Zone* zone,
+ AllocKind thingKind,
+ ShouldCheckThresholds checkThresholds,
+ const AutoLockGC& lock) {
+ MOZ_ASSERT(chunk->hasAvailableArenas());
+
+ // Fail the allocation if we are over our heap size limits.
+ if ((checkThresholds != ShouldCheckThresholds::DontCheckThresholds) &&
+ (heapSize.bytes() >= tunables.gcMaxBytes())) {
+ return nullptr;
+ }
+
+ Arena* arena = chunk->allocateArena(this, zone, thingKind, lock);
+ zone->gcHeapSize.addGCArena(heapSize);
+
+ // Trigger an incremental slice if needed.
+ if (checkThresholds != ShouldCheckThresholds::DontCheckThresholds) {
+ maybeTriggerGCAfterAlloc(zone);
+ }
+
+ return arena;
+}
+
+Arena* TenuredChunk::allocateArena(GCRuntime* gc, Zone* zone,
+ AllocKind thingKind,
+ const AutoLockGC& lock) {
+ if (info.numArenasFreeCommitted == 0) {
+ commitOnePage(gc);
+ MOZ_ASSERT(info.numArenasFreeCommitted == ArenasPerPage);
+ }
+
+ MOZ_ASSERT(info.numArenasFreeCommitted > 0);
+ Arena* arena = fetchNextFreeArena(gc);
+
+ arena->init(zone, thingKind, lock);
+ updateChunkListAfterAlloc(gc, lock);
+
+ verify();
+
+ return arena;
+}
+
+template <size_t N>
+static inline size_t FindFirstBitSet(
+ const mozilla::BitSet<N, uint32_t>& bitset) {
+ MOZ_ASSERT(!bitset.IsEmpty());
+
+ const auto& words = bitset.Storage();
+ for (size_t i = 0; i < words.Length(); i++) {
+ uint32_t word = words[i];
+ if (word) {
+ return i * 32 + mozilla::CountTrailingZeroes32(word);
+ }
+ }
+
+ MOZ_CRASH("No bits found");
+}
+
+void TenuredChunk::commitOnePage(GCRuntime* gc) {
+ MOZ_ASSERT(info.numArenasFreeCommitted == 0);
+ MOZ_ASSERT(info.numArenasFree >= ArenasPerPage);
+
+ uint32_t pageIndex = FindFirstBitSet(decommittedPages);
+ MOZ_ASSERT(decommittedPages[pageIndex]);
+
+ if (DecommitEnabled()) {
+ MarkPagesInUseSoft(pageAddress(pageIndex), PageSize);
+ }
+
+ decommittedPages[pageIndex] = false;
+
+ for (size_t i = 0; i < ArenasPerPage; i++) {
+ size_t arenaIndex = pageIndex * ArenasPerPage + i;
+ MOZ_ASSERT(!freeCommittedArenas[arenaIndex]);
+ freeCommittedArenas[arenaIndex] = true;
+ arenas[arenaIndex].setAsNotAllocated();
+ ++info.numArenasFreeCommitted;
+ gc->updateOnArenaFree();
+ }
+
+ verify();
+}
+
+inline void GCRuntime::updateOnFreeArenaAlloc(const TenuredChunkInfo& info) {
+ MOZ_ASSERT(info.numArenasFreeCommitted <= numArenasFreeCommitted);
+ --numArenasFreeCommitted;
+}
+
+Arena* TenuredChunk::fetchNextFreeArena(GCRuntime* gc) {
+ MOZ_ASSERT(info.numArenasFreeCommitted > 0);
+ MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
+
+ size_t index = FindFirstBitSet(freeCommittedArenas);
+ MOZ_ASSERT(freeCommittedArenas[index]);
+
+ freeCommittedArenas[index] = false;
+ --info.numArenasFreeCommitted;
+ --info.numArenasFree;
+ gc->updateOnFreeArenaAlloc(info);
+
+ return &arenas[index];
+}
+
+// /////////// System -> TenuredChunk Allocator //////////////////////////////
+
+TenuredChunk* GCRuntime::getOrAllocChunk(AutoLockGCBgAlloc& lock) {
+ TenuredChunk* chunk = emptyChunks(lock).pop();
+ if (chunk) {
+ // Reinitialize ChunkBase; arenas are all free and may or may not be
+ // committed.
+ SetMemCheckKind(chunk, sizeof(ChunkBase), MemCheckKind::MakeUndefined);
+ chunk->initBase(rt, nullptr);
+ MOZ_ASSERT(chunk->unused());
+ } else {
+ void* ptr = TenuredChunk::allocate(this);
+ if (!ptr) {
+ return nullptr;
+ }
+
+ chunk = TenuredChunk::emplace(ptr, this, /* allMemoryCommitted = */ true);
+ MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
+ }
+
+ if (wantBackgroundAllocation(lock)) {
+ lock.tryToStartBackgroundAllocation();
+ }
+
+ return chunk;
+}
+
+void GCRuntime::recycleChunk(TenuredChunk* chunk, const AutoLockGC& lock) {
+#ifdef DEBUG
+ MOZ_ASSERT(chunk->unused());
+ chunk->verify();
+#endif
+
+ // Poison ChunkBase to catch use after free.
+ AlwaysPoison(chunk, JS_FREED_CHUNK_PATTERN, sizeof(ChunkBase),
+ MemCheckKind::MakeNoAccess);
+
+ emptyChunks(lock).push(chunk);
+}
+
+TenuredChunk* GCRuntime::pickChunk(AutoLockGCBgAlloc& lock) {
+ if (availableChunks(lock).count()) {
+ return availableChunks(lock).head();
+ }
+
+ TenuredChunk* chunk = getOrAllocChunk(lock);
+ if (!chunk) {
+ return nullptr;
+ }
+
+#ifdef DEBUG
+ chunk->verify();
+ MOZ_ASSERT(chunk->unused());
+ MOZ_ASSERT(!fullChunks(lock).contains(chunk));
+ MOZ_ASSERT(!availableChunks(lock).contains(chunk));
+#endif
+
+ availableChunks(lock).push(chunk);
+
+ return chunk;
+}
+
+BackgroundAllocTask::BackgroundAllocTask(GCRuntime* gc, ChunkPool& pool)
+ : GCParallelTask(gc, gcstats::PhaseKind::NONE),
+ chunkPool_(pool),
+ enabled_(CanUseExtraThreads() && GetCPUCount() >= 2) {
+ // This can occur outside GCs so doesn't have a stats phase.
+}
+
+void BackgroundAllocTask::run(AutoLockHelperThreadState& lock) {
+ AutoUnlockHelperThreadState unlock(lock);
+
+ AutoLockGC gcLock(gc);
+ while (!isCancelled() && gc->wantBackgroundAllocation(gcLock)) {
+ TenuredChunk* chunk;
+ {
+ AutoUnlockGC unlock(gcLock);
+ void* ptr = TenuredChunk::allocate(gc);
+ if (!ptr) {
+ break;
+ }
+ chunk = TenuredChunk::emplace(ptr, gc, /* allMemoryCommitted = */ true);
+ }
+ chunkPool_.ref().push(chunk);
+ }
+}
+
+/* static */
+void* TenuredChunk::allocate(GCRuntime* gc) {
+ void* chunk = MapAlignedPages(ChunkSize, ChunkSize);
+ if (!chunk) {
+ return nullptr;
+ }
+
+ gc->stats().count(gcstats::COUNT_NEW_CHUNK);
+ return chunk;
+}
+
+static inline bool ShouldDecommitNewChunk(bool allMemoryCommitted,
+ const GCSchedulingState& state) {
+ if (!DecommitEnabled()) {
+ return false;
+ }
+
+ return !allMemoryCommitted || !state.inHighFrequencyGCMode();
+}
+
+TenuredChunk* TenuredChunk::emplace(void* ptr, GCRuntime* gc,
+ bool allMemoryCommitted) {
+ /* The chunk may still have some regions marked as no-access. */
+ MOZ_MAKE_MEM_UNDEFINED(ptr, ChunkSize);
+
+ /*
+ * Poison the chunk. Note that decommitAllArenas() below will mark the
+ * arenas as inaccessible (for memory sanitizers).
+ */
+ Poison(ptr, JS_FRESH_TENURED_PATTERN, ChunkSize, MemCheckKind::MakeUndefined);
+
+ TenuredChunk* chunk = new (mozilla::KnownNotNull, ptr) TenuredChunk(gc->rt);
+
+ if (ShouldDecommitNewChunk(allMemoryCommitted, gc->schedulingState)) {
+ // Decommit the arenas. We do this after poisoning so that if the OS does
+ // not have to recycle the pages, we still get the benefit of poisoning.
+ chunk->decommitAllArenas();
+ } else {
+ // The chunk metadata is initialized as decommitted regardless, to avoid
+ // having to initialize the arenas at this time.
+ chunk->initAsDecommitted();
+ }
+
+ chunk->verify();
+
+ return chunk;
+}
+
+void TenuredChunk::decommitAllArenas() {
+ MOZ_ASSERT(unused());
+ MarkPagesUnusedSoft(&arenas[0], ArenasPerChunk * ArenaSize);
+ initAsDecommitted();
+}
+
+void TenuredChunkBase::initAsDecommitted() {
+ // Set the state of all arenas to free and decommitted. They might not
+ // actually be decommitted, but in that case the re-commit operation is a
+ // no-op so it doesn't matter.
+ decommittedPages.SetAll();
+ freeCommittedArenas.ResetAll();
+ info.numArenasFree = ArenasPerChunk;
+ info.numArenasFreeCommitted = 0;
+}
diff --git a/js/src/gc/Allocator.h b/js/src/gc/Allocator.h
new file mode 100644
index 0000000000..1cd3ff6aad
--- /dev/null
+++ b/js/src/gc/Allocator.h
@@ -0,0 +1,173 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Allocator_h
+#define gc_Allocator_h
+
+#include "mozilla/OperatorNewExtensions.h"
+
+#include <stdint.h>
+
+#include "gc/AllocKind.h"
+#include "gc/Cell.h"
+#include "js/Class.h"
+#include "js/TypeDecls.h"
+
+namespace js {
+
+// [SMDOC] AllowGC template parameter
+//
+// AllowGC is a template parameter for functions that support both with and
+// without GC operation.
+//
+// The CanGC variant of the function can trigger a garbage collection, and
+// should set a pending exception on failure.
+//
+// The NoGC variant of the function cannot trigger a garbage collection, and
+// should not set any pending exception on failure. This variant can be called
+// in fast paths where the caller has unrooted pointers. The failure means we
+// need to perform GC to allocate an object. The caller can fall back to a slow
+// path that roots pointers before calling a CanGC variant of the function,
+// without having to clear a pending exception.
+enum AllowGC { NoGC = 0, CanGC = 1 };
+
+namespace gc {
+
+class AllocSite;
+struct Cell;
+class TenuredCell;
+
+// Allocator implementation functions. SpiderMonkey code outside this file
+// should use:
+//
+// cx->newCell<T>(...)
+//
+// or optionally:
+//
+// cx->newCell<T, AllowGC::NoGC>(...)
+//
+// `friend` js::gc::CellAllocator in a subtype T of Cell in order to allow it to
+// be allocated with cx->newCell<T>(...). The friend declaration will allow
+// calling T's constructor.
+//
+// The parameters will be passed to a type-specific function or constructor. For
+// nursery-allocatable types, see e.g. the NewString, NewObject, and NewBigInt
+// methods. For all other types, the parameters will be forwarded to the
+// constructor.
+class CellAllocator {
+ public:
+ template <typename T, js::AllowGC allowGC = CanGC, typename... Args>
+ static T* NewCell(JSContext* cx, Args&&... args);
+
+ private:
+ // Allocate a cell in the nursery, unless |heap| is Heap::Tenured or nursery
+ // allocation is disabled for |traceKind| in the current zone.
+ template <JS::TraceKind traceKind, AllowGC allowGC = CanGC>
+ static void* AllocNurseryOrTenuredCell(JSContext* cx, gc::AllocKind allocKind,
+ gc::Heap heap, AllocSite* site);
+
+ // Allocate a cell in the tenured heap.
+ template <AllowGC allowGC = CanGC>
+ static void* AllocTenuredCell(JSContext* cx, gc::AllocKind kind, size_t size);
+
+ // Allocate a string. Use cx->newCell<T>([heap]).
+ //
+ // Use for nursery-allocatable strings. Returns a value cast to the correct
+ // type. Non-nursery-allocatable strings will go through the fallback
+ // tenured-only allocation path.
+ template <typename T, AllowGC allowGC = CanGC, typename... Args>
+ static T* NewString(JSContext* cx, gc::Heap heap, Args&&... args) {
+ static_assert(std::is_base_of_v<JSString, T>);
+ gc::AllocKind kind = gc::MapTypeToAllocKind<T>::kind;
+ void* ptr = AllocNurseryOrTenuredCell<JS::TraceKind::String, allowGC>(
+ cx, kind, heap, nullptr);
+ if (!ptr) {
+ return nullptr;
+ }
+ return new (mozilla::KnownNotNull, ptr) T(std::forward<Args>(args)...);
+ }
+
+ template <typename T, AllowGC allowGC /* = CanGC */>
+ static T* NewBigInt(JSContext* cx, Heap heap) {
+ void* ptr = AllocNurseryOrTenuredCell<JS::TraceKind::BigInt, allowGC>(
+ cx, gc::AllocKind::BIGINT, heap, nullptr);
+ if (ptr) {
+ return new (mozilla::KnownNotNull, ptr) T();
+ }
+ return nullptr;
+ }
+
+ template <typename T, AllowGC allowGC = CanGC>
+ static T* NewObject(JSContext* cx, gc::AllocKind kind, gc::Heap heap,
+ const JSClass* clasp, gc::AllocSite* site = nullptr) {
+ MOZ_ASSERT(IsObjectAllocKind(kind));
+ MOZ_ASSERT_IF(heap != gc::Heap::Tenured && clasp->hasFinalize() &&
+ !clasp->isProxyObject(),
+ CanNurseryAllocateFinalizedClass(clasp));
+ void* cell = AllocNurseryOrTenuredCell<JS::TraceKind::Object, allowGC>(
+ cx, kind, heap, site);
+ if (!cell) {
+ return nullptr;
+ }
+ return new (mozilla::KnownNotNull, cell) T();
+ }
+
+ // Allocate all other kinds of GC thing.
+ template <typename T, AllowGC allowGC = CanGC, typename... Args>
+ static T* NewTenuredCell(JSContext* cx, Args&&... args) {
+ gc::AllocKind kind = gc::MapTypeToAllocKind<T>::kind;
+ void* cell = AllocTenuredCell<allowGC>(cx, kind, sizeof(T));
+ if (!cell) {
+ return nullptr;
+ }
+ return new (mozilla::KnownNotNull, cell) T(std::forward<Args>(args)...);
+ }
+};
+
+} // namespace gc
+
+// This is the entry point for all allocation, though callers should still not
+// use this directly. Use cx->newCell<T>(...) instead.
+//
+// After a successful allocation the caller must fully initialize the thing
+// before calling any function that can potentially trigger GC. This will
+// ensure that GC tracing never sees junk values stored in the partially
+// initialized thing.
+template <typename T, AllowGC allowGC, typename... Args>
+T* gc::CellAllocator::NewCell(JSContext* cx, Args&&... args) {
+ static_assert(std::is_base_of_v<gc::Cell, T>);
+
+ // Objects. See the valid parameter list in NewObject, above.
+ if constexpr (std::is_base_of_v<JSObject, T>) {
+ return NewObject<T, allowGC>(cx, std::forward<Args>(args)...);
+ }
+
+ // BigInt
+ else if constexpr (std::is_base_of_v<JS::BigInt, T>) {
+ return NewBigInt<T, allowGC>(cx, std::forward<Args>(args)...);
+ }
+
+ // "Normal" strings (all of which can be nursery allocated). Atoms and
+ // external strings will fall through to the generic code below. All other
+ // strings go through NewString, which will forward the arguments to the
+ // appropriate string class's constructor.
+ else if constexpr (std::is_base_of_v<JSString, T> &&
+ !std::is_base_of_v<JSAtom, T> &&
+ !std::is_base_of_v<JSExternalString, T>) {
+ return NewString<T, allowGC>(cx, std::forward<Args>(args)...);
+ }
+
+ else {
+ // Allocate a new tenured GC thing that's not nursery-allocatable. Use
+ // cx->newCell<T>(...), where the parameters are forwarded to the type's
+ // constructor.
+ return NewTenuredCell<T, allowGC>(cx, std::forward<Args>(args)...);
+ }
+}
+
+} // namespace js
+
+#endif // gc_Allocator_h
diff --git a/js/src/gc/ArenaList-inl.h b/js/src/gc/ArenaList-inl.h
new file mode 100644
index 0000000000..92925a227c
--- /dev/null
+++ b/js/src/gc/ArenaList-inl.h
@@ -0,0 +1,327 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_ArenaList_inl_h
+#define gc_ArenaList_inl_h
+
+#include "gc/ArenaList.h"
+
+#include "gc/Heap.h"
+#include "gc/Zone.h"
+
+void js::gc::SortedArenaListSegment::append(Arena* arena) {
+ MOZ_ASSERT(arena);
+ MOZ_ASSERT_IF(head, head->getAllocKind() == arena->getAllocKind());
+ *tailp = arena;
+ tailp = &arena->next;
+}
+
+inline js::gc::ArenaList::ArenaList() { clear(); }
+
+inline js::gc::ArenaList::ArenaList(ArenaList&& other) { moveFrom(other); }
+
+inline js::gc::ArenaList::~ArenaList() { MOZ_ASSERT(isEmpty()); }
+
+void js::gc::ArenaList::moveFrom(ArenaList& other) {
+ other.check();
+
+ head_ = other.head_;
+ cursorp_ = other.isCursorAtHead() ? &head_ : other.cursorp_;
+ other.clear();
+
+ check();
+}
+
+js::gc::ArenaList& js::gc::ArenaList::operator=(ArenaList&& other) {
+ MOZ_ASSERT(isEmpty());
+ moveFrom(other);
+ return *this;
+}
+
+inline js::gc::ArenaList::ArenaList(const SortedArenaListSegment& segment) {
+ head_ = segment.head;
+ cursorp_ = segment.isEmpty() ? &head_ : segment.tailp;
+ check();
+}
+
+// This does checking just of |head_| and |cursorp_|.
+void js::gc::ArenaList::check() const {
+#ifdef DEBUG
+ // If the list is empty, it must have this form.
+ MOZ_ASSERT_IF(!head_, cursorp_ == &head_);
+
+ // If there's an arena following the cursor, it must not be full.
+ Arena* cursor = *cursorp_;
+ MOZ_ASSERT_IF(cursor, cursor->hasFreeThings());
+#endif
+}
+
+void js::gc::ArenaList::clear() {
+ head_ = nullptr;
+ cursorp_ = &head_;
+ check();
+}
+
+bool js::gc::ArenaList::isEmpty() const {
+ check();
+ return !head_;
+}
+
+js::gc::Arena* js::gc::ArenaList::head() const {
+ check();
+ return head_;
+}
+
+bool js::gc::ArenaList::isCursorAtHead() const {
+ check();
+ return cursorp_ == &head_;
+}
+
+bool js::gc::ArenaList::isCursorAtEnd() const {
+ check();
+ return !*cursorp_;
+}
+
+js::gc::Arena* js::gc::ArenaList::arenaAfterCursor() const {
+ check();
+ return *cursorp_;
+}
+
+js::gc::Arena* js::gc::ArenaList::takeNextArena() {
+ check();
+ Arena* arena = *cursorp_;
+ if (!arena) {
+ return nullptr;
+ }
+ cursorp_ = &arena->next;
+ check();
+ return arena;
+}
+
+void js::gc::ArenaList::insertAtCursor(Arena* a) {
+ check();
+ a->next = *cursorp_;
+ *cursorp_ = a;
+ // At this point, the cursor is sitting before |a|. Move it after |a|
+ // if necessary.
+ if (!a->hasFreeThings()) {
+ cursorp_ = &a->next;
+ }
+ check();
+}
+
+void js::gc::ArenaList::insertBeforeCursor(Arena* a) {
+ check();
+ a->next = *cursorp_;
+ *cursorp_ = a;
+ cursorp_ = &a->next;
+ check();
+}
+
+js::gc::ArenaList& js::gc::ArenaList::insertListWithCursorAtEnd(
+ ArenaList& other) {
+ check();
+ other.check();
+ MOZ_ASSERT(other.isCursorAtEnd());
+
+ if (other.isEmpty()) {
+ return *this;
+ }
+
+ // Insert the full arenas of |other| after those of |this|.
+ *other.cursorp_ = *cursorp_;
+ *cursorp_ = other.head_;
+ cursorp_ = other.cursorp_;
+ check();
+
+ other.clear();
+ return *this;
+}
+
+js::gc::Arena* js::gc::ArenaList::takeFirstArena() {
+ check();
+ Arena* arena = head_;
+ if (!arena) {
+ return nullptr;
+ }
+
+ head_ = arena->next;
+ if (cursorp_ == &arena->next) {
+ cursorp_ = &head_;
+ }
+
+ check();
+ return arena;
+}
+
+js::gc::SortedArenaList::SortedArenaList(size_t thingsPerArena) {
+ reset(thingsPerArena);
+}
+
+void js::gc::SortedArenaList::setThingsPerArena(size_t thingsPerArena) {
+ MOZ_ASSERT(thingsPerArena && thingsPerArena <= MaxThingsPerArena);
+ thingsPerArena_ = thingsPerArena;
+}
+
+void js::gc::SortedArenaList::reset(size_t thingsPerArena) {
+ setThingsPerArena(thingsPerArena);
+ // Initialize the segments.
+ for (size_t i = 0; i <= thingsPerArena; ++i) {
+ segments[i].clear();
+ }
+}
+
+void js::gc::SortedArenaList::insertAt(Arena* arena, size_t nfree) {
+ MOZ_ASSERT(nfree <= thingsPerArena_);
+ segments[nfree].append(arena);
+}
+
+void js::gc::SortedArenaList::extractEmpty(Arena** empty) {
+ SortedArenaListSegment& segment = segments[thingsPerArena_];
+ if (segment.head) {
+ *segment.tailp = *empty;
+ *empty = segment.head;
+ segment.clear();
+ }
+}
+
+js::gc::ArenaList js::gc::SortedArenaList::toArenaList() {
+ // Link the non-empty segment tails up to the non-empty segment heads.
+ size_t tailIndex = 0;
+ for (size_t headIndex = 1; headIndex <= thingsPerArena_; ++headIndex) {
+ if (headAt(headIndex)) {
+ segments[tailIndex].linkTo(headAt(headIndex));
+ tailIndex = headIndex;
+ }
+ }
+ // Point the tail of the final non-empty segment at null. Note that if
+ // the list is empty, this will just set segments[0].head to null.
+ segments[tailIndex].linkTo(nullptr);
+ // Create an ArenaList with head and cursor set to the head and tail of
+ // the first segment (if that segment is empty, only the head is used).
+ return ArenaList(segments[0]);
+}
+
+#ifdef DEBUG
+
+bool js::gc::FreeLists::allEmpty() const {
+ for (auto i : AllAllocKinds()) {
+ if (!isEmpty(i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool js::gc::FreeLists::isEmpty(AllocKind kind) const {
+ return freeLists_[kind]->isEmpty();
+}
+
+#endif
+
+void js::gc::FreeLists::clear() {
+ for (auto i : AllAllocKinds()) {
+#ifdef DEBUG
+ auto old = freeLists_[i];
+ if (!old->isEmpty()) {
+ old->getArena()->checkNoMarkedFreeCells();
+ }
+#endif
+ freeLists_[i] = &emptySentinel;
+ }
+}
+
+js::gc::TenuredCell* js::gc::FreeLists::allocate(AllocKind kind) {
+ return freeLists_[kind]->allocate(Arena::thingSize(kind));
+}
+
+void js::gc::FreeLists::unmarkPreMarkedFreeCells(AllocKind kind) {
+ FreeSpan* freeSpan = freeLists_[kind];
+ if (!freeSpan->isEmpty()) {
+ freeSpan->getArena()->unmarkPreMarkedFreeCells();
+ }
+}
+
+JSRuntime* js::gc::ArenaLists::runtime() {
+ return zone_->runtimeFromMainThread();
+}
+
+JSRuntime* js::gc::ArenaLists::runtimeFromAnyThread() {
+ return zone_->runtimeFromAnyThread();
+}
+
+js::gc::Arena* js::gc::ArenaLists::getFirstArena(AllocKind thingKind) const {
+ return arenaList(thingKind).head();
+}
+
+js::gc::Arena* js::gc::ArenaLists::getFirstCollectingArena(
+ AllocKind thingKind) const {
+ return collectingArenaList(thingKind).head();
+}
+
+js::gc::Arena* js::gc::ArenaLists::getFirstSweptArena(
+ AllocKind thingKind) const {
+ if (thingKind != incrementalSweptArenaKind.ref()) {
+ return nullptr;
+ }
+ return incrementalSweptArenas.ref().head();
+}
+
+js::gc::Arena* js::gc::ArenaLists::getArenaAfterCursor(
+ AllocKind thingKind) const {
+ return arenaList(thingKind).arenaAfterCursor();
+}
+
+bool js::gc::ArenaLists::arenaListsAreEmpty() const {
+ for (auto i : AllAllocKinds()) {
+ /*
+ * The arena cannot be empty if the background finalization is not yet
+ * done.
+ */
+ if (concurrentUse(i) == ConcurrentUse::BackgroundFinalize) {
+ return false;
+ }
+ if (!arenaList(i).isEmpty()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool js::gc::ArenaLists::doneBackgroundFinalize(AllocKind kind) const {
+ return concurrentUse(kind) != ConcurrentUse::BackgroundFinalize;
+}
+
+bool js::gc::ArenaLists::needBackgroundFinalizeWait(AllocKind kind) const {
+ return concurrentUse(kind) == ConcurrentUse::BackgroundFinalize;
+}
+
+void js::gc::ArenaLists::clearFreeLists() { freeLists().clear(); }
+
+MOZ_ALWAYS_INLINE js::gc::TenuredCell* js::gc::ArenaLists::allocateFromFreeList(
+ AllocKind thingKind) {
+ return freeLists().allocate(thingKind);
+}
+
+void js::gc::ArenaLists::unmarkPreMarkedFreeCells() {
+ for (auto i : AllAllocKinds()) {
+ freeLists().unmarkPreMarkedFreeCells(i);
+ }
+}
+
+void js::gc::ArenaLists::checkEmptyFreeLists() {
+ MOZ_ASSERT(freeLists().allEmpty());
+}
+
+void js::gc::ArenaLists::checkEmptyArenaLists() {
+#ifdef DEBUG
+ for (auto i : AllAllocKinds()) {
+ checkEmptyArenaList(i);
+ }
+#endif
+}
+
+#endif // gc_ArenaList_inl_h
diff --git a/js/src/gc/ArenaList.h b/js/src/gc/ArenaList.h
new file mode 100644
index 0000000000..4ae4afde71
--- /dev/null
+++ b/js/src/gc/ArenaList.h
@@ -0,0 +1,386 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal definitions of ArenaList and associated heap data structures.
+ */
+
+#ifndef gc_ArenaList_h
+#define gc_ArenaList_h
+
+#include "gc/AllocKind.h"
+#include "js/GCAPI.h"
+#include "js/HeapAPI.h"
+#include "js/TypeDecls.h"
+#include "threading/ProtectedData.h"
+
+namespace js {
+
+class Nursery;
+class SliceBudget;
+
+namespace gcstats {
+struct Statistics;
+}
+
+namespace gc {
+
+class Arena;
+class BackgroundUnmarkTask;
+struct FinalizePhase;
+class FreeSpan;
+class TenuredCell;
+class TenuringTracer;
+
+/*
+ * A single segment of a SortedArenaList. Each segment has a head and a tail,
+ * which track the start and end of a segment for O(1) append and concatenation.
+ */
+struct SortedArenaListSegment {
+ Arena* head;
+ Arena** tailp;
+
+ void clear() {
+ head = nullptr;
+ tailp = &head;
+ }
+
+ bool isEmpty() const { return tailp == &head; }
+
+ // Appends |arena| to this segment.
+ inline void append(Arena* arena);
+
+ // Points the tail of this segment at |arena|, which may be null. Note
+ // that this does not change the tail itself, but merely which arena
+ // follows it. This essentially turns the tail into a cursor (see also the
+ // description of ArenaList), but from the perspective of a SortedArenaList
+ // this makes no difference.
+ void linkTo(Arena* arena) { *tailp = arena; }
+};
+
+/*
+ * Arena lists contain a singly linked lists of arenas starting from a head
+ * pointer.
+ *
+ * They also have a cursor, which conceptually lies on arena boundaries,
+ * i.e. before the first arena, between two arenas, or after the last arena.
+ *
+ * Arenas are usually sorted in order of increasing free space, with the cursor
+ * following the Arena currently being allocated from. This ordering should not
+ * be treated as an invariant, however, as the free lists may be cleared,
+ * leaving arenas previously used for allocation partially full. Sorting order
+ * is restored during sweeping.
+ *
+ * Arenas following the cursor should not be full.
+ */
+class ArenaList {
+ // The cursor is implemented via an indirect pointer, |cursorp_|, to allow
+ // for efficient list insertion at the cursor point and other list
+ // manipulations.
+ //
+ // - If the list is empty: |head| is null, |cursorp_| points to |head|, and
+ // therefore |*cursorp_| is null.
+ //
+ // - If the list is not empty: |head| is non-null, and...
+ //
+ // - If the cursor is at the start of the list: |cursorp_| points to
+ // |head|, and therefore |*cursorp_| points to the first arena.
+ //
+ // - If cursor is at the end of the list: |cursorp_| points to the |next|
+ // field of the last arena, and therefore |*cursorp_| is null.
+ //
+ // - If the cursor is at neither the start nor the end of the list:
+ // |cursorp_| points to the |next| field of the arena preceding the
+ // cursor, and therefore |*cursorp_| points to the arena following the
+ // cursor.
+ //
+ // |cursorp_| is never null.
+ //
+ Arena* head_;
+ Arena** cursorp_;
+
+ // Transfers the contents of |other| to this list and clears |other|.
+ inline void moveFrom(ArenaList& other);
+
+ public:
+ inline ArenaList();
+ inline ArenaList(ArenaList&& other);
+ inline ~ArenaList();
+
+ inline ArenaList& operator=(ArenaList&& other);
+
+ // It doesn't make sense for arenas to be present in more than one list, so
+ // list copy operations are not provided.
+ ArenaList(const ArenaList& other) = delete;
+ ArenaList& operator=(const ArenaList& other) = delete;
+
+ inline explicit ArenaList(const SortedArenaListSegment& segment);
+
+ inline void check() const;
+
+ inline void clear();
+ inline bool isEmpty() const;
+
+ // This returns nullptr if the list is empty.
+ inline Arena* head() const;
+
+ inline bool isCursorAtHead() const;
+ inline bool isCursorAtEnd() const;
+
+ // This can return nullptr.
+ inline Arena* arenaAfterCursor() const;
+
+ // This returns the arena after the cursor and moves the cursor past it.
+ inline Arena* takeNextArena();
+
+ // This does two things.
+ // - Inserts |a| at the cursor.
+ // - Leaves the cursor sitting just before |a|, if |a| is not full, or just
+ // after |a|, if |a| is full.
+ inline void insertAtCursor(Arena* a);
+
+ // Inserts |a| at the cursor, then moves the cursor past it.
+ inline void insertBeforeCursor(Arena* a);
+
+ // This inserts the contents of |other|, which must be full, at the cursor of
+ // |this| and clears |other|.
+ inline ArenaList& insertListWithCursorAtEnd(ArenaList& other);
+
+ inline Arena* takeFirstArena();
+
+ Arena* removeRemainingArenas(Arena** arenap);
+ Arena** pickArenasToRelocate(size_t& arenaTotalOut, size_t& relocTotalOut);
+ Arena* relocateArenas(Arena* toRelocate, Arena* relocated,
+ js::SliceBudget& sliceBudget,
+ gcstats::Statistics& stats);
+
+#ifdef DEBUG
+ void dump();
+#endif
+};
+
+/*
+ * A class that holds arenas in sorted order by appending arenas to specific
+ * segments. Each segment has a head and a tail, which can be linked up to
+ * other segments to create a contiguous ArenaList.
+ */
+class SortedArenaList {
+ public:
+ // The minimum size, in bytes, of a GC thing.
+ static const size_t MinThingSize = 16;
+
+ static_assert(ArenaSize <= 4096,
+ "When increasing the Arena size, please consider how"
+ " this will affect the size of a SortedArenaList.");
+
+ static_assert(MinThingSize >= 16,
+ "When decreasing the minimum thing size, please consider"
+ " how this will affect the size of a SortedArenaList.");
+
+ private:
+ // The maximum number of GC things that an arena can hold.
+ static const size_t MaxThingsPerArena =
+ (ArenaSize - ArenaHeaderSize) / MinThingSize;
+
+ size_t thingsPerArena_;
+ SortedArenaListSegment segments[MaxThingsPerArena + 1];
+
+ // Convenience functions to get the nth head and tail.
+ Arena* headAt(size_t n) { return segments[n].head; }
+ Arena** tailAt(size_t n) { return segments[n].tailp; }
+
+ public:
+ inline explicit SortedArenaList(size_t thingsPerArena = MaxThingsPerArena);
+
+ inline void setThingsPerArena(size_t thingsPerArena);
+
+ // Resets the first |thingsPerArena| segments of this list for further use.
+ inline void reset(size_t thingsPerArena = MaxThingsPerArena);
+
+ // Inserts an arena, which has room for |nfree| more things, in its segment.
+ inline void insertAt(Arena* arena, size_t nfree);
+
+ // Remove all empty arenas, inserting them as a linked list.
+ inline void extractEmpty(Arena** empty);
+
+ // Links up the tail of each non-empty segment to the head of the next
+ // non-empty segment, creating a contiguous list that is returned as an
+ // ArenaList. This is not a destructive operation: neither the head nor tail
+ // of any segment is modified. However, note that the Arenas in the
+ // resulting ArenaList should be treated as read-only unless the
+ // SortedArenaList is no longer needed: inserting or removing arenas would
+ // invalidate the SortedArenaList.
+ inline ArenaList toArenaList();
+};
+
+enum class ShouldCheckThresholds {
+ DontCheckThresholds = 0,
+ CheckThresholds = 1
+};
+
+// For each arena kind its free list is represented as the first span with free
+// things. Initially all the spans are initialized as empty. After we find a new
+// arena with available things we move its first free span into the list and set
+// the arena as fully allocated. That way we do not need to update the arena
+// after the initial allocation. When starting the GC we only move the head of
+// the list of spans back to the arena only for the arena that was not
+// fully allocated.
+class FreeLists {
+ AllAllocKindArray<FreeSpan*> freeLists_;
+
+ public:
+ // Because the JITs can allocate from the free lists, they cannot be null.
+ // We use a placeholder FreeSpan that is empty (and wihout an associated
+ // Arena) so the JITs can fall back gracefully.
+ static FreeSpan emptySentinel;
+
+ FreeLists();
+
+#ifdef DEBUG
+ inline bool allEmpty() const;
+ inline bool isEmpty(AllocKind kind) const;
+#endif
+
+ inline void clear();
+
+ MOZ_ALWAYS_INLINE TenuredCell* allocate(AllocKind kind);
+
+ inline void* setArenaAndAllocate(Arena* arena, AllocKind kind);
+
+ inline void unmarkPreMarkedFreeCells(AllocKind kind);
+
+ FreeSpan** addressOfFreeList(AllocKind thingKind) {
+ return &freeLists_[thingKind];
+ }
+};
+
+class ArenaLists {
+ enum class ConcurrentUse : uint32_t { None, BackgroundFinalize };
+
+ using ConcurrentUseState =
+ mozilla::Atomic<ConcurrentUse, mozilla::SequentiallyConsistent>;
+
+ JS::Zone* zone_;
+
+ // Whether this structure can be accessed by other threads.
+ UnprotectedData<AllAllocKindArray<ConcurrentUseState>> concurrentUseState_;
+
+ MainThreadData<FreeLists> freeLists_;
+
+ /* The main list of arenas for each alloc kind. */
+ MainThreadOrGCTaskData<AllAllocKindArray<ArenaList>> arenaLists_;
+
+ /*
+ * Arenas which are currently being collected. The collector can move arenas
+ * from arenaLists_ here and back again at various points in collection.
+ */
+ MainThreadOrGCTaskData<AllAllocKindArray<ArenaList>> collectingArenaLists_;
+
+ /* During incremental sweeping, a list of the arenas already swept. */
+ MainThreadOrGCTaskData<AllocKind> incrementalSweptArenaKind;
+ MainThreadOrGCTaskData<ArenaList> incrementalSweptArenas;
+
+ // Arena lists which have yet to be swept, but need additional foreground
+ // processing before they are swept.
+ MainThreadData<Arena*> gcCompactPropMapArenasToUpdate;
+ MainThreadData<Arena*> gcNormalPropMapArenasToUpdate;
+
+ // The list of empty arenas which are collected during the sweep phase and
+ // released at the end of sweeping every sweep group.
+ MainThreadOrGCTaskData<Arena*> savedEmptyArenas;
+
+ public:
+ explicit ArenaLists(JS::Zone* zone);
+ ~ArenaLists();
+
+ FreeLists& freeLists() { return freeLists_.ref(); }
+ const FreeLists& freeLists() const { return freeLists_.ref(); }
+
+ FreeSpan** addressOfFreeList(AllocKind thingKind) {
+ return freeLists_.refNoCheck().addressOfFreeList(thingKind);
+ }
+
+ inline Arena* getFirstArena(AllocKind thingKind) const;
+ inline Arena* getFirstCollectingArena(AllocKind thingKind) const;
+ inline Arena* getFirstSweptArena(AllocKind thingKind) const;
+ inline Arena* getArenaAfterCursor(AllocKind thingKind) const;
+
+ inline bool arenaListsAreEmpty() const;
+
+ inline bool doneBackgroundFinalize(AllocKind kind) const;
+ inline bool needBackgroundFinalizeWait(AllocKind kind) const;
+
+ /* Clear the free lists so we won't try to allocate from swept arenas. */
+ inline void clearFreeLists();
+
+ inline void unmarkPreMarkedFreeCells();
+
+ MOZ_ALWAYS_INLINE TenuredCell* allocateFromFreeList(AllocKind thingKind);
+
+ inline void checkEmptyFreeLists();
+ inline void checkEmptyArenaLists();
+ inline void checkEmptyFreeList(AllocKind kind);
+
+ void checkEmptyArenaList(AllocKind kind);
+
+ bool relocateArenas(Arena*& relocatedListOut, JS::GCReason reason,
+ js::SliceBudget& sliceBudget, gcstats::Statistics& stats);
+
+ void queueForegroundObjectsForSweep(JS::GCContext* gcx);
+ void queueForegroundThingsForSweep();
+
+ Arena* takeSweptEmptyArenas();
+
+ void setIncrementalSweptArenas(AllocKind kind, SortedArenaList& arenas);
+ void clearIncrementalSweptArenas();
+
+ void mergeFinalizedArenas(AllocKind thingKind,
+ SortedArenaList& finalizedArenas);
+
+ void moveArenasToCollectingLists();
+ void mergeArenasFromCollectingLists();
+
+ void checkGCStateNotInUse();
+ void checkSweepStateNotInUse();
+ void checkNoArenasToUpdate();
+ void checkNoArenasToUpdateForKind(AllocKind kind);
+
+ private:
+ ArenaList& arenaList(AllocKind i) { return arenaLists_.ref()[i]; }
+ const ArenaList& arenaList(AllocKind i) const { return arenaLists_.ref()[i]; }
+
+ ArenaList& collectingArenaList(AllocKind i) {
+ return collectingArenaLists_.ref()[i];
+ }
+ const ArenaList& collectingArenaList(AllocKind i) const {
+ return collectingArenaLists_.ref()[i];
+ }
+
+ ConcurrentUseState& concurrentUse(AllocKind i) {
+ return concurrentUseState_.ref()[i];
+ }
+ ConcurrentUse concurrentUse(AllocKind i) const {
+ return concurrentUseState_.ref()[i];
+ }
+
+ inline JSRuntime* runtime();
+ inline JSRuntime* runtimeFromAnyThread();
+
+ void initBackgroundSweep(AllocKind thingKind);
+
+ void* refillFreeListAndAllocate(AllocKind thingKind,
+ ShouldCheckThresholds checkThresholds);
+
+ friend class BackgroundUnmarkTask;
+ friend class GCRuntime;
+ friend class js::Nursery;
+ friend class TenuringTracer;
+};
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_ArenaList_h */
diff --git a/js/src/gc/AtomMarking-inl.h b/js/src/gc/AtomMarking-inl.h
new file mode 100644
index 0000000000..9b6e869e72
--- /dev/null
+++ b/js/src/gc/AtomMarking-inl.h
@@ -0,0 +1,96 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/AtomMarking.h"
+
+#include "mozilla/Assertions.h"
+
+#include <type_traits>
+
+#include "vm/JSContext.h"
+#include "vm/StringType.h"
+#include "vm/SymbolType.h"
+
+#include "gc/Heap-inl.h"
+
+namespace js {
+namespace gc {
+
+// Compute |thing|'s index into the dense atom mark bitmap: the arena's
+// reserved slice (atomBitmapStart, in words) plus the cell's bit offset
+// within the arena.
+inline size_t GetAtomBit(TenuredCell* thing) {
+  MOZ_ASSERT(thing->zoneFromAnyThread()->isAtomsZone());
+  Arena* arena = thing->arena();
+  uintptr_t offset = reinterpret_cast<uintptr_t>(thing) - arena->address();
+  size_t bitInArena = offset / CellBytesPerMarkBit;
+  return arena->atomBitmapStart() * JS_BITS_PER_WORD + bitInArena;
+}
+
+// Mark |thing| (an atom or symbol) as reachable from |cx|'s zone by setting
+// its bit in the zone's atom mark bitmap. Returns false only on the fallible
+// path when the bitmap cannot be grown; the infallible path always returns
+// true.
+template <typename T, bool Fallible>
+MOZ_ALWAYS_INLINE bool AtomMarkingRuntime::inlinedMarkAtomInternal(
+    JSContext* cx, T* thing) {
+  static_assert(std::is_same_v<T, JSAtom> || std::is_same_v<T, JS::Symbol>,
+                "Should only be called with JSAtom* or JS::Symbol* argument");
+
+  MOZ_ASSERT(cx->isMainThreadContext());
+  MOZ_ASSERT(cx->zone());
+  MOZ_ASSERT(!cx->zone()->isAtomsZone());
+
+  MOZ_ASSERT(thing);
+  js::gc::TenuredCell* cell = &thing->asTenured();
+  MOZ_ASSERT(cell->zoneFromAnyThread()->isAtomsZone());
+
+  // This doesn't check for pinned atoms since that might require taking a
+  // lock. This is not required for correctness.
+  if (thing->isPermanentAndMayBeShared()) {
+    return true;
+  }
+
+  size_t bit = GetAtomBit(cell);
+  MOZ_ASSERT(bit / JS_BITS_PER_WORD < allocatedWords);
+
+  // |Fallible| is a compile-time constant: use if constexpr so only the
+  // selected branch is instantiated and the infallible path carries no dead
+  // failure-handling code.
+  if constexpr (Fallible) {
+    if (!cx->zone()->markedAtoms().setBitFallible(bit)) {
+      return false;
+    }
+  } else {
+    cx->zone()->markedAtoms().setBit(bit);
+  }
+
+  // Trigger a read barrier on the atom, in case there is an incremental
+  // GC in progress. This is necessary if the atom is being marked
+  // because a reference to it was obtained from another zone which is
+  // not being collected by the incremental GC.
+  ReadBarrier(thing);
+
+  // Children of the thing also need to be marked in the context's zone.
+  // We don't have a JSTracer for this so manually handle the cases in which
+  // an atom can reference other atoms.
+  markChildren(cx, thing);
+
+  return true;
+}
+
+// Atoms reference no other atoms or symbols, so there are no children to mark.
+void AtomMarkingRuntime::markChildren(JSContext* cx, JSAtom*) {}
+
+// A symbol's only atom child is its description string, if it has one.
+void AtomMarkingRuntime::markChildren(JSContext* cx, JS::Symbol* symbol) {
+  JSAtom* description = symbol->description();
+  if (!description) {
+    return;
+  }
+  markAtom(cx, description);
+}
+
+// Infallible marking entry point: asserts that the infallible internal path
+// reports success.
+template <typename T>
+MOZ_ALWAYS_INLINE void AtomMarkingRuntime::inlinedMarkAtom(JSContext* cx,
+                                                           T* thing) {
+  MOZ_ALWAYS_TRUE((inlinedMarkAtomInternal<T, false>(cx, thing)));
+}
+
+// Fallible marking entry point: returns false if the bit could not be set in
+// the zone's marked-atoms bitmap (see setBitFallible in the internal path).
+template <typename T>
+MOZ_ALWAYS_INLINE bool AtomMarkingRuntime::inlinedMarkAtomFallible(
+    JSContext* cx, T* thing) {
+  return inlinedMarkAtomInternal<T, true>(cx, thing);
+}
+
+} // namespace gc
+} // namespace js
diff --git a/js/src/gc/AtomMarking.cpp b/js/src/gc/AtomMarking.cpp
new file mode 100644
index 0000000000..78beb742d0
--- /dev/null
+++ b/js/src/gc/AtomMarking.cpp
@@ -0,0 +1,294 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/AtomMarking-inl.h"
+
+#include <type_traits>
+
+#include "gc/PublicIterators.h"
+
+#include "gc/GC-inl.h"
+#include "gc/Heap-inl.h"
+
+namespace js {
+namespace gc {
+
+// [SMDOC] GC Atom Marking
+//
+// Things in the atoms zone (which includes atomized strings and other things,
+// all of which we will refer to as 'atoms' here) may be pointed to freely by
+// things in other zones. To avoid the need to perform garbage collections of
+// the entire runtime to collect atoms, we compute a separate atom mark bitmap
+// for each zone that is always an overapproximation of the atoms that zone is
+// using. When an atom is not in the mark bitmap for any zone, it can be
+// destroyed.
+//
+// To minimize interference with the rest of the GC, atom marking and sweeping
+// is done by manipulating the mark bitmaps in the chunks used for the atoms.
+// When the atoms zone is being collected, the mark bitmaps for the chunk(s)
+// used by the atoms are updated normally during marking. After marking
+// finishes, the chunk mark bitmaps are translated to a more efficient atom mark
+// bitmap (see below) that is stored on the zones which the GC collected
+// (computeBitmapFromChunkMarkBits). Before sweeping begins, the chunk mark
+// bitmaps are updated with any atoms that might be referenced by zones which
+// weren't collected (markAtomsUsedByUncollectedZones). The GC sweeping will
+// then release all atoms which are not marked by any zone.
+//
+// The representation of atom mark bitmaps is as follows:
+//
+// Each arena in the atoms zone has an atomBitmapStart() value indicating the
+// word index into the bitmap of the first thing in the arena. Each arena uses
+// ArenaBitmapWords of data to store its bitmap, which uses the same
+// representation as chunk mark bitmaps: one bit is allocated per Cell, with
+// bits for space between things being unused when things are larger than a
+// single Cell.
+
+void AtomMarkingRuntime::registerArena(Arena* arena, const AutoLockGC& lock) {
+  MOZ_ASSERT(arena->getThingSize() != 0);
+  MOZ_ASSERT(arena->getThingSize() % CellAlignBytes == 0);
+  MOZ_ASSERT(arena->zone->isAtomsZone());
+
+  // Assign this arena a word range in the atoms mark bitmap. Prefer to
+  // recycle a range released by a previously unregistered arena.
+  auto& freeIndexes = freeArenaIndexes.ref();
+  if (!freeIndexes.empty()) {
+    arena->atomBitmapStart() = freeIndexes.popCopy();
+    return;
+  }
+
+  // No free range available: grow the bitmap and hand out a fresh range at
+  // the end.
+  arena->atomBitmapStart() = allocatedWords;
+  allocatedWords += ArenaBitmapWords;
+}
+
+void AtomMarkingRuntime::unregisterArena(Arena* arena, const AutoLockGC& lock) {
+  MOZ_ASSERT(arena->zone->isAtomsZone());
+
+  // Return the arena's bitmap word range to the free list for reuse.
+  // Leak these atom bits if we run out of memory.
+  (void)freeArenaIndexes.ref().emplaceBack(arena->atomBitmapStart());
+}
+
+bool AtomMarkingRuntime::computeBitmapFromChunkMarkBits(JSRuntime* runtime,
+                                                        DenseBitmap& bitmap) {
+  MOZ_ASSERT(CurrentThreadIsPerformingGC());
+
+  // Reserve room for every word handed out to atoms-zone arenas. On
+  // allocation failure return false without reporting an exception.
+  if (!bitmap.ensureSpace(allocatedWords)) {
+    return false;
+  }
+
+  // Copy each arena's chunk mark bits into its reserved slice of the dense
+  // bitmap.
+  Zone* zone = runtime->unsafeAtomsZone();
+  for (AllocKind kind : AllAllocKinds()) {
+    for (ArenaIter iter(zone, kind); !iter.done(); iter.next()) {
+      Arena* arena = iter.get();
+      MarkBitmapWord* words = arena->chunk()->markBits.arenaBits(arena);
+      bitmap.copyBitsFrom(arena->atomBitmapStart(), ArenaBitmapWords, words);
+    }
+  }
+
+  return true;
+}
+
+void AtomMarkingRuntime::refineZoneBitmapForCollectedZone(
+    Zone* zone, const DenseBitmap& bitmap) {
+  MOZ_ASSERT(zone->isCollectingFromAnyThread());
+
+  // The atoms zone itself has no per-zone atom bitmap to refine.
+  if (zone->isAtomsZone()) {
+    return;
+  }
+
+  // Intersect the zone's bitmap with |bitmap| to obtain the tightest new
+  // overapproximation available. |bitmap| may include bits absent from this
+  // zone's bitmap when additional zones were collected by the GC.
+  zone->markedAtoms().bitwiseAndWith(bitmap);
+}
+
+// Set any bits in the chunk mark bitmaps for atoms which are marked in
+// |bitmap|.
+template <typename Bitmap>
+static void BitwiseOrIntoChunkMarkBits(JSRuntime* runtime, Bitmap& bitmap) {
+  // ORing whole words per arena must never touch a neighbouring arena's
+  // bits.
+  static_assert(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
+                "ArenaBitmapWords must evenly divide ArenaBitmapBits");
+
+  Zone* zone = runtime->unsafeAtomsZone();
+  for (AllocKind kind : AllAllocKinds()) {
+    for (ArenaIter iter(zone, kind); !iter.done(); iter.next()) {
+      Arena* arena = iter.get();
+      MarkBitmapWord* words = arena->chunk()->markBits.arenaBits(arena);
+      bitmap.bitwiseOrRangeInto(arena->atomBitmapStart(), ArenaBitmapWords,
+                                words);
+    }
+  }
+}
+
+void AtomMarkingRuntime::markAtomsUsedByUncollectedZones(JSRuntime* runtime) {
+  MOZ_ASSERT(CurrentThreadIsPerformingGC());
+
+  // Only uncollected zones need their bits propagated here: atoms referenced
+  // by collected zones have already been marked during the GC.
+  //
+  // Fast path: union all uncollected zones' bitmaps into one dense bitmap and
+  // push it into the chunk mark bits in a single pass. If allocating the
+  // union bitmap fails, fall back to pushing each zone's bitmap separately.
+  DenseBitmap unionBitmap;
+  bool haveUnion = unionBitmap.ensureSpace(allocatedWords);
+
+  for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
+    if (zone->isCollectingFromAnyThread()) {
+      continue;
+    }
+    if (haveUnion) {
+      zone->markedAtoms().bitwiseOrInto(unionBitmap);
+    } else {
+      BitwiseOrIntoChunkMarkBits(runtime, zone->markedAtoms());
+    }
+  }
+
+  if (haveUnion) {
+    BitwiseOrIntoChunkMarkBits(runtime, unionBitmap);
+  }
+}
+
+// Out-of-line definition of the inlined marking path, for callers that are
+// not performance-sensitive.
+template <typename T>
+void AtomMarkingRuntime::markAtom(JSContext* cx, T* thing) {
+  inlinedMarkAtom(cx, thing);
+}
+
+template void AtomMarkingRuntime::markAtom(JSContext* cx, JSAtom* thing);
+template void AtomMarkingRuntime::markAtom(JSContext* cx, JS::Symbol* thing);
+
+// Mark whichever atom or symbol |id| holds; atoms and symbols are the only
+// GC things an id can contain.
+void AtomMarkingRuntime::markId(JSContext* cx, jsid id) {
+  if (id.isAtom()) {
+    markAtom(cx, id.toAtom());
+  } else if (id.isSymbol()) {
+    markAtom(cx, id.toSymbol());
+  } else {
+    MOZ_ASSERT(!id.isGCThing());
+  }
+}
+
+// Mark the atom or symbol held by |value|, if any. Non-atomized strings and
+// other GC thing kinds need no marking here.
+void AtomMarkingRuntime::markAtomValue(JSContext* cx, const Value& value) {
+  if (value.isString()) {
+    JSString* str = value.toString();
+    if (str->isAtom()) {
+      markAtom(cx, &str->asAtom());
+    }
+  } else if (value.isSymbol()) {
+    markAtom(cx, value.toSymbol());
+  } else {
+    MOZ_ASSERT_IF(value.isGCThing(), value.isObject() ||
+                                         value.isPrivateGCThing() ||
+                                         value.isBigInt());
+  }
+}
+
+#ifdef DEBUG
+template <typename T>
+bool AtomMarkingRuntime::atomIsMarked(Zone* zone, T* thing) {
+  static_assert(std::is_same_v<T, JSAtom> || std::is_same_v<T, JS::Symbol>,
+                "Should only be called with JSAtom* or JS::Symbol* argument");
+
+  MOZ_ASSERT(thing);
+  MOZ_ASSERT(!IsInsideNursery(thing));
+  MOZ_ASSERT(thing->zoneFromAnyThread()->isAtomsZone());
+
+  // Until the permanent atoms are populated, treat everything as marked.
+  JSRuntime* rt = zone->runtimeFromAnyThread();
+  if (!rt->permanentAtomsPopulated()) {
+    return true;
+  }
+
+  // Permanent (shareable) things, and pinned atoms, always count as marked.
+  if (thing->isPermanentAndMayBeShared()) {
+    return true;
+  }
+  if constexpr (std::is_same_v<T, JSAtom>) {
+    if (thing->isPinned()) {
+      return true;
+    }
+  }
+
+  // Otherwise consult the zone's atom mark bitmap.
+  size_t markBit = GetAtomBit(&thing->asTenured());
+  return zone->markedAtoms().readonlyThreadsafeGetBit(markBit);
+}
+
+template bool AtomMarkingRuntime::atomIsMarked(Zone* zone, JSAtom* thing);
+template bool AtomMarkingRuntime::atomIsMarked(Zone* zone, JS::Symbol* thing);
+
+// Generic-cell overload: anything that is not an atomized string or a symbol
+// trivially counts as marked.
+template <>
+bool AtomMarkingRuntime::atomIsMarked(Zone* zone, TenuredCell* thing) {
+  if (!thing) {
+    return true;
+  }
+
+  if (thing->is<JS::Symbol>()) {
+    return atomIsMarked(zone, thing->as<JS::Symbol>());
+  }
+
+  if (!thing->is<JSString>()) {
+    return true;
+  }
+
+  JSString* str = thing->as<JSString>();
+  return str->isAtom() ? atomIsMarked(zone, &str->asAtom()) : true;
+}
+
+// Whether the atom/symbol held by |id| (if any) is marked in |zone|.
+bool AtomMarkingRuntime::idIsMarked(Zone* zone, jsid id) {
+  if (id.isAtom()) {
+    return atomIsMarked(zone, id.toAtom());
+  }
+  if (id.isSymbol()) {
+    return atomIsMarked(zone, id.toSymbol());
+  }
+
+  // Ids hold no other GC thing kinds; such ids are trivially "marked".
+  MOZ_ASSERT(!id.isGCThing());
+  return true;
+}
+
+// Whether the atom/symbol held by |value| (if any) is marked in |zone|.
+bool AtomMarkingRuntime::valueIsMarked(Zone* zone, const Value& value) {
+  if (value.isString()) {
+    JSString* str = value.toString();
+    return str->isAtom() ? atomIsMarked(zone, &str->asAtom()) : true;
+  }
+
+  if (value.isSymbol()) {
+    return atomIsMarked(zone, value.toSymbol());
+  }
+
+  // Remaining GC thing kinds never live in the atoms zone.
+  MOZ_ASSERT_IF(value.isGCThing(), value.hasObjectPayload() ||
+                                       value.isPrivateGCThing() ||
+                                       value.isBigInt());
+  return true;
+}
+
+#endif // DEBUG
+
+} // namespace gc
+
+#ifdef DEBUG
+
+// Public debug-only wrappers forwarding to the runtime's AtomMarkingRuntime.
+
+bool AtomIsMarked(Zone* zone, JSAtom* atom) {
+  return zone->runtimeFromAnyThread()->gc.atomMarking.atomIsMarked(zone, atom);
+}
+
+bool AtomIsMarked(Zone* zone, jsid id) {
+  return zone->runtimeFromAnyThread()->gc.atomMarking.idIsMarked(zone, id);
+}
+
+bool AtomIsMarked(Zone* zone, const Value& value) {
+  return zone->runtimeFromAnyThread()->gc.atomMarking.valueIsMarked(zone,
+                                                                    value);
+}
+
+#endif // DEBUG
+
+} // namespace js
diff --git a/js/src/gc/AtomMarking.h b/js/src/gc/AtomMarking.h
new file mode 100644
index 0000000000..e7e97fb389
--- /dev/null
+++ b/js/src/gc/AtomMarking.h
@@ -0,0 +1,86 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_AtomMarking_h
+#define gc_AtomMarking_h
+
+#include "NamespaceImports.h"
+#include "js/Vector.h"
+#include "threading/ProtectedData.h"
+
+namespace js {
+
+class AutoLockGC;
+class DenseBitmap;
+
+namespace gc {
+
+class Arena;
+
+// This class manages state used for marking atoms during GCs.
+// See AtomMarking.cpp for details.
+class AtomMarkingRuntime {
+  // Unused arena atom bitmap indexes. Protected by the GC lock.
+  js::GCLockData<Vector<size_t, 0, SystemAllocPolicy>> freeArenaIndexes;
+
+  // Mark the atoms a thing itself references (e.g. a symbol's description);
+  // defined in the corresponding -inl.h header.
+  inline void markChildren(JSContext* cx, JSAtom*);
+  inline void markChildren(JSContext* cx, JS::Symbol* symbol);
+
+ public:
+  // The extent of all allocated and free words in atom mark bitmaps.
+  // This monotonically increases and may be read from without locking.
+  mozilla::Atomic<size_t, mozilla::SequentiallyConsistent> allocatedWords;
+
+  AtomMarkingRuntime() : allocatedWords(0) {}
+
+  // Mark an arena as holding things in the atoms zone.
+  void registerArena(Arena* arena, const AutoLockGC& lock);
+
+  // Mark an arena as no longer holding things in the atoms zone.
+  void unregisterArena(Arena* arena, const AutoLockGC& lock);
+
+  // Fill |bitmap| with an atom marking bitmap based on the things that are
+  // currently marked in the chunks used by atoms zone arenas. This returns
+  // false on an allocation failure (but does not report an exception).
+  bool computeBitmapFromChunkMarkBits(JSRuntime* runtime, DenseBitmap& bitmap);
+
+  // Update the atom marking bitmap in |zone| according to another
+  // overapproximation of the reachable atoms in |bitmap|.
+  void refineZoneBitmapForCollectedZone(Zone* zone, const DenseBitmap& bitmap);
+
+  // Set any bits in the chunk mark bitmaps for atoms which are marked in any
+  // uncollected zone in the runtime.
+  void markAtomsUsedByUncollectedZones(JSRuntime* runtime);
+
+  // Mark an atom or id as being newly reachable by the context's zone.
+  template <typename T>
+  void markAtom(JSContext* cx, T* thing);
+
+  // Version of markAtom that's always inlined, for performance-sensitive
+  // callers. |Fallible| selects whether bitmap growth failure is reported
+  // (return false) or asserted away.
+  template <typename T, bool Fallible>
+  MOZ_ALWAYS_INLINE bool inlinedMarkAtomInternal(JSContext* cx, T* thing);
+  template <typename T>
+  MOZ_ALWAYS_INLINE void inlinedMarkAtom(JSContext* cx, T* thing);
+  template <typename T>
+  MOZ_ALWAYS_INLINE bool inlinedMarkAtomFallible(JSContext* cx, T* thing);
+
+  // Mark any atom or symbol contained in |id| / |value|.
+  void markId(JSContext* cx, jsid id);
+  void markAtomValue(JSContext* cx, const Value& value);
+
+#ifdef DEBUG
+  // Return whether |thing/id| is in the atom marking bitmap for |zone|.
+  template <typename T>
+  bool atomIsMarked(Zone* zone, T* thing);
+  bool idIsMarked(Zone* zone, jsid id);
+  bool valueIsMarked(Zone* zone, const Value& value);
+#endif
+};
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_AtomMarking_h
diff --git a/js/src/gc/Barrier.cpp b/js/src/gc/Barrier.cpp
new file mode 100644
index 0000000000..c0b9d28798
--- /dev/null
+++ b/js/src/gc/Barrier.cpp
@@ -0,0 +1,144 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Barrier.h"
+
+#include "gc/Marking.h"
+#include "jit/JitContext.h"
+#include "js/HashTable.h"
+#include "js/shadow/Zone.h" // JS::shadow::Zone
+#include "js/Value.h"
+#include "vm/BigIntType.h" // JS::BigInt
+#include "vm/EnvironmentObject.h"
+#include "vm/GeneratorObject.h"
+#include "vm/JSObject.h"
+#include "vm/PropMap.h"
+#include "wasm/WasmJS.h"
+
+#include "gc/StableCellHasher-inl.h"
+
+namespace js {
+
+// Whether a major GC is in progress; asserts the caller's thread may access
+// |shadowZone|'s runtime before consulting the global heap state.
+bool RuntimeFromMainThreadIsHeapMajorCollecting(JS::shadow::Zone* shadowZone) {
+  MOZ_ASSERT(
+      CurrentThreadCanAccessRuntime(shadowZone->runtimeFromMainThread()));
+  return JS::RuntimeHeapIsMajorCollecting();
+}
+
+#ifdef DEBUG
+
+// Debug helper: report whether |obj| is currently marked black.
+bool IsMarkedBlack(JSObject* obj) { return obj->isMarkedBlack(); }
+
+// Debug check: |this| must be the slot or dense element of |owner| that
+// |kind|/|slot| identify.
+bool HeapSlot::preconditionForSet(NativeObject* owner, Kind kind,
+                                  uint32_t slot) const {
+  if (kind != Slot) {
+    // Element: account for any shifted leading elements.
+    uint32_t shifted = owner->getElementsHeader()->numShiftedElements();
+    MOZ_ASSERT(slot >= shifted);
+    return &owner->getDenseElement(slot - shifted) == (const Value*)this;
+  }
+  return &owner->getSlotRef(slot) == this;
+}
+
+// Debug check: the identified slot/element of |obj| must already hold
+// |target|, and |obj| must not be gray outside the GC preparation phase.
+void HeapSlot::assertPreconditionForPostWriteBarrier(
+    NativeObject* obj, Kind kind, uint32_t slot, const Value& target) const {
+  if (kind != Slot) {
+    uint32_t shifted = obj->getElementsHeader()->numShiftedElements();
+    MOZ_ASSERT(slot >= shifted);
+    HeapSlot* element =
+        static_cast<HeapSlot*>(obj->getDenseElements() + (slot - shifted));
+    MOZ_ASSERT(element->get() == target);
+  } else {
+    MOZ_ASSERT(obj->getSlotAddressUnchecked(slot)->get() == target);
+  }
+
+  if (!obj->zone()->isGCPreparing()) {
+    AssertTargetIsNotGray(obj);
+  }
+}
+
+// True when this thread has a JitContext whose Ion backend is active.
+bool CurrentThreadIsIonCompiling() {
+  jit::JitContext* context = jit::MaybeGetJitContext();
+  if (!context) {
+    return false;
+  }
+  return context->inIonBackend();
+}
+
+#endif // DEBUG
+
+#if !MOZ_IS_GCC
+template struct JS_PUBLIC_API StableCellHasher<JSObject*>;
+#endif
+
+} // namespace js
+
+// Post-write barrier, used by the C++ Heap<T> implementation.
+
+// Each wrapper forwards to InternalBarrierMethods<T>::postBarrier, which
+// records the edge in the store buffer when the new target may be a nursery
+// thing (see the post-write barrier SMDOC in Barrier.h).
+JS_PUBLIC_API void JS::HeapObjectPostWriteBarrier(JSObject** objp,
+                                                  JSObject* prev,
+                                                  JSObject* next) {
+  MOZ_ASSERT(objp);
+  js::InternalBarrierMethods<JSObject*>::postBarrier(objp, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapStringPostWriteBarrier(JSString** strp,
+                                                  JSString* prev,
+                                                  JSString* next) {
+  MOZ_ASSERT(strp);
+  js::InternalBarrierMethods<JSString*>::postBarrier(strp, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapBigIntPostWriteBarrier(JS::BigInt** bip,
+                                                  JS::BigInt* prev,
+                                                  JS::BigInt* next) {
+  MOZ_ASSERT(bip);
+  js::InternalBarrierMethods<JS::BigInt*>::postBarrier(bip, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapValuePostWriteBarrier(JS::Value* valuep,
+                                                 const Value& prev,
+                                                 const Value& next) {
+  MOZ_ASSERT(valuep);
+  js::InternalBarrierMethods<JS::Value>::postBarrier(valuep, prev, next);
+}
+
+// Combined pre- and post-write barriers, used by the rust Heap<T>
+// implementation.
+
+// Each wrapper applies the pre-write barrier to the previous value, then the
+// post-write barrier for the new value (see the SMDOC in Barrier.h).
+JS_PUBLIC_API void JS::HeapObjectWriteBarriers(JSObject** objp, JSObject* prev,
+                                               JSObject* next) {
+  MOZ_ASSERT(objp);
+  js::InternalBarrierMethods<JSObject*>::preBarrier(prev);
+  js::InternalBarrierMethods<JSObject*>::postBarrier(objp, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapStringWriteBarriers(JSString** strp, JSString* prev,
+                                               JSString* next) {
+  MOZ_ASSERT(strp);
+  js::InternalBarrierMethods<JSString*>::preBarrier(prev);
+  js::InternalBarrierMethods<JSString*>::postBarrier(strp, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapBigIntWriteBarriers(JS::BigInt** bip,
+                                               JS::BigInt* prev,
+                                               JS::BigInt* next) {
+  MOZ_ASSERT(bip);
+  js::InternalBarrierMethods<JS::BigInt*>::preBarrier(prev);
+  js::InternalBarrierMethods<JS::BigInt*>::postBarrier(bip, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapScriptWriteBarriers(JSScript** scriptp,
+                                               JSScript* prev, JSScript* next) {
+  MOZ_ASSERT(scriptp);
+  js::InternalBarrierMethods<JSScript*>::preBarrier(prev);
+  js::InternalBarrierMethods<JSScript*>::postBarrier(scriptp, prev, next);
+}
+
+JS_PUBLIC_API void JS::HeapValueWriteBarriers(JS::Value* valuep,
+                                              const Value& prev,
+                                              const Value& next) {
+  MOZ_ASSERT(valuep);
+  js::InternalBarrierMethods<JS::Value>::preBarrier(prev);
+  js::InternalBarrierMethods<JS::Value>::postBarrier(valuep, prev, next);
+}
diff --git a/js/src/gc/Barrier.h b/js/src/gc/Barrier.h
new file mode 100644
index 0000000000..d4451a60e5
--- /dev/null
+++ b/js/src/gc/Barrier.h
@@ -0,0 +1,1257 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Barrier_h
+#define gc_Barrier_h
+
+#include <type_traits> // std::true_type
+
+#include "NamespaceImports.h"
+
+#include "gc/Cell.h"
+#include "gc/GCContext.h"
+#include "gc/StoreBuffer.h"
+#include "js/ComparisonOperators.h" // JS::detail::DefineComparisonOps
+#include "js/experimental/TypedData.h" // js::EnableIfABOVType
+#include "js/HeapAPI.h"
+#include "js/Id.h"
+#include "js/RootingAPI.h"
+#include "js/Value.h"
+#include "util/Poison.h"
+
+/*
+ * [SMDOC] GC Barriers
+ *
+ * Several kinds of barrier are necessary to allow the GC to function correctly.
+ * These are triggered by reading or writing to GC pointers in the heap and
+ * serve to tell the collector about changes to the graph of reachable GC
+ * things.
+ *
+ * Since it would be awkward to change every write to memory into a function
+ * call, this file contains a bunch of C++ classes and templates that use
+ * operator overloading to take care of barriers automatically. In most cases,
+ * all that's necessary is to replace:
+ *
+ * Type* field;
+ *
+ * with:
+ *
+ * HeapPtr<Type> field;
+ *
+ * All heap-based GC pointers and tagged pointers must use one of these classes,
+ * except in a couple of exceptional cases.
+ *
+ * These classes are designed to be used by the internals of the JS engine.
+ * Barriers designed to be used externally are provided in js/RootingAPI.h.
+ *
+ * Overview
+ * ========
+ *
+ * This file implements the following concrete classes:
+ *
+ * HeapPtr General wrapper for heap-based pointers that provides pre- and
+ * post-write barriers. Most clients should use this.
+ *
+ * GCPtr An optimisation of HeapPtr for objects which are only destroyed
+ * by GC finalization (this rules out use in Vector, for example).
+ *
+ * PreBarriered Provides a pre-barrier but not a post-barrier. Necessary when
+ * generational GC updates are handled manually, e.g. for hash
+ * table keys that don't use StableCellHasher.
+ *
+ * HeapSlot Provides pre and post-barriers, optimised for use in JSObject
+ * slots and elements.
+ *
+ * WeakHeapPtr Provides read and post-write barriers, for use with weak
+ * pointers.
+ *
+ * UnsafeBarePtr Provides no barriers. Don't add new uses of this, or only if
+ * you really know what you are doing.
+ *
+ * The following classes are implemented in js/RootingAPI.h (in the JS
+ * namespace):
+ *
+ * Heap General wrapper for external clients. Like HeapPtr but also
+ * handles cycle collector concerns. Most external clients should
+ * use this.
+ *
+ * Heap::Tenured Like Heap but doesn't allow nursery pointers. Allows storing
+ * flags in unused lower bits of the pointer.
+ *
+ * Which class to use?
+ * -------------------
+ *
+ * Answer the following questions to decide which barrier class is right for
+ * your use case:
+ *
+ * Is your code part of the JS engine?
+ * Yes, it's internal =>
+ * Is your pointer weak or strong?
+ * Strong =>
+ * Do you want automatic handling of nursery pointers?
+ * Yes, of course =>
+ * Can your object be destroyed outside of a GC?
+ * Yes => Use HeapPtr<T>
+ * No => Use GCPtr<T> (optimization)
+ * No, I'll do this myself =>
+ * Do you want pre-barriers so incremental marking works?
+ * Yes, of course => Use PreBarriered<T>
+ * No, and I'll fix all the bugs myself => Use UnsafeBarePtr<T>
+ * Weak => Use WeakHeapPtr<T>
+ * No, it's external =>
+ * Can your pointer refer to nursery objects?
+ * Yes => Use JS::Heap<T>
+ * Never => Use JS::Heap::Tenured<T> (optimization)
+ *
+ * If in doubt, use HeapPtr<T>.
+ *
+ * Write barriers
+ * ==============
+ *
+ * A write barrier is a mechanism used by incremental or generational GCs to
+ * ensure that every value that needs to be marked is marked. In general, the
+ * write barrier should be invoked whenever a write can cause the set of things
+ * traced through by the GC to change. This includes:
+ *
+ * - writes to object properties
+ * - writes to array slots
+ * - writes to fields like JSObject::shape_ that we trace through
+ * - writes to fields in private data
+ * - writes to non-markable fields like JSObject::private that point to
+ * markable data
+ *
+ * The last category is the trickiest. Even though the private pointer does not
+ * point to a GC thing, changing the private pointer may change the set of
+ * objects that are traced by the GC. Therefore it needs a write barrier.
+ *
+ * Every barriered write should have the following form:
+ *
+ * <pre-barrier>
+ * obj->field = value; // do the actual write
+ * <post-barrier>
+ *
+ * The pre-barrier is used for incremental GC and the post-barrier is for
+ * generational GC.
+ *
+ * Pre-write barrier
+ * -----------------
+ *
+ * To understand the pre-barrier, let's consider how incremental GC works. The
+ * GC itself is divided into "slices". Between each slice, JS code is allowed to
+ * run. Each slice should be short so that the user doesn't notice the
+ * interruptions. In our GC, the structure of the slices is as follows:
+ *
+ * 1. ... JS work, which leads to a request to do GC ...
+ * 2. [first GC slice, which performs all root marking and (maybe) more marking]
+ * 3. ... more JS work is allowed to run ...
+ * 4. [GC mark slice, which runs entirely in
+ * GCRuntime::markUntilBudgetExhausted]
+ * 5. ... more JS work ...
+ * 6. [GC mark slice, which runs entirely in
+ * GCRuntime::markUntilBudgetExhausted]
+ * 7. ... more JS work ...
+ * 8. [GC marking finishes; sweeping done non-incrementally; GC is done]
+ * 9. ... JS continues uninterrupted now that the GC has finished ...
+ *
+ * Of course, there may be a different number of slices depending on how much
+ * marking is to be done.
+ *
+ * The danger inherent in this scheme is that the JS code in steps 3, 5, and 7
+ * might change the heap in a way that causes the GC to collect an object that
+ * is actually reachable. The write barrier prevents this from happening. We use
+ * a variant of incremental GC called "snapshot at the beginning." This approach
+ * guarantees the invariant that if an object is reachable in step 2, then we
+ * will mark it eventually. The name comes from the idea that we take a
+ * theoretical "snapshot" of all reachable objects in step 2; all objects in
+ * that snapshot should eventually be marked. (Note that the write barrier
+ * verifier code takes an actual snapshot.)
+ *
+ * The basic correctness invariant of a snapshot-at-the-beginning collector is
+ * that any object reachable at the end of the GC (step 9) must either:
+ * (1) have been reachable at the beginning (step 2) and thus in the snapshot
+ * (2) or must have been newly allocated, in steps 3, 5, or 7.
+ * To deal with case (2), any objects allocated during an incremental GC are
+ * automatically marked black.
+ *
+ * This strategy is actually somewhat conservative: if an object becomes
+ * unreachable between steps 2 and 8, it would be safe to collect it. We won't,
+ * mainly for simplicity. (Also, note that the snapshot is entirely
+ * theoretical. We don't actually do anything special in step 2 that we wouldn't
+ * do in a non-incremental GC.)
+ *
+ * It's the pre-barrier's job to maintain the snapshot invariant. Consider the
+ * write "obj->field = value". Let the prior value of obj->field be
+ * value0. Since it's possible that value0 may have been what obj->field
+ * contained in step 2, when the snapshot was taken, the barrier marks
+ * value0. Note that it only does this if we're in the middle of an incremental
+ * GC. Since this is rare, the cost of the write barrier is usually just an
+ * extra branch.
+ *
+ * In practice, we implement the pre-barrier differently based on the type of
+ * value0. E.g., see JSObject::preWriteBarrier, which is used if obj->field is
+ * a JSObject*. It takes value0 as a parameter.
+ *
+ * Post-write barrier
+ * ------------------
+ *
+ * For generational GC, we want to be able to quickly collect the nursery in a
+ * minor collection. Part of the way this is achieved is to only mark the
+ * nursery itself; tenured things, which may form the majority of the heap, are
+ * not traced through or marked. This leads to the problem of what to do about
+ * tenured objects that have pointers into the nursery: if such things are not
+ * marked, they may be discarded while there are still live objects which
+ * reference them. The solution is to maintain information about these pointers,
+ * and mark their targets when we start a minor collection.
+ *
+ * The pointers can be thought of as edges in an object graph, and the set of
+ * edges from the tenured generation into the nursery is known as the remembered
+ * set. Post barriers are used to track this remembered set.
+ *
+ * Whenever a slot which could contain such a pointer is written, we check
+ * whether the pointed-to thing is in the nursery (if storeBuffer() returns a
+ * buffer). If so we add the cell into the store buffer, which is the
+ * collector's representation of the remembered set. This means that when we
+ * come to do a minor collection we can examine the contents of the store buffer
+ * and mark any edge targets that are in the nursery.
+ *
+ * Read barriers
+ * =============
+ *
+ * Weak pointer read barrier
+ * -------------------------
+ *
+ * Weak pointers must have a read barrier to prevent the referent from being
+ * collected if it is read after the start of an incremental GC.
+ *
+ * The problem happens when, during an incremental GC, some code reads a weak
+ * pointer and writes it somewhere on the heap that has been marked black in a
+ * previous slice. Since the weak pointer will not otherwise be marked and will
+ * be swept and finalized in the last slice, this will leave the pointer just
+ * written dangling after the GC. To solve this, we immediately mark black all
+ * weak pointers that get read between slices so that it is safe to store them
+ * in an already marked part of the heap, e.g. in Rooted.
+ *
+ * Cycle collector read barrier
+ * ----------------------------
+ *
+ * Heap pointers external to the engine may be marked gray. The JS API has an
+ * invariant that no gray pointers may be passed, and this is maintained by a
+ * read
+ * barrier that calls ExposeGCThingToActiveJS on such pointers. This is
+ * implemented by JS::Heap<T> in js/RootingAPI.h.
+ *
+ * Implementation Details
+ * ======================
+ *
+ * One additional note: not all object writes need to be pre-barriered. Writes
+ * to newly allocated objects do not need a pre-barrier. In these cases, we use
+ * the "obj->field.init(value)" method instead of "obj->field = value". We use
+ * the init naming idiom in many places to signify that a field is being
+ * assigned for the first time.
+ *
+ * This file implements the following hierarchy of classes:
+ *
+ * BarrieredBase base class of all barriers
+ * | |
+ * | WriteBarriered base class which provides common write operations
+ * | | | | |
+ * | | | | PreBarriered provides pre-barriers only
+ * | | | |
+ * | | | GCPtr provides pre- and post-barriers
+ * | | |
+ * | | HeapPtr provides pre- and post-barriers; is relocatable
+ * | | and deletable for use inside C++ managed memory
+ * | |
+ * | HeapSlot similar to GCPtr, but tailored to slots storage
+ * |
+ * ReadBarriered base class which provides common read operations
+ * |
+ * WeakHeapPtr provides read barriers only
+ *
+ *
+ * The barrier logic is implemented in the
+ * Cell/TenuredCell base classes, which are called via:
+ *
+ * WriteBarriered<T>::pre
+ * -> InternalBarrierMethods<T*>::preBarrier
+ * -> Cell::preWriteBarrier
+ * -> InternalBarrierMethods<Value>::preBarrier
+ * -> InternalBarrierMethods<jsid>::preBarrier
+ * -> InternalBarrierMethods<T*>::preBarrier
+ * -> Cell::preWriteBarrier
+ *
+ * GCPtr<T>::post and HeapPtr<T>::post
+ * -> InternalBarrierMethods<T*>::postBarrier
+ * -> gc::PostWriteBarrierImpl
+ * -> InternalBarrierMethods<Value>::postBarrier
+ * -> StoreBuffer::put
+ *
+ * Barriers for use outside of the JS engine call into the same barrier
+ * implementations at InternalBarrierMethods<T>::post via an indirect call to
+ * Heap(.+)PostWriteBarrier.
+ *
+ * These classes are designed to be used to wrap GC thing pointers or values that
+ * act like them (i.e. JS::Value and jsid). It is possible to use them for
+ * other types by supplying the necessary barrier implementations but this
+ * is not usually necessary and should be done with caution.
+ */
+
+namespace js {
+
+class NativeObject;
+
+namespace gc {
+
+inline void ValueReadBarrier(const Value& v) {
+ MOZ_ASSERT(v.isGCThing());
+ ReadBarrierImpl(v.toGCThing());
+}
+
+inline void ValuePreWriteBarrier(const Value& v) {
+ MOZ_ASSERT(v.isGCThing());
+ PreWriteBarrierImpl(v.toGCThing());
+}
+
+inline void IdPreWriteBarrier(jsid id) {
+ MOZ_ASSERT(id.isGCThing());
+ PreWriteBarrierImpl(&id.toGCThing()->asTenured());
+}
+
+inline void CellPtrPreWriteBarrier(JS::GCCellPtr thing) {
+ MOZ_ASSERT(thing);
+ PreWriteBarrierImpl(thing.asCell());
+}
+
+} // namespace gc
+
+#ifdef DEBUG
+
+bool CurrentThreadIsTouchingGrayThings();
+
+bool IsMarkedBlack(JSObject* obj);
+
+#endif
+
+template <typename T, typename Enable = void>
+struct InternalBarrierMethods {};
+
+template <typename T>
+struct InternalBarrierMethods<T*> {
+ static_assert(std::is_base_of_v<gc::Cell, T>, "Expected a GC thing type");
+
+ static bool isMarkable(const T* v) { return v != nullptr; }
+
+ static void preBarrier(T* v) { gc::PreWriteBarrier(v); }
+
+ static void postBarrier(T** vp, T* prev, T* next) {
+ gc::PostWriteBarrier(vp, prev, next);
+ }
+
+ static void readBarrier(T* v) { gc::ReadBarrier(v); }
+
+#ifdef DEBUG
+ static void assertThingIsNotGray(T* v) { return T::assertThingIsNotGray(v); }
+#endif
+};
+
+template <>
+struct InternalBarrierMethods<Value> {
+ static bool isMarkable(const Value& v) { return v.isGCThing(); }
+
+ static void preBarrier(const Value& v) {
+ if (v.isGCThing()) {
+ gc::ValuePreWriteBarrier(v);
+ }
+ }
+
+ static MOZ_ALWAYS_INLINE void postBarrier(Value* vp, const Value& prev,
+ const Value& next) {
+ MOZ_ASSERT(!CurrentThreadIsIonCompiling());
+ MOZ_ASSERT(vp);
+
+ // If the target needs an entry, add it.
+ js::gc::StoreBuffer* sb;
+ if (next.isGCThing() && (sb = next.toGCThing()->storeBuffer())) {
+ // If we know that the prev has already inserted an entry, we can
+ // skip doing the lookup to add the new entry. Note that we cannot
+ // safely assert the presence of the entry because it may have been
+ // added via a different store buffer.
+ if (prev.isGCThing() && prev.toGCThing()->storeBuffer()) {
+ return;
+ }
+ sb->putValue(vp);
+ return;
+ }
+ // Remove the prev entry if the new value does not need it.
+ if (prev.isGCThing() && (sb = prev.toGCThing()->storeBuffer())) {
+ sb->unputValue(vp);
+ }
+ }
+
+ static void readBarrier(const Value& v) {
+ if (v.isGCThing()) {
+ gc::ValueReadBarrier(v);
+ }
+ }
+
+#ifdef DEBUG
+ static void assertThingIsNotGray(const Value& v) {
+ JS::AssertValueIsNotGray(v);
+ }
+#endif
+};
+
+template <>
+struct InternalBarrierMethods<jsid> {
+ static bool isMarkable(jsid id) { return id.isGCThing(); }
+ static void preBarrier(jsid id) {
+ if (id.isGCThing()) {
+ gc::IdPreWriteBarrier(id);
+ }
+ }
+ static void postBarrier(jsid* idp, jsid prev, jsid next) {}
+#ifdef DEBUG
+ static void assertThingIsNotGray(jsid id) { JS::AssertIdIsNotGray(id); }
+#endif
+};
+
+// Specialization for JS::ArrayBufferOrView subclasses.
+template <typename T>
+struct InternalBarrierMethods<T, EnableIfABOVType<T>> {
+ using BM = BarrierMethods<T>;
+
+ static bool isMarkable(const T& thing) { return bool(thing); }
+ static void preBarrier(const T& thing) {
+ gc::PreWriteBarrier(thing.asObjectUnbarriered());
+ }
+ static void postBarrier(T* tp, const T& prev, const T& next) {
+ BM::postWriteBarrier(tp, prev, next);
+ }
+ static void readBarrier(const T& thing) { BM::readBarrier(thing); }
+#ifdef DEBUG
+ static void assertThingIsNotGray(const T& thing) {
+ JSObject* obj = thing.asObjectUnbarriered();
+ if (obj) {
+ JS::AssertValueIsNotGray(JS::ObjectValue(*obj));
+ }
+ }
+#endif
+};
+
+template <typename T>
+static inline void AssertTargetIsNotGray(const T& v) {
+#ifdef DEBUG
+ if (!CurrentThreadIsTouchingGrayThings()) {
+ InternalBarrierMethods<T>::assertThingIsNotGray(v);
+ }
+#endif
+}
+
+// Base class of all barrier types.
+//
+// This is marked non-memmovable since post barriers added by derived classes
+// can add pointers to class instances to the store buffer.
+template <typename T>
+class MOZ_NON_MEMMOVABLE BarrieredBase {
+ protected:
+ // BarrieredBase is not directly instantiable.
+ explicit BarrieredBase(const T& v) : value(v) {}
+
+ // BarrieredBase subclasses cannot be copy constructed by default.
+ BarrieredBase(const BarrieredBase<T>& other) = default;
+
+ // Storage for all barrier classes. |value| must be a GC thing reference
+ // type: either a direct pointer to a GC thing or a supported tagged
+ // pointer that can reference GC things, such as JS::Value or jsid. Nested
+ // barrier types are NOT supported. See assertTypeConstraints.
+ T value;
+
+ public:
+ using ElementType = T;
+
+ // Note: this is public because C++ cannot friend to a specific template
+ // instantiation. Friending to the generic template leads to a number of
+ // unintended consequences, including template resolution ambiguity and a
+ // circular dependency with Tracing.h.
+ T* unbarrieredAddress() const { return const_cast<T*>(&value); }
+};
+
+// Base class for barriered pointer types that intercept only writes.
+template <class T>
+class WriteBarriered : public BarrieredBase<T>,
+ public WrappedPtrOperations<T, WriteBarriered<T>> {
+ protected:
+ using BarrieredBase<T>::value;
+
+ // WriteBarriered is not directly instantiable.
+ explicit WriteBarriered(const T& v) : BarrieredBase<T>(v) {}
+
+ public:
+ DECLARE_POINTER_CONSTREF_OPS(T);
+
+ // Use this if the automatic coercion to T isn't working.
+ const T& get() const { return this->value; }
+
+ // Use this if you want to change the value without invoking barriers.
+ // Obviously this is dangerous unless you know the barrier is not needed.
+ void unbarrieredSet(const T& v) { this->value = v; }
+
+ // For users who need to manually barrier the raw types.
+ static void preWriteBarrier(const T& v) {
+ InternalBarrierMethods<T>::preBarrier(v);
+ }
+
+ protected:
+ void pre() { InternalBarrierMethods<T>::preBarrier(this->value); }
+ MOZ_ALWAYS_INLINE void post(const T& prev, const T& next) {
+ InternalBarrierMethods<T>::postBarrier(&this->value, prev, next);
+ }
+};
+
+#define DECLARE_POINTER_ASSIGN_AND_MOVE_OPS(Wrapper, T) \
+ DECLARE_POINTER_ASSIGN_OPS(Wrapper, T) \
+ Wrapper<T>& operator=(Wrapper<T>&& other) { \
+ setUnchecked(other.release()); \
+ return *this; \
+ }
+
+/*
+ * PreBarriered only automatically handles pre-barriers. Post-barriers must be
+ * manually implemented when using this class. GCPtr and HeapPtr should be used
+ * in all cases that do not require explicit low-level control of moving
+ * behavior.
+ *
+ * This class is useful for example for HashMap keys where automatically
+ * updating a moved nursery pointer would break the hash table.
+ */
+template <class T>
+class PreBarriered : public WriteBarriered<T> {
+ public:
+ PreBarriered() : WriteBarriered<T>(JS::SafelyInitialized<T>::create()) {}
+ /*
+ * Allow implicit construction for use in generic contexts.
+ */
+ MOZ_IMPLICIT PreBarriered(const T& v) : WriteBarriered<T>(v) {}
+
+ explicit PreBarriered(const PreBarriered<T>& other)
+ : WriteBarriered<T>(other.value) {}
+
+ PreBarriered(PreBarriered<T>&& other) : WriteBarriered<T>(other.release()) {}
+
+ ~PreBarriered() { this->pre(); }
+
+ void init(const T& v) { this->value = v; }
+
+ /* Use to set the pointer to nullptr. */
+ void clear() { set(JS::SafelyInitialized<T>::create()); }
+
+ DECLARE_POINTER_ASSIGN_AND_MOVE_OPS(PreBarriered, T);
+
+ void set(const T& v) {
+ AssertTargetIsNotGray(v);
+ setUnchecked(v);
+ }
+
+ private:
+ void setUnchecked(const T& v) {
+ this->pre();
+ this->value = v;
+ }
+
+ T release() {
+ T tmp = this->value;
+ this->value = JS::SafelyInitialized<T>::create();
+ return tmp;
+ }
+};
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <typename T>
+struct DefineComparisonOps<js::PreBarriered<T>> : std::true_type {
+ static const T& get(const js::PreBarriered<T>& v) { return v.get(); }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+/*
+ * A pre- and post-barriered heap pointer, for use inside the JS engine.
+ *
+ * It must only be stored in memory that has GC lifetime. GCPtr must not be
+ * used in contexts where it may be implicitly moved or deleted, e.g. most
+ * containers.
+ *
+ * The post-barriers implemented by this class are faster than those
+ * implemented by js::HeapPtr<T> or JS::Heap<T> at the cost of not
+ * automatically handling deletion or movement.
+ */
+template <class T>
+class GCPtr : public WriteBarriered<T> {
+ public:
+ GCPtr() : WriteBarriered<T>(JS::SafelyInitialized<T>::create()) {}
+
+ explicit GCPtr(const T& v) : WriteBarriered<T>(v) {
+ this->post(JS::SafelyInitialized<T>::create(), v);
+ }
+
+ explicit GCPtr(const GCPtr<T>& v) : WriteBarriered<T>(v) {
+ this->post(JS::SafelyInitialized<T>::create(), v);
+ }
+
+#ifdef DEBUG
+ ~GCPtr() {
+ // No barriers are necessary as this only happens when the GC is sweeping.
+ //
+ // If this assertion fails you may need to make the containing object use a
+ // HeapPtr instead, as this can be deleted from outside of GC.
+ MOZ_ASSERT(CurrentThreadIsGCSweeping() || CurrentThreadIsGCFinalizing());
+
+ Poison(this, JS_FREED_HEAP_PTR_PATTERN, sizeof(*this),
+ MemCheckKind::MakeNoAccess);
+ }
+#endif
+
+ void init(const T& v) {
+ AssertTargetIsNotGray(v);
+ this->value = v;
+ this->post(JS::SafelyInitialized<T>::create(), v);
+ }
+
+ DECLARE_POINTER_ASSIGN_OPS(GCPtr, T);
+
+ void set(const T& v) {
+ AssertTargetIsNotGray(v);
+ setUnchecked(v);
+ }
+
+ private:
+ void setUnchecked(const T& v) {
+ this->pre();
+ T tmp = this->value;
+ this->value = v;
+ this->post(tmp, this->value);
+ }
+
+ /*
+ * Unlike HeapPtr<T>, GCPtr<T> must be managed with GC lifetimes.
+ * Specifically, the memory used by the pointer itself must be live until
+ * at least the next minor GC. For that reason, move semantics are invalid
+ * and are deleted here. Please note that not all containers support move
+ * semantics, so this does not completely prevent invalid uses.
+ */
+ GCPtr(GCPtr<T>&&) = delete;
+ GCPtr<T>& operator=(GCPtr<T>&&) = delete;
+};
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <typename T>
+struct DefineComparisonOps<js::GCPtr<T>> : std::true_type {
+ static const T& get(const js::GCPtr<T>& v) { return v.get(); }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+/*
+ * A pre- and post-barriered heap pointer, for use inside the JS engine. These
+ * heap pointers can be stored in C++ containers like GCVector and GCHashMap.
+ *
+ * The GC sometimes keeps pointers to pointers to GC things --- for example, to
+ * track references into the nursery. However, C++ containers like GCVector and
+ * GCHashMap usually reserve the right to relocate their elements any time
+ * they're modified, invalidating all pointers to the elements. HeapPtr
+ * has a move constructor which knows how to keep the GC up to date if it is
+ * moved to a new location.
+ *
+ * However, because of this additional communication with the GC, HeapPtr
+ * is somewhat slower, so it should only be used in contexts where this ability
+ * is necessary.
+ *
+ * Obviously, JSObjects, JSStrings, and the like get tenured and compacted, so
+ * whatever pointers they contain get relocated, in the sense used here.
+ * However, since the GC itself is moving those values, it takes care of its
+ * internal pointers to those pointers itself. HeapPtr is only necessary
+ * when the relocation would otherwise occur without the GC's knowledge.
+ */
+template <class T>
+class HeapPtr : public WriteBarriered<T> {
+ public:
+ HeapPtr() : WriteBarriered<T>(JS::SafelyInitialized<T>::create()) {}
+
+ // Implicitly adding barriers is a reasonable default.
+ MOZ_IMPLICIT HeapPtr(const T& v) : WriteBarriered<T>(v) {
+ this->post(JS::SafelyInitialized<T>::create(), this->value);
+ }
+
+ MOZ_IMPLICIT HeapPtr(const HeapPtr<T>& other) : WriteBarriered<T>(other) {
+ this->post(JS::SafelyInitialized<T>::create(), this->value);
+ }
+
+ HeapPtr(HeapPtr<T>&& other) : WriteBarriered<T>(other.release()) {
+ this->post(JS::SafelyInitialized<T>::create(), this->value);
+ }
+
+ ~HeapPtr() {
+ this->pre();
+ this->post(this->value, JS::SafelyInitialized<T>::create());
+ }
+
+ void init(const T& v) {
+ MOZ_ASSERT(this->value == JS::SafelyInitialized<T>::create());
+ AssertTargetIsNotGray(v);
+ this->value = v;
+ this->post(JS::SafelyInitialized<T>::create(), this->value);
+ }
+
+ DECLARE_POINTER_ASSIGN_AND_MOVE_OPS(HeapPtr, T);
+
+ void set(const T& v) {
+ AssertTargetIsNotGray(v);
+ setUnchecked(v);
+ }
+
+ /* Make this friend so it can access pre() and post(). */
+ template <class T1, class T2>
+ friend inline void BarrieredSetPair(Zone* zone, HeapPtr<T1*>& v1, T1* val1,
+ HeapPtr<T2*>& v2, T2* val2);
+
+ protected:
+ void setUnchecked(const T& v) {
+ this->pre();
+ postBarrieredSet(v);
+ }
+
+ void postBarrieredSet(const T& v) {
+ T tmp = this->value;
+ this->value = v;
+ this->post(tmp, this->value);
+ }
+
+ T release() {
+ T tmp = this->value;
+ postBarrieredSet(JS::SafelyInitialized<T>::create());
+ return tmp;
+ }
+};
+
+/*
+ * A pre-barriered heap pointer, for use inside the JS engine.
+ *
+ * Similar to GCPtr, but used for a pointer to a malloc-allocated structure
+ * containing GC thing pointers.
+ *
+ * It must only be stored in memory that has GC lifetime. It must not be used in
+ * contexts where it may be implicitly moved or deleted, e.g. most containers.
+ *
+ * A post-barrier is unnecessary since malloc-allocated structures cannot be in
+ * the nursery.
+ */
+template <class T>
+class GCStructPtr : public BarrieredBase<T> {
+ public:
+ // This is sometimes used to hold tagged pointers.
+ static constexpr uintptr_t MaxTaggedPointer = 0x2;
+
+ GCStructPtr() : BarrieredBase<T>(JS::SafelyInitialized<T>::create()) {}
+
+ // Implicitly adding barriers is a reasonable default.
+ MOZ_IMPLICIT GCStructPtr(const T& v) : BarrieredBase<T>(v) {}
+
+ GCStructPtr(const GCStructPtr<T>& other) : BarrieredBase<T>(other) {}
+
+ GCStructPtr(GCStructPtr<T>&& other) : BarrieredBase<T>(other.release()) {}
+
+ ~GCStructPtr() {
+ // No barriers are necessary as this only happens when the GC is sweeping.
+ MOZ_ASSERT_IF(isTraceable(),
+ CurrentThreadIsGCSweeping() || CurrentThreadIsGCFinalizing());
+ }
+
+ void init(const T& v) {
+ MOZ_ASSERT(this->get() == JS::SafelyInitialized<T>());
+ AssertTargetIsNotGray(v);
+ this->value = v;
+ }
+
+ void set(JS::Zone* zone, const T& v) {
+ pre(zone);
+ this->value = v;
+ }
+
+ T get() const { return this->value; }
+ operator T() const { return get(); }
+ T operator->() const { return get(); }
+
+ protected:
+ bool isTraceable() const { return uintptr_t(get()) > MaxTaggedPointer; }
+
+ void pre(JS::Zone* zone) {
+ if (isTraceable()) {
+ PreWriteBarrier(zone, get());
+ }
+ }
+};
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <typename T>
+struct DefineComparisonOps<js::HeapPtr<T>> : std::true_type {
+ static const T& get(const js::HeapPtr<T>& v) { return v.get(); }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+// Base class for barriered pointer types that intercept reads and writes.
+template <typename T>
+class ReadBarriered : public BarrieredBase<T> {
+ protected:
+ // ReadBarriered is not directly instantiable.
+ explicit ReadBarriered(const T& v) : BarrieredBase<T>(v) {}
+
+ void read() const { InternalBarrierMethods<T>::readBarrier(this->value); }
+ void post(const T& prev, const T& next) {
+ InternalBarrierMethods<T>::postBarrier(&this->value, prev, next);
+ }
+};
+
+// Incremental GC requires that weak pointers have read barriers. See the block
+// comment at the top of Barrier.h for a complete discussion of why.
+//
+// Note that this class also has post-barriers, so is safe to use with nursery
+// pointers. However, when used as a hashtable key, care must still be taken to
+// insert manual post-barriers on the table for rekeying if the key is based in
+// any way on the address of the object.
+template <typename T>
+class WeakHeapPtr : public ReadBarriered<T>,
+ public WrappedPtrOperations<T, WeakHeapPtr<T>> {
+ protected:
+ using ReadBarriered<T>::value;
+
+ public:
+ WeakHeapPtr() : ReadBarriered<T>(JS::SafelyInitialized<T>::create()) {}
+
+ // It is okay to add barriers implicitly.
+ MOZ_IMPLICIT WeakHeapPtr(const T& v) : ReadBarriered<T>(v) {
+ this->post(JS::SafelyInitialized<T>::create(), v);
+ }
+
+ // The copy constructor creates a new weak edge but the wrapped pointer does
+ // not escape, so no read barrier is necessary.
+ explicit WeakHeapPtr(const WeakHeapPtr& other) : ReadBarriered<T>(other) {
+ this->post(JS::SafelyInitialized<T>::create(), value);
+ }
+
+ // Move retains the lifetime status of the source edge, so does not fire
+ // the read barrier of the defunct edge.
+ WeakHeapPtr(WeakHeapPtr&& other) : ReadBarriered<T>(other.release()) {
+ this->post(JS::SafelyInitialized<T>::create(), value);
+ }
+
+ ~WeakHeapPtr() {
+ this->post(this->value, JS::SafelyInitialized<T>::create());
+ }
+
+ WeakHeapPtr& operator=(const WeakHeapPtr& v) {
+ AssertTargetIsNotGray(v.value);
+ T prior = this->value;
+ this->value = v.value;
+ this->post(prior, v.value);
+ return *this;
+ }
+
+ const T& get() const {
+ if (InternalBarrierMethods<T>::isMarkable(this->value)) {
+ this->read();
+ }
+ return this->value;
+ }
+
+ const T& unbarrieredGet() const { return this->value; }
+
+ explicit operator bool() const { return bool(this->value); }
+
+ operator const T&() const { return get(); }
+
+ const T& operator->() const { return get(); }
+
+ void set(const T& v) {
+ AssertTargetIsNotGray(v);
+ setUnchecked(v);
+ }
+
+ void unbarrieredSet(const T& v) {
+ AssertTargetIsNotGray(v);
+ this->value = v;
+ }
+
+ private:
+ void setUnchecked(const T& v) {
+ T tmp = this->value;
+ this->value = v;
+ this->post(tmp, v);
+ }
+
+ T release() {
+ T tmp = value;
+ set(JS::SafelyInitialized<T>::create());
+ return tmp;
+ }
+};
+
+// A wrapper for a bare pointer, with no barriers.
+//
+// This should only be necessary in a limited number of cases. Please don't add
+// more uses of this if at all possible.
+template <typename T>
+class UnsafeBarePtr : public BarrieredBase<T> {
+ public:
+ UnsafeBarePtr() : BarrieredBase<T>(JS::SafelyInitialized<T>::create()) {}
+ MOZ_IMPLICIT UnsafeBarePtr(T v) : BarrieredBase<T>(v) {}
+ const T& get() const { return this->value; }
+ void set(T newValue) { this->value = newValue; }
+ DECLARE_POINTER_CONSTREF_OPS(T);
+};
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <typename T>
+struct DefineComparisonOps<js::WeakHeapPtr<T>> : std::true_type {
+ static const T& get(const js::WeakHeapPtr<T>& v) {
+ return v.unbarrieredGet();
+ }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+// A pre- and post-barriered Value that is specialized to be aware that it
+// resides in a slots or elements vector. This allows it to be relocated in
+// memory, but with substantially less overhead than a HeapPtr.
+class HeapSlot : public WriteBarriered<Value> {
+ public:
+ enum Kind { Slot = 0, Element = 1 };
+
+ void init(NativeObject* owner, Kind kind, uint32_t slot, const Value& v) {
+ value = v;
+ post(owner, kind, slot, v);
+ }
+
+ void initAsUndefined() { value.setUndefined(); }
+
+ void destroy() { pre(); }
+
+ void setUndefinedUnchecked() {
+ pre();
+ value.setUndefined();
+ }
+
+#ifdef DEBUG
+ bool preconditionForSet(NativeObject* owner, Kind kind, uint32_t slot) const;
+ void assertPreconditionForPostWriteBarrier(NativeObject* obj, Kind kind,
+ uint32_t slot,
+ const Value& target) const;
+#endif
+
+ MOZ_ALWAYS_INLINE void set(NativeObject* owner, Kind kind, uint32_t slot,
+ const Value& v) {
+ MOZ_ASSERT(preconditionForSet(owner, kind, slot));
+ pre();
+ value = v;
+ post(owner, kind, slot, v);
+ }
+
+ private:
+ void post(NativeObject* owner, Kind kind, uint32_t slot,
+ const Value& target) {
+#ifdef DEBUG
+ assertPreconditionForPostWriteBarrier(owner, kind, slot, target);
+#endif
+ if (this->value.isGCThing()) {
+ gc::Cell* cell = this->value.toGCThing();
+ if (cell->storeBuffer()) {
+ cell->storeBuffer()->putSlot(owner, kind, slot, 1);
+ }
+ }
+ }
+};
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <>
+struct DefineComparisonOps<js::HeapSlot> : std::true_type {
+ static const Value& get(const js::HeapSlot& v) { return v.get(); }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+class HeapSlotArray {
+ HeapSlot* array;
+
+ public:
+ explicit HeapSlotArray(HeapSlot* array) : array(array) {}
+
+ HeapSlot* begin() const { return array; }
+
+ operator const Value*() const {
+ static_assert(sizeof(GCPtr<Value>) == sizeof(Value));
+ static_assert(sizeof(HeapSlot) == sizeof(Value));
+ return reinterpret_cast<const Value*>(array);
+ }
+ operator HeapSlot*() const { return begin(); }
+
+ HeapSlotArray operator+(int offset) const {
+ return HeapSlotArray(array + offset);
+ }
+ HeapSlotArray operator+(uint32_t offset) const {
+ return HeapSlotArray(array + offset);
+ }
+};
+
+/*
+ * This is a hack for RegExpStatics::updateFromMatch. It allows us to do two
+ * barriers with only one branch to check if we're in an incremental GC.
+ */
+template <class T1, class T2>
+static inline void BarrieredSetPair(Zone* zone, HeapPtr<T1*>& v1, T1* val1,
+ HeapPtr<T2*>& v2, T2* val2) {
+ AssertTargetIsNotGray(val1);
+ AssertTargetIsNotGray(val2);
+ if (T1::needPreWriteBarrier(zone)) {
+ v1.pre();
+ v2.pre();
+ }
+ v1.postBarrieredSet(val1);
+ v2.postBarrieredSet(val2);
+}
+
+/*
+ * ImmutableTenuredPtr is designed for one very narrow case: replacing
+ * immutable raw pointers to GC-managed things, implicitly converting to a
+ * handle type for ease of use. Pointers encapsulated by this type must:
+ *
+ * be immutable (no incremental write barriers),
+ * never point into the nursery (no generational write barriers), and
+ * be traced via MarkRuntime (we use fromMarkedLocation).
+ *
+ * In short: you *really* need to know what you're doing before you use this
+ * class!
+ */
+template <typename T>
+class MOZ_HEAP_CLASS ImmutableTenuredPtr {
+ T value;
+
+ public:
+ operator T() const { return value; }
+ T operator->() const { return value; }
+
+ // `ImmutableTenuredPtr<T>` is implicitly convertible to `Handle<T>`.
+ //
+ // In case you need to convert to `Handle<U>` where `U` is base class of `T`,
+ // convert this to `Handle<T>` by `toHandle()` and then use implicit
+ // conversion from `Handle<T>` to `Handle<U>`.
+ operator Handle<T>() const { return toHandle(); }
+ Handle<T> toHandle() const { return Handle<T>::fromMarkedLocation(&value); }
+
+ void init(T ptr) {
+ MOZ_ASSERT(ptr->isTenured());
+ AssertTargetIsNotGray(ptr);
+ value = ptr;
+ }
+
+ T get() const { return value; }
+ const T* address() { return &value; }
+};
+
+// Template to remove any barrier wrapper and get the underlying type.
+template <typename T>
+struct RemoveBarrier {
+ using Type = T;
+};
+template <typename T>
+struct RemoveBarrier<HeapPtr<T>> {
+ using Type = T;
+};
+template <typename T>
+struct RemoveBarrier<GCPtr<T>> {
+ using Type = T;
+};
+template <typename T>
+struct RemoveBarrier<PreBarriered<T>> {
+ using Type = T;
+};
+template <typename T>
+struct RemoveBarrier<WeakHeapPtr<T>> {
+ using Type = T;
+};
+
+#if MOZ_IS_GCC
+template struct JS_PUBLIC_API StableCellHasher<JSObject*>;
+#endif
+
+template <typename T>
+struct StableCellHasher<PreBarriered<T>> {
+ using Key = PreBarriered<T>;
+ using Lookup = T;
+
+ static bool maybeGetHash(const Lookup& l, HashNumber* hashOut) {
+ return StableCellHasher<T>::maybeGetHash(l, hashOut);
+ }
+ static bool ensureHash(const Lookup& l, HashNumber* hashOut) {
+ return StableCellHasher<T>::ensureHash(l, hashOut);
+ }
+ static HashNumber hash(const Lookup& l) {
+ return StableCellHasher<T>::hash(l);
+ }
+ static bool match(const Key& k, const Lookup& l) {
+ return StableCellHasher<T>::match(k, l);
+ }
+};
+
+template <typename T>
+struct StableCellHasher<HeapPtr<T>> {
+ using Key = HeapPtr<T>;
+ using Lookup = T;
+
+ static bool maybeGetHash(const Lookup& l, HashNumber* hashOut) {
+ return StableCellHasher<T>::maybeGetHash(l, hashOut);
+ }
+ static bool ensureHash(const Lookup& l, HashNumber* hashOut) {
+ return StableCellHasher<T>::ensureHash(l, hashOut);
+ }
+ static HashNumber hash(const Lookup& l) {
+ return StableCellHasher<T>::hash(l);
+ }
+ static bool match(const Key& k, const Lookup& l) {
+ return StableCellHasher<T>::match(k, l);
+ }
+};
+
+template <typename T>
+struct StableCellHasher<WeakHeapPtr<T>> {
+ using Key = WeakHeapPtr<T>;
+ using Lookup = T;
+
+ static bool maybeGetHash(const Lookup& l, HashNumber* hashOut) {
+ return StableCellHasher<T>::maybeGetHash(l, hashOut);
+ }
+ static bool ensureHash(const Lookup& l, HashNumber* hashOut) {
+ return StableCellHasher<T>::ensureHash(l, hashOut);
+ }
+ static HashNumber hash(const Lookup& l) {
+ return StableCellHasher<T>::hash(l);
+ }
+ static bool match(const Key& k, const Lookup& l) {
+ return StableCellHasher<T>::match(k.unbarrieredGet(), l);
+ }
+};
+
+/* Useful for hashtables with a HeapPtr as key. */
+template <class T>
+struct HeapPtrHasher {
+ using Key = HeapPtr<T>;
+ using Lookup = T;
+
+ static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
+ static bool match(const Key& k, Lookup l) { return k.get() == l; }
+ static void rekey(Key& k, const Key& newKey) { k.unbarrieredSet(newKey); }
+};
+
+template <class T>
+struct PreBarrieredHasher {
+ using Key = PreBarriered<T>;
+ using Lookup = T;
+
+ static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
+ static bool match(const Key& k, Lookup l) { return k.get() == l; }
+ static void rekey(Key& k, const Key& newKey) { k.unbarrieredSet(newKey); }
+};
+
+/* Useful for hashtables with a WeakHeapPtr as key. */
+template <class T>
+struct WeakHeapPtrHasher {
+ using Key = WeakHeapPtr<T>;
+ using Lookup = T;
+
+ static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
+ static bool match(const Key& k, Lookup l) { return k.unbarrieredGet() == l; }
+ static void rekey(Key& k, const Key& newKey) {
+ k.set(newKey.unbarrieredGet());
+ }
+};
+
+template <class T>
+struct UnsafeBarePtrHasher {
+ using Key = UnsafeBarePtr<T>;
+ using Lookup = T;
+
+ static HashNumber hash(const Lookup& l) { return DefaultHasher<T>::hash(l); }
+ static bool match(const Key& k, Lookup l) { return k.get() == l; }
+ static void rekey(Key& k, const Key& newKey) { k.set(newKey.get()); }
+};
+
+} // namespace js
+
+namespace mozilla {
+
+template <class T>
+struct DefaultHasher<js::HeapPtr<T>> : js::HeapPtrHasher<T> {};
+
+template <class T>
+struct DefaultHasher<js::GCPtr<T>> {
+ // Not implemented. GCPtr can't be used as a hash table key because it has a
+ // post barrier but doesn't support relocation.
+};
+
+template <class T>
+struct DefaultHasher<js::PreBarriered<T>> : js::PreBarrieredHasher<T> {};
+
+template <class T>
+struct DefaultHasher<js::WeakHeapPtr<T>> : js::WeakHeapPtrHasher<T> {};
+
+template <class T>
+struct DefaultHasher<js::UnsafeBarePtr<T>> : js::UnsafeBarePtrHasher<T> {};
+
+} // namespace mozilla
+
+#endif /* gc_Barrier_h */
diff --git a/js/src/gc/Cell.h b/js/src/gc/Cell.h
new file mode 100644
index 0000000000..bc2da39b28
--- /dev/null
+++ b/js/src/gc/Cell.h
@@ -0,0 +1,930 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Cell_h
+#define gc_Cell_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/EndianUtils.h"
+
+#include <type_traits>
+
+#include "gc/GCContext.h"
+#include "gc/Heap.h"
+#include "gc/TraceKind.h"
+#include "js/GCAnnotations.h"
+#include "js/shadow/Zone.h" // JS::shadow::Zone
+#include "js/TypeDecls.h"
+
+namespace JS {
+enum class TraceKind;
+} /* namespace JS */
+
+namespace js {
+
+class JS_PUBLIC_API GenericPrinter;
+
+extern bool RuntimeFromMainThreadIsHeapMajorCollecting(
+ JS::shadow::Zone* shadowZone);
+
+#ifdef DEBUG
+// Barriers can't be triggered during backend Ion compilation, which may run on
+// a helper thread.
+extern bool CurrentThreadIsIonCompiling();
+#endif
+
+extern void TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc,
+ gc::Cell** thingp,
+ const char* name);
+
+namespace gc {
+
+enum class AllocKind : uint8_t;
+class CellAllocator; // Declared so subtypes of Cell can friend it easily.
+class StoreBuffer;
+class TenuredCell;
+
+extern void PerformIncrementalReadBarrier(TenuredCell* cell);
+extern void PerformIncrementalPreWriteBarrier(TenuredCell* cell);
+extern void PerformIncrementalBarrierDuringFlattening(JSString* str);
+extern void UnmarkGrayGCThingRecursively(TenuredCell* cell);
+
+// Like gc::MarkColor but allows the possibility of the cell being unmarked.
+//
+// This class mimics an enum class, but supports operator overloading.
+class CellColor {
+ public:
+ enum Color { White = 0, Gray = 1, Black = 2 };
+
+ CellColor() : color(White) {}
+
+ MOZ_IMPLICIT CellColor(MarkColor markColor)
+ : color(markColor == MarkColor::Black ? Black : Gray) {}
+
+ MOZ_IMPLICIT constexpr CellColor(Color c) : color(c) {}
+
+ MarkColor asMarkColor() const {
+ MOZ_ASSERT(color != White);
+ return color == Black ? MarkColor::Black : MarkColor::Gray;
+ }
+
+ // Implement a total ordering for CellColor, with white being 'least marked'
+ // and black being 'most marked'.
+ bool operator<(const CellColor other) const { return color < other.color; }
+ bool operator>(const CellColor other) const { return color > other.color; }
+ bool operator<=(const CellColor other) const { return color <= other.color; }
+ bool operator>=(const CellColor other) const { return color >= other.color; }
+ bool operator!=(const CellColor other) const { return color != other.color; }
+ bool operator==(const CellColor other) const { return color == other.color; }
+ explicit operator bool() const { return color != White; }
+
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+ const char* name() const {
+ switch (color) {
+ case CellColor::White:
+ return "white";
+ case CellColor::Black:
+ return "black";
+ case CellColor::Gray:
+ return "gray";
+ default:
+ MOZ_CRASH("Unexpected cell color");
+ }
+ }
+#endif
+
+ private:
+ Color color;
+};
+
+// Cell header word. Stores GC flags and derived class data.
+//
+// Loads of GC flags + all stores are marked as (relaxed) atomic operations,
+// to deal with the following benign data race during compacting GC:
+//
+// - Thread 1 checks isForwarded (which is always false in this situation).
+// - Thread 2 updates the derived class data (without changing the forwarded
+// flag).
+//
+// To improve performance, we don't use atomic operations for get() because
+// atomic operations inhibit certain compiler optimizations: GCC and Clang are
+// unable to fold multiple loads even if they're both relaxed atomics. This is
+// especially a problem for chained loads such as obj->shape->base->clasp.
+class HeaderWord {
+ // Indicates whether the cell has been forwarded (moved) by generational or
+ // compacting GC and is now a RelocationOverlay.
+ static constexpr uintptr_t FORWARD_BIT = Bit(0);
+ // Bits 1 and 2 are reserved for future use by the GC.
+
+ uintptr_t value_;
+
+ void setAtomic(uintptr_t value) {
+ __atomic_store_n(&value_, value, __ATOMIC_RELAXED);
+ }
+
+ public:
+ static constexpr uintptr_t RESERVED_MASK =
+ BitMask(gc::CellFlagBitsReservedForGC);
+ static_assert(gc::CellFlagBitsReservedForGC >= 3,
+ "Not enough flag bits reserved for GC");
+
+ uintptr_t getAtomic() const {
+ return __atomic_load_n(&value_, __ATOMIC_RELAXED);
+ }
+
+ // Accessors for derived class data.
+ uintptr_t get() const {
+ // Note: non-atomic load. See class comment.
+ uintptr_t value = value_;
+ MOZ_ASSERT((value & RESERVED_MASK) == 0);
+ return value;
+ }
+ void set(uintptr_t value) {
+ MOZ_ASSERT((value & RESERVED_MASK) == 0);
+ setAtomic(value);
+ }
+
+ // Accessors for GC data.
+ uintptr_t flags() const { return getAtomic() & RESERVED_MASK; }
+ bool isForwarded() const { return flags() & FORWARD_BIT; }
+ void setForwardingAddress(uintptr_t ptr) {
+ MOZ_ASSERT((ptr & RESERVED_MASK) == 0);
+ setAtomic(ptr | FORWARD_BIT);
+ }
+ uintptr_t getForwardingAddress() const {
+ MOZ_ASSERT(isForwarded());
+ return getAtomic() & ~RESERVED_MASK;
+ }
+};
+
+// [SMDOC] GC Cell
+//
+// A GC cell is the ultimate base class for all GC things. All types allocated
+// on the GC heap extend either gc::Cell or gc::TenuredCell. If a type is always
+// tenured, prefer the TenuredCell class as base.
+//
+// The first word of Cell is a HeaderWord (a uintptr_t) that reserves the low
+// three bits for GC purposes. The remaining bits are available to sub-classes
+// and can be used to store a pointer to another gc::Cell. To make use of the
+// remaining space, sub-classes derive from a helper class such as
+// TenuredCellWithNonGCPointer.
+//
+// During moving GC operation a Cell may be marked as forwarded. This indicates
+// that a gc::RelocationOverlay is currently stored in the Cell's memory and
+// should be used to find the new location of the Cell.
+struct Cell {
+ // Cell header word. Stores GC flags and derived class data.
+ HeaderWord header_;
+
+ public:
+ Cell() = default;
+
+ Cell(const Cell&) = delete;
+ void operator=(const Cell&) = delete;
+
+ bool isForwarded() const { return header_.isForwarded(); }
+ uintptr_t flags() const { return header_.flags(); }
+
+ MOZ_ALWAYS_INLINE bool isTenured() const { return !IsInsideNursery(this); }
+ MOZ_ALWAYS_INLINE const TenuredCell& asTenured() const;
+ MOZ_ALWAYS_INLINE TenuredCell& asTenured();
+
+ MOZ_ALWAYS_INLINE bool isMarkedAny() const;
+ MOZ_ALWAYS_INLINE bool isMarkedBlack() const;
+ MOZ_ALWAYS_INLINE bool isMarkedGray() const;
+ MOZ_ALWAYS_INLINE bool isMarked(gc::MarkColor color) const;
+ MOZ_ALWAYS_INLINE bool isMarkedAtLeast(gc::MarkColor color) const;
+ MOZ_ALWAYS_INLINE CellColor color() const;
+
+ inline JSRuntime* runtimeFromMainThread() const;
+
+ // Note: Unrestricted access to the runtime of a GC thing from an arbitrary
+ // thread can easily lead to races. Use this method very carefully.
+ inline JSRuntime* runtimeFromAnyThread() const;
+
+ // May be overridden by GC thing kinds that have a compartment pointer.
+ inline JS::Compartment* maybeCompartment() const { return nullptr; }
+
+ // The StoreBuffer used to record incoming pointers from the tenured heap.
+ // This will return nullptr for a tenured cell.
+ inline StoreBuffer* storeBuffer() const;
+
+ inline JS::TraceKind getTraceKind() const;
+
+ static MOZ_ALWAYS_INLINE bool needPreWriteBarrier(JS::Zone* zone);
+
+ template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
+ inline bool is() const {
+ return getTraceKind() == JS::MapTypeToTraceKind<T>::kind;
+ }
+
+ template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
+ inline T* as() {
+ // |this|-qualify the |is| call below to avoid compile errors with even
+ // fairly recent versions of gcc, e.g. 7.1.1 according to bz.
+ MOZ_ASSERT(this->is<T>());
+ return static_cast<T*>(this);
+ }
+
+ template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
+ inline const T* as() const {
+ // |this|-qualify the |is| call below to avoid compile errors with even
+ // fairly recent versions of gcc, e.g. 7.1.1 according to bz.
+ MOZ_ASSERT(this->is<T>());
+ return static_cast<const T*>(this);
+ }
+
+ inline JS::Zone* zone() const;
+ inline JS::Zone* zoneFromAnyThread() const;
+
+ // Get the zone for a cell known to be in the nursery.
+ inline JS::Zone* nurseryZone() const;
+ inline JS::Zone* nurseryZoneFromAnyThread() const;
+
+ // Default implementation for kinds that cannot be permanent. This may be
+  // overridden by derived classes.
+ MOZ_ALWAYS_INLINE bool isPermanentAndMayBeShared() const { return false; }
+
+#ifdef DEBUG
+ static inline void assertThingIsNotGray(Cell* cell);
+ inline bool isAligned() const;
+ void dump(GenericPrinter& out) const;
+ void dump() const;
+#endif
+
+ protected:
+ uintptr_t address() const;
+ inline ChunkBase* chunk() const;
+
+ private:
+ // Cells are destroyed by the GC. Do not delete them directly.
+ void operator delete(void*) = delete;
+} JS_HAZ_GC_THING;
+
+// A GC TenuredCell gets behaviors that are valid for things in the Tenured
+// heap, such as access to the arena and mark bits.
+class TenuredCell : public Cell {
+ public:
+ MOZ_ALWAYS_INLINE bool isTenured() const {
+ MOZ_ASSERT(!IsInsideNursery(this));
+ return true;
+ }
+
+ TenuredChunk* chunk() const {
+ return static_cast<TenuredChunk*>(Cell::chunk());
+ }
+
+ // Mark bit management.
+ MOZ_ALWAYS_INLINE bool isMarkedAny() const;
+ MOZ_ALWAYS_INLINE bool isMarkedBlack() const;
+ MOZ_ALWAYS_INLINE bool isMarkedGray() const;
+ MOZ_ALWAYS_INLINE CellColor color() const;
+
+ // The return value indicates if the cell went from unmarked to marked.
+ MOZ_ALWAYS_INLINE bool markIfUnmarked(
+ MarkColor color = MarkColor::Black) const;
+ MOZ_ALWAYS_INLINE bool markIfUnmarkedAtomic(MarkColor color) const;
+ MOZ_ALWAYS_INLINE void markBlack() const;
+ MOZ_ALWAYS_INLINE void markBlackAtomic() const;
+ MOZ_ALWAYS_INLINE void copyMarkBitsFrom(const TenuredCell* src);
+ MOZ_ALWAYS_INLINE void unmark();
+
+ // Access to the arena.
+ inline Arena* arena() const;
+ inline AllocKind getAllocKind() const;
+ inline JS::TraceKind getTraceKind() const;
+ inline JS::Zone* zone() const;
+ inline JS::Zone* zoneFromAnyThread() const;
+ inline bool isInsideZone(JS::Zone* zone) const;
+
+ MOZ_ALWAYS_INLINE JS::shadow::Zone* shadowZone() const {
+ return JS::shadow::Zone::from(zone());
+ }
+ MOZ_ALWAYS_INLINE JS::shadow::Zone* shadowZoneFromAnyThread() const {
+ return JS::shadow::Zone::from(zoneFromAnyThread());
+ }
+
+ template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
+ inline bool is() const {
+ return getTraceKind() == JS::MapTypeToTraceKind<T>::kind;
+ }
+
+ template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
+ inline T* as() {
+ // |this|-qualify the |is| call below to avoid compile errors with even
+ // fairly recent versions of gcc, e.g. 7.1.1 according to bz.
+ MOZ_ASSERT(this->is<T>());
+ return static_cast<T*>(this);
+ }
+
+ template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
+ inline const T* as() const {
+ // |this|-qualify the |is| call below to avoid compile errors with even
+ // fairly recent versions of gcc, e.g. 7.1.1 according to bz.
+ MOZ_ASSERT(this->is<T>());
+ return static_cast<const T*>(this);
+ }
+
+ // Default implementation for kinds that don't require fixup.
+ void fixupAfterMovingGC() {}
+
+ static inline CellColor getColor(MarkBitmap* bitmap, const TenuredCell* cell);
+
+#ifdef DEBUG
+ inline bool isAligned() const;
+#endif
+};
+
+MOZ_ALWAYS_INLINE const TenuredCell& Cell::asTenured() const {
+ MOZ_ASSERT(isTenured());
+ return *static_cast<const TenuredCell*>(this);
+}
+
+MOZ_ALWAYS_INLINE TenuredCell& Cell::asTenured() {
+ MOZ_ASSERT(isTenured());
+ return *static_cast<TenuredCell*>(this);
+}
+
+MOZ_ALWAYS_INLINE bool Cell::isMarkedAny() const {
+ return !isTenured() || asTenured().isMarkedAny();
+}
+
+MOZ_ALWAYS_INLINE bool Cell::isMarkedBlack() const {
+ return !isTenured() || asTenured().isMarkedBlack();
+}
+
+MOZ_ALWAYS_INLINE bool Cell::isMarkedGray() const {
+ return isTenured() && asTenured().isMarkedGray();
+}
+
+MOZ_ALWAYS_INLINE bool Cell::isMarked(gc::MarkColor color) const {
+ return color == MarkColor::Gray ? isMarkedGray() : isMarkedBlack();
+}
+
+MOZ_ALWAYS_INLINE bool Cell::isMarkedAtLeast(gc::MarkColor color) const {
+ return color == MarkColor::Gray ? isMarkedAny() : isMarkedBlack();
+}
+
+MOZ_ALWAYS_INLINE CellColor Cell::color() const {
+ return isTenured() ? asTenured().color() : CellColor::Black;
+}
+
+inline JSRuntime* Cell::runtimeFromMainThread() const {
+ JSRuntime* rt = chunk()->runtime;
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ return rt;
+}
+
+inline JSRuntime* Cell::runtimeFromAnyThread() const {
+ return chunk()->runtime;
+}
+
+inline uintptr_t Cell::address() const {
+ uintptr_t addr = uintptr_t(this);
+ MOZ_ASSERT(addr % CellAlignBytes == 0);
+ MOZ_ASSERT(TenuredChunk::withinValidRange(addr));
+ return addr;
+}
+
+ChunkBase* Cell::chunk() const {
+ uintptr_t addr = uintptr_t(this);
+ MOZ_ASSERT(addr % CellAlignBytes == 0);
+ addr &= ~ChunkMask;
+ return reinterpret_cast<ChunkBase*>(addr);
+}
+
+inline StoreBuffer* Cell::storeBuffer() const { return chunk()->storeBuffer; }
+
+JS::Zone* Cell::zone() const {
+ if (isTenured()) {
+ return asTenured().zone();
+ }
+
+ return nurseryZone();
+}
+
+JS::Zone* Cell::zoneFromAnyThread() const {
+ if (isTenured()) {
+ return asTenured().zoneFromAnyThread();
+ }
+
+ return nurseryZoneFromAnyThread();
+}
+
+JS::Zone* Cell::nurseryZone() const {
+ JS::Zone* zone = nurseryZoneFromAnyThread();
+ MOZ_ASSERT(CurrentThreadIsGCMarking() || CurrentThreadCanAccessZone(zone));
+ return zone;
+}
+
+JS::Zone* Cell::nurseryZoneFromAnyThread() const {
+ return NurseryCellHeader::from(this)->zone();
+}
+
+#ifdef DEBUG
+extern Cell* UninlinedForwarded(const Cell* cell);
+#endif
+
+inline JS::TraceKind Cell::getTraceKind() const {
+ if (isTenured()) {
+ MOZ_ASSERT_IF(isForwarded(), UninlinedForwarded(this)->getTraceKind() ==
+ asTenured().getTraceKind());
+ return asTenured().getTraceKind();
+ }
+
+ return NurseryCellHeader::from(this)->traceKind();
+}
+
+/* static */ MOZ_ALWAYS_INLINE bool Cell::needPreWriteBarrier(JS::Zone* zone) {
+ return JS::shadow::Zone::from(zone)->needsIncrementalBarrier();
+}
+
+MOZ_ALWAYS_INLINE bool TenuredCell::isMarkedAny() const {
+ MOZ_ASSERT(arena()->allocated());
+ return chunk()->markBits.isMarkedAny(this);
+}
+
+MOZ_ALWAYS_INLINE bool TenuredCell::isMarkedBlack() const {
+ MOZ_ASSERT(arena()->allocated());
+ return chunk()->markBits.isMarkedBlack(this);
+}
+
+MOZ_ALWAYS_INLINE bool TenuredCell::isMarkedGray() const {
+ MOZ_ASSERT(arena()->allocated());
+ return chunk()->markBits.isMarkedGray(this);
+}
+
+MOZ_ALWAYS_INLINE CellColor TenuredCell::color() const {
+ return getColor(&chunk()->markBits, this);
+}
+
+/* static */
+inline CellColor TenuredCell::getColor(MarkBitmap* bitmap,
+ const TenuredCell* cell) {
+ // Note that this method isn't synchronised so may give surprising results if
+ // the mark bitmap is being modified concurrently.
+
+ if (bitmap->isMarkedBlack(cell)) {
+ return CellColor::Black;
+ }
+
+ if (bitmap->isMarkedGray(cell)) {
+ return CellColor::Gray;
+ }
+
+ return CellColor::White;
+}
+
+bool TenuredCell::markIfUnmarked(MarkColor color /* = Black */) const {
+ return chunk()->markBits.markIfUnmarked(this, color);
+}
+
+bool TenuredCell::markIfUnmarkedAtomic(MarkColor color) const {
+ return chunk()->markBits.markIfUnmarkedAtomic(this, color);
+}
+
+void TenuredCell::markBlack() const { chunk()->markBits.markBlack(this); }
+void TenuredCell::markBlackAtomic() const {
+ chunk()->markBits.markBlackAtomic(this);
+}
+
+void TenuredCell::copyMarkBitsFrom(const TenuredCell* src) {
+ MarkBitmap& markBits = chunk()->markBits;
+ markBits.copyMarkBit(this, src, ColorBit::BlackBit);
+ markBits.copyMarkBit(this, src, ColorBit::GrayOrBlackBit);
+}
+
+void TenuredCell::unmark() { chunk()->markBits.unmark(this); }
+
+inline Arena* TenuredCell::arena() const {
+ MOZ_ASSERT(isTenured());
+ uintptr_t addr = address();
+ addr &= ~ArenaMask;
+ return reinterpret_cast<Arena*>(addr);
+}
+
+AllocKind TenuredCell::getAllocKind() const { return arena()->getAllocKind(); }
+
+JS::TraceKind TenuredCell::getTraceKind() const {
+ return MapAllocToTraceKind(getAllocKind());
+}
+
+JS::Zone* TenuredCell::zone() const {
+ JS::Zone* zone = arena()->zone;
+ MOZ_ASSERT(CurrentThreadIsGCMarking() || CurrentThreadCanAccessZone(zone));
+ return zone;
+}
+
+JS::Zone* TenuredCell::zoneFromAnyThread() const { return arena()->zone; }
+
+bool TenuredCell::isInsideZone(JS::Zone* zone) const {
+ return zone == arena()->zone;
+}
+
+// Read barrier and pre-write barrier implementation for GC cells.
+
+template <typename T>
+MOZ_ALWAYS_INLINE void ReadBarrier(T* thing) {
+ static_assert(std::is_base_of_v<Cell, T>);
+ static_assert(!std::is_same_v<Cell, T> && !std::is_same_v<TenuredCell, T>);
+
+ if (thing) {
+ ReadBarrierImpl(thing);
+ }
+}
+
+MOZ_ALWAYS_INLINE void ReadBarrierImpl(TenuredCell* thing) {
+ MOZ_ASSERT(CurrentThreadIsMainThread());
+ MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
+ MOZ_ASSERT(thing);
+
+ JS::shadow::Zone* shadowZone = thing->shadowZoneFromAnyThread();
+ if (shadowZone->needsIncrementalBarrier()) {
+ PerformIncrementalReadBarrier(thing);
+ return;
+ }
+
+ if (thing->isMarkedGray()) {
+ UnmarkGrayGCThingRecursively(thing);
+ }
+}
+
+MOZ_ALWAYS_INLINE void ReadBarrierImpl(Cell* thing) {
+ MOZ_ASSERT(!CurrentThreadIsGCMarking());
+ MOZ_ASSERT(thing);
+
+ if (thing->isTenured()) {
+ ReadBarrierImpl(&thing->asTenured());
+ }
+}
+
+MOZ_ALWAYS_INLINE void PreWriteBarrierImpl(TenuredCell* thing) {
+ MOZ_ASSERT(CurrentThreadIsMainThread() || CurrentThreadIsGCSweeping() ||
+ CurrentThreadIsGCFinalizing());
+ MOZ_ASSERT(thing);
+
+ // Barriers can be triggered on the main thread while collecting, but are
+ // disabled. For example, this happens when sweeping HeapPtr wrappers. See
+ // AutoDisableBarriers.
+
+ JS::shadow::Zone* zone = thing->shadowZoneFromAnyThread();
+ if (zone->needsIncrementalBarrier()) {
+ PerformIncrementalPreWriteBarrier(thing);
+ }
+}
+
+MOZ_ALWAYS_INLINE void PreWriteBarrierImpl(Cell* thing) {
+ MOZ_ASSERT(!CurrentThreadIsGCMarking());
+ MOZ_ASSERT(thing);
+
+ if (thing->isTenured()) {
+ PreWriteBarrierImpl(&thing->asTenured());
+ }
+}
+
+template <typename T>
+MOZ_ALWAYS_INLINE void PreWriteBarrier(T* thing) {
+ static_assert(std::is_base_of_v<Cell, T>);
+ static_assert(!std::is_same_v<Cell, T> && !std::is_same_v<TenuredCell, T>);
+
+ if (thing) {
+ PreWriteBarrierImpl(thing);
+ }
+}
+
+// Pre-write barrier implementation for structures containing GC cells, taking a
+// functor to trace the structure.
+template <typename T, typename F>
+MOZ_ALWAYS_INLINE void PreWriteBarrier(JS::Zone* zone, T* data,
+ const F& traceFn) {
+ MOZ_ASSERT(data);
+ MOZ_ASSERT(!CurrentThreadIsIonCompiling());
+ MOZ_ASSERT(!CurrentThreadIsGCMarking());
+
+ auto* shadowZone = JS::shadow::Zone::from(zone);
+ if (!shadowZone->needsIncrementalBarrier()) {
+ return;
+ }
+
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(shadowZone->runtimeFromAnyThread()));
+ MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone));
+
+ traceFn(shadowZone->barrierTracer(), data);
+}
+
+// Pre-write barrier implementation for structures containing GC cells. T must
+// support a |trace| method.
+template <typename T>
+MOZ_ALWAYS_INLINE void PreWriteBarrier(JS::Zone* zone, T* data) {
+ MOZ_ASSERT(data);
+ PreWriteBarrier(zone, data, [](JSTracer* trc, T* data) { data->trace(trc); });
+}
+
+#ifdef DEBUG
+
+/* static */ void Cell::assertThingIsNotGray(Cell* cell) {
+ JS::AssertCellIsNotGray(cell);
+}
+
+bool Cell::isAligned() const {
+ if (!isTenured()) {
+ return true;
+ }
+ return asTenured().isAligned();
+}
+
+bool TenuredCell::isAligned() const {
+ return Arena::isAligned(address(), arena()->getThingSize());
+}
+
+#endif
+
+// Base class for nursery-allocatable GC things that have 32-bit length and
+// 32-bit flags (currently JSString and BigInt).
+//
+// This tries to store both in Cell::header_, but if that isn't large enough the
+// length is stored separately.
+//
+// 32 0
+// ------------------
+// | Length | Flags |
+// ------------------
+//
+// The low bits of the flags word (see CellFlagBitsReservedForGC) are reserved
+// for GC. Derived classes must ensure they don't use these flags for non-GC
+// purposes.
+class alignas(gc::CellAlignBytes) CellWithLengthAndFlags : public Cell {
+#if JS_BITS_PER_WORD == 32
+ // Additional storage for length if |header_| is too small to fit both.
+ uint32_t length_;
+#endif
+
+ protected:
+ uint32_t headerLengthField() const {
+#if JS_BITS_PER_WORD == 32
+ return length_;
+#else
+ return uint32_t(header_.get() >> 32);
+#endif
+ }
+
+ uint32_t headerFlagsField() const { return uint32_t(header_.get()); }
+
+ void setHeaderFlagBit(uint32_t flag) {
+ header_.set(header_.get() | uintptr_t(flag));
+ }
+ void clearHeaderFlagBit(uint32_t flag) {
+ header_.set(header_.get() & ~uintptr_t(flag));
+ }
+ void toggleHeaderFlagBit(uint32_t flag) {
+ header_.set(header_.get() ^ uintptr_t(flag));
+ }
+
+ void setHeaderLengthAndFlags(uint32_t len, uint32_t flags) {
+#if JS_BITS_PER_WORD == 32
+ header_.set(flags);
+ length_ = len;
+#else
+ header_.set((uint64_t(len) << 32) | uint64_t(flags));
+#endif
+ }
+
+ public:
+ // Returns the offset of header_. JIT code should use offsetOfFlags
+ // below.
+ static constexpr size_t offsetOfRawHeaderFlagsField() {
+ return offsetof(CellWithLengthAndFlags, header_);
+ }
+
+  // Offsets for direct field access from jit code. A number of places directly
+ // access 32-bit length and flags fields so do endian trickery here.
+#if JS_BITS_PER_WORD == 32
+ static constexpr size_t offsetOfHeaderFlags() {
+ return offsetof(CellWithLengthAndFlags, header_);
+ }
+ static constexpr size_t offsetOfHeaderLength() {
+ return offsetof(CellWithLengthAndFlags, length_);
+ }
+#elif MOZ_LITTLE_ENDIAN()
+ static constexpr size_t offsetOfHeaderFlags() {
+ return offsetof(CellWithLengthAndFlags, header_);
+ }
+ static constexpr size_t offsetOfHeaderLength() {
+ return offsetof(CellWithLengthAndFlags, header_) + sizeof(uint32_t);
+ }
+#else
+ static constexpr size_t offsetOfHeaderFlags() {
+ return offsetof(CellWithLengthAndFlags, header_) + sizeof(uint32_t);
+ }
+ static constexpr size_t offsetOfHeaderLength() {
+ return offsetof(CellWithLengthAndFlags, header_);
+ }
+#endif
+};
+
+// Base class for non-nursery-allocatable GC things that allows storing a non-GC
+// thing pointer in the first word.
+//
+// The low bits of the word (see CellFlagBitsReservedForGC) are reserved for GC.
+template <class PtrT>
+class alignas(gc::CellAlignBytes) TenuredCellWithNonGCPointer
+ : public TenuredCell {
+ static_assert(!std::is_pointer_v<PtrT>,
+ "PtrT should be the type of the referent, not of the pointer");
+ static_assert(
+ !std::is_base_of_v<Cell, PtrT>,
+ "Don't use TenuredCellWithNonGCPointer for pointers to GC things");
+
+ protected:
+ TenuredCellWithNonGCPointer() = default;
+ explicit TenuredCellWithNonGCPointer(PtrT* initial) {
+ uintptr_t data = uintptr_t(initial);
+ header_.set(data);
+ }
+
+ PtrT* headerPtr() const {
+ MOZ_ASSERT(flags() == 0);
+ return reinterpret_cast<PtrT*>(uintptr_t(header_.get()));
+ }
+
+ void setHeaderPtr(PtrT* newValue) {
+ // As above, no flags are expected to be set here.
+ uintptr_t data = uintptr_t(newValue);
+ MOZ_ASSERT(flags() == 0);
+ header_.set(data);
+ }
+
+ public:
+ static constexpr size_t offsetOfHeaderPtr() {
+ return offsetof(TenuredCellWithNonGCPointer, header_);
+ }
+};
+
+// Base class for non-nursery-allocatable GC things that allows storing flags
+// in the first word.
+//
+// The low bits of the flags word (see CellFlagBitsReservedForGC) are reserved
+// for GC.
+class alignas(gc::CellAlignBytes) TenuredCellWithFlags : public TenuredCell {
+ protected:
+ TenuredCellWithFlags() { header_.set(0); }
+ explicit TenuredCellWithFlags(uintptr_t initial) { header_.set(initial); }
+
+ uintptr_t headerFlagsField() const {
+ MOZ_ASSERT(flags() == 0);
+ return header_.get();
+ }
+
+ void setHeaderFlagBits(uintptr_t flags) {
+ header_.set(header_.get() | flags);
+ }
+ void clearHeaderFlagBits(uintptr_t flags) {
+ header_.set(header_.get() & ~flags);
+ }
+};
+
+// Base class for GC things that have a tenured GC pointer as their first word.
+//
+// The low bits of the first word (see CellFlagBitsReservedForGC) are reserved
+// for GC.
+//
+// This includes a pre-write barrier when the pointer is updated. No post barrier
+// is necessary as the pointer is always tenured.
+template <class BaseCell, class PtrT>
+class alignas(gc::CellAlignBytes) CellWithTenuredGCPointer : public BaseCell {
+ static void staticAsserts() {
+ // These static asserts are not in class scope because the PtrT may not be
+ // defined when this class template is instantiated.
+ static_assert(
+ std::is_same_v<BaseCell, Cell> || std::is_same_v<BaseCell, TenuredCell>,
+ "BaseCell must be either Cell or TenuredCell");
+ static_assert(
+ !std::is_pointer_v<PtrT>,
+ "PtrT should be the type of the referent, not of the pointer");
+ static_assert(
+ std::is_base_of_v<Cell, PtrT>,
+ "Only use CellWithTenuredGCPointer for pointers to GC things");
+ }
+
+ protected:
+ CellWithTenuredGCPointer() = default;
+ explicit CellWithTenuredGCPointer(PtrT* initial) { initHeaderPtr(initial); }
+
+ void initHeaderPtr(PtrT* initial) {
+ MOZ_ASSERT_IF(initial, !IsInsideNursery(initial));
+ uintptr_t data = uintptr_t(initial);
+ this->header_.set(data);
+ }
+
+ void setHeaderPtr(PtrT* newValue) {
+ // As above, no flags are expected to be set here.
+ MOZ_ASSERT_IF(newValue, !IsInsideNursery(newValue));
+ PreWriteBarrier(headerPtr());
+ unbarrieredSetHeaderPtr(newValue);
+ }
+
+ public:
+ PtrT* headerPtr() const {
+ staticAsserts();
+ MOZ_ASSERT(this->flags() == 0);
+ return reinterpret_cast<PtrT*>(uintptr_t(this->header_.get()));
+ }
+ PtrT* headerPtrAtomic() const {
+ staticAsserts();
+ MOZ_ASSERT(this->flags() == 0);
+ return reinterpret_cast<PtrT*>(uintptr_t(this->header_.getAtomic()));
+ }
+
+ void unbarrieredSetHeaderPtr(PtrT* newValue) {
+ uintptr_t data = uintptr_t(newValue);
+ MOZ_ASSERT(this->flags() == 0);
+ this->header_.set(data);
+ }
+
+ static constexpr size_t offsetOfHeaderPtr() {
+ return offsetof(CellWithTenuredGCPointer, header_);
+ }
+};
+
+void CellHeaderPostWriteBarrier(JSObject** ptr, JSObject* prev, JSObject* next);
+
+template <typename T>
+constexpr inline bool GCTypeIsTenured() {
+ static_assert(std::is_base_of_v<Cell, T>);
+ static_assert(!std::is_same_v<Cell, T> && !std::is_same_v<TenuredCell, T>);
+
+ return std::is_base_of_v<TenuredCell, T> || std::is_base_of_v<JSAtom, T>;
+}
+
+template <class PtrT>
+class alignas(gc::CellAlignBytes) TenuredCellWithGCPointer
+ : public TenuredCell {
+ static void staticAsserts() {
+ // These static asserts are not in class scope because the PtrT may not be
+ // defined when this class template is instantiated.
+ static_assert(
+ !std::is_pointer_v<PtrT>,
+ "PtrT should be the type of the referent, not of the pointer");
+ static_assert(
+ std::is_base_of_v<Cell, PtrT>,
+ "Only use TenuredCellWithGCPointer for pointers to GC things");
+ static_assert(
+ !GCTypeIsTenured<PtrT>,
+ "Don't use TenuredCellWithGCPointer for always-tenured GC things");
+ }
+
+ protected:
+ TenuredCellWithGCPointer() = default;
+ explicit TenuredCellWithGCPointer(PtrT* initial) { initHeaderPtr(initial); }
+
+ void initHeaderPtr(PtrT* initial) {
+ uintptr_t data = uintptr_t(initial);
+ this->header_.set(data);
+ if (initial && IsInsideNursery(initial)) {
+ CellHeaderPostWriteBarrier(headerPtrAddress(), nullptr, initial);
+ }
+ }
+
+ PtrT** headerPtrAddress() {
+ MOZ_ASSERT(this->flags() == 0);
+ return reinterpret_cast<PtrT**>(&this->header_);
+ }
+
+ public:
+ PtrT* headerPtr() const {
+ MOZ_ASSERT(this->flags() == 0);
+ return reinterpret_cast<PtrT*>(uintptr_t(this->header_.get()));
+ }
+
+ void unbarrieredSetHeaderPtr(PtrT* newValue) {
+ uintptr_t data = uintptr_t(newValue);
+ MOZ_ASSERT(this->flags() == 0);
+ this->header_.set(data);
+ }
+
+ static constexpr size_t offsetOfHeaderPtr() {
+ return offsetof(TenuredCellWithGCPointer, header_);
+ }
+};
+
+// Check whether a typed GC thing is marked at all. Doesn't check gray bits for
+// kinds that can't be marked gray.
+template <typename T>
+static inline bool TenuredThingIsMarkedAny(T* thing) {
+ using BaseT = typename BaseGCType<T>::type;
+ TenuredCell* cell = &thing->asTenured();
+ if constexpr (TraceKindCanBeGray<BaseT>::value) {
+ return cell->isMarkedAny();
+ } else {
+ MOZ_ASSERT(!cell->isMarkedGray());
+ return cell->isMarkedBlack();
+ }
+}
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_Cell_h */
diff --git a/js/src/gc/ClearEdgesTracer.h b/js/src/gc/ClearEdgesTracer.h
new file mode 100644
index 0000000000..3e8f0309a3
--- /dev/null
+++ b/js/src/gc/ClearEdgesTracer.h
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_ClearEdgesTracer_h
+#define gc_ClearEdgesTracer_h
+
+#include "js/TracingAPI.h"
+
+namespace js {
+namespace gc {
+
+struct ClearEdgesTracer final : public GenericTracerImpl<ClearEdgesTracer> {
+ explicit ClearEdgesTracer(JSRuntime* rt);
+
+ private:
+ template <typename T>
+ void onEdge(T** thingp, const char* name);
+ friend class GenericTracerImpl<ClearEdgesTracer>;
+};
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_ClearEdgesTracer_h
diff --git a/js/src/gc/Compacting.cpp b/js/src/gc/Compacting.cpp
new file mode 100644
index 0000000000..6af0a98860
--- /dev/null
+++ b/js/src/gc/Compacting.cpp
@@ -0,0 +1,967 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Implementation of compacting GC.
+ */
+
+#include "mozilla/Maybe.h"
+
+#include "debugger/DebugAPI.h"
+#include "gc/ArenaList.h"
+#include "gc/GCInternals.h"
+#include "gc/GCLock.h"
+#include "gc/ParallelWork.h"
+#include "gc/Zone.h"
+#include "jit/JitCode.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitZone.h"
+#include "js/GCAPI.h"
+#include "vm/HelperThreads.h"
+#include "vm/Realm.h"
+#include "wasm/WasmGcObject.h"
+
+#include "gc/Heap-inl.h"
+#include "gc/Marking-inl.h"
+#include "gc/PrivateIterators-inl.h"
+#include "gc/StableCellHasher-inl.h"
+#include "gc/TraceMethods-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::Maybe;
+
+bool GCRuntime::canRelocateZone(Zone* zone) const {
+  return !zone->isAtomsZone();  // The atoms zone is never compacted.
+}
+
+void GCRuntime::beginCompactPhase() {
+ MOZ_ASSERT(!isBackgroundSweeping());
+ assertBackgroundSweepingFinished();
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT);
+
+ MOZ_ASSERT(zonesToMaybeCompact.ref().isEmpty());
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ if (canRelocateZone(zone)) {
+ zonesToMaybeCompact.ref().append(zone);
+ }
+ }
+
+ startedCompacting = true;
+ zonesCompacted = 0;
+
+#ifdef DEBUG
+ AutoLockGC lock(this);
+ MOZ_ASSERT(!relocatedArenasToRelease);
+#endif
+}
+
+IncrementalProgress GCRuntime::compactPhase(JS::GCReason reason,
+ SliceBudget& sliceBudget,
+ AutoGCSession& session) {
+ assertBackgroundSweepingFinished();
+ MOZ_ASSERT(startedCompacting);
+
+ AutoMajorGCProfilerEntry s(this);
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT);
+
+ // TODO: JSScripts can move. If the sampler interrupts the GC in the
+ // middle of relocating an arena, invalid JSScript pointers may be
+ // accessed. Suppress all sampling until a finer-grained solution can be
+ // found. See bug 1295775.
+ AutoSuppressProfilerSampling suppressSampling(rt->mainContextFromOwnThread());
+
+ ZoneList relocatedZones;
+ Arena* relocatedArenas = nullptr;
+ while (!zonesToMaybeCompact.ref().isEmpty()) {
+ Zone* zone = zonesToMaybeCompact.ref().front();
+ zonesToMaybeCompact.ref().removeFront();
+
+ MOZ_ASSERT(nursery().isEmpty());
+ zone->changeGCState(Zone::Finished, Zone::Compact);
+
+ if (relocateArenas(zone, reason, relocatedArenas, sliceBudget)) {
+ updateZonePointersToRelocatedCells(zone);
+ relocatedZones.append(zone);
+ zonesCompacted++;
+ } else {
+ zone->changeGCState(Zone::Compact, Zone::Finished);
+ }
+
+ if (sliceBudget.isOverBudget()) {
+ break;
+ }
+ }
+
+ if (!relocatedZones.isEmpty()) {
+ updateRuntimePointersToRelocatedCells(session);
+
+ do {
+ Zone* zone = relocatedZones.front();
+ relocatedZones.removeFront();
+ zone->changeGCState(Zone::Compact, Zone::Finished);
+ } while (!relocatedZones.isEmpty());
+ }
+
+ clearRelocatedArenas(relocatedArenas, reason);
+
+#ifdef DEBUG
+ protectOrReleaseRelocatedArenas(relocatedArenas, reason);
+#else
+ releaseRelocatedArenas(relocatedArenas);
+#endif
+
+ // Clear caches that can contain cell pointers.
+ rt->caches().purgeForCompaction();
+
+#ifdef DEBUG
+ checkHashTablesAfterMovingGC();
+#endif
+
+ return zonesToMaybeCompact.ref().isEmpty() ? Finished : NotFinished;
+}
+
+void GCRuntime::endCompactPhase() { startedCompacting = false; }
+
+static bool ShouldRelocateAllArenas(JS::GCReason reason) {
+  return reason == JS::GCReason::DEBUG_GC;  // Only for GC-zeal collections.
+}
+
+/*
+ * Choose which arenas to relocate all cells from. Return an arena cursor that
+ * can be passed to removeRemainingArenas().
+ */
+Arena** ArenaList::pickArenasToRelocate(size_t& arenaTotalOut,
+ size_t& relocTotalOut) {
+ // Relocate the greatest number of arenas such that the number of used cells
+ // in relocated arenas is less than or equal to the number of free cells in
+ // unrelocated arenas. In other words we only relocate cells we can move
+  // into existing arenas, and we choose the least full arenas to relocate.
+ //
+ // This is made easier by the fact that the arena list has been sorted in
+ // descending order of number of used cells, so we will always relocate a
+ // tail of the arena list. All we need to do is find the point at which to
+ // start relocating.
+
+ check();
+
+ if (isCursorAtEnd()) {
+ return nullptr;
+ }
+
+ Arena** arenap = cursorp_; // Next arena to consider for relocation.
+ size_t previousFreeCells = 0; // Count of free cells before arenap.
+ size_t followingUsedCells = 0; // Count of used cells after arenap.
+ size_t fullArenaCount = 0; // Number of full arenas (not relocated).
+ size_t nonFullArenaCount =
+ 0; // Number of non-full arenas (considered for relocation).
+ size_t arenaIndex = 0; // Index of the next arena to consider.
+
+ for (Arena* arena = head_; arena != *cursorp_; arena = arena->next) {
+ fullArenaCount++;
+ }
+
+ for (Arena* arena = *cursorp_; arena; arena = arena->next) {
+ followingUsedCells += arena->countUsedCells();
+ nonFullArenaCount++;
+ }
+
+ mozilla::DebugOnly<size_t> lastFreeCells(0);
+ size_t cellsPerArena = Arena::thingsPerArena((*arenap)->getAllocKind());
+
+ while (*arenap) {
+ Arena* arena = *arenap;
+ if (followingUsedCells <= previousFreeCells) {
+ break;
+ }
+
+ size_t freeCells = arena->countFreeCells();
+ size_t usedCells = cellsPerArena - freeCells;
+ followingUsedCells -= usedCells;
+#ifdef DEBUG
+ MOZ_ASSERT(freeCells >= lastFreeCells);
+ lastFreeCells = freeCells;
+#endif
+ previousFreeCells += freeCells;
+ arenap = &arena->next;
+ arenaIndex++;
+ }
+
+ size_t relocCount = nonFullArenaCount - arenaIndex;
+ MOZ_ASSERT(relocCount < nonFullArenaCount);
+ MOZ_ASSERT((relocCount == 0) == (!*arenap));
+ arenaTotalOut += fullArenaCount + nonFullArenaCount;
+ relocTotalOut += relocCount;
+
+ return arenap;
+}
+
+#ifdef DEBUG
+inline bool PtrIsInRange(const void* ptr, const void* start, size_t length) {
+  // Unsigned wraparound makes this a single-comparison range check.
+  return uintptr_t(ptr) - uintptr_t(start) < length;
+}
+#endif
+
+static void RelocateCell(Zone* zone, TenuredCell* src, AllocKind thingKind,
+ size_t thingSize) {
+ JS::AutoSuppressGCAnalysis nogc;
+
+ // Allocate a new cell.
+ MOZ_ASSERT(zone == src->zone());
+ TenuredCell* dst =
+ reinterpret_cast<TenuredCell*>(AllocateCellInGC(zone, thingKind));
+
+ // Copy source cell contents to destination.
+ memcpy(dst, src, thingSize);
+
+ // Move any uid attached to the object.
+ gc::TransferUniqueId(dst, src);
+
+ if (IsObjectAllocKind(thingKind)) {
+ auto* srcObj = static_cast<JSObject*>(static_cast<Cell*>(src));
+ auto* dstObj = static_cast<JSObject*>(static_cast<Cell*>(dst));
+
+ if (srcObj->is<NativeObject>()) {
+ NativeObject* srcNative = &srcObj->as<NativeObject>();
+ NativeObject* dstNative = &dstObj->as<NativeObject>();
+
+ // Fixup the pointer to inline object elements if necessary.
+ if (srcNative->hasFixedElements()) {
+ uint32_t numShifted =
+ srcNative->getElementsHeader()->numShiftedElements();
+ dstNative->setFixedElements(numShifted);
+ }
+ } else if (srcObj->is<ProxyObject>()) {
+ if (srcObj->as<ProxyObject>().usingInlineValueArray()) {
+ dstObj->as<ProxyObject>().setInlineValueArray();
+ }
+ }
+
+ // Call object moved hook if present.
+ if (JSObjectMovedOp op = srcObj->getClass()->extObjectMovedOp()) {
+ op(dstObj, srcObj);
+ }
+
+ MOZ_ASSERT_IF(
+ dstObj->is<NativeObject>(),
+ !PtrIsInRange(
+ (const Value*)dstObj->as<NativeObject>().getDenseElements(), src,
+ thingSize));
+ }
+
+ // Copy the mark bits.
+ dst->copyMarkBitsFrom(src);
+
+ // Poison the source cell contents except for the forwarding flag and pointer
+ // which will be stored in the first word. We can't do this for native object
+ // with fixed elements because this would overwrite the element flags and
+ // these are needed when updating COW elements referred to by other objects.
+#ifdef DEBUG
+ JSObject* srcObj = IsObjectAllocKind(thingKind)
+ ? static_cast<JSObject*>(static_cast<Cell*>(src))
+ : nullptr;
+ if (!srcObj || !srcObj->is<NativeObject>() ||
+ !srcObj->as<NativeObject>().hasFixedElements()) {
+ AlwaysPoison(reinterpret_cast<uint8_t*>(src) + sizeof(uintptr_t),
+ JS_MOVED_TENURED_PATTERN, thingSize - sizeof(uintptr_t),
+ MemCheckKind::MakeNoAccess);
+ }
+#endif
+
+ // Mark source cell as forwarded and leave a pointer to the destination.
+ RelocationOverlay::forwardCell(src, dst);
+}
+
+static void RelocateArena(Arena* arena, SliceBudget& sliceBudget) {
+ MOZ_ASSERT(arena->allocated());
+ MOZ_ASSERT(!arena->onDelayedMarkingList());
+ MOZ_ASSERT(arena->bufferedCells()->isEmpty());
+
+ Zone* zone = arena->zone;
+
+ AllocKind thingKind = arena->getAllocKind();
+ size_t thingSize = arena->getThingSize();
+
+ for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
+ RelocateCell(zone, cell, thingKind, thingSize);
+ sliceBudget.step();
+ }
+
+#ifdef DEBUG
+ for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
+ TenuredCell* src = cell;
+ MOZ_ASSERT(src->isForwarded());
+ TenuredCell* dest = Forwarded(src);
+ MOZ_ASSERT(src->isMarkedBlack() == dest->isMarkedBlack());
+ MOZ_ASSERT(src->isMarkedGray() == dest->isMarkedGray());
+ }
+#endif
+}
+
+/*
+ * Relocate all arenas identified by pickArenasToRelocate: for each arena,
+ * relocate each cell within it, then add it to a list of relocated arenas.
+ */
+Arena* ArenaList::relocateArenas(Arena* toRelocate, Arena* relocated,
+ SliceBudget& sliceBudget,
+ gcstats::Statistics& stats) {
+ check();
+
+ while (Arena* arena = toRelocate) {
+ toRelocate = arena->next;
+ RelocateArena(arena, sliceBudget);
+ // Prepend to list of relocated arenas
+ arena->next = relocated;
+ relocated = arena;
+ stats.count(gcstats::COUNT_ARENA_RELOCATED);
+ }
+
+ check();
+
+ return relocated;
+}
+
+// Skip compacting zones unless we can free a certain proportion of their GC
+// heap memory.
+static const float MIN_ZONE_RECLAIM_PERCENT = 2.0;
+
+static bool ShouldRelocateZone(size_t arenaCount, size_t relocCount,
+                               JS::GCReason reason) {
+  if (relocCount == 0) {
+    return false;  // Nothing would move; skip this zone.
+  }
+
+  if (IsOOMReason(reason)) {
+    return true;  // Under OOM, reclaim whatever memory we can.
+  }
+
+  return (relocCount * 100.0f) / arenaCount >= MIN_ZONE_RECLAIM_PERCENT;
+}
+
+static AllocKinds CompactingAllocKinds() {
+  AllocKinds result;  // Set of all alloc kinds that support compaction.
+  for (AllocKind kind : AllAllocKinds()) {
+    if (IsCompactingKind(kind)) {
+      result += kind;
+    }
+  }
+  return result;
+}
+
+bool ArenaLists::relocateArenas(Arena*& relocatedListOut, JS::GCReason reason,
+ SliceBudget& sliceBudget,
+ gcstats::Statistics& stats) {
+ // This is only called from the main thread while we are doing a GC, so
+ // there is no need to lock.
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
+ MOZ_ASSERT(runtime()->gc.isHeapCompacting());
+ MOZ_ASSERT(!runtime()->gc.isBackgroundSweeping());
+
+ // Relocate all compatible kinds
+ AllocKinds allocKindsToRelocate = CompactingAllocKinds();
+
+ // Clear all the free lists.
+ clearFreeLists();
+
+ if (ShouldRelocateAllArenas(reason)) {
+ zone_->prepareForCompacting();
+ for (auto kind : allocKindsToRelocate) {
+ ArenaList& al = arenaList(kind);
+ Arena* allArenas = al.head();
+ al.clear();
+ relocatedListOut =
+ al.relocateArenas(allArenas, relocatedListOut, sliceBudget, stats);
+ }
+ } else {
+ size_t arenaCount = 0;
+ size_t relocCount = 0;
+ AllAllocKindArray<Arena**> toRelocate;
+
+ for (auto kind : allocKindsToRelocate) {
+ toRelocate[kind] =
+ arenaList(kind).pickArenasToRelocate(arenaCount, relocCount);
+ }
+
+ if (!ShouldRelocateZone(arenaCount, relocCount, reason)) {
+ return false;
+ }
+
+ zone_->prepareForCompacting();
+ for (auto kind : allocKindsToRelocate) {
+ if (toRelocate[kind]) {
+ ArenaList& al = arenaList(kind);
+ Arena* arenas = al.removeRemainingArenas(toRelocate[kind]);
+ relocatedListOut =
+ al.relocateArenas(arenas, relocatedListOut, sliceBudget, stats);
+ }
+ }
+ }
+
+ return true;
+}
+
+bool GCRuntime::relocateArenas(Zone* zone, JS::GCReason reason,
+ Arena*& relocatedListOut,
+ SliceBudget& sliceBudget) {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT_MOVE);
+
+ MOZ_ASSERT(!zone->isPreservingCode());
+ MOZ_ASSERT(canRelocateZone(zone));
+
+ js::CancelOffThreadIonCompile(rt, JS::Zone::Compact);
+
+ if (!zone->arenas.relocateArenas(relocatedListOut, reason, sliceBudget,
+ stats())) {
+ return false;
+ }
+
+#ifdef DEBUG
+ // Check that we did as much compaction as we should have. There
+ // should always be less than one arena's worth of free cells.
+ for (auto kind : CompactingAllocKinds()) {
+ ArenaList& al = zone->arenas.arenaList(kind);
+ size_t freeCells = 0;
+ for (Arena* arena = al.arenaAfterCursor(); arena; arena = arena->next) {
+ freeCells += arena->countFreeCells();
+ }
+ MOZ_ASSERT(freeCells < Arena::thingsPerArena(kind));
+ }
+#endif
+
+ return true;
+}
+
+MovingTracer::MovingTracer(JSRuntime* rt)
+ : GenericTracerImpl(rt, JS::TracerKind::Moving,
+ JS::WeakMapTraceAction::TraceKeysAndValues) {}
+
+template <typename T>
+inline void MovingTracer::onEdge(T** thingp, const char* name) {
+ T* thing = *thingp;
+ if (thing->runtimeFromAnyThread() == runtime() && IsForwarded(thing)) {
+ *thingp = Forwarded(thing);
+ }
+}
+
+void Zone::prepareForCompacting() {
+ JS::GCContext* gcx = runtimeFromMainThread()->gcContext();
+ discardJitCode(gcx);
+}
+
+void GCRuntime::sweepZoneAfterCompacting(MovingTracer* trc, Zone* zone) {
+ MOZ_ASSERT(zone->isGCCompacting());
+
+ zone->traceWeakMaps(trc);
+
+ traceWeakFinalizationObserverEdges(trc, zone);
+
+ for (auto* cache : zone->weakCaches()) {
+ cache->traceWeak(trc, nullptr);
+ }
+
+ if (jit::JitZone* jitZone = zone->jitZone()) {
+ jitZone->traceWeak(trc);
+ }
+
+ for (CompartmentsInZoneIter c(zone); !c.done(); c.next()) {
+ c->traceWeakNativeIterators(trc);
+
+ for (RealmsInCompartmentIter r(c); !r.done(); r.next()) {
+ r->traceWeakRegExps(trc);
+ r->traceWeakSavedStacks(trc);
+ r->traceWeakGlobalEdge(trc);
+ r->traceWeakDebugEnvironmentEdges(trc);
+ r->traceWeakEdgesInJitRealm(trc);
+ }
+ }
+}
+
+template <typename T>
+static inline void UpdateCellPointers(MovingTracer* trc, T* cell) {
+ // We only update unmoved GC things or the new copy of moved GC things, never
+ // the old copy. If this happened it could clear the forwarded flag which
+ // could lead to pointers to the old copy not being updated.
+ MOZ_ASSERT(!cell->isForwarded());
+
+ cell->fixupAfterMovingGC();
+ cell->traceChildren(trc);
+}
+
+template <typename T>
+static void UpdateArenaPointersTyped(MovingTracer* trc, Arena* arena) {
+ for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
+ UpdateCellPointers(trc, cell.as<T>());
+ }
+}
+
+static bool CanUpdateKindInBackground(AllocKind kind) {
+ // We try to update as many GC things in parallel as we can, but there are
+ // kinds for which this might not be safe:
+ // - we assume JSObjects that are foreground finalized are not safe to
+ // update in parallel
+ // - updating a SharedPropMap touches child maps in
+ // SharedPropMap::fixupAfterMovingGC
+ return js::gc::IsBackgroundFinalized(kind) && !IsShapeAllocKind(kind) &&
+ kind != AllocKind::BASE_SHAPE;
+}
+
+/*
+ * Update the internal pointers for all cells in an arena.
+ */
+static void UpdateArenaPointers(MovingTracer* trc, Arena* arena) {
+ AllocKind kind = arena->getAllocKind();
+
+ MOZ_ASSERT_IF(!CanUpdateKindInBackground(kind),
+ CurrentThreadCanAccessRuntime(trc->runtime()));
+
+ switch (kind) {
+#define EXPAND_CASE(allocKind, traceKind, type, sizedType, bgFinal, nursery, \
+ compact) \
+ case AllocKind::allocKind: \
+ UpdateArenaPointersTyped<type>(trc, arena); \
+ return;
+ FOR_EACH_ALLOCKIND(EXPAND_CASE)
+#undef EXPAND_CASE
+
+ default:
+ MOZ_CRASH("Invalid alloc kind for UpdateArenaPointers");
+ }
+}
+
+struct ArenaListSegment {
+ Arena* begin;
+ Arena* end;
+};
+
+/*
+ * Update the internal pointers for all arenas in a segment of an arena list.
+ *
+ * Returns the number of steps to count against the slice budget.
+ */
+static size_t UpdateArenaListSegmentPointers(GCRuntime* gc,
+ const ArenaListSegment& arenas) {
+ MOZ_ASSERT(arenas.begin);
+ MovingTracer trc(gc->rt);
+ size_t count = 0;
+ for (Arena* arena = arenas.begin; arena != arenas.end; arena = arena->next) {
+ UpdateArenaPointers(&trc, arena);
+ count++;
+ }
+ return count * 256;
+}
+
+class ArenasToUpdate {
+ // Maximum number of arenas to update in one block.
+#ifdef DEBUG
+ static const unsigned MaxArenasToProcess = 16;
+#else
+ static const unsigned MaxArenasToProcess = 256;
+#endif
+
+ public:
+ explicit ArenasToUpdate(Zone* zone);
+ ArenasToUpdate(Zone* zone, const AllocKinds& kinds);
+
+ bool done() const { return !segmentBegin; }
+
+ ArenaListSegment get() const {
+ MOZ_ASSERT(!done());
+ return {segmentBegin, segmentEnd};
+ }
+
+ void next();
+
+ private:
+ Maybe<AllocKinds> kinds; // Selects which thing kinds to update.
+ Zone* zone; // Zone to process.
+ AllocKind kind = AllocKind::FIRST; // Current alloc kind to process.
+ Arena* segmentBegin = nullptr;
+ Arena* segmentEnd = nullptr;
+
+ static AllocKind nextAllocKind(AllocKind i) {
+ return AllocKind(uint8_t(i) + 1);
+ }
+
+ void settle();
+ void findSegmentEnd();
+};
+
+ArenasToUpdate::ArenasToUpdate(Zone* zone) : zone(zone) { settle(); }
+
+ArenasToUpdate::ArenasToUpdate(Zone* zone, const AllocKinds& kinds)
+ : kinds(Some(kinds)), zone(zone) {
+ settle();
+}
+
+void ArenasToUpdate::settle() {
+  // Called when we have set |kind| to a new kind. Sets |segmentBegin| to the
+  // next arena, or leaves it null if there are no more arenas to update.
+
+ MOZ_ASSERT(!segmentBegin);
+
+ for (; kind < AllocKind::LIMIT; kind = nextAllocKind(kind)) {
+ if (kinds && !kinds.ref().contains(kind)) {
+ continue;
+ }
+
+ Arena* arena = zone->arenas.getFirstArena(kind);
+ if (arena) {
+ segmentBegin = arena;
+ findSegmentEnd();
+ break;
+ }
+ }
+}
+
+void ArenasToUpdate::findSegmentEnd() {
+ // Take up to MaxArenasToProcess arenas from the list starting at
+ // |segmentBegin| and set |segmentEnd|.
+ Arena* arena = segmentBegin;
+ for (size_t i = 0; arena && i < MaxArenasToProcess; i++) {
+ arena = arena->next;
+ }
+ segmentEnd = arena;
+}
+
+void ArenasToUpdate::next() {
+ MOZ_ASSERT(!done());
+
+ segmentBegin = segmentEnd;
+ if (segmentBegin) {
+ findSegmentEnd();
+ return;
+ }
+
+ kind = nextAllocKind(kind);
+ settle();
+}
+
+static AllocKinds ForegroundUpdateKinds(AllocKinds kinds) {
+ AllocKinds result;
+ for (AllocKind kind : kinds) {
+ if (!CanUpdateKindInBackground(kind)) {
+ result += kind;
+ }
+ }
+ return result;
+}
+
+void GCRuntime::updateCellPointers(Zone* zone, AllocKinds kinds) {
+ AllocKinds fgKinds = ForegroundUpdateKinds(kinds);
+ AllocKinds bgKinds = kinds - fgKinds;
+
+ ArenasToUpdate fgArenas(zone, fgKinds);
+ ArenasToUpdate bgArenas(zone, bgKinds);
+
+ AutoLockHelperThreadState lock;
+
+ AutoRunParallelWork bgTasks(this, UpdateArenaListSegmentPointers,
+ gcstats::PhaseKind::COMPACT_UPDATE_CELLS,
+ GCUse::Unspecified, bgArenas,
+ SliceBudget::unlimited(), lock);
+
+ AutoUnlockHelperThreadState unlock(lock);
+
+ for (; !fgArenas.done(); fgArenas.next()) {
+ UpdateArenaListSegmentPointers(this, fgArenas.get());
+ }
+}
+
+// After cells have been relocated any pointers to a cell's old locations must
+// be updated to point to the new location. This happens by iterating through
+// all cells in heap and tracing their children (non-recursively) to update
+// them.
+//
+// This is complicated by the fact that updating a GC thing sometimes depends on
+// making use of other GC things. After a moving GC these things may not be in
+// a valid state since they may contain pointers which have not been updated
+// yet.
+//
+// The main dependencies are:
+//
+// - Updating a JSObject makes use of its shape
+// - Updating a typed object makes use of its type descriptor object
+//
+// This means we require at least three phases for update:
+//
+// 1) shapes
+// 2) typed object type descriptor objects
+// 3) all other objects
+//
+// Also, there can be data races calling IsForwarded() on the new location of a
+// cell whose first word is being updated in parallel on another thread. The
+// easiest way to avoid this is to not store a GC pointer in the first word of a
+// cell. Otherwise this can be avoided by updating different kinds of cell in
+// different phases.
+//
+// Since we want to minimize the number of phases, arrange kinds into three
+// arbitrary phases.
+
+static constexpr AllocKinds UpdatePhaseOne{AllocKind::SCRIPT,
+ AllocKind::BASE_SHAPE,
+ AllocKind::SHAPE,
+ AllocKind::STRING,
+ AllocKind::JITCODE,
+ AllocKind::REGEXP_SHARED,
+ AllocKind::SCOPE,
+ AllocKind::GETTER_SETTER,
+ AllocKind::COMPACT_PROP_MAP,
+ AllocKind::NORMAL_PROP_MAP,
+ AllocKind::DICT_PROP_MAP};
+
+// UpdatePhaseTwo is typed object descriptor objects.
+
+static constexpr AllocKinds UpdatePhaseThree{AllocKind::FUNCTION,
+ AllocKind::FUNCTION_EXTENDED,
+ AllocKind::OBJECT0,
+ AllocKind::OBJECT0_BACKGROUND,
+ AllocKind::OBJECT2,
+ AllocKind::OBJECT2_BACKGROUND,
+ AllocKind::ARRAYBUFFER4,
+ AllocKind::OBJECT4,
+ AllocKind::OBJECT4_BACKGROUND,
+ AllocKind::ARRAYBUFFER8,
+ AllocKind::OBJECT8,
+ AllocKind::OBJECT8_BACKGROUND,
+ AllocKind::ARRAYBUFFER12,
+ AllocKind::OBJECT12,
+ AllocKind::OBJECT12_BACKGROUND,
+ AllocKind::ARRAYBUFFER16,
+ AllocKind::OBJECT16,
+ AllocKind::OBJECT16_BACKGROUND};
+
+void GCRuntime::updateAllCellPointers(MovingTracer* trc, Zone* zone) {
+ updateCellPointers(zone, UpdatePhaseOne);
+
+ updateCellPointers(zone, UpdatePhaseThree);
+}
+
+/*
+ * Update pointers to relocated cells in a single zone by doing a traversal of
+ * that zone's arenas and calling per-zone sweep hooks.
+ *
+ * The latter is necessary to update weak references which are not marked as
+ * part of the traversal.
+ */
+void GCRuntime::updateZonePointersToRelocatedCells(Zone* zone) {
+ MOZ_ASSERT(!rt->isBeingDestroyed());
+ MOZ_ASSERT(zone->isGCCompacting());
+
+ AutoTouchingGrayThings tgt;
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT_UPDATE);
+ MovingTracer trc(rt);
+
+ zone->fixupAfterMovingGC();
+ zone->fixupScriptMapsAfterMovingGC(&trc);
+
+ // Fixup compartment global pointers as these get accessed during marking.
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+ comp->fixupAfterMovingGC(&trc);
+ }
+
+ zone->externalStringCache().purge();
+ zone->functionToStringCache().purge();
+ zone->shapeZone().purgeShapeCaches(rt->gcContext());
+ rt->caches().stringToAtomCache.purge();
+
+ // Iterate through all cells that can contain relocatable pointers to update
+ // them. Since updating each cell is independent we try to parallelize this
+ // as much as possible.
+ updateAllCellPointers(&trc, zone);
+
+ // Sweep everything to fix up weak pointers.
+ sweepZoneAfterCompacting(&trc, zone);
+
+ // Call callbacks to get the rest of the system to fixup other untraced
+ // pointers.
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+ callWeakPointerCompartmentCallbacks(&trc, comp);
+ }
+}
+
+/*
+ * Update runtime-wide pointers to relocated cells.
+ */
+void GCRuntime::updateRuntimePointersToRelocatedCells(AutoGCSession& session) {
+ MOZ_ASSERT(!rt->isBeingDestroyed());
+
+ gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::COMPACT_UPDATE);
+ MovingTracer trc(rt);
+
+ Zone::fixupAllCrossCompartmentWrappersAfterMovingGC(&trc);
+
+ rt->geckoProfiler().fixupStringsMapAfterMovingGC();
+
+ // Mark roots to update them.
+
+ traceRuntimeForMajorGC(&trc, session);
+
+ {
+ gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::MARK_ROOTS);
+ DebugAPI::traceAllForMovingGC(&trc);
+ DebugAPI::traceCrossCompartmentEdges(&trc);
+
+ // Mark all gray roots.
+ traceEmbeddingGrayRoots(&trc);
+ Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
+ &trc, Compartment::GrayEdges);
+ }
+
+ // Sweep everything to fix up weak pointers.
+ jit::JitRuntime::TraceWeakJitcodeGlobalTable(rt, &trc);
+ for (JS::detail::WeakCacheBase* cache : rt->weakCaches()) {
+ cache->traceWeak(&trc, nullptr);
+ }
+
+ if (rt->hasJitRuntime() && rt->jitRuntime()->hasInterpreterEntryMap()) {
+ rt->jitRuntime()->getInterpreterEntryMap()->updateScriptsAfterMovingGC();
+ }
+
+ // Type inference may put more blocks here to free.
+ {
+ AutoLockHelperThreadState lock;
+ lifoBlocksToFree.ref().freeAll();
+ }
+
+ // Call callbacks to get the rest of the system to fixup other untraced
+ // pointers.
+ callWeakPointerZonesCallbacks(&trc);
+}
+
+void GCRuntime::clearRelocatedArenas(Arena* arenaList, JS::GCReason reason) {
+ AutoLockGC lock(this);
+ clearRelocatedArenasWithoutUnlocking(arenaList, reason, lock);
+}
+
+void GCRuntime::clearRelocatedArenasWithoutUnlocking(Arena* arenaList,
+ JS::GCReason reason,
+ const AutoLockGC& lock) {
+ // Clear the relocated arenas, now containing only forwarding pointers
+ while (arenaList) {
+ Arena* arena = arenaList;
+ arenaList = arenaList->next;
+
+ // Clear the mark bits
+ arena->unmarkAll();
+
+ // Mark arena as empty
+ arena->setAsFullyUnused();
+
+#ifdef DEBUG
+ // The cell contents have been partially marked no access in RelocateCell,
+ // so we need to mark the region as undefined again so we can poison it.
+ SetMemCheckKind(reinterpret_cast<void*>(arena->thingsStart()),
+ arena->getThingsSpan(), MemCheckKind::MakeUndefined);
+#endif
+
+ AlwaysPoison(reinterpret_cast<void*>(arena->thingsStart()),
+ JS_MOVED_TENURED_PATTERN, arena->getThingsSpan(),
+ MemCheckKind::MakeNoAccess);
+
+ // Don't count emptied arenas as being freed by the current GC:
+ // - if we purposely moved everything to new arenas, as that will already
+ // have allocated a similar number of arenas. (This only happens for
+ // collections triggered by GC zeal.)
+ // - if they were allocated since the start of the GC.
+ bool allArenasRelocated = ShouldRelocateAllArenas(reason);
+ bool updateRetainedSize = !allArenasRelocated && !arena->isNewlyCreated();
+ arena->zone->gcHeapSize.removeBytes(ArenaSize, updateRetainedSize,
+ heapSize);
+
+ // Release the arena but don't return it to the chunk yet.
+ arena->release(lock);
+ }
+}
+
+#ifdef DEBUG
+
+// In debug mode we don't always release relocated arenas straight away.
+// Sometimes we protect them instead and hold onto them until the next GC
+// sweep phase to catch any pointers to them that didn't get forwarded.
+
+static inline bool CanProtectArenas() {
+ // On some systems the page size is larger than the size of an arena so we
+ // can't change the mapping permissions per arena.
+ return SystemPageSize() <= ArenaSize;
+}
+
+static inline bool ShouldProtectRelocatedArenas(JS::GCReason reason) {
+ // For zeal mode collections we don't release the relocated arenas
+ // immediately. Instead we protect them and keep them around until the next
+ // collection so we can catch any stray accesses to them.
+ return reason == JS::GCReason::DEBUG_GC && CanProtectArenas();
+}
+
+void GCRuntime::protectOrReleaseRelocatedArenas(Arena* arenaList,
+ JS::GCReason reason) {
+ if (ShouldProtectRelocatedArenas(reason)) {
+ protectAndHoldArenas(arenaList);
+ return;
+ }
+
+ releaseRelocatedArenas(arenaList);
+}
+
+void GCRuntime::protectAndHoldArenas(Arena* arenaList) {
+ for (Arena* arena = arenaList; arena;) {
+ MOZ_ASSERT(!arena->allocated());
+ Arena* next = arena->next;
+ if (!next) {
+ // Prepend to hold list before we protect the memory.
+ AutoLockGC lock(this);
+ arena->next = relocatedArenasToRelease;
+ relocatedArenasToRelease = arenaList;
+ }
+ ProtectPages(arena, ArenaSize);
+ arena = next;
+ }
+}
+
+void GCRuntime::unprotectHeldRelocatedArenas(const AutoLockGC& lock) {
+ for (Arena* arena = relocatedArenasToRelease; arena; arena = arena->next) {
+ UnprotectPages(arena, ArenaSize);
+ MOZ_ASSERT(!arena->allocated());
+ }
+}
+
+void GCRuntime::releaseHeldRelocatedArenas() {
+ AutoLockGC lock(this);
+ unprotectHeldRelocatedArenas(lock);
+ Arena* arenas = relocatedArenasToRelease;
+ relocatedArenasToRelease = nullptr;
+ releaseRelocatedArenasWithoutUnlocking(arenas, lock);
+}
+
+void GCRuntime::releaseHeldRelocatedArenasWithoutUnlocking(
+ const AutoLockGC& lock) {
+ unprotectHeldRelocatedArenas(lock);
+ releaseRelocatedArenasWithoutUnlocking(relocatedArenasToRelease, lock);
+ relocatedArenasToRelease = nullptr;
+}
+
+#endif
+
+void GCRuntime::releaseRelocatedArenas(Arena* arenaList) {
+ AutoLockGC lock(this);
+ releaseRelocatedArenasWithoutUnlocking(arenaList, lock);
+}
+
+void GCRuntime::releaseRelocatedArenasWithoutUnlocking(Arena* arenaList,
+ const AutoLockGC& lock) {
+ // Release relocated arenas previously cleared with clearRelocatedArenas().
+ while (arenaList) {
+ Arena* arena = arenaList;
+ arenaList = arenaList->next;
+
+ // We already updated the memory accounting so just call
+ // Chunk::releaseArena.
+ arena->chunk()->releaseArena(this, arena, lock);
+ }
+}
diff --git a/js/src/gc/FinalizationObservers.cpp b/js/src/gc/FinalizationObservers.cpp
new file mode 100644
index 0000000000..3a7a114645
--- /dev/null
+++ b/js/src/gc/FinalizationObservers.cpp
@@ -0,0 +1,509 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC support for FinalizationRegistry and WeakRef objects.
+ */
+
+#include "gc/FinalizationObservers.h"
+
+#include "mozilla/ScopeExit.h"
+
+#include "builtin/FinalizationRegistryObject.h"
+#include "builtin/WeakRefObject.h"
+#include "gc/GCRuntime.h"
+#include "gc/Zone.h"
+#include "vm/JSContext.h"
+
+#include "gc/WeakMap-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+FinalizationObservers::FinalizationObservers(Zone* zone)
+ : zone(zone),
+ registries(zone),
+ recordMap(zone),
+ crossZoneRecords(zone),
+ weakRefMap(zone),
+ crossZoneWeakRefs(zone) {}
+
+FinalizationObservers::~FinalizationObservers() {
+ MOZ_ASSERT(registries.empty());
+ MOZ_ASSERT(recordMap.empty());
+ MOZ_ASSERT(crossZoneRecords.empty());
+ MOZ_ASSERT(crossZoneWeakRefs.empty());
+}
+
+bool GCRuntime::addFinalizationRegistry(
+ JSContext* cx, Handle<FinalizationRegistryObject*> registry) {
+ if (!cx->zone()->ensureFinalizationObservers() ||
+ !cx->zone()->finalizationObservers()->addRegistry(registry)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool FinalizationObservers::addRegistry(
+ Handle<FinalizationRegistryObject*> registry) {
+ return registries.put(registry);
+}
+
+bool GCRuntime::registerWithFinalizationRegistry(JSContext* cx,
+ HandleObject target,
+ HandleObject record) {
+ MOZ_ASSERT(!IsCrossCompartmentWrapper(target));
+ MOZ_ASSERT(
+ UncheckedUnwrapWithoutExpose(record)->is<FinalizationRecordObject>());
+ MOZ_ASSERT(target->compartment() == record->compartment());
+
+ Zone* zone = cx->zone();
+ if (!zone->ensureFinalizationObservers() ||
+ !zone->finalizationObservers()->addRecord(target, record)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool FinalizationObservers::addRecord(HandleObject target,
+ HandleObject record) {
+ // Add a record to the record map and clean up on failure.
+ //
+ // The following must be updated and kept in sync:
+ // - the zone's recordMap (to observe the target)
+ // - the registry's global objects's recordSet (to trace the record)
+ // - the count of cross zone records (to calculate sweep groups)
+
+ MOZ_ASSERT(target->zone() == zone);
+
+ FinalizationRecordObject* unwrappedRecord =
+ &UncheckedUnwrapWithoutExpose(record)->as<FinalizationRecordObject>();
+
+ Zone* registryZone = unwrappedRecord->zone();
+ bool crossZone = registryZone != zone;
+ if (crossZone && !addCrossZoneWrapper(crossZoneRecords, record)) {
+ return false;
+ }
+ auto wrapperGuard = mozilla::MakeScopeExit([&] {
+ if (crossZone) {
+ removeCrossZoneWrapper(crossZoneRecords, record);
+ }
+ });
+
+ GlobalObject* registryGlobal = &unwrappedRecord->global();
+ auto* globalData = registryGlobal->getOrCreateFinalizationRegistryData();
+ if (!globalData || !globalData->addRecord(unwrappedRecord)) {
+ return false;
+ }
+ auto globalDataGuard = mozilla::MakeScopeExit(
+ [&] { globalData->removeRecord(unwrappedRecord); });
+
+ auto ptr = recordMap.lookupForAdd(target);
+ if (!ptr && !recordMap.add(ptr, target, RecordVector(zone))) {
+ return false;
+ }
+
+ if (!ptr->value().append(record)) {
+ return false;
+ }
+
+ unwrappedRecord->setInRecordMap(true);
+
+ globalDataGuard.release();
+ wrapperGuard.release();
+ return true;
+}
+
+bool FinalizationObservers::addCrossZoneWrapper(WrapperWeakSet& weakSet,
+ JSObject* wrapper) {
+ MOZ_ASSERT(IsCrossCompartmentWrapper(wrapper));
+ MOZ_ASSERT(UncheckedUnwrapWithoutExpose(wrapper)->zone() != zone);
+
+ auto ptr = weakSet.lookupForAdd(wrapper);
+ MOZ_ASSERT(!ptr);
+ return weakSet.add(ptr, wrapper, UndefinedValue());
+}
+
+void FinalizationObservers::removeCrossZoneWrapper(WrapperWeakSet& weakSet,
+ JSObject* wrapper) {
+ MOZ_ASSERT(IsCrossCompartmentWrapper(wrapper));
+ MOZ_ASSERT(UncheckedUnwrapWithoutExpose(wrapper)->zone() != zone);
+
+ auto ptr = weakSet.lookupForAdd(wrapper);
+ MOZ_ASSERT(ptr);
+ weakSet.remove(ptr);
+}
+
+static FinalizationRecordObject* UnwrapFinalizationRecord(JSObject* obj) {
+ obj = UncheckedUnwrapWithoutExpose(obj);
+ if (!obj->is<FinalizationRecordObject>()) {
+ MOZ_ASSERT(JS_IsDeadWrapper(obj));
+ // CCWs between the compartments have been nuked. The
+ // FinalizationRegistry's callback doesn't run in this case.
+ return nullptr;
+ }
+ return &obj->as<FinalizationRecordObject>();
+}
+
+void FinalizationObservers::clearRecords() {
+ // Clear table entries related to FinalizationRecordObjects, which are not
+ // processed after the start of shutdown.
+ //
+ // WeakRefs are still updated during shutdown to avoid the possibility of
+ // stale or dangling pointers.
+
+#ifdef DEBUG
+ checkTables();
+#endif
+
+ recordMap.clear();
+ crossZoneRecords.clear();
+}
+
+void GCRuntime::traceWeakFinalizationObserverEdges(JSTracer* trc, Zone* zone) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(trc->runtime()));
+ FinalizationObservers* observers = zone->finalizationObservers();
+ if (observers) {
+ observers->traceWeakEdges(trc);
+ }
+}
+
+void FinalizationObservers::traceRoots(JSTracer* trc) {
+ // The cross-zone wrapper weak maps are traced as roots; this does not keep
+ // any of their entries alive by itself.
+ crossZoneRecords.trace(trc);
+ crossZoneWeakRefs.trace(trc);
+}
+
+void FinalizationObservers::traceWeakEdges(JSTracer* trc) {
+ // Removing dead pointers from vectors may reorder live pointers to gray
+ // things in the vector. This is OK.
+ AutoTouchingGrayThings atgt;
+
+ traceWeakWeakRefEdges(trc);
+ traceWeakFinalizationRegistryEdges(trc);
+}
+
+void FinalizationObservers::traceWeakFinalizationRegistryEdges(JSTracer* trc) {
+ // Sweep finalization registry data and queue finalization records for cleanup
+ // for any entries whose target is dying and remove them from the map.
+
+ GCRuntime* gc = &trc->runtime()->gc;
+
+ for (RegistrySet::Enum e(registries); !e.empty(); e.popFront()) {
+ auto result = TraceWeakEdge(trc, &e.mutableFront(), "FinalizationRegistry");
+ if (result.isDead()) {
+ auto* registry =
+ &result.initialTarget()->as<FinalizationRegistryObject>();
+ registry->queue()->setHasRegistry(false);
+ e.removeFront();
+ } else {
+ result.finalTarget()->as<FinalizationRegistryObject>().traceWeak(trc);
+ }
+ }
+
+ for (RecordMap::Enum e(recordMap); !e.empty(); e.popFront()) {
+ RecordVector& records = e.front().value();
+
+ // Sweep finalization records, updating any pointers moved by the GC and
+ // remove if necessary.
+ records.mutableEraseIf([&](HeapPtr<JSObject*>& heapPtr) {
+ auto result = TraceWeakEdge(trc, &heapPtr, "FinalizationRecord");
+ JSObject* obj =
+ result.isLive() ? result.finalTarget() : result.initialTarget();
+ FinalizationRecordObject* record = UnwrapFinalizationRecord(obj);
+ MOZ_ASSERT_IF(record, record->isInRecordMap());
+
+ bool shouldRemove = !result.isLive() || shouldRemoveRecord(record);
+ if (shouldRemove && record && record->isInRecordMap()) {
+ updateForRemovedRecord(obj, record);
+ }
+
+ return shouldRemove;
+ });
+
+#ifdef DEBUG
+ for (JSObject* obj : records) {
+ MOZ_ASSERT(UnwrapFinalizationRecord(obj)->isInRecordMap());
+ }
+#endif
+
+ // Queue finalization records for targets that are dying.
+ if (!TraceWeakEdge(trc, &e.front().mutableKey(),
+ "FinalizationRecord target")) {
+ for (JSObject* obj : records) {
+ FinalizationRecordObject* record = UnwrapFinalizationRecord(obj);
+ FinalizationQueueObject* queue = record->queue();
+ updateForRemovedRecord(obj, record);
+ queue->queueRecordToBeCleanedUp(record);
+ gc->queueFinalizationRegistryForCleanup(queue);
+ }
+ e.removeFront();
+ }
+ }
+}
+
+// static
+bool FinalizationObservers::shouldRemoveRecord(
+ FinalizationRecordObject* record) {
+ // Records are removed from the target's vector for the following reasons:
+ return !record || // Nuked CCW to record.
+ !record->isRegistered() || // Unregistered record.
+ !record->queue()->hasRegistry(); // Dead finalization registry.
+}
+
+void FinalizationObservers::updateForRemovedRecord(
+ JSObject* wrapper, FinalizationRecordObject* record) {
+ // Remove other references to a record when it has been removed from the
+ // zone's record map. See addRecord().
+ MOZ_ASSERT(record->isInRecordMap());
+
+ Zone* registryZone = record->zone();
+ if (registryZone != zone) {
+ removeCrossZoneWrapper(crossZoneRecords, wrapper);
+ }
+
+ GlobalObject* registryGlobal = &record->global();
+ auto* globalData = registryGlobal->maybeFinalizationRegistryData();
+ globalData->removeRecord(record);
+
+ // The removed record may be gray, and that's OK.
+ AutoTouchingGrayThings atgt;
+
+ record->setInRecordMap(false);
+}
+
+void GCRuntime::nukeFinalizationRecordWrapper(
+ JSObject* wrapper, FinalizationRecordObject* record) {
+ if (record->isInRecordMap()) {
+ FinalizationRegistryObject::unregisterRecord(record);
+ FinalizationObservers* observers = wrapper->zone()->finalizationObservers();
+ observers->updateForRemovedRecord(wrapper, record);
+ }
+}
+
+void GCRuntime::queueFinalizationRegistryForCleanup(
+ FinalizationQueueObject* queue) {
+ // Prod the embedding to call us back later to run the finalization callbacks,
+ // if necessary.
+
+ if (queue->isQueuedForCleanup()) {
+ return;
+ }
+
+ // Derive the incumbent global by unwrapping the incumbent global object and
+ // then getting its global.
+ JSObject* object = UncheckedUnwrapWithoutExpose(queue->incumbentObject());
+ MOZ_ASSERT(object);
+ GlobalObject* incumbentGlobal = &object->nonCCWGlobal();
+
+ callHostCleanupFinalizationRegistryCallback(queue->doCleanupFunction(),
+ incumbentGlobal);
+
+ // The queue object may be gray, and that's OK.
+ AutoTouchingGrayThings atgt;
+
+ queue->setQueuedForCleanup(true);
+}
+
+// Insert a target -> weakRef mapping in the target's Zone so that a dying
+// target will clear out the weakRef's target. If the weakRef is in a different
+// Zone, then the crossZoneWeakRefs table will keep the weakRef alive. If the
+// weakRef is in the same Zone, then it must be the actual WeakRefObject and
+// not a cross-compartment wrapper, since nothing would keep that alive.
+bool GCRuntime::registerWeakRef(HandleObject target, HandleObject weakRef) {
+ MOZ_ASSERT(!IsCrossCompartmentWrapper(target));
+ MOZ_ASSERT(UncheckedUnwrap(weakRef)->is<WeakRefObject>());
+ MOZ_ASSERT_IF(target->zone() != weakRef->zone(),
+ target->compartment() == weakRef->compartment());
+
+ Zone* zone = target->zone();
+ return zone->ensureFinalizationObservers() &&
+ zone->finalizationObservers()->addWeakRefTarget(target, weakRef);
+}
+
+bool FinalizationObservers::addWeakRefTarget(HandleObject target,
+ HandleObject weakRef) {
+ WeakRefObject* unwrappedWeakRef =
+ &UncheckedUnwrapWithoutExpose(weakRef)->as<WeakRefObject>();
+
+ Zone* weakRefZone = unwrappedWeakRef->zone();
+ bool crossZone = weakRefZone != zone;
+ if (crossZone && !addCrossZoneWrapper(crossZoneWeakRefs, weakRef)) {
+ return false;
+ }
+ auto wrapperGuard = mozilla::MakeScopeExit([&] {
+ if (crossZone) {
+ removeCrossZoneWrapper(crossZoneWeakRefs, weakRef);
+ }
+ });
+
+ auto ptr = weakRefMap.lookupForAdd(target);
+ if (!ptr && !weakRefMap.add(ptr, target, WeakRefHeapPtrVector(zone))) {
+ return false;
+ }
+
+ if (!ptr->value().emplaceBack(weakRef)) {
+ return false;
+ }
+
+ wrapperGuard.release();
+ return true;
+}
+
+static WeakRefObject* UnwrapWeakRef(JSObject* obj) {
+ MOZ_ASSERT(!JS_IsDeadWrapper(obj));
+ obj = UncheckedUnwrapWithoutExpose(obj);
+ return &obj->as<WeakRefObject>();
+}
+
+void FinalizationObservers::removeWeakRefTarget(
+ Handle<JSObject*> target, Handle<WeakRefObject*> weakRef) {
+ MOZ_ASSERT(target);
+
+ WeakRefHeapPtrVector& weakRefs = weakRefMap.lookup(target)->value();
+ JSObject* wrapper = nullptr;
+ weakRefs.eraseIf([weakRef, &wrapper](JSObject* obj) {
+ if (UnwrapWeakRef(obj) == weakRef) {
+ wrapper = obj;
+ return true;
+ }
+ return false;
+ });
+
+ MOZ_ASSERT(wrapper);
+ updateForRemovedWeakRef(wrapper, weakRef);
+}
+
+void GCRuntime::nukeWeakRefWrapper(JSObject* wrapper, WeakRefObject* weakRef) {
+ // WeakRef wrappers can exist independently of the ones we create for the
+ // weakRefMap so don't assume |wrapper| is in the same zone as the WeakRef
+ // target.
+ JSObject* target = weakRef->target();
+ if (!target) {
+ return;
+ }
+
+ FinalizationObservers* observers = target->zone()->finalizationObservers();
+ if (observers) {
+ observers->unregisterWeakRefWrapper(wrapper, weakRef);
+ }
+}
+
+void FinalizationObservers::unregisterWeakRefWrapper(JSObject* wrapper,
+ WeakRefObject* weakRef) {
+ JSObject* target = weakRef->target();
+ MOZ_ASSERT(target);
+
+ bool removed = false;
+ WeakRefHeapPtrVector& weakRefs = weakRefMap.lookup(target)->value();
+ weakRefs.eraseIf([wrapper, &removed](JSObject* obj) {
+ bool remove = obj == wrapper;
+ if (remove) {
+ removed = true;
+ }
+ return remove;
+ });
+
+ if (removed) {
+ updateForRemovedWeakRef(wrapper, weakRef);
+ }
+}
+
+void FinalizationObservers::updateForRemovedWeakRef(JSObject* wrapper,
+ WeakRefObject* weakRef) {
+ weakRef->clearTarget();
+
+ Zone* weakRefZone = weakRef->zone();
+ if (weakRefZone != zone) {
+ removeCrossZoneWrapper(crossZoneWeakRefs, wrapper);
+ }
+}
+
+void FinalizationObservers::traceWeakWeakRefEdges(JSTracer* trc) {
+ for (WeakRefMap::Enum e(weakRefMap); !e.empty(); e.popFront()) {
+ // If target is dying, clear the target field of all weakRefs, and remove
+ // the entry from the map.
+ auto result = TraceWeakEdge(trc, &e.front().mutableKey(), "WeakRef target");
+ if (result.isDead()) {
+ for (JSObject* obj : e.front().value()) {
+ updateForRemovedWeakRef(obj, UnwrapWeakRef(obj));
+ }
+ e.removeFront();
+ } else {
+ // Update the target field after compacting.
+ traceWeakWeakRefVector(trc, e.front().value(), result.finalTarget());
+ }
+ }
+}
+
+void FinalizationObservers::traceWeakWeakRefVector(
+ JSTracer* trc, WeakRefHeapPtrVector& weakRefs, JSObject* target) {
+ weakRefs.mutableEraseIf([&](HeapPtr<JSObject*>& obj) -> bool {
+ auto result = TraceWeakEdge(trc, &obj, "WeakRef");
+ if (result.isDead()) {
+ JSObject* wrapper = result.initialTarget();
+ updateForRemovedWeakRef(wrapper, UnwrapWeakRef(wrapper));
+ } else {
+ UnwrapWeakRef(result.finalTarget())->setTargetUnbarriered(target);
+ }
+ return result.isDead();
+ });
+}
+
+#ifdef DEBUG
+void FinalizationObservers::checkTables() const {
+ // Check all cross-zone wrappers are present in the appropriate table.
+ size_t recordCount = 0;
+ for (auto r = recordMap.all(); !r.empty(); r.popFront()) {
+ for (JSObject* object : r.front().value()) {
+ FinalizationRecordObject* record = UnwrapFinalizationRecord(object);
+ if (record && record->isInRecordMap() && record->zone() != zone) {
+ MOZ_ASSERT(crossZoneRecords.has(object));
+ recordCount++;
+ }
+ }
+ }
+ MOZ_ASSERT(crossZoneRecords.count() == recordCount);
+
+ size_t weakRefCount = 0;
+ for (auto r = weakRefMap.all(); !r.empty(); r.popFront()) {
+ for (JSObject* object : r.front().value()) {
+ WeakRefObject* weakRef = UnwrapWeakRef(object);
+ if (weakRef && weakRef->zone() != zone) {
+ MOZ_ASSERT(crossZoneWeakRefs.has(object));
+ weakRefCount++;
+ }
+ }
+ }
+ MOZ_ASSERT(crossZoneWeakRefs.count() == weakRefCount);
+}
+#endif
+
+FinalizationRegistryGlobalData::FinalizationRegistryGlobalData(Zone* zone)
+ : recordSet(zone) {}
+
+bool FinalizationRegistryGlobalData::addRecord(
+ FinalizationRecordObject* record) {
+ return recordSet.putNew(record);
+}
+
+void FinalizationRegistryGlobalData::removeRecord(
+ FinalizationRecordObject* record) {
+ MOZ_ASSERT_IF(!record->runtimeFromMainThread()->gc.isShuttingDown(),
+ recordSet.has(record));
+ recordSet.remove(record);
+}
+
+void FinalizationRegistryGlobalData::trace(JSTracer* trc) {
+ recordSet.trace(trc);
+}
diff --git a/js/src/gc/FinalizationObservers.h b/js/src/gc/FinalizationObservers.h
new file mode 100644
index 0000000000..eee8ef14e0
--- /dev/null
+++ b/js/src/gc/FinalizationObservers.h
@@ -0,0 +1,129 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_FinalizationObservers_h
+#define gc_FinalizationObservers_h
+
+#include "gc/Barrier.h"
+#include "gc/WeakMap.h"
+#include "gc/ZoneAllocator.h"
+#include "js/GCHashTable.h"
+#include "js/GCVector.h"
+
+namespace js {
+
+class FinalizationRegistryObject;
+class FinalizationRecordObject;
+class WeakRefObject;
+
+namespace gc {
+
+// Per-zone data structures to support FinalizationRegistry and WeakRef.
+class FinalizationObservers {
+ Zone* const zone;
+
+ // The set of all finalization registries in the associated zone. These are
+ // direct pointers and are not wrapped.
+ using RegistrySet =
+ GCHashSet<HeapPtr<JSObject*>, StableCellHasher<HeapPtr<JSObject*>>,
+ ZoneAllocPolicy>;
+ RegistrySet registries;
+
+ // A vector of FinalizationRecord objects, or CCWs to them.
+ using RecordVector = GCVector<HeapPtr<JSObject*>, 1, ZoneAllocPolicy>;
+
+ // A map from finalization registry targets to a vector of finalization
+ // records representing registries that the target is registered with and
+ // their associated held values. The records may be in other zones and are
+ // wrapped appropriately.
+ using RecordMap =
+ GCHashMap<HeapPtr<JSObject*>, RecordVector,
+ StableCellHasher<HeapPtr<JSObject*>>, ZoneAllocPolicy>;
+ RecordMap recordMap;
+
+ // A weak map used as a set of cross-zone wrappers. This is used for both
+ // finalization registries and weak refs. For the former it has wrappers to
+ // finalization record objects and for the latter wrappers to weak refs.
+ //
+ // The weak map marking rules keep the wrappers alive while their targets are
+ // alive and ensure that they are both swept in the same sweep group.
+ using WrapperWeakSet = ObjectValueWeakMap;
+ WrapperWeakSet crossZoneRecords;
+
+ // A map of weak ref targets to a vector of weak refs that are observing the
+ // target. The weak refs may be in other zones and are wrapped appropriately.
+ using WeakRefHeapPtrVector =
+ GCVector<js::HeapPtr<JSObject*>, 1, js::ZoneAllocPolicy>;
+ using WeakRefMap =
+ GCHashMap<HeapPtr<JSObject*>, WeakRefHeapPtrVector,
+ StableCellHasher<HeapPtr<JSObject*>>, ZoneAllocPolicy>;
+ WeakRefMap weakRefMap;
+
+ // A weak map used as a set of cross-zone weak refs wrappers.
+ WrapperWeakSet crossZoneWeakRefs;
+
+ public:
+ explicit FinalizationObservers(Zone* zone);
+ ~FinalizationObservers();
+
+ // FinalizationRegistry support:
+ bool addRegistry(Handle<FinalizationRegistryObject*> registry);
+ bool addRecord(HandleObject target, HandleObject record);
+ void clearRecords();
+
+ void updateForRemovedRecord(JSObject* wrapper,
+ FinalizationRecordObject* record);
+
+ // WeakRef support:
+ bool addWeakRefTarget(Handle<JSObject*> target, Handle<JSObject*> weakRef);
+ void removeWeakRefTarget(Handle<JSObject*> target,
+ Handle<WeakRefObject*> weakRef);
+
+ void unregisterWeakRefWrapper(JSObject* wrapper, WeakRefObject* weakRef);
+
+ void traceRoots(JSTracer* trc);
+ void traceWeakEdges(JSTracer* trc);
+
+#ifdef DEBUG
+ void checkTables() const;
+#endif
+
+ private:
+ bool addCrossZoneWrapper(WrapperWeakSet& weakSet, JSObject* wrapper);
+ void removeCrossZoneWrapper(WrapperWeakSet& weakSet, JSObject* wrapper);
+
+ void updateForRemovedWeakRef(JSObject* wrapper, WeakRefObject* weakRef);
+
+ void traceWeakFinalizationRegistryEdges(JSTracer* trc);
+ void traceWeakWeakRefEdges(JSTracer* trc);
+ void traceWeakWeakRefVector(JSTracer* trc, WeakRefHeapPtrVector& weakRefs,
+ JSObject* target);
+
+ static bool shouldRemoveRecord(FinalizationRecordObject* record);
+};
+
+// Per-global data structures to support FinalizationRegistry.
+class FinalizationRegistryGlobalData {
+ // Set of finalization records for finalization registries in this
+ // realm. These are traced as part of the realm's global.
+ using RecordSet =
+ GCHashSet<HeapPtr<JSObject*>, StableCellHasher<HeapPtr<JSObject*>>,
+ ZoneAllocPolicy>;
+ RecordSet recordSet;
+
+ public:
+ explicit FinalizationRegistryGlobalData(Zone* zone);
+
+ bool addRecord(FinalizationRecordObject* record);
+ void removeRecord(FinalizationRecordObject* record);
+
+ void trace(JSTracer* trc);
+};
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_FinalizationObservers_h
diff --git a/js/src/gc/FindSCCs.h b/js/src/gc/FindSCCs.h
new file mode 100644
index 0000000000..97069bcd09
--- /dev/null
+++ b/js/src/gc/FindSCCs.h
@@ -0,0 +1,207 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_FindSCCs_h
+#define gc_FindSCCs_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include <algorithm> // std::min
+
+#include "js/AllocPolicy.h" // js::SystemAllocPolicy
+#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit
+#include "js/HashTable.h" // js::HashSet, js::DefaultHasher
+
+namespace js {
+namespace gc {
+
+template <typename Node>
+struct GraphNodeBase {
+ using NodeSet =
+ js::HashSet<Node*, js::DefaultHasher<Node*>, js::SystemAllocPolicy>;
+
+ NodeSet gcGraphEdges;
+ Node* gcNextGraphNode = nullptr;
+ Node* gcNextGraphComponent = nullptr;
+ unsigned gcDiscoveryTime = 0;
+ unsigned gcLowLink = 0;
+
+ Node* nextNodeInGroup() const {
+ if (gcNextGraphNode &&
+ gcNextGraphNode->gcNextGraphComponent == gcNextGraphComponent) {
+ return gcNextGraphNode;
+ }
+ return nullptr;
+ }
+
+ Node* nextGroup() const { return gcNextGraphComponent; }
+};
+
+/*
+ * Find the strongly connected components of a graph using Tarjan's algorithm,
+ * and return them in topological order.
+ *
+ * Nodes derive from GraphNodeBase and add target edge pointers to
+ * sourceNode.gcGraphEdges to describe the graph:
+ *
+ * struct MyGraphNode : public GraphNodeBase<MyGraphNode>
+ * {
+ * ...
+ * }
+ *
+ * MyGraphNode node1, node2, node3;
+ * node1.gcGraphEdges.put(node2); // Error checking elided.
+ * node2.gcGraphEdges.put(node3);
+ * node3.gcGraphEdges.put(node2);
+ *
+ * ComponentFinder<MyGraphNode> finder;
+ * finder.addNode(node1);
+ * finder.addNode(node2);
+ * finder.addNode(node3);
+ * MyGraphNode* result = finder.getResultsList();
+ */
+
+template <typename Node>
+class ComponentFinder {
+ public:
+ explicit ComponentFinder(JSContext* cx) : cx(cx) {}
+
+ ~ComponentFinder() {
+ MOZ_ASSERT(!stack);
+ MOZ_ASSERT(!firstComponent);
+ }
+
+ /* Forces all nodes to be added to a single component. */
+ void useOneComponent() { stackFull = true; }
+
+ void addNode(Node* v) {
+ if (v->gcDiscoveryTime == Undefined) {
+ MOZ_ASSERT(v->gcLowLink == Undefined);
+ processNode(v);
+ }
+ }
+
+ Node* getResultsList() {
+ if (stackFull) {
+ /*
+ * All nodes after the stack overflow are in |stack|. Put them all in
+ * one big component of their own.
+ */
+ Node* firstGoodComponent = firstComponent;
+ for (Node* v = stack; v; v = stack) {
+ stack = v->gcNextGraphNode;
+ v->gcNextGraphComponent = firstGoodComponent;
+ v->gcNextGraphNode = firstComponent;
+ firstComponent = v;
+ }
+ stackFull = false;
+ }
+
+ MOZ_ASSERT(!stack);
+
+ Node* result = firstComponent;
+ firstComponent = nullptr;
+
+ for (Node* v = result; v; v = v->gcNextGraphNode) {
+ v->gcDiscoveryTime = Undefined;
+ v->gcLowLink = Undefined;
+ }
+
+ return result;
+ }
+
+ static void mergeGroups(Node* first) {
+ for (Node* v = first; v; v = v->gcNextGraphNode) {
+ v->gcNextGraphComponent = nullptr;
+ }
+ }
+
+ private:
+ // Constant used to indicate an unprocessed vertex.
+ static const unsigned Undefined = 0;
+
+ // Constant used to indicate a processed vertex that is no longer on the
+ // stack.
+ static const unsigned Finished = (unsigned)-1;
+
+ void addEdgeTo(Node* w) {
+ if (w->gcDiscoveryTime == Undefined) {
+ processNode(w);
+ cur->gcLowLink = std::min(cur->gcLowLink, w->gcLowLink);
+ } else if (w->gcDiscoveryTime != Finished) {
+ cur->gcLowLink = std::min(cur->gcLowLink, w->gcDiscoveryTime);
+ }
+ }
+
+ void processNode(Node* v) {
+ v->gcDiscoveryTime = clock;
+ v->gcLowLink = clock;
+ ++clock;
+
+ v->gcNextGraphNode = stack;
+ stack = v;
+
+ if (stackFull) {
+ return;
+ }
+
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.checkSystemDontReport(cx)) {
+ stackFull = true;
+ return;
+ }
+
+ Node* old = cur;
+ cur = v;
+ for (auto r = cur->gcGraphEdges.all(); !r.empty(); r.popFront()) {
+ addEdgeTo(r.front());
+ }
+ cur = old;
+
+ if (stackFull) {
+ return;
+ }
+
+ if (v->gcLowLink == v->gcDiscoveryTime) {
+ Node* nextComponent = firstComponent;
+ Node* w;
+ do {
+ MOZ_ASSERT(stack);
+ w = stack;
+ stack = w->gcNextGraphNode;
+
+ /*
+ * Record that the element is no longer on the stack by setting the
+ * discovery time to a special value that's not Undefined.
+ */
+ w->gcDiscoveryTime = Finished;
+
+ /* Figure out which group we're in. */
+ w->gcNextGraphComponent = nextComponent;
+
+ /*
+ * Prepend the component to the beginning of the output list to
+ * reverse the list and achieve the desired order.
+ */
+ w->gcNextGraphNode = firstComponent;
+ firstComponent = w;
+ } while (w != v);
+ }
+ }
+
+ private:
+ unsigned clock = 1;
+ Node* stack = nullptr;
+ Node* firstComponent = nullptr;
+ Node* cur = nullptr;
+ JSContext* cx;
+ bool stackFull = false;
+};
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_FindSCCs_h */
diff --git a/js/src/gc/GC-inl.h b/js/src/gc/GC-inl.h
new file mode 100644
index 0000000000..1c177f51ab
--- /dev/null
+++ b/js/src/gc/GC-inl.h
@@ -0,0 +1,344 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GC_inl_h
+#define gc_GC_inl_h
+
+#include "gc/GC.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+
+#include "gc/IteratorUtils.h"
+#include "gc/Marking.h"
+#include "gc/Zone.h"
+#include "vm/Runtime.h"
+
+#include "gc/ArenaList-inl.h"
+
+namespace js {
+namespace gc {
+
+class AutoAssertEmptyNursery;
+
+class ArenaListIter {
+ Arena* arena;
+
+ public:
+ explicit ArenaListIter(Arena* head) : arena(head) {}
+ bool done() const { return !arena; }
+ Arena* get() const {
+ MOZ_ASSERT(!done());
+ return arena;
+ }
+ void next() {
+ MOZ_ASSERT(!done());
+ arena = arena->next;
+ }
+
+ operator Arena*() const { return get(); }
+ Arena* operator->() const { return get(); }
+};
+
+class ArenaIter : public ChainedIterator<ArenaListIter, 3> {
+ public:
+ ArenaIter(JS::Zone* zone, AllocKind kind)
+ : ChainedIterator(zone->arenas.getFirstArena(kind),
+ zone->arenas.getFirstCollectingArena(kind),
+ zone->arenas.getFirstSweptArena(kind)) {}
+};
+
+class ArenaCellIter {
+ size_t firstThingOffset;
+ size_t thingSize;
+ Arena* arenaAddr;
+ FreeSpan span;
+ uint_fast16_t thing;
+ mozilla::DebugOnly<JS::TraceKind> traceKind;
+
+ // Upon entry, |thing| points to any thing (free or used) and finds the
+ // first used thing, which may be |thing|.
+ void settle() {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(thing);
+ // Note: if |span| is empty, this test will fail, which is what we want
+ // -- |span| being empty means that we're past the end of the last free
+ // thing, all the remaining things in the arena are used, and we'll
+ // never need to move forward.
+ if (thing == span.first) {
+ thing = span.last + thingSize;
+ span = *span.nextSpan(arenaAddr);
+ }
+ }
+
+ public:
+ explicit ArenaCellIter(Arena* arena) {
+ MOZ_ASSERT(arena);
+ AllocKind kind = arena->getAllocKind();
+ firstThingOffset = Arena::firstThingOffset(kind);
+ thingSize = Arena::thingSize(kind);
+ traceKind = MapAllocToTraceKind(kind);
+ arenaAddr = arena;
+ span = *arena->getFirstFreeSpan();
+ thing = firstThingOffset;
+ settle();
+ }
+
+ bool done() const {
+ MOZ_ASSERT(thing <= ArenaSize);
+ return thing == ArenaSize;
+ }
+
+ TenuredCell* get() const {
+ MOZ_ASSERT(!done());
+ return reinterpret_cast<TenuredCell*>(uintptr_t(arenaAddr) + thing);
+ }
+
+ template <typename T>
+ T* as() const {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(JS::MapTypeToTraceKind<T>::kind == traceKind);
+ return reinterpret_cast<T*>(get());
+ }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ thing += thingSize;
+ if (thing < ArenaSize) {
+ settle();
+ }
+ }
+
+ operator TenuredCell*() const { return get(); }
+ TenuredCell* operator->() const { return get(); }
+};
+
+template <typename T>
+class ZoneAllCellIter;
+
+template <>
+class ZoneAllCellIter<TenuredCell> {
+ mozilla::Maybe<NestedIterator<ArenaIter, ArenaCellIter>> iter;
+ mozilla::Maybe<JS::AutoAssertNoGC> nogc;
+
+ protected:
+ // For use when a subclass wants to insert some setup before init().
+ ZoneAllCellIter() = default;
+
+ void init(JS::Zone* zone, AllocKind kind) {
+ MOZ_ASSERT_IF(IsNurseryAllocable(kind),
+ (zone->isAtomsZone() ||
+ zone->runtimeFromMainThread()->gc.nursery().isEmpty()));
+ initForTenuredIteration(zone, kind);
+ }
+
+ void initForTenuredIteration(JS::Zone* zone, AllocKind kind) {
+ JSRuntime* rt = zone->runtimeFromAnyThread();
+
+ // If called from outside a GC, ensure that the heap is in a state
+ // that allows us to iterate.
+ if (!JS::RuntimeHeapIsBusy()) {
+ // Assert that no GCs can occur while a ZoneAllCellIter is live.
+ nogc.emplace();
+ }
+
+ // We have a single-threaded runtime, so there's no need to protect
+ // against other threads iterating or allocating. However, we do have
+ // background finalization; we may have to wait for this to finish if
+ // it's currently active.
+ if (IsBackgroundFinalized(kind) &&
+ zone->arenas.needBackgroundFinalizeWait(kind)) {
+ rt->gc.waitBackgroundSweepEnd();
+ }
+ iter.emplace(zone, kind);
+ }
+
+ public:
+ ZoneAllCellIter(JS::Zone* zone, AllocKind kind) {
+ // If we are iterating a nursery-allocated kind then we need to
+ // evict first so that we can see all things.
+ if (IsNurseryAllocable(kind)) {
+ zone->runtimeFromMainThread()->gc.evictNursery();
+ }
+
+ init(zone, kind);
+ }
+
+ ZoneAllCellIter(JS::Zone* zone, AllocKind kind,
+ const js::gc::AutoAssertEmptyNursery&) {
+ // No need to evict the nursery. (This constructor is known statically
+ // to not GC.)
+ init(zone, kind);
+ }
+
+ bool done() const { return iter->done(); }
+
+ template <typename T>
+ T* get() const {
+ return iter->ref().as<T>();
+ }
+
+ TenuredCell* getCell() const { return iter->get(); }
+
+ void next() { iter->next(); }
+};
+
+/* clang-format off */
+//
+// Iterator over the cells in a Zone, where the GC type (JSString, JSObject) is
+// known, for a single AllocKind. Example usages:
+//
+// for (auto obj = zone->cellIter<JSObject>(AllocKind::OBJECT0); !obj.done(); obj.next()) {
+// ...
+// }
+//
+// for (auto script = zone->cellIter<JSScript>(); !script.done(); script.next()) {
+// f(script->code());
+// }
+//
+// As this code demonstrates, you can use 'script' as if it were a JSScript*.
+// Its actual type is ZoneAllCellIter<JSScript>, but for most purposes it will
+// autoconvert to JSScript*.
+//
+// Note that in the JSScript case, ZoneAllCellIter is able to infer the AllocKind
+// from the type 'JSScript', whereas in the JSObject case, the kind must be
+// given (because there are multiple AllocKinds for objects).
+//
+// Also, the static rooting hazard analysis knows that the JSScript case will
+// not GC during construction. The JSObject case needs to GC, or more precisely
+// to empty the nursery and clear out the store buffer, so that it can see all
+// objects to iterate over (the nursery is not iterable) and remove the
+// possibility of having pointers from the store buffer to data hanging off
+// stuff we're iterating over that we are going to delete. (The latter should
+// not be a problem, since such instances should be using RelocatablePtr do
+// remove themselves from the store buffer on deletion, but currently for
+// subtle reasons that isn't good enough.)
+//
+// If the iterator is used within a GC, then there is no need to evict the
+// nursery (again). You may select a variant that will skip the eviction either
+// by specializing on a GCType that is never allocated in the nursery, or
+// explicitly by passing in a trailing AutoAssertEmptyNursery argument.
+//
+// NOTE: This class can return items that are about to be swept/finalized.
+// You must not keep pointers to such items across GCs. Use
+// ZoneCellIter below to filter these out.
+//
+// NOTE: This class also does not read barrier returned items, so may return
+// gray cells. You must not store such items anywhere on the heap without
+// gray-unmarking them. Use ZoneCellIter to automatically unmark them.
+//
+/* clang-format on */
template <typename GCType>
class ZoneAllCellIter : public ZoneAllCellIter<TenuredCell> {
 public:
  // Non-nursery allocated (equivalent to having an entry in
  // MapTypeToAllocKind). The template declaration here is to discard this
  // constructor overload if MapTypeToAllocKind<GCType>::kind does not
  // exist. Note that there will be no remaining overloads that will work, which
  // makes sense given that you haven't specified which of the AllocKinds to use
  // for GCType.
  //
  // If we later add a nursery allocable GCType with a single AllocKind, we will
  // want to add an overload of this constructor that does the right thing (ie,
  // it empties the nursery before iterating.)
  explicit ZoneAllCellIter(JS::Zone* zone) : ZoneAllCellIter<TenuredCell>() {
    init(zone, MapTypeToAllocKind<GCType>::kind);
  }

  // Non-nursery allocated, nursery is known to be empty: same behavior as
  // above.
  ZoneAllCellIter(JS::Zone* zone, const js::gc::AutoAssertEmptyNursery&)
      : ZoneAllCellIter(zone) {}

  // Arbitrary kind, which will be assumed to be nursery allocable (and
  // therefore the nursery will be emptied before iterating.)
  ZoneAllCellIter(JS::Zone* zone, AllocKind kind)
      : ZoneAllCellIter<TenuredCell>(zone, kind) {}

  // Arbitrary kind, which will be assumed to be nursery allocable, but the
  // nursery is known to be empty already: same behavior as non-nursery types.
  ZoneAllCellIter(JS::Zone* zone, AllocKind kind,
                  const js::gc::AutoAssertEmptyNursery& empty)
      : ZoneAllCellIter<TenuredCell>(zone, kind, empty) {}

  // Typed accessors. The conversion operators below let the iterator itself
  // be used as if it were a GCType*.
  GCType* get() const { return ZoneAllCellIter<TenuredCell>::get<GCType>(); }
  operator GCType*() const { return get(); }
  GCType* operator->() const { return get(); }
};
+
// Like the above class but filter out cells that are about to be finalized.
// Also, read barrier all cells returned (unless the Unbarriered variants are
// used) to prevent gray cells from escaping.
template <typename T>
class ZoneCellIter : protected ZoneAllCellIter<T> {
  using Base = ZoneAllCellIter<T>;

 public:
  /*
   * The same constructors as above.
   */
  explicit ZoneCellIter(JS::Zone* zone) : ZoneAllCellIter<T>(zone) {
    skipDying();
  }
  ZoneCellIter(JS::Zone* zone, const js::gc::AutoAssertEmptyNursery& empty)
      : ZoneAllCellIter<T>(zone, empty) {
    skipDying();
  }
  ZoneCellIter(JS::Zone* zone, AllocKind kind)
      : ZoneAllCellIter<T>(zone, kind) {
    skipDying();
  }
  ZoneCellIter(JS::Zone* zone, AllocKind kind,
               const js::gc::AutoAssertEmptyNursery& empty)
      : ZoneAllCellIter<T>(zone, kind, empty) {
    skipDying();
  }

  using Base::done;

  void next() {
    ZoneAllCellIter<T>::next();
    // Maintain the invariant that the iterator never rests on a dying cell.
    skipDying();
  }

  TenuredCell* getCell() const {
    TenuredCell* cell = Base::getCell();

    // This can result in a new reference being created to an object that an
    // ongoing incremental GC may find to be unreachable, so we may need a
    // barrier here.
    JSRuntime* rt = cell->runtimeFromAnyThread();
    if (!JS::RuntimeHeapIsCollecting(rt->heapState())) {
      JS::TraceKind traceKind = JS::MapTypeToTraceKind<T>::kind;
      ExposeGCThingToActiveJS(JS::GCCellPtr(cell, traceKind));
    }

    return cell;
  }

  T* get() const { return reinterpret_cast<T*>(getCell()); }

  // Unbarriered variants: may return gray cells. Callers must not store the
  // result on the heap without gray-unmarking it.
  TenuredCell* unbarrieredGetCell() const { return Base::getCell(); }
  T* unbarrieredGet() const { return Base::get(); }
  operator T*() const { return get(); }
  T* operator->() const { return get(); }

 private:
  // Advance past any cells that an in-progress sweep has determined to be
  // dead, so callers never observe them.
  void skipDying() {
    while (!ZoneAllCellIter<T>::done()) {
      T* current = ZoneAllCellIter<T>::get();
      if (!IsAboutToBeFinalizedUnbarriered(current)) {
        return;
      }
      ZoneAllCellIter<T>::next();
    }
  }
};
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_GC_inl_h */
diff --git a/js/src/gc/GC.cpp b/js/src/gc/GC.cpp
new file mode 100644
index 0000000000..b8e1d21f2a
--- /dev/null
+++ b/js/src/gc/GC.cpp
@@ -0,0 +1,5101 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * [SMDOC] Garbage Collector
+ *
+ * This code implements an incremental mark-and-sweep garbage collector, with
+ * most sweeping carried out in the background on a parallel thread.
+ *
+ * Full vs. zone GC
+ * ----------------
+ *
+ * The collector can collect all zones at once, or a subset. These types of
+ * collection are referred to as a full GC and a zone GC respectively.
+ *
+ * It is possible for an incremental collection that started out as a full GC to
+ * become a zone GC if new zones are created during the course of the
+ * collection.
+ *
+ * Incremental collection
+ * ----------------------
+ *
+ * For a collection to be carried out incrementally the following conditions
+ * must be met:
+ * - the collection must be run by calling js::GCSlice() rather than js::GC()
+ * - the GC parameter JSGC_INCREMENTAL_GC_ENABLED must be true.
+ *
+ * The last condition is an engine-internal mechanism to ensure that incremental
+ * collection is not carried out without the correct barriers being implemented.
+ * For more information see 'Incremental marking' below.
+ *
+ * If the collection is not incremental, all foreground activity happens inside
+ * a single call to GC() or GCSlice(). However the collection is not complete
+ * until the background sweeping activity has finished.
+ *
+ * An incremental collection proceeds as a series of slices, interleaved with
+ * mutator activity, i.e. running JavaScript code. Slices are limited by a time
+ * budget. The slice finishes as soon as possible after the requested time has
+ * passed.
+ *
+ * Collector states
+ * ----------------
+ *
+ * The collector proceeds through the following states, the current state being
+ * held in JSRuntime::gcIncrementalState:
+ *
+ * - Prepare - unmarks GC things, discards JIT code and other setup
+ * - MarkRoots - marks the stack and other roots
+ * - Mark - incrementally marks reachable things
+ * - Sweep - sweeps zones in groups and continues marking unswept zones
+ * - Finalize - performs background finalization, concurrent with mutator
+ * - Compact - incrementally compacts by zone
+ * - Decommit - performs background decommit and chunk removal
+ *
+ * Roots are marked in the first MarkRoots slice; this is the start of the GC
+ * proper. The following states can take place over one or more slices.
+ *
+ * In other words an incremental collection proceeds like this:
+ *
+ * Slice 1: Prepare: Starts background task to unmark GC things
+ *
+ * ... JS code runs, background unmarking finishes ...
+ *
+ * Slice 2: MarkRoots: Roots are pushed onto the mark stack.
+ * Mark: The mark stack is processed by popping an element,
+ * marking it, and pushing its children.
+ *
+ * ... JS code runs ...
+ *
+ * Slice 3: Mark: More mark stack processing.
+ *
+ * ... JS code runs ...
+ *
+ * Slice n-1: Mark: More mark stack processing.
+ *
+ * ... JS code runs ...
+ *
+ * Slice n: Mark: Mark stack is completely drained.
+ * Sweep: Select first group of zones to sweep and sweep them.
+ *
+ * ... JS code runs ...
+ *
+ * Slice n+1: Sweep: Mark objects in unswept zones that were newly
+ * identified as alive (see below). Then sweep more zone
+ * sweep groups.
+ *
+ * ... JS code runs ...
+ *
+ * Slice n+2: Sweep: Mark objects in unswept zones that were newly
+ * identified as alive. Then sweep more zones.
+ *
+ * ... JS code runs ...
+ *
+ * Slice m: Sweep: Sweeping is finished, and background sweeping
+ * started on the helper thread.
+ *
+ * ... JS code runs, remaining sweeping done on background thread ...
+ *
+ * When background sweeping finishes the GC is complete.
+ *
+ * Incremental marking
+ * -------------------
+ *
+ * Incremental collection requires close collaboration with the mutator (i.e.,
+ * JS code) to guarantee correctness.
+ *
+ * - During an incremental GC, if a memory location (except a root) is written
+ * to, then the value it previously held must be marked. Write barriers
+ * ensure this.
+ *
+ * - Any object that is allocated during incremental GC must start out marked.
+ *
+ * - Roots are marked in the first slice and hence don't need write barriers.
+ * Roots are things like the C stack and the VM stack.
+ *
+ * The problem that write barriers solve is that between slices the mutator can
+ * change the object graph. We must ensure that it cannot do this in such a way
+ * that makes us fail to mark a reachable object (marking an unreachable object
+ * is tolerable).
+ *
+ * We use a snapshot-at-the-beginning algorithm to do this. This means that we
+ * promise to mark at least everything that is reachable at the beginning of
+ * collection. To implement it we mark the old contents of every non-root memory
+ * location written to by the mutator while the collection is in progress, using
+ * write barriers. This is described in gc/Barrier.h.
+ *
+ * Incremental sweeping
+ * --------------------
+ *
+ * Sweeping is difficult to do incrementally because object finalizers must be
+ * run at the start of sweeping, before any mutator code runs. The reason is
+ * that some objects use their finalizers to remove themselves from caches. If
+ * mutator code was allowed to run after the start of sweeping, it could observe
+ * the state of the cache and create a new reference to an object that was just
+ * about to be destroyed.
+ *
+ * Sweeping all finalizable objects in one go would introduce long pauses, so
+ * instead sweeping is broken up into groups of zones. Zones which are not yet
+ * being swept are still marked, so the issue above does not apply.
+ *
+ * The order of sweeping is restricted by cross compartment pointers - for
+ * example say that object |a| from zone A points to object |b| in zone B and
+ * neither object was marked when we transitioned to the Sweep phase. Imagine we
+ * sweep B first and then return to the mutator. It's possible that the mutator
+ * could cause |a| to become alive through a read barrier (perhaps it was a
+ * shape that was accessed via a shape table). Then we would need to mark |b|,
+ * which |a| points to, but |b| has already been swept.
+ *
+ * So if there is such a pointer then marking of zone B must not finish before
+ * marking of zone A. Pointers which form a cycle between zones therefore
+ * restrict those zones to being swept at the same time, and these are found
+ * using Tarjan's algorithm for finding the strongly connected components of a
+ * graph.
+ *
+ * GC things without finalizers, and things with finalizers that are able to run
+ * in the background, are swept on the background thread. This accounts for most
+ * of the sweeping work.
+ *
+ * Reset
+ * -----
+ *
+ * During incremental collection it is possible, although unlikely, for
+ * conditions to change such that incremental collection is no longer safe. In
+ * this case, the collection is 'reset' by resetIncrementalGC(). If we are in
+ * the mark state, this just stops marking, but if we have started sweeping
+ * already, we continue non-incrementally until we have swept the current sweep
+ * group. Following a reset, a new collection is started.
+ *
+ * Compacting GC
+ * -------------
+ *
+ * Compacting GC happens at the end of a major GC as part of the last slice.
+ * There are three parts:
+ *
+ * - Arenas are selected for compaction.
+ * - The contents of those arenas are moved to new arenas.
+ * - All references to moved things are updated.
+ *
+ * Collecting Atoms
+ * ----------------
+ *
+ * Atoms are collected differently from other GC things. They are contained in
+ * a special zone and things in other zones may have pointers to them that are
+ * not recorded in the cross compartment pointer map. Each zone holds a bitmap
+ * with the atoms it might be keeping alive, and atoms are only collected if
+ * they are not included in any zone's atom bitmap. See AtomMarking.cpp for how
+ * this bitmap is managed.
+ */
+
+#include "gc/GC-inl.h"
+
+#include "mozilla/Range.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/TextUtils.h"
+#include "mozilla/TimeStamp.h"
+
+#include <algorithm>
+#include <initializer_list>
+#include <iterator>
+#include <stdlib.h>
+#include <string.h>
+#include <utility>
+
+#include "jsapi.h" // JS_AbortIfWrongThread
+#include "jstypes.h"
+
+#include "debugger/DebugAPI.h"
+#include "gc/ClearEdgesTracer.h"
+#include "gc/GCContext.h"
+#include "gc/GCInternals.h"
+#include "gc/GCLock.h"
+#include "gc/GCProbes.h"
+#include "gc/Memory.h"
+#include "gc/ParallelMarking.h"
+#include "gc/ParallelWork.h"
+#include "gc/WeakMap.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/JitCode.h"
+#include "jit/JitRealm.h"
+#include "jit/JitRuntime.h"
+#include "jit/ProcessExecutableMemory.h"
+#include "js/HeapAPI.h" // JS::GCCellPtr
+#include "js/Printer.h"
+#include "js/SliceBudget.h"
+#include "util/DifferentialTesting.h"
+#include "vm/BigIntType.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/GetterSetter.h"
+#include "vm/HelperThreadState.h"
+#include "vm/JitActivation.h"
+#include "vm/JSObject.h"
+#include "vm/JSScript.h"
+#include "vm/PropMap.h"
+#include "vm/Realm.h"
+#include "vm/Shape.h"
+#include "vm/StringType.h"
+#include "vm/SymbolType.h"
+#include "vm/Time.h"
+
+#include "gc/Heap-inl.h"
+#include "gc/Nursery-inl.h"
+#include "gc/ObjectKind-inl.h"
+#include "gc/PrivateIterators-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+#include "vm/JSContext-inl.h"
+#include "vm/Realm-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::MakeScopeExit;
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
+using JS::AutoGCRooter;
+
// Lookup table mapping a fixed-slot count (0..SLOTS_TO_THING_KIND_LIMIT-1)
// to the smallest object AllocKind with at least that many fixed slots.
const AllocKind gc::slotsToThingKind[] = {
    // clang-format off
    /* 0 */ AllocKind::OBJECT0, AllocKind::OBJECT2, AllocKind::OBJECT2, AllocKind::OBJECT4,
    /* 4 */ AllocKind::OBJECT4, AllocKind::OBJECT8, AllocKind::OBJECT8, AllocKind::OBJECT8,
    /* 8 */ AllocKind::OBJECT8, AllocKind::OBJECT12, AllocKind::OBJECT12, AllocKind::OBJECT12,
    /* 12 */ AllocKind::OBJECT12, AllocKind::OBJECT16, AllocKind::OBJECT16, AllocKind::OBJECT16,
    /* 16 */ AllocKind::OBJECT16
    // clang-format on
};

static_assert(std::size(slotsToThingKind) == SLOTS_TO_THING_KIND_LIMIT,
              "We have defined a slot count for each kind.");
+
+MOZ_THREAD_LOCAL(JS::GCContext*) js::TlsGCContext;
+
+JS::GCContext::GCContext(JSRuntime* runtime) : runtime_(runtime) {}
+
JS::GCContext::~GCContext() {
  // The context must not be torn down while GC-related state is live: all
  // queued JIT code poisoning must have been performed, and any collecting,
  // sweeping or gray-touching activity must have finished.
  MOZ_ASSERT(!hasJitCodeToPoison());
  MOZ_ASSERT(!isCollecting());
  MOZ_ASSERT(gcUse() == GCUse::None);
  MOZ_ASSERT(!gcSweepZone());
  MOZ_ASSERT(!isTouchingGrayThings());
}
+
// Overwrite (poison) any discarded JIT code ranges queued on this context,
// then release the bookkeeping. Poisoning helps catch use of freed code.
void JS::GCContext::poisonJitCode() {
  if (hasJitCodeToPoison()) {
    jit::ExecutableAllocator::poisonCode(runtime(), jitPoisonRanges);
    jitPoisonRanges.clearAndFree();
  }
}
+
#ifdef DEBUG
// Debug-only integrity check over every chunk list owned by the collector.
void GCRuntime::verifyAllChunks() {
  AutoLockGC lock(this);
  fullChunks(lock).verifyChunks();
  availableChunks(lock).verifyChunks();
  emptyChunks(lock).verifyChunks();
}
#endif
+
+void GCRuntime::setMinEmptyChunkCount(uint32_t value, const AutoLockGC& lock) {
+ minEmptyChunkCount_ = value;
+ if (minEmptyChunkCount_ > maxEmptyChunkCount_) {
+ maxEmptyChunkCount_ = minEmptyChunkCount_;
+ }
+ MOZ_ASSERT(maxEmptyChunkCount_ >= minEmptyChunkCount_);
+}
+
+void GCRuntime::setMaxEmptyChunkCount(uint32_t value, const AutoLockGC& lock) {
+ maxEmptyChunkCount_ = value;
+ if (minEmptyChunkCount_ > maxEmptyChunkCount_) {
+ minEmptyChunkCount_ = maxEmptyChunkCount_;
+ }
+ MOZ_ASSERT(maxEmptyChunkCount_ >= minEmptyChunkCount_);
+}
+
// True if the empty chunk pool has grown beyond the retained minimum and
// should be trimmed by expireEmptyChunkPool().
inline bool GCRuntime::tooManyEmptyChunks(const AutoLockGC& lock) {
  return emptyChunks(lock).count() > minEmptyChunkCount(lock);
}
+
// Remove chunks from the empty chunk pool until it is no larger than the
// configured minimum, returning the removed chunks so the caller can unmap
// them (typically on a background thread).
ChunkPool GCRuntime::expireEmptyChunkPool(const AutoLockGC& lock) {
  MOZ_ASSERT(emptyChunks(lock).verify());
  MOZ_ASSERT(minEmptyChunkCount(lock) <= maxEmptyChunkCount(lock));

  ChunkPool expired;
  while (tooManyEmptyChunks(lock)) {
    TenuredChunk* chunk = emptyChunks(lock).pop();
    // Update free-arena accounting now; the memory is unmapped later.
    prepareToFreeChunk(chunk->info);
    expired.push(chunk);
  }

  MOZ_ASSERT(expired.verify());
  MOZ_ASSERT(emptyChunks(lock).verify());
  MOZ_ASSERT(emptyChunks(lock).count() <= maxEmptyChunkCount(lock));
  MOZ_ASSERT(emptyChunks(lock).count() <= minEmptyChunkCount(lock));
  return expired;
}
+
// Unmap every chunk in |pool|, leaving the pool empty.
static void FreeChunkPool(ChunkPool& pool) {
  for (ChunkPool::Iter iter(pool); !iter.done();) {
    TenuredChunk* chunk = iter.get();
    // Advance before removing: removal invalidates the current position.
    iter.next();
    pool.remove(chunk);
    MOZ_ASSERT(chunk->unused());
    UnmapPages(static_cast<void*>(chunk), ChunkSize);
  }
  MOZ_ASSERT(pool.count() == 0);
}
+
// Release the memory of all chunks currently in the empty chunk pool.
void GCRuntime::freeEmptyChunks(const AutoLockGC& lock) {
  FreeChunkPool(emptyChunks(lock));
}
+
// Update free-arena accounting for a chunk that is about to be unmapped.
inline void GCRuntime::prepareToFreeChunk(TenuredChunkInfo& info) {
  MOZ_ASSERT(numArenasFreeCommitted >= info.numArenasFreeCommitted);
  numArenasFreeCommitted -= info.numArenasFreeCommitted;
  stats().count(gcstats::COUNT_DESTROY_CHUNK);
#ifdef DEBUG
  /*
   * Let FreeChunkPool detect a missing prepareToFreeChunk call before it
   * frees chunk.
   */
  info.numArenasFreeCommitted = 0;
#endif
}
+
// Return a finalized arena to its chunk's free list and update heap-size
// accounting. Only valid while the current thread is finalizing.
void GCRuntime::releaseArena(Arena* arena, const AutoLockGC& lock) {
  MOZ_ASSERT(arena->allocated());
  MOZ_ASSERT(!arena->onDelayedMarkingList());
  MOZ_ASSERT(TlsGCContext.get()->isFinalizing());

  arena->zone->gcHeapSize.removeGCArena(heapSize);
  arena->release(lock);
  arena->chunk()->releaseArena(this, arena, lock);
}
+
// Construct the collector for |rt|. This only gives every field an explicit
// initial value; fallible initialization happens later in GCRuntime::init().
GCRuntime::GCRuntime(JSRuntime* rt)
    : rt(rt),
      systemZone(nullptr),
      mainThreadContext(rt),
      heapState_(JS::HeapState::Idle),
      stats_(this),
      sweepingTracer(rt),
      fullGCRequested(false),
      helperThreadRatio(TuningDefaults::HelperThreadRatio),
      maxHelperThreads(TuningDefaults::MaxHelperThreads),
      helperThreadCount(1),
      createBudgetCallback(nullptr),
      minEmptyChunkCount_(TuningDefaults::MinEmptyChunkCount),
      maxEmptyChunkCount_(TuningDefaults::MaxEmptyChunkCount),
      rootsHash(256),
      nextCellUniqueId_(LargestTaggedNullCellPointer +
                        1),  // Ensure disjoint from null tagged pointers.
      numArenasFreeCommitted(0),
      verifyPreData(nullptr),
      lastGCStartTime_(TimeStamp::Now()),
      lastGCEndTime_(TimeStamp::Now()),
      incrementalGCEnabled(TuningDefaults::IncrementalGCEnabled),
      perZoneGCEnabled(TuningDefaults::PerZoneGCEnabled),
      numActiveZoneIters(0),
      cleanUpEverything(false),
      grayBitsValid(true),
      majorGCTriggerReason(JS::GCReason::NO_REASON),
      minorGCNumber(0),
      majorGCNumber(0),
      number(0),
      sliceNumber(0),
      isFull(false),
      incrementalState(gc::State::NotActive),
      initialState(gc::State::NotActive),
      useZeal(false),
      lastMarkSlice(false),
      safeToYield(true),
      markOnBackgroundThreadDuringSweeping(false),
      useBackgroundThreads(false),
#ifdef DEBUG
      hadShutdownGC(false),
#endif
      requestSliceAfterBackgroundTask(false),
      lifoBlocksToFree((size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
      lifoBlocksToFreeAfterMinorGC(
          (size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
      sweepGroupIndex(0),
      sweepGroups(nullptr),
      currentSweepGroup(nullptr),
      sweepZone(nullptr),
      abortSweepAfterCurrentGroup(false),
      sweepMarkResult(IncrementalProgress::NotFinished),
#ifdef DEBUG
      testMarkQueue(rt),
#endif
      startedCompacting(false),
      zonesCompacted(0),
#ifdef DEBUG
      relocatedArenasToRelease(nullptr),
#endif
#ifdef JS_GC_ZEAL
      markingValidator(nullptr),
#endif
      defaultTimeBudgetMS_(TuningDefaults::DefaultTimeBudgetMS),
      incrementalAllowed(true),
      compactingEnabled(TuningDefaults::CompactingEnabled),
      parallelMarkingEnabled(TuningDefaults::ParallelMarkingEnabled),
      rootsRemoved(false),
#ifdef JS_GC_ZEAL
      zealModeBits(0),
      zealFrequency(0),
      nextScheduled(0),
      deterministicOnly(false),
      zealSliceBudget(0),
      selectedForMarking(rt),
#endif
      fullCompartmentChecks(false),
      gcCallbackDepth(0),
      alwaysPreserveCode(false),
      lowMemoryState(false),
      lock(mutexid::GCLock),
      delayedMarkingLock(mutexid::GCDelayedMarkingLock),
      allocTask(this, emptyChunks_.ref()),
      unmarkTask(this),
      markTask(this),
      sweepTask(this),
      freeTask(this),
      decommitTask(this),
      nursery_(this),
      storeBuffer_(rt, nursery()),
      lastAllocRateUpdateTime(TimeStamp::Now()) {
}
+
+using CharRange = mozilla::Range<const char>;
+using CharRangeVector = Vector<CharRange, 0, SystemAllocPolicy>;
+
+static bool SplitStringBy(CharRange text, char delimiter,
+ CharRangeVector* result) {
+ auto start = text.begin();
+ for (auto ptr = start; ptr != text.end(); ptr++) {
+ if (*ptr == delimiter) {
+ if (!result->emplaceBack(start, ptr)) {
+ return false;
+ }
+ start = ptr + 1;
+ }
+ }
+
+ return result->emplaceBack(start, text.end());
+}
+
// Parse |text| as a base-10 millisecond count. Returns true only if at least
// one digit was consumed and the number spans the whole input (strtol's
// |end| pointer must land exactly on text.end()).
static bool ParseTimeDuration(CharRange text, TimeDuration* durationOut) {
  const char* str = text.begin().get();
  char* end;
  *durationOut = TimeDuration::FromMilliseconds(strtol(str, &end, 10));
  return str != end && end == text.end().get();
}
+
// Print usage for a GC profiling environment variable, then terminate the
// process (help output is a deliberate early exit, not an error).
static void PrintProfileHelpAndExit(const char* envName, const char* helpText) {
  fprintf(stderr, "%s=N[,(main|all)]\n%s", envName, helpText);
  exit(0);
}
+
// Read a GC profiling environment variable of the form "N[,(main|all)]".
// On return, *enableOut says whether profiling was requested, *workersOut
// whether worker runtimes are included, and *thresholdOut the minimum
// duration (ms) to report. Prints help and exits the process if the value
// is "help" or malformed.
void js::gc::ReadProfileEnv(const char* envName, const char* helpText,
                            bool* enableOut, bool* workersOut,
                            TimeDuration* thresholdOut) {
  *enableOut = false;
  *workersOut = false;
  *thresholdOut = TimeDuration();

  const char* env = getenv(envName);
  if (!env) {
    return;
  }

  if (strcmp(env, "help") == 0) {
    PrintProfileHelpAndExit(envName, helpText);
  }

  CharRangeVector parts;
  auto text = CharRange(env, strlen(env));
  if (!SplitStringBy(text, ',', &parts)) {
    MOZ_CRASH("OOM parsing environment variable");
  }

  // Expect "threshold" or "threshold,threads".
  if (parts.length() == 0 || parts.length() > 2) {
    PrintProfileHelpAndExit(envName, helpText);
  }

  *enableOut = true;

  if (!ParseTimeDuration(parts[0], thresholdOut)) {
    PrintProfileHelpAndExit(envName, helpText);
  }

  if (parts.length() == 2) {
    const char* threads = parts[1].begin().get();
    if (strcmp(threads, "all") == 0) {
      *workersOut = true;
    } else if (strcmp(threads, "main") != 0) {
      PrintProfileHelpAndExit(envName, helpText);
    }
  }
}
+
+bool js::gc::ShouldPrintProfile(JSRuntime* runtime, bool enable,
+ bool profileWorkers, TimeDuration threshold,
+ TimeDuration duration) {
+ return enable && (runtime->isMainRuntime() || profileWorkers) &&
+ duration >= threshold;
+}
+
+#ifdef JS_GC_ZEAL
+
// Report the current zeal configuration (used by test functions).
void GCRuntime::getZealBits(uint32_t* zealBits, uint32_t* frequency,
                            uint32_t* scheduled) {
  *zealBits = zealModeBits;
  *frequency = zealFrequency;
  *scheduled = nextScheduled;
}
+
// Help text printed in response to JS_GC_ZEAL=help, enumerating the
// available zeal modes.
const char gc::ZealModeHelpText[] =
    " Specifies how zealous the garbage collector should be. Some of these "
    "modes can\n"
    " be set simultaneously, by passing multiple level options, e.g. \"2;4\" "
    "will activate\n"
    " both modes 2 and 4. Modes can be specified by name or number.\n"
    " \n"
    " Values:\n"
    " 0: (None) Normal amount of collection (resets all modes)\n"
    " 1: (RootsChange) Collect when roots are added or removed\n"
    " 2: (Alloc) Collect when every N allocations (default: 100)\n"
    " 4: (VerifierPre) Verify pre write barriers between instructions\n"
    " 6: (YieldBeforeRootMarking) Incremental GC in two slices that yields "
    "before root marking\n"
    " 7: (GenerationalGC) Collect the nursery every N nursery allocations\n"
    " 8: (YieldBeforeMarking) Incremental GC in two slices that yields "
    "between\n"
    " the root marking and marking phases\n"
    " 9: (YieldBeforeSweeping) Incremental GC in two slices that yields "
    "between\n"
    " the marking and sweeping phases\n"
    " 10: (IncrementalMultipleSlices) Incremental GC in many slices\n"
    " 11: (IncrementalMarkingValidator) Verify incremental marking\n"
    " 12: (ElementsBarrier) Use the individual element post-write barrier\n"
    " regardless of elements size\n"
    " 13: (CheckHashTablesOnMinorGC) Check internal hashtables on minor GC\n"
    " 14: (Compact) Perform a shrinking collection every N allocations\n"
    " 15: (CheckHeapAfterGC) Walk the heap to check its integrity after "
    "every GC\n"
    " 17: (YieldBeforeSweepingAtoms) Incremental GC in two slices that "
    "yields\n"
    " before sweeping the atoms table\n"
    " 18: (CheckGrayMarking) Check gray marking invariants after every GC\n"
    " 19: (YieldBeforeSweepingCaches) Incremental GC in two slices that "
    "yields\n"
    " before sweeping weak caches\n"
    " 21: (YieldBeforeSweepingObjects) Incremental GC in two slices that "
    "yields\n"
    " before sweeping foreground finalized objects\n"
    " 22: (YieldBeforeSweepingNonObjects) Incremental GC in two slices that "
    "yields\n"
    " before sweeping non-object GC things\n"
    " 23: (YieldBeforeSweepingPropMapTrees) Incremental GC in two slices "
    "that "
    "yields\n"
    " before sweeping shape trees\n"
    " 24: (CheckWeakMapMarking) Check weak map marking invariants after "
    "every GC\n"
    " 25: (YieldWhileGrayMarking) Incremental GC in two slices that yields\n"
    " during gray marking\n";
+
// The set of zeal modes that control incremental slices. These modes are
// mutually exclusive: GCRuntime::setZeal clears every member of this set
// before enabling one of them.
static const mozilla::EnumSet<ZealMode> IncrementalSliceZealModes = {
    ZealMode::YieldBeforeRootMarking,
    ZealMode::YieldBeforeMarking,
    ZealMode::YieldBeforeSweeping,
    ZealMode::IncrementalMultipleSlices,
    ZealMode::YieldBeforeSweepingAtoms,
    ZealMode::YieldBeforeSweepingCaches,
    ZealMode::YieldBeforeSweepingObjects,
    ZealMode::YieldBeforeSweepingNonObjects,
    ZealMode::YieldBeforeSweepingPropMapTrees};
+
// Enable zeal mode |zeal| with trigger frequency |frequency|. Passing zeal 0
// resets all modes and finishes any in-progress incremental GC.
void GCRuntime::setZeal(uint8_t zeal, uint32_t frequency) {
  MOZ_ASSERT(zeal <= unsigned(ZealMode::Limit));

  // Changing zeal modes invalidates any pre-barrier verification in
  // progress, so complete it first.
  if (verifyPreData) {
    VerifyBarriers(rt, PreBarrierVerifier);
  }

  if (zeal == 0) {
    if (hasZealMode(ZealMode::GenerationalGC)) {
      evictNursery(JS::GCReason::DEBUG_GC);
      nursery().leaveZealMode();
    }

    if (isIncrementalGCInProgress()) {
      finishGC(JS::GCReason::DEBUG_GC);
    }
  }

  ZealMode zealMode = ZealMode(zeal);
  if (zealMode == ZealMode::GenerationalGC) {
    evictNursery(JS::GCReason::DEBUG_GC);
    nursery().enterZealMode();
  }

  // Some modes are mutually exclusive. If we're setting one of those, we
  // first reset all of them.
  if (IncrementalSliceZealModes.contains(zealMode)) {
    for (auto mode : IncrementalSliceZealModes) {
      clearZealMode(mode);
    }
  }

  // Modes below Alloc (None, RootsChange) are not driven by an allocation
  // counter, so no trigger is scheduled for them.
  bool schedule = zealMode >= ZealMode::Alloc;
  if (zeal != 0) {
    zealModeBits |= 1 << unsigned(zeal);
  } else {
    zealModeBits = 0;
  }
  zealFrequency = frequency;
  nextScheduled = schedule ? frequency : 0;
}
+
// Disable a single zeal mode, completing any activity that depends on it
// (pre-barrier verification, generational zeal, in-progress incremental GC).
void GCRuntime::unsetZeal(uint8_t zeal) {
  MOZ_ASSERT(zeal <= unsigned(ZealMode::Limit));
  ZealMode zealMode = ZealMode(zeal);

  if (!hasZealMode(zealMode)) {
    return;
  }

  if (verifyPreData) {
    VerifyBarriers(rt, PreBarrierVerifier);
  }

  if (zealMode == ZealMode::GenerationalGC) {
    evictNursery(JS::GCReason::DEBUG_GC);
    nursery().leaveZealMode();
  }

  clearZealMode(zealMode);

  if (zealModeBits == 0) {
    // That was the last active mode: wind down scheduling entirely.
    if (isIncrementalGCInProgress()) {
      finishGC(JS::GCReason::DEBUG_GC);
    }

    zealFrequency = 0;
    nextScheduled = 0;
  }
}
+
+void GCRuntime::setNextScheduled(uint32_t count) { nextScheduled = count; }
+
+static bool ParseZealModeName(CharRange text, uint32_t* modeOut) {
+ struct ModeInfo {
+ const char* name;
+ size_t length;
+ uint32_t value;
+ };
+
+ static const ModeInfo zealModes[] = {{"None", 0},
+# define ZEAL_MODE(name, value) {#name, strlen(#name), value},
+ JS_FOR_EACH_ZEAL_MODE(ZEAL_MODE)
+# undef ZEAL_MODE
+ };
+
+ for (auto mode : zealModes) {
+ if (text.length() == mode.length &&
+ memcmp(text.begin().get(), mode.name, mode.length) == 0) {
+ *modeOut = mode.value;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool ParseZealModeNumericParam(CharRange text, uint32_t* paramOut) {
+ if (text.length() == 0) {
+ return false;
+ }
+
+ for (auto c : text) {
+ if (!mozilla::IsAsciiDigit(c)) {
+ return false;
+ }
+ }
+
+ *paramOut = atoi(text.begin().get());
+ return true;
+}
+
+static bool PrintZealHelpAndFail() {
+ fprintf(stderr, "Format: JS_GC_ZEAL=level(;level)*[,N]\n");
+ fputs(ZealModeHelpText, stderr);
+ return false;
+}
+
bool GCRuntime::parseAndSetZeal(const char* str) {
  // Set the zeal mode from a string consisting of one or more mode specifiers
  // separated by ';', optionally followed by a ',' and the trigger frequency.
  // The mode specifiers can be a mode name or its number.

  auto text = CharRange(str, strlen(str));

  CharRangeVector parts;
  if (!SplitStringBy(text, ',', &parts)) {
    return false;  // OOM.
  }

  if (parts.length() == 0 || parts.length() > 2) {
    return PrintZealHelpAndFail();
  }

  uint32_t frequency = JS_DEFAULT_ZEAL_FREQ;
  if (parts.length() == 2 && !ParseZealModeNumericParam(parts[1], &frequency)) {
    return PrintZealHelpAndFail();
  }

  CharRangeVector modes;
  if (!SplitStringBy(parts[0], ';', &modes)) {
    return false;  // OOM.
  }

  for (const auto& descr : modes) {
    uint32_t mode;
    // Accept either a mode name or a numeric mode within range.
    if (!ParseZealModeName(descr, &mode) &&
        !(ParseZealModeNumericParam(descr, &mode) &&
          mode <= unsigned(ZealMode::Limit))) {
      return PrintZealHelpAndFail();
    }

    setZeal(mode, frequency);
  }

  return true;
}
+
// Return the name of an AllocKind as a static string, for debug output.
const char* js::gc::AllocKindName(AllocKind kind) {
  static const char* const names[] = {
# define EXPAND_THING_NAME(allocKind, _1, _2, _3, _4, _5, _6) #allocKind,
      FOR_EACH_ALLOCKIND(EXPAND_THING_NAME)
# undef EXPAND_THING_NAME
  };
  static_assert(std::size(names) == AllocKindCount,
                "names array should have an entry for every AllocKind");

  size_t i = size_t(kind);
  MOZ_ASSERT(i < std::size(names));
  return names[i];
}
+
// Print per-AllocKind arena layout information (thing size, things per
// arena, wasted padding) to stderr, for debugging.
void js::gc::DumpArenaInfo() {
  fprintf(stderr, "Arena header size: %zu\n\n", ArenaHeaderSize);

  fprintf(stderr, "GC thing kinds:\n");
  fprintf(stderr, "%25s %8s %8s %8s\n",
          "AllocKind:", "Size:", "Count:", "Padding:");
  for (auto kind : AllAllocKinds()) {
    fprintf(stderr, "%25s %8zu %8zu %8zu\n", AllocKindName(kind),
            Arena::thingSize(kind), Arena::thingsPerArena(kind),
            Arena::firstThingOffset(kind) - ArenaHeaderSize);
  }
}
+
+#endif // JS_GC_ZEAL
+
// One-time fallible initialization of the collector, called during runtime
// creation. |maxbytes| seeds the JSGC_MAX_BYTES parameter. Returns false on
// failure, in which case the runtime is unusable.
bool GCRuntime::init(uint32_t maxbytes) {
  MOZ_ASSERT(!wasInitialized());

  MOZ_ASSERT(SystemPageSize());
  Arena::checkLookupTables();

  if (!TlsGCContext.init()) {
    return false;
  }
  TlsGCContext.set(&mainThreadContext.ref());

  updateHelperThreadCount();

#ifdef JS_GC_ZEAL
  // Optional environment override for the mark stack size, used by tests.
  const char* size = getenv("JSGC_MARK_STACK_LIMIT");
  if (size) {
    maybeMarkStackLimit = atoi(size);
  }
#endif

  if (!updateMarkersVector()) {
    return false;
  }

  {
    AutoLockGCBgAlloc lock(this);

    MOZ_ALWAYS_TRUE(tunables.setParameter(JSGC_MAX_BYTES, maxbytes));

    if (!nursery().init(lock)) {
      return false;
    }

    // Optional environment override for the pretenuring threshold; invalid
    // values are reported on stderr and otherwise ignored.
    const char* pretenureThresholdStr = getenv("JSGC_PRETENURE_THRESHOLD");
    if (pretenureThresholdStr && pretenureThresholdStr[0]) {
      char* last;
      long pretenureThreshold = strtol(pretenureThresholdStr, &last, 10);
      if (last[0] || !tunables.setParameter(JSGC_PRETENURE_THRESHOLD,
                                            pretenureThreshold)) {
        fprintf(stderr, "Invalid value for JSGC_PRETENURE_THRESHOLD: %s\n",
                pretenureThresholdStr);
      }
    }
  }

#ifdef JS_GC_ZEAL
  const char* zealSpec = getenv("JS_GC_ZEAL");
  if (zealSpec && zealSpec[0] && !parseAndSetZeal(zealSpec)) {
    return false;
  }
#endif

  for (auto& marker : markers) {
    if (!marker->init()) {
      return false;
    }
  }

  if (!initSweepActions()) {
    return false;
  }

  UniquePtr<Zone> zone = MakeUnique<Zone>(rt, Zone::AtomsZone);
  if (!zone || !zone->init()) {
    return false;
  }

  // The atoms zone is stored as the first element of the zones vector.
  MOZ_ASSERT(zone->isAtomsZone());
  MOZ_ASSERT(zones().empty());
  MOZ_ALWAYS_TRUE(zones().reserve(1));  // ZonesVector has inline capacity 4.
  zones().infallibleAppend(zone.release());

  gcprobes::Init(this);

  initialized = true;
  return true;
}
+
// Tear down the GC at runtime shutdown: stop all background work, then
// forcefully delete every remaining realm, compartment and zone, and free
// the chunk pools. Must run after restoreSharedAtomsZone() (asserted via
// !sharedAtomsZone_).
void GCRuntime::finish() {
  MOZ_ASSERT(inPageLoadCount == 0);
  MOZ_ASSERT(!sharedAtomsZone_);

  // Wait for nursery background free to end and disable it to release memory.
  if (nursery().isEnabled()) {
    nursery().disable();
  }

  // Wait until the background finalization and allocation stops and the
  // helper thread shuts down before we forcefully release any remaining GC
  // memory.
  sweepTask.join();
  markTask.join();
  freeTask.join();
  allocTask.cancelAndWait();
  decommitTask.cancelAndWait();

#ifdef JS_GC_ZEAL
  // Free memory associated with GC verification.
  finishVerifier();
#endif

  // Delete all remaining zones. Realms are deleted before their compartment,
  // compartments before their zone, mirroring the ownership hierarchy.
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    AutoSetThreadIsSweeping threadIsSweeping(rt->gcContext(), zone);
    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
      for (RealmsInCompartmentIter realm(comp); !realm.done(); realm.next()) {
        js_delete(realm.get());
      }
      comp->realms().clear();
      js_delete(comp.get());
    }
    zone->compartments().clear();
    js_delete(zone.get());
  }

  zones().clear();

  FreeChunkPool(fullChunks_.ref());
  FreeChunkPool(availableChunks_.ref());
  FreeChunkPool(emptyChunks_.ref());

  TlsGCContext.set(nullptr);

  gcprobes::Finish(this);

  nursery().printTotalProfileTimes();
  stats().printTotalProfileTimes();
}
+
// Convert the current atoms zone into the shared atoms zone and replace it
// with a fresh, empty atoms zone. Returns false on OOM creating the new zone
// (in which case sharedAtomsZone_ has already been detached from the zones
// list).
bool GCRuntime::freezeSharedAtomsZone() {
  // This is called just after permanent atoms and well-known symbols have been
  // created. At this point all existing atoms and symbols are permanent.
  //
  // This method makes the current atoms zone into a shared atoms zone and
  // removes it from the zones list. Everything in it is marked black. A new
  // empty atoms zone is created, where all atoms local to this runtime will
  // live.
  //
  // The shared atoms zone will not be collected until shutdown when it is
  // returned to the zone list by restoreSharedAtomsZone().

  MOZ_ASSERT(rt->isMainRuntime());
  MOZ_ASSERT(!sharedAtomsZone_);
  MOZ_ASSERT(zones().length() == 1);
  MOZ_ASSERT(atomsZone());
  MOZ_ASSERT(!atomsZone()->wasGCStarted());
  MOZ_ASSERT(!atomsZone()->needsIncrementalBarrier());

  AutoAssertEmptyNursery nurseryIsEmpty(rt->mainContextFromOwnThread());

  atomsZone()->arenas.clearFreeLists();

  // Mark every cell black; the asserts check that only permanent, shareable
  // strings and symbols exist at this point.
  for (auto kind : AllAllocKinds()) {
    for (auto thing =
             atomsZone()->cellIterUnsafe<TenuredCell>(kind, nurseryIsEmpty);
         !thing.done(); thing.next()) {
      TenuredCell* cell = thing.getCell();
      MOZ_ASSERT((cell->is<JSString>() &&
                  cell->as<JSString>()->isPermanentAndMayBeShared()) ||
                 (cell->is<JS::Symbol>() &&
                  cell->as<JS::Symbol>()->isPermanentAndMayBeShared()));
      cell->markBlack();
    }
  }

  sharedAtomsZone_ = atomsZone();
  zones().clear();

  UniquePtr<Zone> zone = MakeUnique<Zone>(rt, Zone::AtomsZone);
  if (!zone || !zone->init()) {
    return false;
  }

  MOZ_ASSERT(zone->isAtomsZone());
  // Safe without reserve(): zones() was just cleared and its inline capacity
  // (see init()) is at least one.
  zones().infallibleAppend(zone.release());

  return true;
}
+
// Move the shared atoms zone back onto the zones list at shutdown so finish()
// can collect it. No-op if freezeSharedAtomsZone() was never called.
void GCRuntime::restoreSharedAtomsZone() {
  // Return the shared atoms zone to the zone list. This allows the contents of
  // the shared atoms zone to be collected when the parent runtime is shut down.

  if (!sharedAtomsZone_) {
    return;
  }

  MOZ_ASSERT(rt->isMainRuntime());
  MOZ_ASSERT(rt->childRuntimeCount == 0);

  AutoEnterOOMUnsafeRegion oomUnsafe;
  if (!zones().append(sharedAtomsZone_)) {
    oomUnsafe.crash("restoreSharedAtomsZone");
  }

  sharedAtomsZone_ = nullptr;
}
+
// Public entry point for setting a GC parameter. Finishes any in-progress GC
// and waits for background sweeping first so the parameter change cannot race
// with collection, then dispatches to the locked overload.
bool GCRuntime::setParameter(JSContext* cx, JSGCParamKey key, uint32_t value) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));

  AutoStopVerifyingBarriers pauseVerification(rt, false);
  FinishGC(cx);
  waitBackgroundSweepEnd();

  AutoLockGC lock(this);
  return setParameter(key, value, lock);
}

// Keys that configure helper threads rather than heap tuning; these take a
// separate path (setThreadParameter / resetThreadParameter).
static bool IsGCThreadParameter(JSGCParamKey key) {
  return key == JSGC_HELPER_THREAD_RATIO || key == JSGC_MAX_HELPER_THREADS ||
         key == JSGC_MARKING_THREAD_COUNT;
}
+
// Apply a GC parameter under the GC lock. Keys with dedicated state are
// handled explicitly; thread-related keys go to setThreadParameter; anything
// else is forwarded to the tunables, after which start thresholds are
// recomputed. Returns false if the value is rejected.
bool GCRuntime::setParameter(JSGCParamKey key, uint32_t value,
                             AutoLockGC& lock) {
  switch (key) {
    case JSGC_SLICE_TIME_BUDGET_MS:
      defaultTimeBudgetMS_ = value;
      break;
    case JSGC_INCREMENTAL_GC_ENABLED:
      setIncrementalGCEnabled(value != 0);
      break;
    case JSGC_PER_ZONE_GC_ENABLED:
      perZoneGCEnabled = value != 0;
      break;
    case JSGC_COMPACTING_ENABLED:
      compactingEnabled = value != 0;
      break;
    case JSGC_PARALLEL_MARKING_ENABLED:
      // Not supported on workers.
      parallelMarkingEnabled = rt->isMainRuntime() && value != 0;
      // NOTE(review): updateMarkersVector() is fallible but its result is
      // ignored here (and in resetParameter) — presumably failure just leaves
      // fewer markers, which is safe; confirm.
      updateMarkersVector();
      break;
    case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
      for (auto& marker : markers) {
        marker->incrementalWeakMapMarkingEnabled = value != 0;
      }
      break;
    case JSGC_MIN_EMPTY_CHUNK_COUNT:
      setMinEmptyChunkCount(value, lock);
      break;
    case JSGC_MAX_EMPTY_CHUNK_COUNT:
      setMaxEmptyChunkCount(value, lock);
      break;
    default:
      if (IsGCThreadParameter(key)) {
        return setThreadParameter(key, value, lock);
      }

      if (!tunables.setParameter(key, value)) {
        return false;
      }
      updateAllGCStartThresholds();
  }

  return true;
}
+
// Set a helper-thread-related parameter. These are process wide, so worker
// runtimes (those with a parent) reject the change. Zero is rejected for
// ratio/max since both feed a clamp that needs a positive upper bound.
bool GCRuntime::setThreadParameter(JSGCParamKey key, uint32_t value,
                                   AutoLockGC& lock) {
  if (rt->parentRuntime) {
    // Don't allow these to be set for worker runtimes.
    return false;
  }

  switch (key) {
    case JSGC_HELPER_THREAD_RATIO:
      if (value == 0) {
        return false;
      }
      // Value is a percentage of CPU cores.
      helperThreadRatio = double(value) / 100.0;
      break;
    case JSGC_MAX_HELPER_THREADS:
      if (value == 0) {
        return false;
      }
      maxHelperThreads = value;
      break;
    case JSGC_MARKING_THREAD_COUNT:
      markingThreadCount = std::min(size_t(value), MaxParallelWorkers);
      break;
    default:
      MOZ_CRASH("Unexpected parameter key");
  }

  // Recompute derived thread counts and the markers vector.
  updateHelperThreadCount();
  updateMarkersVector();

  return true;
}
+
// Public entry point for restoring a GC parameter to its default. Mirrors
// setParameter: finish any GC in progress, then dispatch under the lock.
void GCRuntime::resetParameter(JSContext* cx, JSGCParamKey key) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));

  AutoStopVerifyingBarriers pauseVerification(rt, false);
  FinishGC(cx);
  waitBackgroundSweepEnd();

  AutoLockGC lock(this);
  resetParameter(key, lock);
}

// Restore a parameter to its TuningDefaults value under the GC lock. Keeps
// the same key routing as setParameter(key, value, lock) above.
void GCRuntime::resetParameter(JSGCParamKey key, AutoLockGC& lock) {
  switch (key) {
    case JSGC_SLICE_TIME_BUDGET_MS:
      defaultTimeBudgetMS_ = TuningDefaults::DefaultTimeBudgetMS;
      break;
    case JSGC_INCREMENTAL_GC_ENABLED:
      setIncrementalGCEnabled(TuningDefaults::IncrementalGCEnabled);
      break;
    case JSGC_PER_ZONE_GC_ENABLED:
      perZoneGCEnabled = TuningDefaults::PerZoneGCEnabled;
      break;
    case JSGC_COMPACTING_ENABLED:
      compactingEnabled = TuningDefaults::CompactingEnabled;
      break;
    case JSGC_PARALLEL_MARKING_ENABLED:
      parallelMarkingEnabled = TuningDefaults::ParallelMarkingEnabled;
      updateMarkersVector();
      break;
    case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
      for (auto& marker : markers) {
        marker->incrementalWeakMapMarkingEnabled =
            TuningDefaults::IncrementalWeakMapMarkingEnabled;
      }
      break;
    case JSGC_MIN_EMPTY_CHUNK_COUNT:
      setMinEmptyChunkCount(TuningDefaults::MinEmptyChunkCount, lock);
      break;
    case JSGC_MAX_EMPTY_CHUNK_COUNT:
      setMaxEmptyChunkCount(TuningDefaults::MaxEmptyChunkCount, lock);
      break;
    default:
      if (IsGCThreadParameter(key)) {
        resetThreadParameter(key, lock);
        return;
      }

      tunables.resetParameter(key);
      updateAllGCStartThresholds();
  }
}
+
// Restore a helper-thread parameter to its default. Silently ignored on
// worker runtimes (matching setThreadParameter, which rejects them).
void GCRuntime::resetThreadParameter(JSGCParamKey key, AutoLockGC& lock) {
  if (rt->parentRuntime) {
    return;
  }

  switch (key) {
    case JSGC_HELPER_THREAD_RATIO:
      helperThreadRatio = TuningDefaults::HelperThreadRatio;
      break;
    case JSGC_MAX_HELPER_THREADS:
      maxHelperThreads = TuningDefaults::MaxHelperThreads;
      break;
    case JSGC_MARKING_THREAD_COUNT:
      // Zero means "no explicit count"; markingWorkerCount() then falls back
      // to its built-in default.
      markingThreadCount = 0;
      break;
    default:
      MOZ_CRASH("Unexpected parameter key");
  }

  updateHelperThreadCount();
  updateMarkersVector();
}
+
// Public entry point for reading a GC parameter; takes the GC lock and
// dispatches to the locked overload.
uint32_t GCRuntime::getParameter(JSGCParamKey key) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
  AutoLockGC lock(this);
  return getParameter(key, lock);
}

// Read a GC parameter under the GC lock. Keys with dedicated state are read
// directly; everything else comes from the tunables. Values wider than
// uint32_t are truncated (or release-asserted, for the time budget).
uint32_t GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC& lock) {
  switch (key) {
    case JSGC_BYTES:
      return uint32_t(heapSize.bytes());
    case JSGC_NURSERY_BYTES:
      return nursery().capacity();
    case JSGC_NUMBER:
      return uint32_t(number);
    case JSGC_MAJOR_GC_NUMBER:
      return uint32_t(majorGCNumber);
    case JSGC_MINOR_GC_NUMBER:
      return uint32_t(minorGCNumber);
    case JSGC_INCREMENTAL_GC_ENABLED:
      return incrementalGCEnabled;
    case JSGC_PER_ZONE_GC_ENABLED:
      return perZoneGCEnabled;
    case JSGC_UNUSED_CHUNKS:
      return uint32_t(emptyChunks(lock).count());
    case JSGC_TOTAL_CHUNKS:
      return uint32_t(fullChunks(lock).count() + availableChunks(lock).count() +
                      emptyChunks(lock).count());
    case JSGC_SLICE_TIME_BUDGET_MS:
      MOZ_RELEASE_ASSERT(defaultTimeBudgetMS_ >= 0);
      MOZ_RELEASE_ASSERT(defaultTimeBudgetMS_ <= UINT32_MAX);
      return uint32_t(defaultTimeBudgetMS_);
    case JSGC_MIN_EMPTY_CHUNK_COUNT:
      return minEmptyChunkCount(lock);
    case JSGC_MAX_EMPTY_CHUNK_COUNT:
      return maxEmptyChunkCount(lock);
    case JSGC_COMPACTING_ENABLED:
      return compactingEnabled;
    case JSGC_PARALLEL_MARKING_ENABLED:
      return parallelMarkingEnabled;
    case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
      // All markers share this flag (see setParameter), so reading the first
      // marker is representative.
      return marker().incrementalWeakMapMarkingEnabled;
    case JSGC_CHUNK_BYTES:
      return ChunkSize;
    case JSGC_HELPER_THREAD_RATIO:
      MOZ_ASSERT(helperThreadRatio > 0.0);
      // Reported as a percentage, inverse of setThreadParameter's division.
      return uint32_t(helperThreadRatio * 100.0);
    case JSGC_MAX_HELPER_THREADS:
      MOZ_ASSERT(maxHelperThreads <= UINT32_MAX);
      return maxHelperThreads;
    case JSGC_HELPER_THREAD_COUNT:
      return helperThreadCount;
    case JSGC_MARKING_THREAD_COUNT:
      return markingThreadCount;
    case JSGC_SYSTEM_PAGE_SIZE_KB:
      return SystemPageSize() / 1024;
    default:
      return tunables.getParameter(key);
  }
}
+
#ifdef JS_GC_ZEAL
// Testing hook: cap the mark stack size on every marker. The GC lock is
// temporarily dropped while resizing since setMaxCapacity may allocate.
void GCRuntime::setMarkStackLimit(size_t limit, AutoLockGC& lock) {
  MOZ_ASSERT(!JS::RuntimeHeapIsBusy());

  // Remembered so markers created later (updateMarkersVector) get the same
  // limit.
  maybeMarkStackLimit = limit;

  AutoUnlockGC unlock(lock);
  AutoStopVerifyingBarriers pauseVerification(rt, false);
  for (auto& marker : markers) {
    marker->setMaxCapacity(limit);
  }
}
#endif

// Trivial setter; kept as a function so callers share one mutation point.
void GCRuntime::setIncrementalGCEnabled(bool enabled) {
  incrementalGCEnabled = enabled;
}
+
+void GCRuntime::updateHelperThreadCount() {
+ if (!CanUseExtraThreads()) {
+ // startTask will run the work on the main thread if the count is 1.
+ MOZ_ASSERT(helperThreadCount == 1);
+ return;
+ }
+
+ // Number of extra threads required during parallel marking to ensure we can
+ // start the necessary marking tasks. Background free and background
+ // allocation may already be running and we want to avoid these tasks blocking
+ // marking. In real configurations there will be enough threads that this
+ // won't affect anything.
+ static constexpr size_t SpareThreadsDuringParallelMarking = 2;
+
+ // The count of helper threads used for GC tasks is process wide. Don't set it
+ // for worker JS runtimes.
+ if (rt->parentRuntime) {
+ helperThreadCount = rt->parentRuntime->gc.helperThreadCount;
+ return;
+ }
+
+ // Calculate the target thread count for GC parallel tasks.
+ double cpuCount = GetHelperThreadCPUCount();
+ helperThreadCount = std::clamp(size_t(cpuCount * helperThreadRatio.ref()),
+ size_t(1), maxHelperThreads.ref());
+
+ // Calculate the overall target thread count taking into account the separate
+ // parameter for parallel marking threads. Add spare threads to avoid blocking
+ // parallel marking when there is other GC work happening.
+ size_t targetCount =
+ std::max(helperThreadCount.ref(),
+ markingThreadCount.ref() + SpareThreadsDuringParallelMarking);
+
+ // Attempt to create extra threads if possible. This is not supported when
+ // using an external thread pool.
+ AutoLockHelperThreadState lock;
+ (void)HelperThreadState().ensureThreadCount(targetCount, lock);
+
+ // Limit all thread counts based on the number of threads available, which may
+ // be fewer than requested.
+ size_t availableThreadCount = GetHelperThreadCount();
+ MOZ_ASSERT(availableThreadCount != 0);
+ targetCount = std::min(targetCount, availableThreadCount);
+ helperThreadCount = std::min(helperThreadCount.ref(), availableThreadCount);
+ markingThreadCount =
+ std::min(markingThreadCount.ref(),
+ availableThreadCount - SpareThreadsDuringParallelMarking);
+
+ // Update the maximum number of threads that will be used for GC work.
+ HelperThreadState().setGCParallelThreadCount(targetCount, lock);
+}
+
+size_t GCRuntime::markingWorkerCount() const {
+ if (!CanUseExtraThreads() || !parallelMarkingEnabled) {
+ return 1;
+ }
+
+ if (markingThreadCount) {
+ return markingThreadCount;
+ }
+
+ // Limit parallel marking to use at most two threads initially.
+ return 2;
+}
+
#ifdef DEBUG
// Debug-only check that no marker has pending work and there is no delayed
// marking; used before reconfiguring the markers vector.
void GCRuntime::assertNoMarkingWork() const {
  for (auto& marker : markers) {
    MOZ_ASSERT(marker->isDrained());
  }
  MOZ_ASSERT(!hasDelayedMarking());
}
#endif

// Read the process-wide GC parallel task limit under the helper thread lock.
static size_t GetGCParallelThreadCount() {
  AutoLockHelperThreadState lock;
  return HelperThreadState().getGCParallelThreadCount(lock);
}
+
// Grow or shrink the markers vector to match markingWorkerCount(), capped by
// the number of GC parallel tasks that can actually run. Must only be called
// when no marking is in progress (asserted). Returns false on OOM; in that
// case previously-created markers are kept.
bool GCRuntime::updateMarkersVector() {
  MOZ_ASSERT(helperThreadCount >= 1,
             "There must always be at least one mark task");
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
  assertNoMarkingWork();

  // Limit worker count to number of GC parallel tasks that can run
  // concurrently, otherwise one thread can deadlock waiting on another.
  size_t targetCount =
      std::min(markingWorkerCount(), GetGCParallelThreadCount());

  // Shrinking: drop surplus markers and return.
  if (markers.length() > targetCount) {
    return markers.resize(targetCount);
  }

  // Growing: create, configure and initialize each new marker before
  // appending so the vector only ever holds fully-initialized markers.
  while (markers.length() < targetCount) {
    auto marker = MakeUnique<GCMarker>(rt);
    if (!marker) {
      return false;
    }

#ifdef JS_GC_ZEAL
    // Apply any mark stack limit set via setMarkStackLimit / env var.
    if (maybeMarkStackLimit) {
      marker->setMaxCapacity(maybeMarkStackLimit);
    }
#endif

    if (!marker->init()) {
      return false;
    }

    if (!markers.emplaceBack(std::move(marker))) {
      return false;
    }
  }

  return true;
}
+
// Register an extra black-roots tracer; returns false on OOM. Multiple
// tracers may be registered with distinct (op, data) pairs.
bool GCRuntime::addBlackRootsTracer(JSTraceDataOp traceOp, void* data) {
  AssertHeapIsIdle();
  return !!blackRootTracers.ref().append(
      Callback<JSTraceDataOp>(traceOp, data));
}

// Remove the first registered tracer matching (traceOp, data); no-op if not
// found.
void GCRuntime::removeBlackRootsTracer(JSTraceDataOp traceOp, void* data) {
  // Can be called from finalizers
  for (size_t i = 0; i < blackRootTracers.ref().length(); i++) {
    Callback<JSTraceDataOp>* e = &blackRootTracers.ref()[i];
    if (e->op == traceOp && e->data == data) {
      blackRootTracers.ref().erase(e);
      break;
    }
  }
}

// There is a single gray-roots tracer; setting it replaces any previous one.
void GCRuntime::setGrayRootsTracer(JSGrayRootsTracer traceOp, void* data) {
  AssertHeapIsIdle();
  grayRootTracer.ref() = {traceOp, data};
}

// Shutdown-only: drop all embedder root tracers.
void GCRuntime::clearBlackAndGrayRootTracers() {
  MOZ_ASSERT(rt->isBeingDestroyed());
  blackRootTracers.ref().clear();
  setGrayRootsTracer(nullptr, nullptr);
}

void GCRuntime::setGCCallback(JSGCCallback callback, void* data) {
  gcCallback.ref() = {callback, data};
}

// Invoke the embedder GC callback; a callback must have been set.
void GCRuntime::callGCCallback(JSGCStatus status, JS::GCReason reason) const {
  const auto& callback = gcCallback.ref();
  MOZ_ASSERT(callback.op);
  callback.op(rt->mainContextFromOwnThread(), status, reason, callback.data);
}

void GCRuntime::setObjectsTenuredCallback(JSObjectsTenuredCallback callback,
                                          void* data) {
  tenuredCallback.ref() = {callback, data};
}

// Invoke the objects-tenured callback if one is set. GC analysis is
// suppressed since this runs during collection.
void GCRuntime::callObjectsTenuredCallback() {
  JS::AutoSuppressGCAnalysis nogc;
  const auto& callback = tenuredCallback.ref();
  if (callback.op) {
    callback.op(rt->mainContextFromOwnThread(), callback.data);
  }
}
+
// Register a finalize callback; returns false on OOM.
bool GCRuntime::addFinalizeCallback(JSFinalizeCallback callback, void* data) {
  return finalizeCallbacks.ref().append(
      Callback<JSFinalizeCallback>(callback, data));
}

// Remove the first entry in |vector| whose function pointer matches
// |callback| (the associated data pointer is not compared). Shared by the
// various remove*Callback methods below.
template <typename F>
static void EraseCallback(CallbackVector<F>& vector, F callback) {
  for (Callback<F>* p = vector.begin(); p != vector.end(); p++) {
    if (p->op == callback) {
      vector.erase(p);
      return;
    }
  }
}

void GCRuntime::removeFinalizeCallback(JSFinalizeCallback callback) {
  EraseCallback(finalizeCallbacks.ref(), callback);
}

// Invoke every registered finalize callback with the given status.
void GCRuntime::callFinalizeCallbacks(JS::GCContext* gcx,
                                      JSFinalizeStatus status) const {
  for (auto& p : finalizeCallbacks.ref()) {
    p.op(gcx, status, p.data);
  }
}
+
// Single host callback used to schedule FinalizationRegistry cleanup jobs;
// setting it replaces any previous one.
void GCRuntime::setHostCleanupFinalizationRegistryCallback(
    JSHostCleanupFinalizationRegistryCallback callback, void* data) {
  hostCleanupFinalizationRegistryCallback.ref() = {callback, data};
}

// Invoke the host cleanup callback if set; runs during GC so analysis is
// suppressed.
void GCRuntime::callHostCleanupFinalizationRegistryCallback(
    JSFunction* doCleanup, GlobalObject* incumbentGlobal) {
  JS::AutoSuppressGCAnalysis nogc;
  const auto& callback = hostCleanupFinalizationRegistryCallback.ref();
  if (callback.op) {
    callback.op(doCleanup, incumbentGlobal, callback.data);
  }
}

// Weak-pointer zone callbacks: registered/removed/invoked as a set, using
// the shared EraseCallback helper for removal.
bool GCRuntime::addWeakPointerZonesCallback(JSWeakPointerZonesCallback callback,
                                            void* data) {
  return updateWeakPointerZonesCallbacks.ref().append(
      Callback<JSWeakPointerZonesCallback>(callback, data));
}

void GCRuntime::removeWeakPointerZonesCallback(
    JSWeakPointerZonesCallback callback) {
  EraseCallback(updateWeakPointerZonesCallbacks.ref(), callback);
}

void GCRuntime::callWeakPointerZonesCallbacks(JSTracer* trc) const {
  for (auto const& p : updateWeakPointerZonesCallbacks.ref()) {
    p.op(trc, p.data);
  }
}

// Per-compartment variant of the weak-pointer callbacks above.
bool GCRuntime::addWeakPointerCompartmentCallback(
    JSWeakPointerCompartmentCallback callback, void* data) {
  return updateWeakPointerCompartmentCallbacks.ref().append(
      Callback<JSWeakPointerCompartmentCallback>(callback, data));
}

void GCRuntime::removeWeakPointerCompartmentCallback(
    JSWeakPointerCompartmentCallback callback) {
  EraseCallback(updateWeakPointerCompartmentCallbacks.ref(), callback);
}

void GCRuntime::callWeakPointerCompartmentCallbacks(
    JSTracer* trc, JS::Compartment* comp) const {
  for (auto const& p : updateWeakPointerCompartmentCallbacks.ref()) {
    p.op(trc, comp, p.data);
  }
}

// The slice and nursery-collection callbacks are owned by the stats
// collector; these setters return the previous callback.
JS::GCSliceCallback GCRuntime::setSliceCallback(JS::GCSliceCallback callback) {
  return stats().setSliceCallback(callback);
}

JS::GCNurseryCollectionCallback GCRuntime::setNurseryCollectionCallback(
    JS::GCNurseryCollectionCallback callback) {
  return stats().setNurseryCollectionCallback(callback);
}

// Replace the cycle-collection callback, returning the previous one.
JS::DoCycleCollectionCallback GCRuntime::setDoCycleCollectionCallback(
    JS::DoCycleCollectionCallback callback) {
  const auto prior = gcDoCycleCollectionCallback.ref();
  gcDoCycleCollectionCallback.ref() = {callback, nullptr};
  return prior.op;
}

void GCRuntime::callDoCycleCollectionCallback(JSContext* cx) {
  const auto& callback = gcDoCycleCollectionCallback.ref();
  if (callback.op) {
    callback.op(cx);
  }
}
+
// Register |vp| as a persistent root named |name|. Returns false on OOM.
bool GCRuntime::addRoot(Value* vp, const char* name) {
  /*
   * Sometimes Firefox will hold weak references to objects and then convert
   * them to strong references by calling AddRoot (e.g., via PreserveWrapper,
   * or ModifyBusyCount in workers). We need a read barrier to cover these
   * cases.
   */
  MOZ_ASSERT(vp);
  Value value = *vp;
  if (value.isGCThing()) {
    ValuePreWriteBarrier(value);
  }

  return rootsHash.ref().put(vp, name);
}

// Unregister a root added with addRoot and notify observers (e.g. zeal
// RootsChange handling) that the root set changed.
void GCRuntime::removeRoot(Value* vp) {
  rootsHash.ref().remove(vp);
  notifyRootsRemoved();
}
+
+/* Compacting GC */
+
+bool js::gc::IsCurrentlyAnimating(const TimeStamp& lastAnimationTime,
+ const TimeStamp& currentTime) {
+ // Assume that we're currently animating if js::NotifyAnimationActivity has
+ // been called in the last second.
+ static const auto oneSecond = TimeDuration::FromSeconds(1);
+ return !lastAnimationTime.IsNull() &&
+ currentTime < (lastAnimationTime + oneSecond);
+}
+
+static bool DiscardedCodeRecently(Zone* zone, const TimeStamp& currentTime) {
+ static const auto thirtySeconds = TimeDuration::FromSeconds(30);
+ return !zone->lastDiscardedCodeTime().IsNull() &&
+ currentTime < (zone->lastDiscardedCodeTime() + thirtySeconds);
+}
+
// Decide whether this collection should include a compacting phase.
bool GCRuntime::shouldCompact() {
  // Compact on shrinking GC if enabled. Skip compacting in incremental GCs
  // if we are currently animating, unless the user is inactive or we're
  // responding to memory pressure.

  if (!isShrinkingGC() || !isCompactingGCEnabled()) {
    return false;
  }

  // User-inactive and memory-pressure GCs compact even while animating:
  // jank is acceptable (or memory is more important) in those cases.
  if (initialReason == JS::GCReason::USER_INACTIVE ||
      initialReason == JS::GCReason::MEM_PRESSURE) {
    return true;
  }

  return !isIncremental ||
         !IsCurrentlyAnimating(rt->lastAnimationTime, TimeStamp::Now());
}

// Compacting is enabled by parameter and not temporarily disabled via
// AutoDisableCompactingGC (tracked by compactingDisabledCount).
bool GCRuntime::isCompactingGCEnabled() const {
  return compactingEnabled &&
         rt->mainContextFromOwnThread()->compactingDisabledCount == 0;
}
+
// Public API: install the embedder callback used to create slice budgets.
JS_PUBLIC_API void JS::SetCreateGCSliceBudgetCallback(
    JSContext* cx, JS::CreateSliceBudgetCallback cb) {
  cx->runtime()->gc.createBudgetCallback = cb;
}

// Convert the relative time budget into an absolute deadline from now.
void TimeBudget::setDeadlineFromNow() { deadline = TimeStamp::Now() + budget; }

// Time-limited budget: the deadline starts counting at construction. An
// optional interrupt flag lets another thread cut the slice short.
SliceBudget::SliceBudget(TimeBudget time, InterruptRequestFlag* interrupt)
    : budget(TimeBudget(time)),
      interruptRequested(interrupt),
      counter(StepsPerExpensiveCheck) {
  budget.as<TimeBudget>().setDeadlineFromNow();
}

// Work-limited budget: expires after a fixed number of steps, never by time.
SliceBudget::SliceBudget(WorkBudget work)
    : budget(work), interruptRequested(nullptr), counter(work.budget) {}
+
// Format a human-readable description of this budget into |buffer| (for
// logging/profiling). Returns snprintf's result: the number of characters
// that would have been written, excluding the terminator.
int SliceBudget::describe(char* buffer, size_t maxlen) const {
  if (isUnlimited()) {
    return snprintf(buffer, maxlen, "unlimited");
  } else if (isWorkBudget()) {
    return snprintf(buffer, maxlen, "work(%" PRId64 ")", workBudget());
  } else {
    // Time budget: annotate with interrupt state and whether the slice ran
    // during idle time.
    const char* interruptStr = "";
    if (interruptRequested) {
      interruptStr = interrupted ? "INTERRUPTED " : "interruptible ";
    }
    const char* extra = "";
    if (idle) {
      extra = extended ? " (started idle but extended)" : " (idle)";
    }
    return snprintf(buffer, maxlen, "%s%" PRId64 "ms%s", interruptStr,
                    timeBudget(), extra);
  }
}
+
// Slow path called when the step counter reaches zero: decide whether the
// slice is really over. Work budgets are exhausted by definition at this
// point; time budgets additionally check the interrupt flag and the clock.
bool SliceBudget::checkOverBudget() {
  MOZ_ASSERT(counter <= 0);
  MOZ_ASSERT(!isUnlimited());

  if (isWorkBudget()) {
    return true;
  }

  // Latch an externally requested interrupt (and clear the request flag so
  // one request interrupts at most one slice).
  if (interruptRequested && *interruptRequested) {
    *interruptRequested = false;
    interrupted = true;
  }

  if (interrupted) {
    return true;
  }

  if (TimeStamp::Now() >= budget.as<TimeBudget>().deadline) {
    return true;
  }

  // Not over budget: rearm the counter so the expensive checks above only
  // run every StepsPerExpensiveCheck steps.
  counter = StepsPerExpensiveCheck;
  return false;
}
+
// Record that a major GC is wanted for |reason| and interrupt the main
// thread so it starts one. Idempotent while a request is pending.
void GCRuntime::requestMajorGC(JS::GCReason reason) {
  MOZ_ASSERT_IF(reason != JS::GCReason::BG_TASK_FINISHED,
                !CurrentThreadIsPerformingGC());

  if (majorGCRequested()) {
    return;
  }

  majorGCTriggerReason = reason;
  rt->mainContextFromAnyThread()->requestInterrupt(InterruptReason::GC);
}

// Minor-GC counterpart; main thread only, and likewise idempotent.
void Nursery::requestMinorGC(JS::GCReason reason) const {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));

  if (minorGCRequested()) {
    return;
  }

  minorGCTriggerReason_ = reason;
  runtime()->mainContextFromOwnThread()->requestInterrupt(InterruptReason::GC);
}
+
// Request a full (all-zones) GC for |reason|. Returns false if the request
// could not be made (wrong thread, or a GC is already running).
bool GCRuntime::triggerGC(JS::GCReason reason) {
  /*
   * Don't trigger GCs if this is being called off the main thread from
   * onTooMuchMalloc().
   */
  if (!CurrentThreadCanAccessRuntime(rt)) {
    return false;
  }

  /* GC is already running. */
  if (JS::RuntimeHeapIsCollecting()) {
    return false;
  }

  JS::PrepareForFullGC(rt->mainContextFromOwnThread());
  requestMajorGC(reason);
  return true;
}
+
// After a tenured allocation, trigger a zone GC if the zone's GC-heap usage
// crossed its threshold.
void GCRuntime::maybeTriggerGCAfterAlloc(Zone* zone) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
  MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());

  TriggerResult trigger =
      checkHeapThreshold(zone, zone->gcHeapSize, zone->gcHeapThreshold);

  if (trigger.shouldTrigger) {
    // Start or continue an in progress incremental GC. We do this to try to
    // avoid performing non-incremental GCs on zones which allocate a lot of
    // data, even when incremental slices can't be triggered via scheduling in
    // the event loop.
    triggerZoneGC(zone, JS::GCReason::ALLOC_TRIGGER, trigger.usedBytes,
                  trigger.thresholdBytes);
  }
}

// Free-function adapter so ZoneAllocator code can trigger malloc-based GCs
// without depending on GCRuntime directly.
void js::gc::MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
                                      const HeapSize& heap,
                                      const HeapThreshold& threshold,
                                      JS::GCReason reason) {
  rt->gc.maybeTriggerGCAfterMalloc(Zone::from(zoneAlloc), heap, threshold,
                                   reason);
}
+
// Check both malloc-backed heaps for the zone; the malloc heap takes
// precedence over the JIT-code heap (at most one GC is triggered).
void GCRuntime::maybeTriggerGCAfterMalloc(Zone* zone) {
  if (maybeTriggerGCAfterMalloc(zone, zone->mallocHeapSize,
                                zone->mallocHeapThreshold,
                                JS::GCReason::TOO_MUCH_MALLOC)) {
    return;
  }

  maybeTriggerGCAfterMalloc(zone, zone->jitHeapSize, zone->jitHeapThreshold,
                            JS::GCReason::TOO_MUCH_JIT_CODE);
}

// Trigger a zone GC for |reason| if |heap| usage crossed |threshold|.
// Returns true if a GC was triggered.
bool GCRuntime::maybeTriggerGCAfterMalloc(Zone* zone, const HeapSize& heap,
                                          const HeapThreshold& threshold,
                                          JS::GCReason reason) {
  // Ignore malloc during sweeping, for example when we resize hash tables.
  if (heapState() != JS::HeapState::Idle) {
    return false;
  }

  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));

  TriggerResult trigger = checkHeapThreshold(zone, heap, threshold);
  if (!trigger.shouldTrigger) {
    return false;
  }

  // Trigger a zone GC. budgetIncrementalGC() will work out whether to do an
  // incremental or non-incremental collection.
  triggerZoneGC(zone, reason, trigger.usedBytes, trigger.thresholdBytes);
  return true;
}
+
+TriggerResult GCRuntime::checkHeapThreshold(
+ Zone* zone, const HeapSize& heapSize, const HeapThreshold& heapThreshold) {
+ MOZ_ASSERT_IF(heapThreshold.hasSliceThreshold(), zone->wasGCStarted());
+
+ size_t usedBytes = heapSize.bytes();
+ size_t thresholdBytes = heapThreshold.hasSliceThreshold()
+ ? heapThreshold.sliceBytes()
+ : heapThreshold.startBytes();
+
+ // The incremental limit will be checked if we trigger a GC slice.
+ MOZ_ASSERT(thresholdBytes <= heapThreshold.incrementalLimitBytes());
+
+ return TriggerResult{usedBytes >= thresholdBytes, usedBytes, thresholdBytes};
+}
+
// Schedule a GC of |zone| for |reason|, recording the triggering numbers in
// stats. Zeal alloc mode and the atoms zone escalate to a full GC (the atoms
// zone cannot be collected alone). Returns false if a GC is already running.
bool GCRuntime::triggerZoneGC(Zone* zone, JS::GCReason reason, size_t used,
                              size_t threshold) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));

  /* GC is already running. */
  if (JS::RuntimeHeapIsBusy()) {
    return false;
  }

#ifdef JS_GC_ZEAL
  if (hasZealMode(ZealMode::Alloc)) {
    MOZ_RELEASE_ASSERT(triggerGC(reason));
    return true;
  }
#endif

  if (zone->isAtomsZone()) {
    stats().recordTrigger(used, threshold);
    MOZ_RELEASE_ASSERT(triggerGC(reason));
    return true;
  }

  stats().recordTrigger(used, threshold);
  zone->scheduleGC();
  requestMajorGC(reason);
  return true;
}
+
// Opportunistic collection point called from the event loop: run any
// requested GC, or an eager one if thresholds warrant it. Under alloc/roots
// zeal modes this instead forces an immediate full debug GC.
void GCRuntime::maybeGC() {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));

#ifdef JS_GC_ZEAL
  if (hasZealMode(ZealMode::Alloc) || hasZealMode(ZealMode::RootsChange)) {
    JS::PrepareForFullGC(rt->mainContextFromOwnThread());
    gc(JS::GCOptions::Normal, JS::GCReason::DEBUG_GC);
    return;
  }
#endif

  (void)gcIfRequestedImpl(/* eagerOk = */ true);
}
+
// Work out whether a major GC should run now. An explicit request wins;
// otherwise, when |eagerOk| and no incremental GC is in progress, scan all
// zones for eager allocation triggers and schedule any that hit, returning
// EAGER_ALLOC_TRIGGER if at least one did.
JS::GCReason GCRuntime::wantMajorGC(bool eagerOk) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));

  if (majorGCRequested()) {
    return majorGCTriggerReason;
  }

  if (isIncrementalGCInProgress() || !eagerOk) {
    return JS::GCReason::NO_REASON;
  }

  JS::GCReason reason = JS::GCReason::NO_REASON;
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    if (checkEagerAllocTrigger(zone->gcHeapSize, zone->gcHeapThreshold) ||
        checkEagerAllocTrigger(zone->mallocHeapSize,
                               zone->mallocHeapThreshold)) {
      zone->scheduleGC();
      reason = JS::GCReason::EAGER_ALLOC_TRIGGER;
    }
  }

  return reason;
}
+
// True if |size| exceeds the heap's eager-allocation threshold. Heaps of one
// megabyte or less never trigger eagerly; a hit is recorded in stats.
bool GCRuntime::checkEagerAllocTrigger(const HeapSize& size,
                                       const HeapThreshold& threshold) {
  double thresholdBytes =
      threshold.eagerAllocTrigger(schedulingState.inHighFrequencyGCMode());
  double usedBytes = size.bytes();
  if (usedBytes <= 1024 * 1024 || usedBytes < thresholdBytes) {
    return false;
  }

  stats().recordTrigger(usedBytes, thresholdBytes);
  return true;
}
+
+bool GCRuntime::shouldDecommit() const {
+ // If we're doing a shrinking GC we always decommit to release as much memory
+ // as possible.
+ if (cleanUpEverything) {
+ return true;
+ }
+
+ // If we are allocating heavily enough to trigger "high frequency" GC then
+ // skip decommit so that we do not compete with the mutator.
+ return !schedulingState.inHighFrequencyGCMode();
+}
+
// Kick off the decommit phase: verify chunk pool invariants (debug only),
// bail if there is nothing to decommit, then run the decommit task either on
// a background thread or synchronously on the main thread.
void GCRuntime::startDecommit() {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::DECOMMIT);

#ifdef DEBUG
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
  MOZ_ASSERT(decommitTask.isIdle());

  {
    AutoLockGC lock(this);
    MOZ_ASSERT(fullChunks(lock).verify());
    MOZ_ASSERT(availableChunks(lock).verify());
    MOZ_ASSERT(emptyChunks(lock).verify());

    // Verify that all entries in the empty chunks pool are unused.
    for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done();
         chunk.next()) {
      MOZ_ASSERT(chunk->unused());
    }
  }
#endif

  if (!shouldDecommit()) {
    return;
  }

  // Nothing to do if there are no available chunks to decommit from and the
  // empty-chunk pool is empty and within limits.
  {
    AutoLockGC lock(this);
    if (availableChunks(lock).empty() && !tooManyEmptyChunks(lock) &&
        emptyChunks(lock).empty()) {
      return;  // Nothing to do.
    }
  }

#ifdef DEBUG
  {
    AutoLockHelperThreadState lock;
    MOZ_ASSERT(!requestSliceAfterBackgroundTask);
  }
#endif

  if (useBackgroundThreads) {
    decommitTask.start();
    return;
  }

  decommitTask.runFromMainThread();
}
+
BackgroundDecommitTask::BackgroundDecommitTask(GCRuntime* gc)
    : GCParallelTask(gc, gcstats::PhaseKind::DECOMMIT) {}

// Background task body: free expired empty chunks, then decommit unused
// memory. Runs with the helper-thread lock dropped; the GC lock is taken in
// short sections so the mutator is not blocked for long.
void js::gc::BackgroundDecommitTask::run(AutoLockHelperThreadState& lock) {
  {
    AutoUnlockHelperThreadState unlock(lock);

    // Take the expired chunks out of the pool under the GC lock, but free
    // them (a potentially slow syscall-heavy operation) outside it.
    ChunkPool emptyChunksToFree;
    {
      AutoLockGC gcLock(gc);
      emptyChunksToFree = gc->expireEmptyChunkPool(gcLock);
    }

    FreeChunkPool(emptyChunksToFree);

    {
      AutoLockGC gcLock(gc);

      // To help minimize the total number of chunks needed over time, sort the
      // available chunks list so that we allocate into more-used chunks first.
      gc->availableChunks(gcLock).sort();

      if (DecommitEnabled()) {
        gc->decommitEmptyChunks(cancel_, gcLock);
        gc->decommitFreeArenas(cancel_, gcLock);
      }
    }
  }

  // Helper-thread lock is held again here, as required by the callee.
  gc->maybeRequestGCAfterBackgroundTask(lock);
}
+
+static inline bool CanDecommitWholeChunk(TenuredChunk* chunk) {
+ return chunk->unused() && chunk->info.numArenasFreeCommitted != 0;
+}
+
+// Called from a background thread to decommit free arenas. Releases the GC
+// lock.
+void GCRuntime::decommitEmptyChunks(const bool& cancel, AutoLockGC& lock) {
+ Vector<TenuredChunk*, 0, SystemAllocPolicy> chunksToDecommit;
+ for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done(); chunk.next()) {
+ if (CanDecommitWholeChunk(chunk) && !chunksToDecommit.append(chunk)) {
+ onOutOfMallocMemory(lock);
+ return;
+ }
+ }
+
+ for (TenuredChunk* chunk : chunksToDecommit) {
+ if (cancel) {
+ break;
+ }
+
+ // Check whether something used the chunk while lock was released.
+ if (!CanDecommitWholeChunk(chunk)) {
+ continue;
+ }
+
+ // Temporarily remove the chunk while decommitting its memory so that the
+ // mutator doesn't start allocating from it when we drop the lock.
+ emptyChunks(lock).remove(chunk);
+
+ {
+ AutoUnlockGC unlock(lock);
+ chunk->decommitAllArenas();
+ MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
+ }
+
+ emptyChunks(lock).push(chunk);
+ }
+}
+
// Called from a background thread to decommit free arenas. Releases the GC
// lock.
void GCRuntime::decommitFreeArenas(const bool& cancel, AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());

  // Since we release the GC lock while doing the decommit syscall below,
  // it is dangerous to iterate the available list directly, as the active
  // thread could modify it concurrently. Instead, we build and pass an
  // explicit Vector containing the Chunks we want to visit.
  Vector<TenuredChunk*, 0, SystemAllocPolicy> chunksToDecommit;
  for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done();
       chunk.next()) {
    // Only chunks that still have committed free arenas are worth visiting.
    if (chunk->info.numArenasFreeCommitted != 0 &&
        !chunksToDecommit.append(chunk)) {
      onOutOfMallocMemory(lock);
      return;
    }
  }

  for (TenuredChunk* chunk : chunksToDecommit) {
    // The per-chunk decommit polls |cancel| and may release the lock.
    chunk->decommitFreeArenas(this, cancel, lock);
  }
}
+
// Do all possible decommit immediately from the current thread without
// releasing the GC lock or allocating any memory.
void GCRuntime::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());
  // Holding the lock throughout means it is safe to walk the pool directly,
  // unlike in decommitFreeArenas() above.
  for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done();
       chunk.next()) {
    chunk->decommitFreeArenasWithoutUnlocking(lock);
  }
  MOZ_ASSERT(availableChunks(lock).verify());
}
+
// If a slice was requested to run once background work finishes, request a
// major GC now. Called with the helper thread lock held; the flag is consumed
// (reset to false) when the request is made.
void GCRuntime::maybeRequestGCAfterBackgroundTask(
    const AutoLockHelperThreadState& lock) {
  if (requestSliceAfterBackgroundTask) {
    // Trigger a slice so the main thread can continue the collection
    // immediately.
    requestSliceAfterBackgroundTask = false;
    requestMajorGC(JS::GCReason::BG_TASK_FINISHED);
  }
}
+
// Undo a GC request made by maybeRequestGCAfterBackgroundTask: if the pending
// trigger reason is exactly BG_TASK_FINISHED, atomically reset it to
// NO_REASON. Main thread only.
void GCRuntime::cancelRequestedGCAfterBackgroundTask() {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));

#ifdef DEBUG
  {
    // The request flag itself must already have been consumed.
    AutoLockHelperThreadState lock;
    MOZ_ASSERT(!requestSliceAfterBackgroundTask);
  }
#endif

  majorGCTriggerReason.compareExchange(JS::GCReason::BG_TASK_FINISHED,
                                       JS::GCReason::NO_REASON);
}
+
// Whether the collection is currently blocked waiting for a background task
// to finish (i.e. a follow-up slice has been requested but not yet run).
bool GCRuntime::isWaitingOnBackgroundTask() const {
  AutoLockHelperThreadState lock;
  return requestSliceAfterBackgroundTask;
}
+
// Transfer |lifo|'s unused blocks to the queue freed later on a background
// thread. Only valid while the heap is busy (in GC).
void GCRuntime::queueUnusedLifoBlocksForFree(LifoAlloc* lifo) {
  MOZ_ASSERT(JS::RuntimeHeapIsBusy());
  AutoLockHelperThreadState lock;
  lifoBlocksToFree.ref().transferUnusedFrom(lifo);
}
+
// Transfer all of |lifo|'s blocks to the queue freed after the next minor GC.
void GCRuntime::queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc* lifo) {
  lifoBlocksToFreeAfterMinorGC.ref().transferFrom(lifo);
}
+
// Hand a set of nursery buffers to the background free task, to be released
// after the minor GC. The previous batch must be drained first.
void GCRuntime::queueBuffersForFreeAfterMinorGC(Nursery::BufferSet& buffers) {
  AutoLockHelperThreadState lock;

  if (!buffersToFreeAfterMinorGC.ref().empty()) {
    // In the rare case that this hasn't processed the buffers from a previous
    // minor GC we have to wait here.
    MOZ_ASSERT(!freeTask.isIdle(lock));
    freeTask.joinWithLockHeld(lock);
  }

  MOZ_ASSERT(buffersToFreeAfterMinorGC.ref().empty());
  // Swap rather than copy: the caller's set is left empty.
  std::swap(buffersToFreeAfterMinorGC.ref(), buffers);
}
+
// Tear down a realm: run the embedder's destroy callback, drop its principals,
// and delete the object.
void Realm::destroy(JS::GCContext* gcx) {
  JSRuntime* rt = gcx->runtime();
  if (auto callback = rt->destroyRealmCallback) {
    callback(gcx, this);
  }
  if (principals()) {
    JS_DropPrincipals(rt->mainContextFromOwnThread(), principals());
  }
  // Bug 1560019: Malloc memory associated with a zone but not with a specific
  // GC thing is not currently tracked.
  gcx->deleteUntracked(this);
}
+
// Tear down a compartment: run the embedder's destroy callback, delete the
// object, and record the sweep in GC stats.
void Compartment::destroy(JS::GCContext* gcx) {
  JSRuntime* rt = gcx->runtime();
  if (auto callback = rt->destroyCompartmentCallback) {
    callback(gcx, this);
  }
  // Bug 1560019: Malloc memory associated with a zone but not with a specific
  // GC thing is not currently tracked.
  gcx->deleteUntracked(this);
  rt->gc.stats().sweptCompartment();
}
+
+void Zone::destroy(JS::GCContext* gcx) {
+ MOZ_ASSERT(compartments().empty());
+ JSRuntime* rt = gcx->runtime();
+ if (auto callback = rt->destroyZoneCallback) {
+ callback(gcx, this);
+ }
+ // Bug 1560019: Malloc memory associated with a zone but not with a specific
+ // GC thing is not currently tracked.
+ gcx->deleteUntracked(this);
+ gcx->runtime()->gc.stats().sweptZone();
+}
+
+/*
+ * It's simpler if we preserve the invariant that every zone (except atoms
+ * zones) has at least one compartment, and every compartment has at least one
+ * realm. If we know we're deleting the entire zone, then sweepCompartments is
+ * allowed to delete all compartments. In this case, |keepAtleastOne| is false.
+ * If any cells remain alive in the zone, set |keepAtleastOne| true to prohibit
+ * sweepCompartments from deleting every compartment. Instead, it preserves an
+ * arbitrary compartment in the zone.
+ */
void Zone::sweepCompartments(JS::GCContext* gcx, bool keepAtleastOne,
                             bool destroyingRuntime) {
  MOZ_ASSERT_IF(!isAtomsZone(), !compartments().empty());
  MOZ_ASSERT_IF(destroyingRuntime, !keepAtleastOne);

  // In-place compaction: |read| scans all compartments, |write| keeps only
  // the survivors, preserving order.
  Compartment** read = compartments().begin();
  Compartment** end = compartments().end();
  Compartment** write = read;
  while (read < end) {
    Compartment* comp = *read++;

    /*
     * Don't delete the last compartment and realm if keepAtleastOne is
     * still true, meaning all the other compartments were deleted.
     */
    bool keepAtleastOneRealm = read == end && keepAtleastOne;
    comp->sweepRealms(gcx, keepAtleastOneRealm, destroyingRuntime);

    if (!comp->realms().empty()) {
      *write++ = comp;
      // A compartment survived, so the keep-one requirement is satisfied.
      keepAtleastOne = false;
    } else {
      comp->destroy(gcx);
    }
  }
  compartments().shrinkTo(write - compartments().begin());
  MOZ_ASSERT_IF(keepAtleastOne, !compartments().empty());
  MOZ_ASSERT_IF(destroyingRuntime, compartments().empty());
}
+
// Delete dead realms from this compartment, optionally preserving one realm
// so the compartment stays non-empty. Mirrors Zone::sweepCompartments.
void Compartment::sweepRealms(JS::GCContext* gcx, bool keepAtleastOne,
                              bool destroyingRuntime) {
  MOZ_ASSERT(!realms().empty());
  MOZ_ASSERT_IF(destroyingRuntime, !keepAtleastOne);

  // In-place compaction over the realm list, preserving order.
  Realm** read = realms().begin();
  Realm** end = realms().end();
  Realm** write = read;
  while (read < end) {
    Realm* realm = *read++;

    /*
     * Don't delete the last realm if keepAtleastOne is still true, meaning
     * all the other realms were deleted.
     */
    bool dontDelete = read == end && keepAtleastOne;
    if ((realm->marked() || dontDelete) && !destroyingRuntime) {
      *write++ = realm;
      // A realm survived, so the keep-one requirement is satisfied.
      keepAtleastOne = false;
    } else {
      realm->destroy(gcx);
    }
  }
  realms().shrinkTo(write - realms().begin());
  MOZ_ASSERT_IF(keepAtleastOne, !realms().empty());
  MOZ_ASSERT_IF(destroyingRuntime, realms().empty());
}
+
// Destroy zones that have become entirely dead during this collection, and
// sweep the compartments of zones that survive. The atoms zone (always first
// in the list) is never destroyed here.
void GCRuntime::sweepZones(JS::GCContext* gcx, bool destroyingRuntime) {
  MOZ_ASSERT_IF(destroyingRuntime, numActiveZoneIters == 0);

  // Bail if zone iterators are live; deleting zones would invalidate them.
  if (numActiveZoneIters) {
    return;
  }

  assertBackgroundSweepingFinished();

  // Sweep zones following the atoms zone.
  MOZ_ASSERT(zones()[0]->isAtomsZone());
  Zone** read = zones().begin() + 1;
  Zone** end = zones().end();
  Zone** write = read;

  // In-place compaction: keep surviving zones in order, destroy the rest.
  while (read < end) {
    Zone* zone = *read++;

    if (zone->wasGCStarted()) {
      MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
      AutoSetThreadIsSweeping threadIsSweeping(zone);
      // A zone is dead when it contains no cells and no marked realms.
      const bool zoneIsDead =
          zone->arenas.arenaListsAreEmpty() && !zone->hasMarkedRealms();
      MOZ_ASSERT_IF(destroyingRuntime, zoneIsDead);
      if (zoneIsDead) {
        zone->arenas.checkEmptyFreeLists();
        zone->sweepCompartments(gcx, false, destroyingRuntime);
        MOZ_ASSERT(zone->compartments().empty());
        zone->destroy(gcx);
        continue;
      }
      zone->sweepCompartments(gcx, true, destroyingRuntime);
    }
    *write++ = zone;
  }
  zones().shrinkTo(write - zones().begin());
}
+
// Debug check that the arena list for |kind| contains no arenas.
void ArenaLists::checkEmptyArenaList(AllocKind kind) {
  MOZ_ASSERT(arenaList(kind).isEmpty());
}
+
// Purge the per-zone caches that can hold nursery pointers before a minor GC.
void GCRuntime::purgeRuntimeForMinorGC() {
  // If external strings become nursery allocable, remember to call
  // zone->externalStringCache().purge() (and delete this assert.)
  MOZ_ASSERT(!IsNurseryAllocable(AllocKind::EXTERNAL_STRING));

  for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
    zone->functionToStringCache().purge();
  }
}
+
// Drop all runtime-level caches at the start of a major GC, so that cached
// objects cannot keep otherwise-dead things alive or violate the incremental
// snapshot invariant (see the comment at the call site in endPreparePhase).
void GCRuntime::purgeRuntime() {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE);

  for (GCRealmsIter realm(rt); !realm.done(); realm.next()) {
    realm->purge();
  }

  // Per-zone caches for the zones being collected.
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    zone->purgeAtomCache();
    zone->externalStringCache().purge();
    zone->functionToStringCache().purge();
    zone->boundPrefixCache().clearAndCompact();
    zone->shapeZone().purgeShapeCaches(rt->gcContext());
  }

  // Main-thread context caches.
  JSContext* cx = rt->mainContextFromOwnThread();
  queueUnusedLifoBlocksForFree(&cx->tempLifoAlloc());
  cx->interpreterStack().purge(rt);
  cx->frontendCollectionPool().purge();

  rt->caches().purge();

  if (rt->isMainRuntime()) {
    SharedImmutableStringsCache::getSingleton().purge();
  }

  MOZ_ASSERT(marker().unmarkGrayStack.empty());
  marker().unmarkGrayStack.clearAndFree();

  // If we're the main runtime, tell helper threads to free their unused
  // memory when they are next idle.
  if (!rt->parentRuntime) {
    HelperThreadState().triggerFreeUnusedMemory();
  }
}
+
+bool GCRuntime::shouldPreserveJITCode(Realm* realm,
+ const TimeStamp& currentTime,
+ JS::GCReason reason,
+ bool canAllocateMoreCode,
+ bool isActiveCompartment) {
+ if (cleanUpEverything) {
+ return false;
+ }
+ if (!canAllocateMoreCode) {
+ return false;
+ }
+
+ if (isActiveCompartment) {
+ return true;
+ }
+ if (alwaysPreserveCode) {
+ return true;
+ }
+ if (realm->preserveJitCode()) {
+ return true;
+ }
+ if (IsCurrentlyAnimating(realm->lastAnimationTime, currentTime) &&
+ DiscardedCodeRecently(realm->zone(), currentTime)) {
+ return true;
+ }
+ if (reason == JS::GCReason::DEBUG_GC) {
+ return true;
+ }
+
+ return false;
+}
+
+#ifdef DEBUG
// Debug-only tracer that walks the edges of a cell and asserts that every
// target is in an expected compartment/zone (see onChild below). The fields
// describe the cell currently being traced from.
class CompartmentCheckTracer final : public JS::CallbackTracer {
  void onChild(JS::GCCellPtr thing, const char* name) override;
  bool edgeIsInCrossCompartmentMap(JS::GCCellPtr dst);

 public:
  explicit CompartmentCheckTracer(JSRuntime* rt)
      : JS::CallbackTracer(rt, JS::TracerKind::CompartmentCheck,
                           JS::WeakEdgeTraceAction::Skip),
        src(nullptr),
        zone(nullptr),
        compartment(nullptr) {}

  Cell* src;             // Cell whose edges are being checked.
  JS::TraceKind srcKind; // Trace kind of |src|.
  Zone* zone;            // Zone |src| belongs to.
  Compartment* compartment; // |src|'s compartment, or null if it has none.
};
+
// Whether the edge src -> dst is accounted for as a legitimate
// cross-compartment edge, either via the source compartment's wrapper map or
// a debugger weakmap.
static bool InCrossCompartmentMap(JSRuntime* rt, JSObject* src,
                                  JS::GCCellPtr dst) {
  // Cross compartment edges are either in the cross compartment map or in a
  // debugger weakmap.

  Compartment* srccomp = src->compartment();

  if (dst.is<JSObject>()) {
    // The wrapper map maps target object -> wrapper; the edge is valid if the
    // recorded wrapper for |dst| is exactly |src|.
    if (ObjectWrapperMap::Ptr p = srccomp->lookupWrapper(&dst.as<JSObject>())) {
      if (*p->value().unsafeGet() == src) {
        return true;
      }
    }
  }

  if (DebugAPI::edgeIsInDebuggerWeakmap(rt, src, dst)) {
    return true;
  }

  return false;
}
+
// Callback for each edge traced from |src|: assert the target is in the same
// compartment (or is a sanctioned cross-compartment edge), falling back to a
// zone check for targets that have no compartment.
void CompartmentCheckTracer::onChild(JS::GCCellPtr thing, const char* name) {
  Compartment* comp =
      MapGCThingTyped(thing, [](auto t) { return t->maybeCompartment(); });
  if (comp && compartment) {
    MOZ_ASSERT(comp == compartment || edgeIsInCrossCompartmentMap(thing));
  } else {
    // No compartment on one side: check zones instead. Atoms-zone targets are
    // always allowed.
    TenuredCell* tenured = &thing.asCell()->asTenured();
    Zone* thingZone = tenured->zoneFromAnyThread();
    MOZ_ASSERT(thingZone == zone || thingZone->isAtomsZone());
  }
}
+
// Only object sources can have sanctioned cross-compartment edges; delegate
// the actual lookup to InCrossCompartmentMap.
bool CompartmentCheckTracer::edgeIsInCrossCompartmentMap(JS::GCCellPtr dst) {
  return srcKind == JS::TraceKind::Object &&
         InCrossCompartmentMap(runtime(), static_cast<JSObject*>(src), dst);
}
+
// Detect a native object whose dynamic-slot allocation failed during
// initialization: it should have dynamic slots but has none.
static bool IsPartiallyInitializedObject(Cell* cell) {
  if (!cell->is<JSObject>()) {
    return false;
  }

  JSObject* obj = cell->as<JSObject>();
  if (!obj->is<NativeObject>()) {
    return false;
  }

  NativeObject* nobj = &obj->as<NativeObject>();

  // Check for failed allocation of dynamic slots in
  // NativeObject::allocateInitialSlots.
  size_t nDynamicSlots = NativeObject::calculateDynamicSlots(
      nobj->numFixedSlots(), nobj->slotSpan(), nobj->getClass());
  return nDynamicSlots != 0 && !nobj->hasDynamicSlots();
}
+
// Debug check: trace every tenured cell in every (non-atoms) zone and assert
// that no edge crosses compartments illegitimately. Requires an empty
// nursery. No-op while strict proxy checking is disabled.
void GCRuntime::checkForCompartmentMismatches() {
  JSContext* cx = rt->mainContextFromOwnThread();
  if (cx->disableStrictProxyCheckingCount) {
    return;
  }

  CompartmentCheckTracer trc(rt);
  AutoAssertEmptyNursery empty(cx);
  for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
    trc.zone = zone;
    for (auto thingKind : AllAllocKinds()) {
      for (auto i = zone->cellIterUnsafe<TenuredCell>(thingKind, empty);
           !i.done(); i.next()) {
        // We may encounter partially initialized objects. These are unreachable
        // and it's safe to ignore them.
        if (IsPartiallyInitializedObject(i.getCell())) {
          continue;
        }

        // Record the source cell on the tracer, then trace its children; each
        // edge is checked in CompartmentCheckTracer::onChild.
        trc.src = i.getCell();
        trc.srcKind = MapAllocToTraceKind(thingKind);
        trc.compartment = MapGCThingTyped(
            trc.src, trc.srcKind, [](auto t) { return t->maybeCompartment(); });
        JS::TraceChildren(&trc, JS::GCCellPtr(trc.src, trc.srcKind));
      }
    }
  }
}
+#endif
+
+static bool ShouldCleanUpEverything(JS::GCOptions options) {
+ // During shutdown, we must clean everything up, for the sake of leak
+ // detection. When a runtime has no contexts, or we're doing a GC before a
+ // shutdown CC, those are strong indications that we're shutting down.
+ return options == JS::GCOptions::Shutdown || options == JS::GCOptions::Shrink;
+}
+
+static bool ShouldUseBackgroundThreads(bool isIncremental,
+ JS::GCReason reason) {
+ bool shouldUse = isIncremental && CanUseExtraThreads();
+ MOZ_ASSERT_IF(reason == JS::GCReason::DESTROY_RUNTIME, !shouldUse);
+ return shouldUse;
+}
+
// Initialize per-collection state at the very start of a major GC.
void GCRuntime::startCollection(JS::GCReason reason) {
  checkGCStateNotInUse();
  MOZ_ASSERT_IF(
      isShuttingDown(),
      isShutdownGC() ||
          reason == JS::GCReason::XPCONNECT_SHUTDOWN /* Bug 1650075 */);

  initialReason = reason;
  cleanUpEverything = ShouldCleanUpEverything(gcOptions());
  isCompacting = shouldCompact();
  rootsRemoved = false;
  sweepGroupIndex = 0;
  lastGCStartTime_ = TimeStamp::Now();

#ifdef DEBUG
  if (isShutdownGC()) {
    hadShutdownGC = true;
  }

  // Reset per-zone sweep group indexes; they are assigned during sweeping.
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    zone->gcSweepGroupIndex = 0;
  }
#endif
}
+
// Walk all function objects of the given kind in |zone| and relazify those
// that currently have bytecode. Requires an empty nursery.
static void RelazifyFunctions(Zone* zone, AllocKind kind) {
  MOZ_ASSERT(kind == AllocKind::FUNCTION ||
             kind == AllocKind::FUNCTION_EXTENDED);

  JSRuntime* rt = zone->runtimeFromMainThread();
  AutoAssertEmptyNursery empty(rt->mainContextFromOwnThread());

  for (auto i = zone->cellIterUnsafe<JSObject>(kind, empty); !i.done();
       i.next()) {
    JSFunction* fun = &i->as<JSFunction>();
    // When iterating over the GC-heap, we may encounter function objects that
    // are incomplete (missing a BaseScript when we expect one). We must check
    // for this case before we can call JSFunction::hasBytecode().
    if (fun->isIncomplete()) {
      continue;
    }
    if (fun->hasBytecode()) {
      fun->maybeRelazify(rt);
    }
  }
}
+
// Decide whether |zone| participates in this collection, based on |reason|.
static bool ShouldCollectZone(Zone* zone, JS::GCReason reason) {
  // If we are repeating a GC because we noticed dead compartments haven't
  // been collected, then only collect zones containing those compartments.
  if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
      if (comp->gcState.scheduledForDestruction) {
        return true;
      }
    }

    return false;
  }

  // Otherwise we only collect scheduled zones.
  return zone->isGCScheduled();
}
+
// Move zones chosen by ShouldCollectZone into the Prepare state. Sets
// |*isFullOut| to whether every zone is being collected, and returns whether
// at least one zone was selected.
bool GCRuntime::prepareZonesForCollection(JS::GCReason reason,
                                          bool* isFullOut) {
#ifdef DEBUG
  /* Assert that zone state is as we expect */
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    MOZ_ASSERT(!zone->isCollecting());
    MOZ_ASSERT_IF(!zone->isAtomsZone(), !zone->compartments().empty());
    for (auto i : AllAllocKinds()) {
      MOZ_ASSERT(zone->arenas.collectingArenaList(i).isEmpty());
    }
  }
#endif

  *isFullOut = true;
  bool any = false;

  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    /* Set up which zones will be collected. */
    bool shouldCollect = ShouldCollectZone(zone, reason);
    if (shouldCollect) {
      any = true;
      zone->changeGCState(Zone::NoGC, Zone::Prepare);
    } else {
      // One skipped zone makes this a partial (zonal) collection.
      *isFullOut = false;
    }

    zone->setWasCollected(shouldCollect);
  }

  /* Check that at least one zone is scheduled for collection. */
  return any;
}
+
// Discard JIT code in collected zones (unless the zone is preserving code),
// optionally resetting pretenuring allocation sites, and report the resets.
void GCRuntime::discardJITCodeForGC() {
  size_t nurserySiteResetCount = 0;
  size_t pretenuredSiteResetCount = 0;

  // Cancel in-flight off-thread Ion compiles for zones in the Prepare state.
  js::CancelOffThreadIonCompile(rt, JS::Zone::Prepare);
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_DISCARD_CODE);

    // We may need to reset allocation sites and discard JIT code to recover if
    // we find object lifetimes have changed.
    PretenuringZone& pz = zone->pretenuring;
    bool resetNurserySites = pz.shouldResetNurseryAllocSites();
    bool resetPretenuredSites = pz.shouldResetPretenuredAllocSites();

    if (!zone->isPreservingCode()) {
      Zone::DiscardOptions options;
      options.discardBaselineCode = true;
      options.discardJitScripts = true;
      options.resetNurseryAllocSites = resetNurserySites;
      options.resetPretenuredAllocSites = resetPretenuredSites;
      zone->discardJitCode(rt->gcContext(), options);
    } else if (resetNurserySites || resetPretenuredSites) {
      // Can't discard preserved code, but alloc-site resets still require
      // invalidating affected code.
      zone->resetAllocSitesAndInvalidate(resetNurserySites,
                                         resetPretenuredSites);
    }

    if (resetNurserySites) {
      nurserySiteResetCount++;
    }
    if (resetPretenuredSites) {
      pretenuredSiteResetCount++;
    }
  }

  // Optional stderr reporting, controlled by the nursery.
  if (nursery().reportPretenuring()) {
    if (nurserySiteResetCount) {
      fprintf(
          stderr,
          "GC reset nursery alloc sites and invalidated code in %zu zones\n",
          nurserySiteResetCount);
    }
    if (pretenuredSiteResetCount) {
      fprintf(
          stderr,
          "GC reset pretenured alloc sites and invalidated code in %zu zones\n",
          pretenuredSiteResetCount);
    }
  }
}
+
// Shrinking-GC step: relazify functions (both plain and extended) in every
// collected zone to reclaim their bytecode.
void GCRuntime::relazifyFunctionsForShrinkingGC() {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::RELAZIFY_FUNCTIONS);
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    RelazifyFunctions(zone, AllocKind::FUNCTION);
    RelazifyFunctions(zone, AllocKind::FUNCTION_EXTENDED);
  }
}
+
// Shrinking-GC step: free the lookup tables of linked property maps in zones
// that can be relocated and don't ask to keep their tables.
void GCRuntime::purgePropMapTablesForShrinkingGC() {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE_PROP_MAP_TABLES);
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    if (!canRelocateZone(zone) || zone->keepPropMapTables()) {
      continue;
    }

    // Note: CompactPropMaps never have a table.
    for (auto map = zone->cellIterUnsafe<NormalPropMap>(); !map.done();
         map.next()) {
      if (map->asLinked()->hasTable()) {
        map->asLinked()->purgeTable(rt->gcContext());
      }
    }
    for (auto map = zone->cellIterUnsafe<DictionaryPropMap>(); !map.done();
         map.next()) {
      if (map->asLinked()->hasTable()) {
        map->asLinked()->purgeTable(rt->gcContext());
      }
    }
  }
}
+
// The debugger keeps track of the URLs for the sources of each realm's scripts.
// These URLs are purged on shrinking GCs.
void GCRuntime::purgeSourceURLsForShrinkingGC() {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE_SOURCE_URLS);
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    // URLs are not tracked for realms in the system zone.
    if (!canRelocateZone(zone) || zone->isSystemZone()) {
      continue;
    }
    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
      for (RealmsInCompartmentIter realm(comp); !realm.done(); realm.next()) {
        // The global may be null for realms still being initialized.
        GlobalObject* global = realm.get()->unsafeUnbarrieredMaybeGlobal();
        if (global) {
          global->clearSourceURLSHolder();
        }
      }
    }
  }
}
+
// Clear weak map mark state for every zone being collected.
void GCRuntime::unmarkWeakMaps() {
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    /* Unmark all weak maps in the zones being collected. */
    WeakMapBase::unmarkZone(zone);
  }
}
+
// First part of GC preparation: select zones, kick off background unmarking,
// and start queued source compressions. Returns false (aborting the GC) if no
// zone was selected for collection.
bool GCRuntime::beginPreparePhase(JS::GCReason reason, AutoGCSession& session) {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PREPARE);

  if (!prepareZonesForCollection(reason, &isFull.ref())) {
    return false;
  }

  /*
   * Start a parallel task to clear all mark state for the zones we are
   * collecting. This is linear in the size of the heap we are collecting and so
   * can be slow. This usually happens concurrently with the mutator and GC
   * proper does not start until this is complete.
   */
  unmarkTask.initZones();
  if (useBackgroundThreads) {
    unmarkTask.start();
  } else {
    unmarkTask.runFromMainThread();
  }

  /*
   * Process any queued source compressions during the start of a major
   * GC.
   *
   * Bug 1650075: When we start passing GCOptions::Shutdown for
   * GCReason::XPCONNECT_SHUTDOWN GCs we can remove the extra check.
   */
  if (!isShutdownGC() && reason != JS::GCReason::XPCONNECT_SHUTDOWN) {
    StartHandlingCompressionsOnGC(rt);
  }

  return true;
}
+
// Background task that clears mark bits for collected zones; time spent is
// attributed to the UNMARK profiling phase.
BackgroundUnmarkTask::BackgroundUnmarkTask(GCRuntime* gc)
    : GCParallelTask(gc, gcstats::PhaseKind::UNMARK) {}
+
// Snapshot the zones to unmark, and prepare their arena lists, before the
// task is started. Must be called while the task is idle.
void BackgroundUnmarkTask::initZones() {
  MOZ_ASSERT(isIdle());
  MOZ_ASSERT(zones.empty());
  MOZ_ASSERT(!isCancelled());

  // We can't safely iterate the zones vector from another thread so we copy the
  // zones to be collected into another vector.
  AutoEnterOOMUnsafeRegion oomUnsafe;
  for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
    if (!zones.append(zone.get())) {
      oomUnsafe.crash("BackgroundUnmarkTask::initZones");
    }

    zone->arenas.clearFreeLists();
    zone->arenas.moveArenasToCollectingLists();
  }
}
+
+void BackgroundUnmarkTask::run(AutoLockHelperThreadState& helperTheadLock) {
+ AutoUnlockHelperThreadState unlock(helperTheadLock);
+
+ for (Zone* zone : zones) {
+ for (auto kind : AllAllocKinds()) {
+ ArenaList& arenas = zone->arenas.collectingArenaList(kind);
+ for (ArenaListIter arena(arenas.head()); !arena.done(); arena.next()) {
+ arena->unmarkAll();
+ if (isCancelled()) {
+ break;
+ }
+ }
+ }
+ }
+
+ zones.clear();
+}
+
// Second part of GC preparation, run in the first real GC slice: reset
// per-zone counters, decide which zones preserve JIT code, discard code,
// purge caches, and clear shutdown roots. Requires the unmark task to have
// finished.
void GCRuntime::endPreparePhase(JS::GCReason reason) {
  MOZ_ASSERT(unmarkTask.isIdle());

  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    /*
     * In an incremental GC, clear the arena free lists to ensure that
     * subsequent allocations refill them and end up marking new cells black.
     * See arenaAllocatedDuringGC().
     */
    zone->arenas.clearFreeLists();

    zone->markedStrings = 0;
    zone->finalizedStrings = 0;

    zone->setPreservingCode(false);

#ifdef JS_GC_ZEAL
    if (hasZealMode(ZealMode::YieldBeforeRootMarking)) {
      for (auto kind : AllAllocKinds()) {
        for (ArenaIter arena(zone, kind); !arena.done(); arena.next()) {
          arena->checkNoMarkedCells();
        }
      }
    }
#endif
  }

  // Discard JIT code more aggressively if the process is approaching its
  // executable code limit.
  bool canAllocateMoreCode = jit::CanLikelyAllocateMoreExecutableMemory();
  auto currentTime = TimeStamp::Now();

  // Find the compartment at the top of the JIT activation stack, if any; it
  // always preserves its code.
  Compartment* activeCompartment = nullptr;
  jit::JitActivationIterator activation(rt->mainContextFromOwnThread());
  if (!activation.done()) {
    activeCompartment = activation->compartment();
  }

  // Reset per-compartment liveness flags and decide code preservation.
  for (CompartmentsIter c(rt); !c.done(); c.next()) {
    c->gcState.scheduledForDestruction = false;
    c->gcState.maybeAlive = false;
    c->gcState.hasEnteredRealm = false;
    bool isActiveCompartment = c == activeCompartment;
    for (RealmsInCompartmentIter r(c); !r.done(); r.next()) {
      if (r->shouldTraceGlobal() || !r->zone()->isGCScheduled()) {
        c->gcState.maybeAlive = true;
      }
      if (shouldPreserveJITCode(r, currentTime, reason, canAllocateMoreCode,
                                isActiveCompartment)) {
        r->zone()->setPreservingCode(true);
      }
      if (r->hasBeenEnteredIgnoringJit()) {
        c->gcState.hasEnteredRealm = true;
      }
    }
  }

  /*
   * Perform remaining preparation work that must take place in the first true
   * GC slice.
   */

  {
    gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::PREPARE);

    AutoLockHelperThreadState helperLock;

    /* Clear mark state for WeakMaps in parallel with other work. */
    AutoRunParallelTask unmarkWeakMaps(this, &GCRuntime::unmarkWeakMaps,
                                       gcstats::PhaseKind::UNMARK_WEAKMAPS,
                                       GCUse::Unspecified, helperLock);

    AutoUnlockHelperThreadState unlock(helperLock);

    // Discard JIT code. For incremental collections, the sweep phase will
    // also discard JIT code.
    discardJITCodeForGC();

    /*
     * Relazify functions after discarding JIT code (we can't relazify
     * functions with JIT code) and before the actual mark phase, so that
     * the current GC can collect the JSScripts we're unlinking here. We do
     * this only when we're performing a shrinking GC, as too much
     * relazification can cause performance issues when we have to reparse
     * the same functions over and over.
     */
    if (isShrinkingGC()) {
      relazifyFunctionsForShrinkingGC();
      purgePropMapTablesForShrinkingGC();
      purgeSourceURLsForShrinkingGC();
    }

    /*
     * We must purge the runtime at the beginning of an incremental GC. The
     * danger if we purge later is that the snapshot invariant of
     * incremental GC will be broken, as follows. If some object is
     * reachable only through some cache (say the dtoaCache) then it will
     * not be part of the snapshot. If we purge after root marking, then
     * the mutator could obtain a pointer to the object and start using
     * it. This object might never be marked, so a GC hazard would exist.
     */
    purgeRuntime();

    startBackgroundFreeAfterMinorGC();

    if (isShutdownGC()) {
      /* Clear any engine roots that may hold external data live. */
      for (GCZonesIter zone(this); !zone.done(); zone.next()) {
        zone->clearRootsForShutdownGC();
      }

#ifdef DEBUG
      testMarkQueue.clear();
      queuePos = 0;
#endif
    }
  }

#ifdef DEBUG
  if (fullCompartmentChecks) {
    checkForCompartmentMismatches();
  }
#endif
}
+
// RAII guard around root marking: clears each collected compartment's
// hasMarkedCells flag on entry; the destructor folds it into maybeAlive.
AutoUpdateLiveCompartments::AutoUpdateLiveCompartments(GCRuntime* gc) : gc(gc) {
  for (GCCompartmentsIter c(gc->rt); !c.done(); c.next()) {
    c->gcState.hasMarkedCells = false;
  }
}
+
// Any compartment that had a cell marked during root marking is considered
// possibly alive (see findDeadCompartments).
AutoUpdateLiveCompartments::~AutoUpdateLiveCompartments() {
  for (GCCompartmentsIter c(gc->rt); !c.done(); c.next()) {
    if (c->gcState.hasMarkedCells) {
      c->gcState.maybeAlive = true;
    }
  }
}
+
+Zone::GCState Zone::initialMarkingState() const {
+ if (isAtomsZone()) {
+ // Don't delay gray marking in the atoms zone like we do in other zones.
+ return MarkBlackAndGray;
+ }
+
+ return MarkBlackOnly;
+}
+
// Start the mark phase: bump the major GC number, start the markers, move
// zones into their marking states, and trace all runtime roots.
void GCRuntime::beginMarkPhase(AutoGCSession& session) {
  /*
   * Mark phase.
   */
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);

  // This is the slice we actually start collecting. The number can be used to
  // check whether a major GC has started so we must not increment it until we
  // get here.
  incMajorGcNumber();

  MOZ_ASSERT(!hasDelayedMarking());
  for (auto& marker : markers) {
    marker->start();
  }

#ifdef DEBUG
  queuePos = 0;
  queueMarkColor.reset();
#endif

  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    // Incremental marking barriers are enabled at this point.
    zone->changeGCState(Zone::Prepare, zone->initialMarkingState());

    // Merge arenas allocated during the prepare phase, then move all arenas to
    // the collecting arena lists.
    zone->arenas.mergeArenasFromCollectingLists();
    zone->arenas.moveArenasToCollectingLists();

    for (RealmsInZoneIter realm(zone); !realm.done(); realm.next()) {
      realm->clearAllocatedDuringGC();
    }
  }

  if (rt->isBeingDestroyed()) {
    checkNoRuntimeRoots(session);
  } else {
    // Track which compartments get cells marked during root marking; see
    // AutoUpdateLiveCompartments.
    AutoUpdateLiveCompartments updateLive(this);
    marker().setRootMarkingMode(true);
    traceRuntimeForMajorGC(marker().tracer(), session);
    marker().setRootMarkingMode(false);
  }

  updateSchedulingStateOnGCStart();
  stats().measureInitialHeapSize();
}
+
void GCRuntime::findDeadCompartments() {
  gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::FIND_DEAD_COMPARTMENTS);

  /*
   * This code ensures that if a compartment is "dead", then it will be
   * collected in this GC. A compartment is considered dead if its maybeAlive
   * flag is false. The maybeAlive flag is set if:
   *
   *   (1) the compartment has been entered (set in beginMarkPhase() above)
   *   (2) the compartment's zone is not being collected (set in
   *       beginMarkPhase() above)
   *   (3) an object in the compartment was marked during root marking, either
   *       as a black root or a gray root. This is arranged by
   *       SetCompartmentHasMarkedCells and AutoUpdateLiveCompartments.
   *   (4) the compartment has incoming cross-compartment edges from another
   *       compartment that has maybeAlive set (set by this method).
   *
   * If the maybeAlive is false, then we set the scheduledForDestruction flag.
   * At the end of the GC, we look for compartments where
   * scheduledForDestruction is true. These are compartments that were somehow
   * "revived" during the incremental GC. If any are found, we do a special,
   * non-incremental GC of those compartments to try to collect them.
   *
   * Compartments can be revived for a variety of reasons. One reason is bug
   * 811587, where a reflector that was dead can be revived by DOM code that
   * still refers to the underlying DOM node.
   *
   * Read barriers and allocations can also cause revival. This might happen
   * during a function like JS_TransplantObject, which iterates over all
   * compartments, live or dead, and operates on their objects. See bug 803376
   * for details on this problem. To avoid the problem, we try to avoid
   * allocation and read barriers during JS_TransplantObject and the like.
   */

  // Propagate the maybeAlive flag via cross-compartment edges.
  // Breadth-first over the wrapped-object graph; on OOM we simply bail out
  // early, leaving some flags unpropagated.

  Vector<Compartment*, 0, js::SystemAllocPolicy> workList;

  for (CompartmentsIter comp(rt); !comp.done(); comp.next()) {
    if (comp->gcState.maybeAlive) {
      if (!workList.append(comp)) {
        return;
      }
    }
  }

  while (!workList.empty()) {
    Compartment* comp = workList.popCopy();
    for (Compartment::WrappedObjectCompartmentEnum e(comp); !e.empty();
         e.popFront()) {
      Compartment* dest = e.front();
      if (!dest->gcState.maybeAlive) {
        dest->gcState.maybeAlive = true;
        if (!workList.append(dest)) {
          return;
        }
      }
    }
  }

  // Set scheduledForDestruction based on maybeAlive.

  for (GCCompartmentsIter comp(rt); !comp.done(); comp.next()) {
    MOZ_ASSERT(!comp->gcState.scheduledForDestruction);
    if (!comp->gcState.maybeAlive) {
      comp->gcState.scheduledForDestruction = true;
    }
  }
}
+
// Record heap-size bookkeeping at the start of collection, for the runtime
// and for each collected zone.
void GCRuntime::updateSchedulingStateOnGCStart() {
  heapSize.updateOnGCStart();

  // Update memory counters for the zones we are collecting.
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    zone->updateSchedulingStateOnGCStart();
  }
}
+
// Parallel marking is usable only when more than one marker exists and the
// collected heap is large enough to be worth the overhead.
inline bool GCRuntime::canMarkInParallel() const {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
  // OOM testing limits the engine to using a single helper thread.
  if (oom::simulator.targetThread() == THREAD_TYPE_GCPARALLEL) {
    return false;
  }
#endif

  return markers.length() > 1 && stats().initialCollectedBytes() >=
                                     tunables.parallelMarkingThresholdBytes();
}
+
// Run one marking slice, in parallel when allowed and profitable, and report
// whether all marking work completed within the budget.
IncrementalProgress GCRuntime::markUntilBudgetExhausted(
    SliceBudget& sliceBudget, ParallelMarking allowParallelMarking,
    ShouldReportMarkTime reportTime) {
  // Run a marking slice and return whether the stack is now empty.

  AutoMajorGCProfilerEntry s(this);

  // The debug-only test mark queue runs first; if it yielded, the slice is
  // over.
  if (processTestMarkQueue() == QueueYielded) {
    return NotFinished;
  }

  if (allowParallelMarking && canMarkInParallel()) {
    MOZ_ASSERT(parallelMarkingEnabled);
    MOZ_ASSERT(reportTime);
    MOZ_ASSERT(!isBackgroundMarking());

    ParallelMarker pm(this);
    if (!pm.mark(sliceBudget)) {
      return NotFinished;
    }

    assertNoMarkingWork();
    return Finished;
  }

#ifdef DEBUG
  AutoSetThreadIsMarking threadIsMarking;
#endif  // DEBUG

  // Single-threaded fallback.
  return marker().markUntilBudgetExhausted(sliceBudget, reportTime)
             ? Finished
             : NotFinished;
}
+
// Mark with an unlimited budget until the mark stack is completely empty.
void GCRuntime::drainMarkStack() {
  auto unlimited = SliceBudget::unlimited();
  MOZ_RELEASE_ASSERT(marker().markUntilBudgetExhausted(unlimited));
}
+
+#ifdef DEBUG
+
// Debug-only accessor for the test mark queue contents.
const GCVector<HeapPtr<JS::Value>, 0, SystemAllocPolicy>&
GCRuntime::getTestMarkQueue() const {
  return testMarkQueue.get();
}
+
// Debug-only: add a value to the test mark queue. Returns false on OOM.
bool GCRuntime::appendTestMarkQueue(const JS::Value& value) {
  return testMarkQueue.append(value);
}
+
// Debug-only: empty the test mark queue and reset the processing position.
void GCRuntime::clearTestMarkQueue() {
  testMarkQueue.clear();
  queuePos = 0;
}
+
// Debug-only: current processing position within the test mark queue.
size_t GCRuntime::testMarkQueuePos() const { return queuePos; }
+
+#endif
+
+// Process the DEBUG-only test mark queue. Object entries are marked one at a
+// time; string entries are commands ("yield", "drain", "set-color-gray",
+// "set-color-black", "unset-color", "enter-weak-marking-mode",
+// "abort-weak-marking-mode"). Returns:
+// - QueueYielded: a "yield" command was hit; the caller should end the slice.
+// - QueueSuspended: the queue cannot make progress yet (e.g. waiting until
+// gray marking is possible); |queuePos| is rewound so processing resumes
+// from the same entry later.
+// - QueueComplete: the whole queue has been processed. This is always the
+// result in non-DEBUG builds, where the function is a no-op.
+GCRuntime::MarkQueueProgress GCRuntime::processTestMarkQueue() {
+#ifdef DEBUG
+ if (testMarkQueue.empty()) {
+ return QueueComplete;
+ }
+
+ // Gray marking is only possible during the sweep phase.
+ if (queueMarkColor == mozilla::Some(MarkColor::Gray) &&
+ state() != State::Sweep) {
+ return QueueSuspended;
+ }
+
+ // If the queue wants to be gray marking, but we've pushed a black object
+ // since set-color-gray was processed, then we can't switch to gray and must
+ // again wait until gray marking is possible.
+ //
+ // Remove this code if the restriction against marking gray during black is
+ // relaxed.
+ if (queueMarkColor == mozilla::Some(MarkColor::Gray) &&
+ marker().hasBlackEntries()) {
+ return QueueSuspended;
+ }
+
+ // If the queue wants to be marking a particular color, switch to that color.
+ // In any case, restore the mark color to whatever it was when we entered
+ // this function.
+ bool willRevertToGray = marker().markColor() == MarkColor::Gray;
+ AutoSetMarkColor autoRevertColor(
+ marker(), queueMarkColor.valueOr(marker().markColor()));
+
+ // Process the mark queue by taking each object in turn, pushing it onto the
+ // mark stack, and processing just the top element with processMarkStackTop
+ // without recursing into reachable objects.
+ while (queuePos < testMarkQueue.length()) {
+ Value val = testMarkQueue[queuePos++].get();
+ if (val.isObject()) {
+ JSObject* obj = &val.toObject();
+ JS::Zone* zone = obj->zone();
+ if (!zone->isGCMarking() || obj->isMarkedAtLeast(marker().markColor())) {
+ continue;
+ }
+
+ // If we have started sweeping, obey sweep group ordering. But note that
+ // we will first be called during the initial sweep slice, when the sweep
+ // group indexes have not yet been computed. In that case, we can mark
+ // freely.
+ if (state() == State::Sweep && initialState != State::Sweep) {
+ if (zone->gcSweepGroupIndex < getCurrentSweepGroupIndex()) {
+ // Too late. This must have been added after we started collecting,
+ // and we've already processed its sweep group. Skip it.
+ continue;
+ }
+ if (zone->gcSweepGroupIndex > getCurrentSweepGroupIndex()) {
+ // Not ready yet. Wait until we reach the object's sweep group.
+ queuePos--;
+ return QueueSuspended;
+ }
+ }
+
+ if (marker().markColor() == MarkColor::Gray &&
+ zone->isGCMarkingBlackOnly()) {
+ // Have not yet reached the point where we can mark this object, so
+ // continue with the GC.
+ queuePos--;
+ return QueueSuspended;
+ }
+
+ if (marker().markColor() == MarkColor::Black && willRevertToGray) {
+ // If we put any black objects on the stack, we wouldn't be able to
+ // return to gray marking. So delay the marking until we're back to
+ // black marking.
+ queuePos--;
+ return QueueSuspended;
+ }
+
+ // Mark the object and push it onto the stack.
+ size_t oldPosition = marker().stack.position();
+ marker().markAndTraverse<NormalMarkingOptions>(obj);
+
+ // If we overflow the stack here and delay marking, then we won't be
+ // testing what we think we're testing.
+ if (marker().stack.position() == oldPosition) {
+ MOZ_ASSERT(obj->asTenured().arena()->onDelayedMarkingList());
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ oomUnsafe.crash("Overflowed stack while marking test queue");
+ }
+
+ SliceBudget unlimited = SliceBudget::unlimited();
+ marker().processMarkStackTop<NormalMarkingOptions>(unlimited);
+ } else if (val.isString()) {
+ JSLinearString* str = &val.toString()->asLinear();
+ if (js::StringEqualsLiteral(str, "yield") && isIncrementalGc()) {
+ return QueueYielded;
+ } else if (js::StringEqualsLiteral(str, "enter-weak-marking-mode") ||
+ js::StringEqualsLiteral(str, "abort-weak-marking-mode")) {
+ if (marker().isRegularMarking()) {
+ // We can't enter weak marking mode at just any time, so instead
+ // we'll stop processing the queue and continue on with the GC. Once
+ // we enter weak marking mode, we can continue to the rest of the
+ // queue. Note that we will also suspend for aborting, and then abort
+ // the earliest following weak marking mode.
+ queuePos--;
+ return QueueSuspended;
+ }
+ if (js::StringEqualsLiteral(str, "abort-weak-marking-mode")) {
+ marker().abortLinearWeakMarking();
+ }
+ } else if (js::StringEqualsLiteral(str, "drain")) {
+ auto unlimited = SliceBudget::unlimited();
+ MOZ_RELEASE_ASSERT(
+ marker().markUntilBudgetExhausted(unlimited, DontReportMarkTime));
+ } else if (js::StringEqualsLiteral(str, "set-color-gray")) {
+ queueMarkColor = mozilla::Some(MarkColor::Gray);
+ if (state() != State::Sweep || marker().hasBlackEntries()) {
+ // Cannot mark gray yet, so continue with the GC.
+ queuePos--;
+ return QueueSuspended;
+ }
+ marker().setMarkColor(MarkColor::Gray);
+ } else if (js::StringEqualsLiteral(str, "set-color-black")) {
+ queueMarkColor = mozilla::Some(MarkColor::Black);
+ marker().setMarkColor(MarkColor::Black);
+ } else if (js::StringEqualsLiteral(str, "unset-color")) {
+ queueMarkColor.reset();
+ }
+ }
+ }
+#endif
+
+ return QueueComplete;
+}
+
+// An "emergency" collection is one triggered because memory is about to run
+// out (last-ditch allocation failure or an external memory-pressure signal).
+static bool IsEmergencyGC(JS::GCReason reason) {
+ switch (reason) {
+ case JS::GCReason::LAST_DITCH:
+ case JS::GCReason::MEM_PRESSURE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Tear down per-collection state at the end of a major GC: stop the markers,
+// move all collected zones back to NoGC, and update scheduling bookkeeping.
+void GCRuntime::finishCollection(JS::GCReason reason) {
+ assertBackgroundSweepingFinished();
+
+ MOZ_ASSERT(!hasDelayedMarking());
+ for (auto& marker : markers) {
+ marker->stop();
+ }
+
+ maybeStopPretenuring();
+
+ // For emergency collections, wait for background freeing to finish before
+ // returning to the caller.
+ if (IsEmergencyGC(reason)) {
+ waitBackgroundFreeEnd();
+ }
+
+ TimeStamp currentTime = TimeStamp::Now();
+
+ updateSchedulingStateAfterCollection(currentTime);
+
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->changeGCState(Zone::Finished, Zone::NoGC);
+ zone->notifyObservingDebuggers();
+ }
+
+#ifdef JS_GC_ZEAL
+ clearSelectedForMarking();
+#endif
+
+ // Record GC end time and refresh the high-frequency-GC heuristic based on
+ // the interval since the previous GC ended.
+ schedulingState.updateHighFrequencyMode(lastGCEndTime_, currentTime,
+ tunables);
+ lastGCEndTime_ = currentTime;
+
+ checkGCStateNotInUse();
+}
+
+// Debug-only sanity check that no collection machinery is active: markers are
+// drained, zones are out of GC states, and all background tasks are idle.
+// Compiles to nothing in release builds.
+void GCRuntime::checkGCStateNotInUse() {
+#ifdef DEBUG
+ for (auto& marker : markers) {
+ MOZ_ASSERT(!marker->isActive());
+ MOZ_ASSERT(marker->isDrained());
+ }
+ MOZ_ASSERT(!hasDelayedMarking());
+
+ MOZ_ASSERT(!lastMarkSlice);
+
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ if (zone->wasCollected()) {
+ zone->arenas.checkGCStateNotInUse();
+ }
+ MOZ_ASSERT(!zone->wasGCStarted());
+ MOZ_ASSERT(!zone->needsIncrementalBarrier());
+ MOZ_ASSERT(!zone->isOnList());
+ }
+
+ MOZ_ASSERT(zonesToMaybeCompact.ref().isEmpty());
+ MOZ_ASSERT(cellsToAssertNotGray.ref().empty());
+
+ // Task idleness checks require the helper thread lock.
+ AutoLockHelperThreadState lock;
+ MOZ_ASSERT(!requestSliceAfterBackgroundTask);
+ MOZ_ASSERT(unmarkTask.isIdle(lock));
+ MOZ_ASSERT(markTask.isIdle(lock));
+ MOZ_ASSERT(sweepTask.isIdle(lock));
+ MOZ_ASSERT(decommitTask.isIdle(lock));
+#endif
+}
+
+// Re-enable nursery string allocation for zones where enough previously
+// tenured strings died during this major GC (rate above the tunable
+// threshold), and reset the per-zone string counters when doing so.
+void GCRuntime::maybeStopPretenuring() {
+ nursery().maybeStopPretenuring(this);
+
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ if (!zone->nurseryStringsDisabled) {
+ continue;
+ }
+
+ // Count the number of strings before the major GC.
+ size_t numStrings = zone->markedStrings + zone->finalizedStrings;
+ if (numStrings == 0) {
+ // No data to base a decision on. Previously this fell through to a
+ // 0.0/0.0 division producing NaN, which compared false below; skip
+ // explicitly instead (same behavior, clearer intent).
+ continue;
+ }
+ double rate = double(zone->finalizedStrings) / double(numStrings);
+ if (rate > tunables.stopPretenureStringThreshold()) {
+ zone->markedStrings = 0;
+ zone->finalizedStrings = 0;
+ zone->nurseryStringsDisabled = false;
+ nursery().updateAllocFlagsForZone(zone);
+ }
+ }
+}
+
+// Refresh per-zone scheduling state once a collection has completed: feed the
+// measured collection rate into each zone (when balanced heap limits are in
+// use) and recompute slice/start thresholds.
+void GCRuntime::updateSchedulingStateAfterCollection(TimeStamp currentTime) {
+ // These totals are invariant across zones, so gather them once up front.
+ TimeDuration gcTime = stats().totalGCTime();
+ size_t collectedBytes = stats().initialCollectedBytes();
+ bool updateRates =
+ tunables.balancedHeapLimitsEnabled() && collectedBytes != 0;
+
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ if (updateRates) {
+ zone->updateCollectionRate(gcTime, collectedBytes);
+ }
+ zone->clearGCSliceThresholds();
+ zone->updateGCStartThresholds(*this);
+ }
+}
+
+// Recompute the GC start thresholds for every zone, including the atoms zone.
+void GCRuntime::updateAllGCStartThresholds() {
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ zone->updateGCStartThresholds(*this);
+ }
+}
+
+// Derive per-zone allocation rates from the mutator time elapsed since the
+// last update (total wall time minus collector time), then refresh GC start
+// thresholds accordingly.
+void GCRuntime::updateAllocationRates() {
+ // Calculate mutator time since the last update. This ignores the fact that
+ // the zone could have been created since the last update.
+
+ TimeStamp currentTime = TimeStamp::Now();
+ TimeDuration totalTime = currentTime - lastAllocRateUpdateTime;
+ if (collectorTimeSinceAllocRateUpdate >= totalTime) {
+ // It shouldn't happen but occasionally we see collector time being larger
+ // than total time. Skip the update in that case.
+ return;
+ }
+
+ TimeDuration mutatorTime = totalTime - collectorTimeSinceAllocRateUpdate;
+
+ for (AllZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->updateAllocationRate(mutatorTime);
+ zone->updateGCStartThresholds(*this);
+ }
+
+ // Reset the accounting window for the next update.
+ lastAllocRateUpdateTime = currentTime;
+ collectorTimeSinceAllocRateUpdate = TimeDuration();
+}
+
+// Profiler frame label for a collection session. Only minor and major
+// collections push profiler frames; any other state here is a bug.
+static const char* GCHeapStateToLabel(JS::HeapState heapState) {
+ if (heapState == JS::HeapState::MinorCollecting) {
+ return "js::Nursery::collect";
+ }
+ if (heapState == JS::HeapState::MajorCollecting) {
+ return "js::GCRuntime::collect";
+ }
+ MOZ_CRASH("Unexpected heap state when pushing GC profiling stack frame");
+ return nullptr;
+}
+
+// Profiler category for a collection session: minor collections get the
+// MinorGC category; anything else reaching here is a major collection.
+static JS::ProfilingCategoryPair GCHeapStateToProfilingCategory(
+ JS::HeapState heapState) {
+ if (heapState == JS::HeapState::MinorCollecting) {
+ return JS::ProfilingCategoryPair::GCCC_MinorGC;
+ }
+ return JS::ProfilingCategoryPair::GCCC_MajorGC;
+}
+
+/* Start a new heap session. */
+AutoHeapSession::AutoHeapSession(GCRuntime* gc, JS::HeapState heapState)
+ : gc(gc), prevState(gc->heapState_) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(gc->rt));
+ // The only permitted nesting is a minor collection inside a major one.
+ MOZ_ASSERT(prevState == JS::HeapState::Idle ||
+ (prevState == JS::HeapState::MajorCollecting &&
+ heapState == JS::HeapState::MinorCollecting));
+ MOZ_ASSERT(heapState != JS::HeapState::Idle);
+
+ gc->heapState_ = heapState;
+
+ // Push a profiler stack frame for the duration of collection sessions;
+ // the destructor pops it and restores the previous heap state.
+ if (heapState == JS::HeapState::MinorCollecting ||
+ heapState == JS::HeapState::MajorCollecting) {
+ profilingStackFrame.emplace(gc->rt->mainContextFromOwnThread(),
+ GCHeapStateToLabel(heapState),
+ GCHeapStateToProfilingCategory(heapState));
+ }
+}
+
+AutoHeapSession::~AutoHeapSession() {
+ MOZ_ASSERT(JS::RuntimeHeapIsBusy());
+ // Restore the state saved by the constructor; this supports the nested
+ // minor-inside-major case.
+ gc->heapState_ = prevState;
+}
+
+// Profiler frame label for a major GC slice, keyed on the incremental state.
+// Only Mark, Sweep and Compact push profiler frames; anything else is a bug.
+static const char* MajorGCStateToLabel(State state) {
+ if (state == State::Mark) {
+ return "js::GCRuntime::markUntilBudgetExhausted";
+ }
+ if (state == State::Sweep) {
+ return "js::GCRuntime::performSweepActions";
+ }
+ if (state == State::Compact) {
+ return "js::GCRuntime::compactPhase";
+ }
+ MOZ_CRASH("Unexpected heap state when pushing GC profiling stack frame");
+ return nullptr;
+}
+
+// Profiler category for a major GC slice, keyed on the incremental state.
+static JS::ProfilingCategoryPair MajorGCStateToProfilingCategory(State state) {
+ if (state == State::Mark) {
+ return JS::ProfilingCategoryPair::GCCC_MajorGC_Mark;
+ }
+ if (state == State::Sweep) {
+ return JS::ProfilingCategoryPair::GCCC_MajorGC_Sweep;
+ }
+ if (state == State::Compact) {
+ return JS::ProfilingCategoryPair::GCCC_MajorGC_Compact;
+ }
+ MOZ_CRASH("Unexpected heap state when pushing GC profiling stack frame");
+}
+
+// RAII profiler entry labelled after the current major GC state. Only valid
+// while a major collection session is in progress.
+AutoMajorGCProfilerEntry::AutoMajorGCProfilerEntry(GCRuntime* gc)
+ : AutoGeckoProfilerEntry(gc->rt->mainContextFromAnyThread(),
+ MajorGCStateToLabel(gc->state()),
+ MajorGCStateToProfilingCategory(gc->state())) {
+ MOZ_ASSERT(gc->heapState() == JS::HeapState::MajorCollecting);
+}
+
+// Abort an in-progress incremental GC as cheaply as possible so a fresh
+// collection can begin. How much work can be dropped depends on how far the
+// collection has progressed: early phases are unwound entirely, while later
+// phases (Sweep onward) are run to a safe stopping point instead.
+GCRuntime::IncrementalResult GCRuntime::resetIncrementalGC(
+ GCAbortReason reason) {
+ MOZ_ASSERT(reason != GCAbortReason::None);
+
+ // Drop as much work as possible from an ongoing incremental GC so
+ // we can start a new GC after it has finished.
+ if (incrementalState == State::NotActive) {
+ return IncrementalResult::Ok;
+ }
+
+ AutoGCSession session(this, JS::HeapState::MajorCollecting);
+
+ switch (incrementalState) {
+ case State::NotActive:
+ case State::MarkRoots:
+ case State::Finish:
+ // We never yield to the mutator in these states, so they cannot be
+ // observed here.
+ MOZ_CRASH("Unexpected GC state in resetIncrementalGC");
+ break;
+
+ case State::Prepare:
+ // Unwind the preparation work and return zones to their idle state.
+ unmarkTask.cancelAndWait();
+
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->changeGCState(Zone::Prepare, Zone::NoGC);
+ zone->clearGCSliceThresholds();
+ zone->arenas.clearFreeLists();
+ zone->arenas.mergeArenasFromCollectingLists();
+ }
+
+ incrementalState = State::NotActive;
+ checkGCStateNotInUse();
+ break;
+
+ case State::Mark: {
+ // Cancel any ongoing marking.
+ for (auto& marker : markers) {
+ marker->reset();
+ }
+ resetDelayedMarking();
+
+ for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
+ resetGrayList(c);
+ }
+
+ for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+ zone->changeGCState(zone->initialMarkingState(), Zone::NoGC);
+ zone->clearGCSliceThresholds();
+ zone->arenas.unmarkPreMarkedFreeCells();
+ zone->arenas.mergeArenasFromCollectingLists();
+ }
+
+ {
+ AutoLockHelperThreadState lock;
+ lifoBlocksToFree.ref().freeAll();
+ }
+
+ // Skip straight to Finish; the next slice will finalize the reset.
+ lastMarkSlice = false;
+ incrementalState = State::Finish;
+
+#ifdef DEBUG
+ for (auto& marker : markers) {
+ MOZ_ASSERT(!marker->shouldCheckCompartments());
+ }
+#endif
+
+ break;
+ }
+
+ case State::Sweep: {
+ // Finish sweeping the current sweep group, then abort.
+ for (CompartmentsIter c(rt); !c.done(); c.next()) {
+ c->gcState.scheduledForDestruction = false;
+ }
+
+ abortSweepAfterCurrentGroup = true;
+ isCompacting = false;
+
+ break;
+ }
+
+ case State::Finalize: {
+ // Let background finalization finish; just skip compaction.
+ isCompacting = false;
+ break;
+ }
+
+ case State::Compact: {
+ // Skip any remaining zones that would have been compacted.
+ MOZ_ASSERT(isCompacting);
+ startedCompacting = true;
+ zonesToMaybeCompact.ref().clear();
+ break;
+ }
+
+ case State::Decommit: {
+ // Decommit runs in the background; nothing to unwind here.
+ break;
+ }
+ }
+
+ stats().reset(reason);
+
+ return IncrementalResult::ResetIncremental;
+}
+
+AutoDisableBarriers::AutoDisableBarriers(GCRuntime* gc) : gc(gc) {
+ /*
+ * Clear needsIncrementalBarrier early so we don't do any write barriers
+ * during sweeping.
+ */
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ // Only zones that are currently marking are expected to have the
+ // incremental barrier enabled; all others should already have it off.
+ if (zone->isGCMarking()) {
+ MOZ_ASSERT(zone->needsIncrementalBarrier());
+ zone->setNeedsIncrementalBarrier(false);
+ }
+ MOZ_ASSERT(!zone->needsIncrementalBarrier());
+ }
+}
+
+AutoDisableBarriers::~AutoDisableBarriers() {
+ // Re-enable the incremental barrier for zones that are still marking;
+ // everything else stays off.
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ MOZ_ASSERT(!zone->needsIncrementalBarrier());
+ if (zone->isGCMarking()) {
+ zone->setNeedsIncrementalBarrier(true);
+ }
+ }
+}
+
+// A minor collection is required unless both the nursery and its store
+// buffer are already empty.
+static bool NeedToCollectNursery(GCRuntime* gc) {
+ return !(gc->nursery().isEmpty() && gc->storeBuffer().isEmpty());
+}
+
+#ifdef DEBUG
+// Format |budget| into a static buffer for logging. Main-thread only: the
+// static buffer makes this non-reentrant, and the returned pointer is only
+// valid until the next call.
+static const char* DescribeBudget(const SliceBudget& budget) {
+ MOZ_ASSERT(TlsContext.get()->isMainThreadContext());
+ constexpr size_t length = 32;
+ static char buffer[length];
+ budget.describe(buffer, length);
+ return buffer;
+}
+#endif
+
+// When we're nearing the incremental limit at which we will finish the
+// collection synchronously, pause the main thread if there is only background
+// GC work happening. This allows the GC to catch up and avoid hitting the
+// limit.
+static bool ShouldPauseMutatorWhileWaiting(const SliceBudget& budget,
+ JS::GCReason reason,
+ bool budgetWasIncreased) {
+ // Only applies to time-budgeted slices whose budget was already extended.
+ if (!budget.isTimeBudget() || !budgetWasIncreased) {
+ return false;
+ }
+
+ return reason == JS::GCReason::ALLOC_TRIGGER ||
+ reason == JS::GCReason::TOO_MUCH_MALLOC;
+}
+
+// Run one slice of the incremental collection state machine. Execution
+// resumes at the case matching |incrementalState|; each case falls through to
+// the next when its work completes within the budget, and |break| ends the
+// slice. The Finish case completes the collection.
+void GCRuntime::incrementalSlice(SliceBudget& budget, JS::GCReason reason,
+ bool budgetWasIncreased) {
+ MOZ_ASSERT_IF(isIncrementalGCInProgress(), isIncremental);
+
+ AutoSetThreadIsPerformingGC performingGC(rt->gcContext());
+
+ AutoGCSession session(this, JS::HeapState::MajorCollecting);
+
+ bool destroyingRuntime = (reason == JS::GCReason::DESTROY_RUNTIME);
+
+ // Remember the state we started in; several cases below use it to tell a
+ // resumed slice apart from a first slice of that phase.
+ initialState = incrementalState;
+ isIncremental = !budget.isUnlimited();
+ useBackgroundThreads = ShouldUseBackgroundThreads(isIncremental, reason);
+
+#ifdef JS_GC_ZEAL
+ // Do the incremental collection type specified by zeal mode if the collection
+ // was triggered by runDebugGC() and incremental GC has not been cancelled by
+ // resetIncrementalGC().
+ useZeal = isIncremental && reason == JS::GCReason::DEBUG_GC;
+#endif
+
+#ifdef DEBUG
+ stats().log(
+ "Incremental: %d, lastMarkSlice: %d, useZeal: %d, budget: %s, "
+ "budgetWasIncreased: %d",
+ bool(isIncremental), bool(lastMarkSlice), bool(useZeal),
+ DescribeBudget(budget), budgetWasIncreased);
+#endif
+
+ if (useZeal && hasIncrementalTwoSliceZealMode()) {
+ // Yields between slices occurs at predetermined points in these modes; the
+ // budget is not used. |isIncremental| is still true.
+ stats().log("Using unlimited budget for two-slice zeal mode");
+ budget = SliceBudget::unlimited();
+ }
+
+ bool shouldPauseMutator =
+ ShouldPauseMutatorWhileWaiting(budget, reason, budgetWasIncreased);
+
+ switch (incrementalState) {
+ case State::NotActive:
+ startCollection(reason);
+
+ incrementalState = State::Prepare;
+ if (!beginPreparePhase(reason, session)) {
+ incrementalState = State::NotActive;
+ break;
+ }
+
+ if (useZeal && hasZealMode(ZealMode::YieldBeforeRootMarking)) {
+ break;
+ }
+
+ [[fallthrough]];
+
+ case State::Prepare:
+ // Wait for background unmarking started by beginPreparePhase.
+ if (waitForBackgroundTask(unmarkTask, budget, shouldPauseMutator,
+ DontTriggerSliceWhenFinished) == NotFinished) {
+ break;
+ }
+
+ incrementalState = State::MarkRoots;
+ [[fallthrough]];
+
+ case State::MarkRoots:
+ if (NeedToCollectNursery(this)) {
+ collectNurseryFromMajorGC(reason);
+ }
+
+ endPreparePhase(reason);
+ beginMarkPhase(session);
+ incrementalState = State::Mark;
+
+ if (useZeal && hasZealMode(ZealMode::YieldBeforeMarking) &&
+ isIncremental) {
+ break;
+ }
+
+ [[fallthrough]];
+
+ case State::Mark:
+ if (mightSweepInThisSlice(budget.isUnlimited())) {
+ // Trace wrapper rooters before marking if we might start sweeping in
+ // this slice.
+ rt->mainContextFromOwnThread()->traceWrapperGCRooters(
+ marker().tracer());
+ }
+
+ {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
+ if (markUntilBudgetExhausted(budget, AllowParallelMarking) ==
+ NotFinished) {
+ break;
+ }
+ }
+
+ assertNoMarkingWork();
+
+ /*
+ * There are a number of reasons why we break out of collection here,
+ * either ending the slice or to run a new interation of the loop in
+ * GCRuntime::collect()
+ */
+
+ /*
+ * In incremental GCs where we have already performed more than one
+ * slice we yield after marking with the aim of starting the sweep in
+ * the next slice, since the first slice of sweeping can be expensive.
+ *
+ * This is modified by the various zeal modes. We don't yield in
+ * YieldBeforeMarking mode and we always yield in YieldBeforeSweeping
+ * mode.
+ *
+ * We will need to mark anything new on the stack when we resume, so
+ * we stay in Mark state.
+ */
+ if (isIncremental && !lastMarkSlice) {
+ if ((initialState == State::Mark &&
+ !(useZeal && hasZealMode(ZealMode::YieldBeforeMarking))) ||
+ (useZeal && hasZealMode(ZealMode::YieldBeforeSweeping))) {
+ lastMarkSlice = true;
+ stats().log("Yielding before starting sweeping");
+ break;
+ }
+ }
+
+ incrementalState = State::Sweep;
+ lastMarkSlice = false;
+
+ beginSweepPhase(reason, session);
+
+ [[fallthrough]];
+
+ case State::Sweep:
+ if (storeBuffer().mayHavePointersToDeadCells()) {
+ collectNurseryFromMajorGC(reason);
+ }
+
+ if (initialState == State::Sweep) {
+ rt->mainContextFromOwnThread()->traceWrapperGCRooters(
+ marker().tracer());
+ }
+
+ if (performSweepActions(budget) == NotFinished) {
+ break;
+ }
+
+ endSweepPhase(destroyingRuntime);
+
+ incrementalState = State::Finalize;
+
+ [[fallthrough]];
+
+ case State::Finalize:
+ // Wait for background sweeping/finalization to complete.
+ if (waitForBackgroundTask(sweepTask, budget, shouldPauseMutator,
+ TriggerSliceWhenFinished) == NotFinished) {
+ break;
+ }
+
+ assertBackgroundSweepingFinished();
+
+ {
+ // Sweep the zones list now that background finalization is finished to
+ // remove and free dead zones, compartments and realms.
+ gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP);
+ gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::DESTROY);
+ sweepZones(rt->gcContext(), destroyingRuntime);
+ }
+
+ MOZ_ASSERT(!startedCompacting);
+ incrementalState = State::Compact;
+
+ // Always yield before compacting since it is not incremental.
+ if (isCompacting && !budget.isUnlimited()) {
+ break;
+ }
+
+ [[fallthrough]];
+
+ case State::Compact:
+ if (isCompacting) {
+ if (NeedToCollectNursery(this)) {
+ collectNurseryFromMajorGC(reason);
+ }
+
+ storeBuffer().checkEmpty();
+ if (!startedCompacting) {
+ beginCompactPhase();
+ }
+
+ if (compactPhase(reason, budget, session) == NotFinished) {
+ break;
+ }
+
+ endCompactPhase();
+ }
+
+ startDecommit();
+ incrementalState = State::Decommit;
+
+ [[fallthrough]];
+
+ case State::Decommit:
+ // Wait for background decommit to complete.
+ if (waitForBackgroundTask(decommitTask, budget, shouldPauseMutator,
+ TriggerSliceWhenFinished) == NotFinished) {
+ break;
+ }
+
+ incrementalState = State::Finish;
+
+ [[fallthrough]];
+
+ case State::Finish:
+ finishCollection(reason);
+ incrementalState = State::NotActive;
+ break;
+ }
+
+#ifdef DEBUG
+ // Every slice must end with all markers back to black and with it being
+ // safe to yield to the mutator.
+ MOZ_ASSERT(safeToYield);
+ for (auto& marker : markers) {
+ MOZ_ASSERT(marker->markColor() == MarkColor::Black);
+ }
+ MOZ_ASSERT(!rt->gcContext()->hasJitCodeToPoison());
+#endif
+}
+
+// Evict the nursery as part of an ongoing major collection, attributing the
+// time to the EVICT_NURSERY_FOR_MAJOR_GC stats phase.
+void GCRuntime::collectNurseryFromMajorGC(JS::GCReason reason) {
+ collectNursery(gcOptions(), reason,
+ gcstats::PhaseKind::EVICT_NURSERY_FOR_MAJOR_GC);
+}
+
+// Report whether the next incremental slice would have main-thread work to
+// do, as opposed to merely waiting on a background task (or on nothing).
+bool GCRuntime::hasForegroundWork() const {
+ switch (incrementalState) {
+ case State::NotActive:
+ // Incremental GC is not running and no work is pending.
+ return false;
+ case State::Prepare:
+ // We yield in the Prepare state after starting unmarking.
+ return !unmarkTask.wasStarted();
+ case State::Finalize:
+ // We yield in the Finalize state to wait for background sweeping.
+ return !isBackgroundSweeping();
+ case State::Decommit:
+ // We yield in the Decommit state to wait for background decommit.
+ return !decommitTask.wasStarted();
+ default:
+ // In all other states there is still work to do.
+ return true;
+ }
+}
+
+// Wait for (or yield on) a background GC task. Non-incremental collections
+// (and paused-mutator slices) block until the task finishes; incremental
+// collections return NotFinished if the task is still running, optionally
+// arranging for a new slice to be requested when it completes.
+IncrementalProgress GCRuntime::waitForBackgroundTask(
+ GCParallelTask& task, const SliceBudget& budget, bool shouldPauseMutator,
+ ShouldTriggerSliceWhenFinished triggerSlice) {
+ // Wait here in non-incremental collections, or if we want to pause the
+ // mutator to let the GC catch up.
+ if (budget.isUnlimited() || shouldPauseMutator) {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
+ // A time budget bounds how long we are willing to pause the mutator.
+ Maybe<TimeStamp> deadline;
+ if (budget.isTimeBudget()) {
+ deadline.emplace(budget.deadline());
+ }
+ task.join(deadline);
+ }
+
+ // In incremental collections, yield if the task has not finished and
+ // optionally request a slice to notify us when this happens.
+ if (!budget.isUnlimited()) {
+ AutoLockHelperThreadState lock;
+ if (task.wasStarted(lock)) {
+ if (triggerSlice) {
+ requestSliceAfterBackgroundTask = true;
+ }
+ return NotFinished;
+ }
+
+ task.joinWithLockHeld(lock);
+ }
+
+ MOZ_ASSERT(task.isIdle());
+
+ // The task completed before any requested follow-up slice was needed.
+ if (triggerSlice) {
+ cancelRequestedGCAfterBackgroundTask();
+ }
+
+ return Finished;
+}
+
+// Report the blanket reason (if any) why an incremental collection cannot be
+// performed for this runtime. The only condition checked here is whether
+// incremental GC has been disabled.
+GCAbortReason gc::IsIncrementalGCUnsafe(JSRuntime* rt) {
+ MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
+
+ return rt->gc.isIncrementalGCAllowed()
+ ? GCAbortReason::None
+ : GCAbortReason::IncrementalDisabled;
+}
+
+// Debug-only diagnostic: |zone| should already be scheduled for collection;
+// if not, dump every zone's scheduling state and crash. No-op in release
+// builds.
+inline void GCRuntime::checkZoneIsScheduled(Zone* zone, JS::GCReason reason,
+ const char* trigger) {
+#ifdef DEBUG
+ if (zone->isGCScheduled()) {
+ return;
+ }
+
+ fprintf(stderr,
+ "checkZoneIsScheduled: Zone %p not scheduled as expected in %s GC "
+ "for %s trigger\n",
+ zone, JS::ExplainGCReason(reason), trigger);
+ // Fix: the iteration variable was previously also named |zone|, shadowing
+ // the parameter being reported above; renamed to |z| for clarity.
+ for (ZonesIter z(this, WithAtoms); !z.done(); z.next()) {
+ fprintf(stderr, " Zone %p:%s%s\n", z.get(),
+ z->isAtomsZone() ? " atoms" : "",
+ z->isGCScheduled() ? " scheduled" : "");
+ }
+ fflush(stderr);
+ MOZ_CRASH("Zone not scheduled");
+#endif
+}
+
+// Decide whether this collection can proceed incrementally under |budget|.
+// May widen the budget to unlimited (making the GC non-incremental) and/or
+// reset an in-progress incremental GC when conditions make continuing unsafe
+// or pointless; the stats are told the reason in each case.
+GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
+ bool nonincrementalByAPI, JS::GCReason reason, SliceBudget& budget) {
+ if (nonincrementalByAPI) {
+ stats().nonincremental(GCAbortReason::NonIncrementalRequested);
+ budget = SliceBudget::unlimited();
+
+ // Reset any in progress incremental GC if this was triggered via the
+ // API. This isn't required for correctness, but sometimes during tests
+ // the caller expects this GC to collect certain objects, and we need
+ // to make sure to collect everything possible.
+ if (reason != JS::GCReason::ALLOC_TRIGGER) {
+ return resetIncrementalGC(GCAbortReason::NonIncrementalRequested);
+ }
+
+ return IncrementalResult::Ok;
+ }
+
+ // ABORT_GC cancels any in-progress incremental collection outright.
+ if (reason == JS::GCReason::ABORT_GC) {
+ budget = SliceBudget::unlimited();
+ stats().nonincremental(GCAbortReason::AbortRequested);
+ return resetIncrementalGC(GCAbortReason::AbortRequested);
+ }
+
+ // Check runtime-wide conditions that preclude incremental collection.
+ if (!budget.isUnlimited()) {
+ GCAbortReason unsafeReason = IsIncrementalGCUnsafe(rt);
+ if (unsafeReason == GCAbortReason::None) {
+ if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
+ unsafeReason = GCAbortReason::CompartmentRevived;
+ } else if (!incrementalGCEnabled) {
+ unsafeReason = GCAbortReason::ModeChange;
+ }
+ }
+
+ if (unsafeReason != GCAbortReason::None) {
+ budget = SliceBudget::unlimited();
+ stats().nonincremental(unsafeReason);
+ return resetIncrementalGC(unsafeReason);
+ }
+ }
+
+ // Per-zone checks: a zone over its incremental limit forces a
+ // non-incremental collection, and additionally forces a reset if that zone
+ // has already progressed past the Sweep state.
+ GCAbortReason resetReason = GCAbortReason::None;
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ if (zone->gcHeapSize.bytes() >=
+ zone->gcHeapThreshold.incrementalLimitBytes()) {
+ checkZoneIsScheduled(zone, reason, "GC bytes");
+ budget = SliceBudget::unlimited();
+ stats().nonincremental(GCAbortReason::GCBytesTrigger);
+ if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
+ resetReason = GCAbortReason::GCBytesTrigger;
+ }
+ }
+
+ if (zone->mallocHeapSize.bytes() >=
+ zone->mallocHeapThreshold.incrementalLimitBytes()) {
+ checkZoneIsScheduled(zone, reason, "malloc bytes");
+ budget = SliceBudget::unlimited();
+ stats().nonincremental(GCAbortReason::MallocBytesTrigger);
+ if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
+ resetReason = GCAbortReason::MallocBytesTrigger;
+ }
+ }
+
+ if (zone->jitHeapSize.bytes() >=
+ zone->jitHeapThreshold.incrementalLimitBytes()) {
+ checkZoneIsScheduled(zone, reason, "JIT code bytes");
+ budget = SliceBudget::unlimited();
+ stats().nonincremental(GCAbortReason::JitCodeBytesTrigger);
+ if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
+ resetReason = GCAbortReason::JitCodeBytesTrigger;
+ }
+ }
+
+ // The scheduled zone set must match the set being collected; a mismatch
+ // means the in-progress collection has to be reset.
+ if (isIncrementalGCInProgress() &&
+ zone->isGCScheduled() != zone->wasGCStarted()) {
+ budget = SliceBudget::unlimited();
+ resetReason = GCAbortReason::ZoneChange;
+ }
+ }
+
+ if (resetReason != GCAbortReason::None) {
+ return resetIncrementalGC(resetReason);
+ }
+
+ return IncrementalResult::Ok;
+}
+
+// Possibly extend the time budget of an in-progress incremental GC slice,
+// either because the collection has been running a long time or because a
+// zone is close to its incremental limit. Returns true if |budget| was
+// increased. Disabled under differential testing to keep behavior
+// deterministic.
+bool GCRuntime::maybeIncreaseSliceBudget(SliceBudget& budget) {
+ if (js::SupportDifferentialTesting()) {
+ return false;
+ }
+
+ // Only time budgets of in-progress incremental collections are extended.
+ if (!budget.isTimeBudget() || !isIncrementalGCInProgress()) {
+ return false;
+ }
+
+ // Fix: corrected the misspelled local (was |wasIncreasedForUgentCollections|).
+ // Both helpers must run unconditionally for their side effect on |budget|,
+ // so don't fold these into a single short-circuiting expression.
+ bool wasIncreasedForLongCollections =
+ maybeIncreaseSliceBudgetForLongCollections(budget);
+ bool wasIncreasedForUrgentCollections =
+ maybeIncreaseSliceBudgetForUrgentCollections(budget);
+
+ return wasIncreasedForLongCollections || wasIncreasedForUrgentCollections;
+}
+
+// Return true if the budget is actually extended after rounding.
+static bool ExtendBudget(SliceBudget& budget, double newDuration) {
+ // Compare using the rounded millisecond value so that sub-millisecond
+ // increases are not treated as extensions.
+ long newDurationMS = lround(newDuration);
+ if (newDurationMS <= budget.timeBudget()) {
+ return false;
+ }
+
+ // Replace the budget wholesale but carry the idle flag over; the new
+ // budget is uninterruptible (null interrupt-request flag) and marked as
+ // extended.
+ bool idleTriggered = budget.idle;
+ budget = SliceBudget(TimeBudget(newDuration), nullptr); // Uninterruptible.
+ budget.idle = idleTriggered;
+ budget.extended = true;
+ return true;
+}
+
+// Extend |budget| for collections that have already been running a long time.
+// Returns true if the budget was increased.
+bool GCRuntime::maybeIncreaseSliceBudgetForLongCollections(
+ SliceBudget& budget) {
+ // For long-running collections, enforce a minimum time budget that increases
+ // linearly with time up to a maximum.
+
+ // All times are in milliseconds.
+ struct BudgetAtTime {
+ double time;
+ double budget;
+ };
+ const BudgetAtTime MinBudgetStart{1500, 0.0};
+ const BudgetAtTime MinBudgetEnd{2500, 100.0};
+
+ double totalTime = (TimeStamp::Now() - lastGCStartTime()).ToMilliseconds();
+
+ // Interpolate the minimum budget between the two anchor points above.
+ double minBudget =
+ LinearInterpolate(totalTime, MinBudgetStart.time, MinBudgetStart.budget,
+ MinBudgetEnd.time, MinBudgetEnd.budget);
+
+ return ExtendBudget(budget, minBudget);
+}
+
+// Extend |budget| when any collected zone is close to a heap limit at which
+// the collection would be finished non-incrementally. Returns true if the
+// budget was increased.
+bool GCRuntime::maybeIncreaseSliceBudgetForUrgentCollections(
+ SliceBudget& budget) {
+ // Enforce a minimum time budget based on how close we are to the incremental
+ // limit.
+
+ // Find the smallest remaining headroom across all collected zones,
+ // considering both the GC heap and the malloc heap.
+ size_t minBytesRemaining = SIZE_MAX;
+ for (AllZonesIter zone(this); !zone.done(); zone.next()) {
+ if (!zone->wasGCStarted()) {
+ continue;
+ }
+ size_t gcBytesRemaining =
+ zone->gcHeapThreshold.incrementalBytesRemaining(zone->gcHeapSize);
+ minBytesRemaining = std::min(minBytesRemaining, gcBytesRemaining);
+ size_t mallocBytesRemaining =
+ zone->mallocHeapThreshold.incrementalBytesRemaining(
+ zone->mallocHeapSize);
+ minBytesRemaining = std::min(minBytesRemaining, mallocBytesRemaining);
+ }
+
+ // The != 0 check avoids dividing by zero below; a zone at exactly zero
+ // remaining is handled elsewhere (budgetIncrementalGC's limit checks).
+ if (minBytesRemaining < tunables.urgentThresholdBytes() &&
+ minBytesRemaining != 0) {
+ // Increase budget based on the reciprocal of the fraction remaining.
+ double fractionRemaining =
+ double(minBytesRemaining) / double(tunables.urgentThresholdBytes());
+ double minBudget = double(defaultSliceBudgetMS()) / fractionRemaining;
+ return ExtendBudget(budget, minBudget);
+ }
+
+ return false;
+}
+
+// Decide which zones participate in the upcoming collection by setting or
+// clearing each zone's scheduled flag according to the trigger reason, the
+// collection mode, and per-zone heap sizes.
+static void ScheduleZones(GCRuntime* gc, JS::GCReason reason) {
+ for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
+ // Re-check heap threshold for alloc-triggered zones that were not
+ // previously collected. Now we have allocation rate data, the heap limit
+ // may have been increased beyond the current size.
+ if (gc->tunables.balancedHeapLimitsEnabled() && zone->isGCScheduled() &&
+ zone->smoothedCollectionRate.ref().isNothing() &&
+ reason == JS::GCReason::ALLOC_TRIGGER &&
+ zone->gcHeapSize.bytes() < zone->gcHeapThreshold.startBytes()) {
+ zone->unscheduleGC(); // May still be re-scheduled below.
+ }
+
+ // Shutdown collections take everything.
+ if (gc->isShutdownGC()) {
+ zone->scheduleGC();
+ }
+
+ // When per-zone GC is disabled, all zones are collected together.
+ if (!gc->isPerZoneGCEnabled()) {
+ zone->scheduleGC();
+ }
+
+ // To avoid resets, continue to collect any zones that were being
+ // collected in a previous slice.
+ if (gc->isIncrementalGCInProgress() && zone->wasGCStarted()) {
+ zone->scheduleGC();
+ }
+
+ // This is a heuristic to reduce the total number of collections.
+ bool inHighFrequencyMode = gc->schedulingState.inHighFrequencyGCMode();
+ if (zone->gcHeapSize.bytes() >=
+ zone->gcHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
+ zone->mallocHeapSize.bytes() >=
+ zone->mallocHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
+ zone->jitHeapSize.bytes() >= zone->jitHeapThreshold.startBytes()) {
+ zone->scheduleGC();
+ }
+ }
+}
+
+// Clear the GC-scheduled flag on every zone, including the atoms zone.
+static void UnscheduleZones(GCRuntime* gc) {
+ for (ZonesIter zone(gc->rt, WithAtoms); !zone.done(); zone.next()) {
+ zone->unscheduleGC();
+ }
+}
+
+// RAII helper that fires the JSGC_BEGIN callback on construction and the
+// JSGC_END callback on destruction. maybeCallGCCallback() itself skips the
+// call while an incremental GC is in progress, so callbacks fire once per
+// collection rather than once per slice.
+class js::gc::AutoCallGCCallbacks {
+ GCRuntime& gc_;
+ JS::GCReason reason_;
+
+ public:
+ explicit AutoCallGCCallbacks(GCRuntime& gc, JS::GCReason reason)
+ : gc_(gc), reason_(reason) {
+ gc_.maybeCallGCCallback(JSGC_BEGIN, reason);
+ }
+ ~AutoCallGCCallbacks() { gc_.maybeCallGCCallback(JSGC_END, reason_); }
+};
+
+// Invoke the embedder's GC callback, if one is set and no incremental GC is
+// in progress. The callback may itself trigger GC, so GC options, the
+// full-GC-requested flag and per-zone scheduling are saved around the call
+// and restored afterwards; |gcCallbackDepth| tracks reentrancy.
+void GCRuntime::maybeCallGCCallback(JSGCStatus status, JS::GCReason reason) {
+ if (!gcCallback.ref().op) {
+ return;
+ }
+
+ if (isIncrementalGCInProgress()) {
+ return;
+ }
+
+ if (gcCallbackDepth == 0) {
+ // Save scheduled zone information in case the callback clears it.
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ zone->gcScheduledSaved_ = zone->gcScheduled_;
+ }
+ }
+
+ // Save and clear GC options and state in case the callback reenters GC.
+ JS::GCOptions options = gcOptions();
+ maybeGcOptions = Nothing();
+ bool savedFullGCRequested = fullGCRequested;
+ fullGCRequested = false;
+
+ gcCallbackDepth++;
+
+ callGCCallback(status, reason);
+
+ MOZ_ASSERT(gcCallbackDepth != 0);
+ gcCallbackDepth--;
+
+ // Restore the original GC options.
+ maybeGcOptions = Some(options);
+
+ // At the end of a GC, clear out the fullGCRequested state. At the start,
+ // restore the previous setting.
+ fullGCRequested = (status == JSGC_END) ? false : savedFullGCRequested;
+
+ if (gcCallbackDepth == 0) {
+ // Ensure any zone that was originally scheduled stays scheduled.
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ zone->gcScheduled_ = zone->gcScheduled_ || zone->gcScheduledSaved_;
+ }
+ }
+}
+
+/*
+ * We disable inlining to ensure that the bottom of the stack with possible GC
+ * roots recorded in MarkRuntime excludes any pointers we use during the marking
+ * implementation.
+ */
+// Run a single slice of a major collection (the whole collection when the
+// budget is unlimited or nonincrementalByAPI is set). Returns whether the
+// incremental collection had to be reset.
+MOZ_NEVER_INLINE GCRuntime::IncrementalResult GCRuntime::gcCycle(
+ bool nonincrementalByAPI, const SliceBudget& budgetArg,
+ JS::GCReason reason) {
+ // Assert if this is a GC unsafe region.
+ rt->mainContextFromOwnThread()->verifyIsSafeToGC();
+
+ // It's ok if threads other than the main thread have suppressGC set, as
+ // they are operating on zones which will not be collected from here.
+ MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
+
+ // This reason is used internally. See below.
+ MOZ_ASSERT(reason != JS::GCReason::RESET);
+
+ // Background finalization and decommit are finished by definition before we
+ // can start a new major GC. Background allocation may still be running, but
+ // that's OK because chunk pools are protected by the GC lock.
+ if (!isIncrementalGCInProgress()) {
+ assertBackgroundSweepingFinished();
+ MOZ_ASSERT(decommitTask.isIdle());
+ }
+
+ // Note that GC callbacks are allowed to re-enter GC.
+ AutoCallGCCallbacks callCallbacks(*this, reason);
+
+ // Increase slice budget for long running collections before it is recorded by
+ // AutoGCSlice.
+ SliceBudget budget(budgetArg);
+ bool budgetWasIncreased = maybeIncreaseSliceBudget(budget);
+
+ ScheduleZones(this, reason);
+
+ // Account this slice's duration towards the allocation-rate bookkeeping once
+ // the slice's stats have been recorded (hence the scope exit).
+ auto updateCollectorTime = MakeScopeExit([&] {
+ if (const gcstats::Statistics::SliceData* slice = stats().lastSlice()) {
+ collectorTimeSinceAllocRateUpdate += slice->duration();
+ }
+ });
+
+ gcstats::AutoGCSlice agc(stats(), scanZonesBeforeGC(), gcOptions(), budget,
+ reason, budgetWasIncreased);
+
+ IncrementalResult result =
+ budgetIncrementalGC(nonincrementalByAPI, reason, budget);
+ if (result == IncrementalResult::ResetIncremental) {
+ if (incrementalState == State::NotActive) {
+ // The collection was reset and has finished.
+ return result;
+ }
+
+ // The collection was reset but we must finish up some remaining work.
+ reason = JS::GCReason::RESET;
+ }
+
+ majorGCTriggerReason = JS::GCReason::NO_REASON;
+ MOZ_ASSERT(!stats().hasTrigger());
+
+ incGcNumber();
+ incGcSliceNumber();
+
+ gcprobes::MajorGCStart();
+ incrementalSlice(budget, reason, budgetWasIncreased);
+ gcprobes::MajorGCEnd();
+
+ MOZ_ASSERT_IF(result == IncrementalResult::ResetIncremental,
+ !isIncrementalGCInProgress());
+ return result;
+}
+
+// True if the current slice could reach the sweep phase: we are running
+// non-incrementally, the previous slice finished marking, or a two-slice zeal
+// mode is active. Only meaningful before sweeping has started.
+inline bool GCRuntime::mightSweepInThisSlice(bool nonIncremental) {
+ MOZ_ASSERT(incrementalState < State::Sweep);
+ return nonIncremental || lastMarkSlice || hasIncrementalTwoSliceZealMode();
+}
+
+#ifdef JS_GC_ZEAL
+// In deterministic mode (see setDeterministic) only these explicitly-forced
+// reasons may trigger a collection; timing-dependent triggers are filtered
+// out so test runs are reproducible.
+static bool IsDeterministicGCReason(JS::GCReason reason) {
+ switch (reason) {
+ case JS::GCReason::API:
+ case JS::GCReason::DESTROY_RUNTIME:
+ case JS::GCReason::LAST_DITCH:
+ case JS::GCReason::TOO_MUCH_MALLOC:
+ case JS::GCReason::TOO_MUCH_WASM_MEMORY:
+ case JS::GCReason::TOO_MUCH_JIT_CODE:
+ case JS::GCReason::ALLOC_TRIGGER:
+ case JS::GCReason::DEBUG_GC:
+ case JS::GCReason::CC_FORCED:
+ case JS::GCReason::SHUTDOWN_CC:
+ case JS::GCReason::ABORT_GC:
+ case JS::GCReason::DISABLE_GENERATIONAL_GC:
+ case JS::GCReason::FINISH_GC:
+ case JS::GCReason::PREPARE_FOR_TRACING:
+ return true;
+
+ default:
+ return false;
+ }
+}
+#endif
+
+// Count zones and compartments — both in total and for the subset scheduled
+// for collection — for the statistics recorded at the start of a slice.
+gcstats::ZoneGCStats GCRuntime::scanZonesBeforeGC() {
+ gcstats::ZoneGCStats result;
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ size_t numCompartments = zone->compartments().length();
+ result.zoneCount++;
+ result.compartmentCount += numCompartments;
+ if (zone->isGCScheduled()) {
+ result.collectedZoneCount++;
+ result.collectedCompartmentCount += numCompartments;
+ }
+ }
+
+ return result;
+}
+
+// The GC can only clean up scheduledForDestruction realms that were marked live
+// by a barrier (e.g. by RemapWrappers from a navigation event). It is also
+// common to have realms held live because they are part of a cycle in gecko,
+// e.g. involving the HTMLDocument wrapper. In this case, we need to run the
+// CycleCollector in order to remove these edges before the realm can be freed.
+void GCRuntime::maybeDoCycleCollection() {
+ const static float ExcessiveGrayRealms = 0.8f;
+ const static size_t LimitGrayRealms = 200;
+
+ size_t realmsTotal = 0;
+ size_t realmsGray = 0;
+ for (RealmsIter realm(rt); !realm.done(); realm.next()) {
+ ++realmsTotal;
+ GlobalObject* global = realm->unsafeUnbarrieredMaybeGlobal();
+ if (global && global->isMarkedGray()) {
+ ++realmsGray;
+ }
+ }
+
+ // Guard the division below: with no realms it would compute 0.0f / 0.0f
+ // (NaN). NaN compares false so the callback would not have fired anyway;
+ // returning early just makes that explicit and avoids the NaN.
+ if (realmsTotal == 0) {
+ return;
+ }
+
+ float grayFraction = float(realmsGray) / float(realmsTotal);
+ if (grayFraction > ExcessiveGrayRealms || realmsGray > LimitGrayRealms) {
+ callDoCycleCollectionCallback(rt->mainContextFromOwnThread());
+ }
+}
+
+// Sanity checks run for every external GC request, whether or not we end up
+// collecting: must be on the runtime's thread and not already inside a GC.
+void GCRuntime::checkCanCallAPI() {
+ MOZ_RELEASE_ASSERT(CurrentThreadCanAccessRuntime(rt));
+
+ /* If we attempt to invoke the GC while we are running in the GC, assert. */
+ MOZ_RELEASE_ASSERT(!JS::RuntimeHeapIsBusy());
+}
+
+// Returns false when collection must be skipped right now: GC is suppressed
+// on the main thread, the runtime is being destroyed (only shutdown GCs are
+// allowed then), or zeal's deterministic mode filters out this reason.
+bool GCRuntime::checkIfGCAllowedInCurrentState(JS::GCReason reason) {
+ if (rt->mainContextFromOwnThread()->suppressGC) {
+ return false;
+ }
+
+ // Only allow shutdown GCs when we're destroying the runtime. This keeps
+ // the GC callback from triggering a nested GC and resetting global state.
+ if (rt->isBeingDestroyed() && !isShutdownGC()) {
+ return false;
+ }
+
+#ifdef JS_GC_ZEAL
+ if (deterministicOnly && !IsDeterministicGCReason(reason)) {
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+// Called after an incremental collection finishes: returns true if some
+// compartment is still scheduled for destruction, in which case collect()
+// immediately runs another (COMPARTMENT_REVIVED) collection to reclaim it.
+bool GCRuntime::shouldRepeatForDeadZone(JS::GCReason reason) {
+ MOZ_ASSERT_IF(reason == JS::GCReason::COMPARTMENT_REVIVED, !isIncremental);
+ MOZ_ASSERT(!isIncrementalGCInProgress());
+
+ // Non-incremental collections cannot leave such compartments behind.
+ if (!isIncremental) {
+ return false;
+ }
+
+ for (CompartmentsIter c(rt); !c.done(); c.next()) {
+ if (c->gcState.scheduledForDestruction) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// RAII helper for GCRuntime::collect: checks on entry that exactly the
+// already-collecting zones carry slice thresholds, and on exit recomputes
+// thresholds for collecting zones and asserts they are absent elsewhere.
+struct MOZ_RAII AutoSetZoneSliceThresholds {
+ explicit AutoSetZoneSliceThresholds(GCRuntime* gc) : gc(gc) {
+ // On entry, zones that are already collecting should have a slice threshold
+ // set.
+ for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
+ MOZ_ASSERT(zone->wasGCStarted() ==
+ zone->gcHeapThreshold.hasSliceThreshold());
+ MOZ_ASSERT(zone->wasGCStarted() ==
+ zone->mallocHeapThreshold.hasSliceThreshold());
+ }
+ }
+
+ ~AutoSetZoneSliceThresholds() {
+ // On exit, update the thresholds for all collecting zones.
+ bool waitingOnBGTask = gc->isWaitingOnBackgroundTask();
+ for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
+ if (zone->wasGCStarted()) {
+ zone->setGCSliceThresholds(*gc, waitingOnBGTask);
+ } else {
+ MOZ_ASSERT(!zone->gcHeapThreshold.hasSliceThreshold());
+ MOZ_ASSERT(!zone->mallocHeapThreshold.hasSliceThreshold());
+ }
+ }
+ }
+
+ GCRuntime* gc;
+};
+
+// Top-level driver for major GC: runs gcCycle in a loop, repeating the
+// collection when the previous cycle was reset, when shutdown finalizers
+// removed roots, or when apparently-dead zones were revived (see the comment
+// inside the loop). Also handles zeal heap checks and final unscheduling.
+void GCRuntime::collect(bool nonincrementalByAPI, const SliceBudget& budget,
+ JS::GCReason reason) {
+ // Attribute the wall-clock time spent here to the current realm's timers.
+ TimeStamp startTime = TimeStamp::Now();
+ auto timer = MakeScopeExit([&] {
+ if (Realm* realm = rt->mainContextFromOwnThread()->realm()) {
+ realm->timers.gcTime += TimeStamp::Now() - startTime;
+ }
+ });
+
+ // GC options only persist for the duration of one full collection.
+ auto clearGCOptions = MakeScopeExit([&] {
+ if (!isIncrementalGCInProgress()) {
+ maybeGcOptions = Nothing();
+ }
+ });
+
+ MOZ_ASSERT(reason != JS::GCReason::NO_REASON);
+
+ // Checks run for each request, even if we do not actually GC.
+ checkCanCallAPI();
+
+ // Check if we are allowed to GC at this time before proceeding.
+ if (!checkIfGCAllowedInCurrentState(reason)) {
+ return;
+ }
+
+ stats().log("GC slice starting in state %s", StateName(incrementalState));
+
+ AutoStopVerifyingBarriers av(rt, isShutdownGC());
+ AutoMaybeLeaveAtomsZone leaveAtomsZone(rt->mainContextFromOwnThread());
+ AutoSetZoneSliceThresholds sliceThresholds(this);
+
+ schedulingState.updateHighFrequencyModeForReason(reason);
+
+ if (!isIncrementalGCInProgress() && tunables.balancedHeapLimitsEnabled()) {
+ updateAllocationRates();
+ }
+
+ bool repeat;
+ do {
+ IncrementalResult cycleResult =
+ gcCycle(nonincrementalByAPI, budget, reason);
+
+ if (reason == JS::GCReason::ABORT_GC) {
+ MOZ_ASSERT(!isIncrementalGCInProgress());
+ stats().log("GC aborted by request");
+ break;
+ }
+
+ /*
+ * Sometimes when we finish a GC we need to immediately start a new one.
+ * This happens in the following cases:
+ * - when we reset the current GC
+ * - when finalizers drop roots during shutdown
+ * - when zones that we thought were dead at the start of GC are
+ * not collected (see the large comment in beginMarkPhase)
+ */
+ repeat = false;
+ if (!isIncrementalGCInProgress()) {
+ if (cycleResult == ResetIncremental) {
+ repeat = true;
+ } else if (rootsRemoved && isShutdownGC()) {
+ /* Need to re-schedule all zones for GC. */
+ JS::PrepareForFullGC(rt->mainContextFromOwnThread());
+ repeat = true;
+ reason = JS::GCReason::ROOTS_REMOVED;
+ } else if (shouldRepeatForDeadZone(reason)) {
+ repeat = true;
+ reason = JS::GCReason::COMPARTMENT_REVIVED;
+ }
+ }
+ } while (repeat);
+
+ if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
+ maybeDoCycleCollection();
+ }
+
+#ifdef JS_GC_ZEAL
+ if (hasZealMode(ZealMode::CheckHeapAfterGC)) {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::TRACE_HEAP);
+ CheckHeapAfterGC(rt);
+ }
+ if (hasZealMode(ZealMode::CheckGrayMarking) && !isIncrementalGCInProgress()) {
+ MOZ_RELEASE_ASSERT(CheckGrayMarkingState(rt));
+ }
+#endif
+ stats().log("GC slice ending in state %s", StateName(incrementalState));
+
+ UnscheduleZones(this);
+}
+
+// Compute the slice budget to use for |reason|. A |millis| of 0 means "use
+// the default slice duration"; the embedder's budget callback, when
+// registered, gets the final say.
+SliceBudget GCRuntime::defaultBudget(JS::GCReason reason, int64_t millis) {
+ // millis == 0 means use internal GC scheduling logic to come up with
+ // a duration for the slice budget. This may end up still being zero
+ // based on preferences.
+ if (millis == 0) {
+ millis = defaultSliceBudgetMS();
+ }
+
+ // If the embedding has registered a callback for creating SliceBudgets,
+ // then use it.
+ if (createBudgetCallback) {
+ return createBudgetCallback(reason, millis);
+ }
+
+ // Otherwise, the preference can request an unlimited duration slice.
+ if (millis == 0) {
+ return SliceBudget::unlimited();
+ }
+
+ return SliceBudget(TimeBudget(millis));
+}
+
+// Run a complete non-incremental collection for |reason|. |options| is only
+// applied when starting fresh; an in-progress incremental GC keeps its
+// existing options and is simply finished by the unlimited budget.
+void GCRuntime::gc(JS::GCOptions options, JS::GCReason reason) {
+ if (!isIncrementalGCInProgress()) {
+ setGCOptions(options);
+ }
+
+ collect(true, SliceBudget::unlimited(), reason);
+}
+
+// Begin a new collection with the given slice budget. Falls back to a full
+// non-incremental collection when incremental GC is disabled.
+void GCRuntime::startGC(JS::GCOptions options, JS::GCReason reason,
+ const js::SliceBudget& budget) {
+ MOZ_ASSERT(!isIncrementalGCInProgress());
+ setGCOptions(options);
+
+ if (!JS::IsIncrementalGCEnabled(rt->mainContextFromOwnThread())) {
+ collect(true, SliceBudget::unlimited(), reason);
+ return;
+ }
+
+ collect(false, budget, reason);
+}
+
+// Record the options for the collection about to start. Options must not
+// already be set; they are cleared when a collection completes.
+void GCRuntime::setGCOptions(JS::GCOptions options) {
+ MOZ_ASSERT(maybeGcOptions == Nothing());
+ maybeGcOptions = Some(options);
+}
+
+// Run one more slice of the in-progress incremental collection.
+void GCRuntime::gcSlice(JS::GCReason reason, const js::SliceBudget& budget) {
+ MOZ_ASSERT(isIncrementalGCInProgress());
+ collect(false, budget, reason);
+}
+
+// Finish the in-progress incremental collection with an unlimited budget.
+void GCRuntime::finishGC(JS::GCReason reason) {
+ MOZ_ASSERT(isIncrementalGCInProgress());
+
+ // If we're not collecting because we're out of memory then skip the
+ // compacting phase if we need to finish an ongoing incremental GC
+ // non-incrementally to avoid janking the browser.
+ if (!IsOOMReason(initialReason)) {
+ // Already in the compact phase: abandoning is cheaper than finishing.
+ if (incrementalState == State::Compact) {
+ abortGC();
+ return;
+ }
+
+ isCompacting = false;
+ }
+
+ collect(false, SliceBudget::unlimited(), reason);
+}
+
+// Abandon the in-progress incremental collection by running a slice with
+// reason ABORT_GC (collect() bails out of its repeat loop for this reason).
+void GCRuntime::abortGC() {
+ MOZ_ASSERT(isIncrementalGCInProgress());
+ checkCanCallAPI();
+ MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
+
+ collect(false, SliceBudget::unlimited(), JS::GCReason::ABORT_GC);
+}
+
+// Return true if any zone (atoms zone included) is already scheduled for GC.
+static bool ZonesSelected(GCRuntime* gc) {
+ ZonesIter zone(gc, WithAtoms);
+ while (!zone.done()) {
+ if (zone->isGCScheduled()) {
+ return true;
+ }
+ zone.next();
+ }
+ return false;
+}
+
+// Start a zeal/debug collection. Collects every zone unless specific zones
+// were scheduled beforehand.
+void GCRuntime::startDebugGC(JS::GCOptions options, const SliceBudget& budget) {
+ MOZ_ASSERT(!isIncrementalGCInProgress());
+ setGCOptions(options);
+
+ if (!ZonesSelected(this)) {
+ JS::PrepareForFullGC(rt->mainContextFromOwnThread());
+ }
+
+ collect(false, budget, JS::GCReason::DEBUG_GC);
+}
+
+// Run one slice of an in-progress zeal/debug collection, re-scheduling the
+// collecting zones if nothing is currently selected.
+void GCRuntime::debugGCSlice(const SliceBudget& budget) {
+ MOZ_ASSERT(isIncrementalGCInProgress());
+
+ if (!ZonesSelected(this)) {
+ JS::PrepareForIncrementalGC(rt->mainContextFromOwnThread());
+ }
+
+ collect(false, budget, JS::GCReason::DEBUG_GC);
+}
+
+/* Schedule a full GC unless a zone will already be collected. */
+// Free-function variant used by zeal entry points outside GCRuntime.
+void js::PrepareForDebugGC(JSRuntime* rt) {
+ if (!ZonesSelected(&rt->gc)) {
+ JS::PrepareForFullGC(rt->mainContextFromOwnThread());
+ }
+}
+
+// Respond to a malloc failure: stop background chunk allocation, wait for
+// pending background frees/decommits, then release what we can while holding
+// the GC lock (see the locked overload below).
+void GCRuntime::onOutOfMallocMemory() {
+ // Stop allocating new chunks.
+ allocTask.cancelAndWait();
+
+ // Make sure we release anything queued for release.
+ decommitTask.join();
+ nursery().joinDecommitTask();
+
+ // Wait for background free of nursery huge slots to finish.
+ sweepTask.join();
+
+ AutoLockGC lock(this);
+ onOutOfMallocMemory(lock);
+}
+
+// Lock-holding half of the malloc-OOM response: frees spare chunks and
+// decommits free arenas in the hope of satisfying the failing allocation.
+void GCRuntime::onOutOfMallocMemory(const AutoLockGC& lock) {
+#ifdef DEBUG
+ // Release any relocated arenas we may be holding on to, without releasing
+ // the GC lock.
+ releaseHeldRelocatedArenasWithoutUnlocking(lock);
+#endif
+
+ // Throw away any excess chunks we have lying around.
+ freeEmptyChunks(lock);
+
+ // Immediately decommit as many arenas as possible in the hopes that this
+ // might let the OS scrape together enough pages to satisfy the failing
+ // malloc request.
+ if (DecommitEnabled()) {
+ decommitFreeArenasWithoutUnlocking(lock);
+ }
+}
+
+// Collect the nursery (no-op while GC is suppressed), then check each zone's
+// allocation counters to see whether a major GC should be triggered.
+void GCRuntime::minorGC(JS::GCReason reason, gcstats::PhaseKind phase) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
+
+ // Nursery eviction must not be requested while suppressed.
+ MOZ_ASSERT_IF(reason == JS::GCReason::EVICT_NURSERY,
+ !rt->mainContextFromOwnThread()->suppressGC);
+ if (rt->mainContextFromOwnThread()->suppressGC) {
+ return;
+ }
+
+ incGcNumber();
+
+ collectNursery(JS::GCOptions::Normal, reason, phase);
+
+#ifdef JS_GC_ZEAL
+ if (hasZealMode(ZealMode::CheckHeapAfterGC)) {
+ gcstats::AutoPhase ap(stats(), phase);
+ CheckHeapAfterGC(rt);
+ }
+#endif
+
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ maybeTriggerGCAfterAlloc(zone);
+ maybeTriggerGCAfterMalloc(zone);
+ }
+}
+
+// Perform a minor collection of the nursery under the given stats phase and
+// then kick off background freeing of anything it queued for release.
+void GCRuntime::collectNursery(JS::GCOptions options, JS::GCReason reason,
+ gcstats::PhaseKind phase) {
+ AutoMaybeLeaveAtomsZone leaveAtomsZone(rt->mainContextFromOwnThread());
+
+ // Record how many tenured allocations happened since the last minor GC.
+ uint32_t numAllocs = 0;
+ for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+ numAllocs += zone->getAndResetTenuredAllocsSinceMinorGC();
+ }
+ stats().setAllocsSinceMinorGCTenured(numAllocs);
+
+ gcstats::AutoPhase ap(stats(), phase);
+
+ nursery().clearMinorGCRequest();
+ nursery().collect(options, reason);
+ MOZ_ASSERT(nursery().isEmpty());
+
+ startBackgroundFreeAfterMinorGC();
+}
+
+// Hand the LIFO blocks and buffers queued during the minor GC over to the
+// background free task; does nothing when there is nothing to free.
+void GCRuntime::startBackgroundFreeAfterMinorGC() {
+ MOZ_ASSERT(nursery().isEmpty());
+
+ {
+ // The queues are shared with helper threads, so move/inspect them under
+ // the helper-thread lock.
+ AutoLockHelperThreadState lock;
+
+ lifoBlocksToFree.ref().transferFrom(&lifoBlocksToFreeAfterMinorGC.ref());
+
+ if (lifoBlocksToFree.ref().isEmpty() &&
+ buffersToFreeAfterMinorGC.ref().empty()) {
+ return;
+ }
+ }
+
+ startBackgroundFree();
+}
+
+// Service any pending GC requests: run a requested minor GC first, then
+// start or advance a major GC if one is wanted.
+bool GCRuntime::gcIfRequestedImpl(bool eagerOk) {
+ // This method returns whether a major GC was performed.
+
+ if (nursery().minorGCRequested()) {
+ minorGC(nursery().minorGCTriggerReason());
+ }
+
+ JS::GCReason reason = wantMajorGC(eagerOk);
+ if (reason == JS::GCReason::NO_REASON) {
+ return false;
+ }
+
+ SliceBudget budget = defaultBudget(reason, 0);
+ if (!isIncrementalGCInProgress()) {
+ startGC(JS::GCOptions::Normal, reason, budget);
+ } else {
+ gcSlice(reason, budget);
+ }
+ return true;
+}
+
+// If an incremental GC is in progress, finish it synchronously for |reason|.
+void js::gc::FinishGC(JSContext* cx, JS::GCReason reason) {
+ // Calling this when GC is suppressed won't have any effect.
+ MOZ_ASSERT(!cx->suppressGC);
+
+ // GC callbacks may run arbitrary code, including JS. Check this regardless of
+ // whether we GC for this invocation.
+ MOZ_ASSERT(cx->isNurseryAllocAllowed());
+
+ if (JS::IsIncrementalGCInProgress(cx)) {
+ JS::PrepareForIncrementalGC(cx);
+ JS::FinishIncrementalGC(cx, reason);
+ }
+}
+
+// Thin public wrapper over GCRuntime::waitForBackgroundTasks (below).
+void js::gc::WaitForBackgroundTasks(JSContext* cx) {
+ cx->runtime()->gc.waitForBackgroundTasks();
+}
+
+// Block until background allocation, freeing and nursery decommit go idle.
+// Sweep/decommit/mark tasks must already be idle when this is called.
+void GCRuntime::waitForBackgroundTasks() {
+ MOZ_ASSERT(!isIncrementalGCInProgress());
+ MOZ_ASSERT(sweepTask.isIdle());
+ MOZ_ASSERT(decommitTask.isIdle());
+ MOZ_ASSERT(markTask.isIdle());
+
+ allocTask.join();
+ freeTask.join();
+ nursery().joinDecommitTask();
+}
+
+// Create a new Realm, plus a new Compartment and/or Zone when the requested
+// CompartmentSpecifier calls for them. All fallible work (allocation, vector
+// reservation) happens before any shared runtime list is mutated, so failure
+// leaves the runtime unchanged; the zone/compartment holders release
+// ownership only in the infallible tail.
+Realm* js::NewRealm(JSContext* cx, JSPrincipals* principals,
+ const JS::RealmOptions& options) {
+ JSRuntime* rt = cx->runtime();
+ JS_AbortIfWrongThread(cx);
+
+ UniquePtr<Zone> zoneHolder;
+ UniquePtr<Compartment> compHolder;
+
+ Compartment* comp = nullptr;
+ Zone* zone = nullptr;
+ JS::CompartmentSpecifier compSpec =
+ options.creationOptions().compartmentSpecifier();
+ switch (compSpec) {
+ case JS::CompartmentSpecifier::NewCompartmentInSystemZone:
+ // systemZone might be null here, in which case we'll make a zone and
+ // set this field below.
+ zone = rt->gc.systemZone;
+ break;
+ case JS::CompartmentSpecifier::NewCompartmentInExistingZone:
+ zone = options.creationOptions().zone();
+ MOZ_ASSERT(zone);
+ break;
+ case JS::CompartmentSpecifier::ExistingCompartment:
+ comp = options.creationOptions().compartment();
+ zone = comp->zone();
+ break;
+ case JS::CompartmentSpecifier::NewCompartmentAndZone:
+ break;
+ }
+
+ // No existing zone selected above: create one. Realms with trusted
+ // principals go into a system zone.
+ if (!zone) {
+ Zone::Kind kind = Zone::NormalZone;
+ const JSPrincipals* trusted = rt->trustedPrincipals();
+ if (compSpec == JS::CompartmentSpecifier::NewCompartmentInSystemZone ||
+ (principals && principals == trusted)) {
+ kind = Zone::SystemZone;
+ }
+
+ zoneHolder = MakeUnique<Zone>(cx->runtime(), kind);
+ if (!zoneHolder || !zoneHolder->init()) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ zone = zoneHolder.get();
+ }
+
+ bool invisibleToDebugger = options.creationOptions().invisibleToDebugger();
+ if (comp) {
+ // Debugger visibility is per-compartment, not per-realm, so make sure the
+ // new realm's visibility matches its compartment's.
+ MOZ_ASSERT(comp->invisibleToDebugger() == invisibleToDebugger);
+ } else {
+ compHolder = cx->make_unique<JS::Compartment>(zone, invisibleToDebugger);
+ if (!compHolder) {
+ return nullptr;
+ }
+
+ comp = compHolder.get();
+ }
+
+ UniquePtr<Realm> realm(cx->new_<Realm>(comp, options));
+ if (!realm) {
+ return nullptr;
+ }
+ realm->init(cx, principals);
+
+ // Make sure we don't put system and non-system realms in the same
+ // compartment.
+ if (!compHolder) {
+ MOZ_RELEASE_ASSERT(realm->isSystem() == IsSystemCompartment(comp));
+ }
+
+ AutoLockGC lock(rt);
+
+ // Reserve space in the Vectors before we start mutating them.
+ if (!comp->realms().reserve(comp->realms().length() + 1) ||
+ (compHolder &&
+ !zone->compartments().reserve(zone->compartments().length() + 1)) ||
+ (zoneHolder && !rt->gc.zones().reserve(rt->gc.zones().length() + 1))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ // After this everything must be infallible.
+
+ comp->realms().infallibleAppend(realm.get());
+
+ if (compHolder) {
+ zone->compartments().infallibleAppend(compHolder.release());
+ }
+
+ if (zoneHolder) {
+ rt->gc.zones().infallibleAppend(zoneHolder.release());
+
+ // Lazily set the runtime's system zone.
+ if (compSpec == JS::CompartmentSpecifier::NewCompartmentInSystemZone) {
+ MOZ_RELEASE_ASSERT(!rt->gc.systemZone);
+ MOZ_ASSERT(zone->isSystemZone());
+ rt->gc.systemZone = zone;
+ }
+ }
+
+ return realm.release();
+}
+
+// Entry point for zeal-triggered collections; the collection style depends
+// on the active zeal mode. Compiled to a no-op without JS_GC_ZEAL.
+void GCRuntime::runDebugGC() {
+#ifdef JS_GC_ZEAL
+ if (rt->mainContextFromOwnThread()->suppressGC) {
+ return;
+ }
+
+ if (hasZealMode(ZealMode::GenerationalGC)) {
+ return minorGC(JS::GCReason::DEBUG_GC);
+ }
+
+ PrepareForDebugGC(rt);
+
+ auto budget = SliceBudget::unlimited();
+ if (hasZealMode(ZealMode::IncrementalMultipleSlices)) {
+ /*
+ * Start with a small slice limit and double it every slice. This
+ * ensure that we get multiple slices, and collection runs to
+ * completion.
+ */
+ if (!isIncrementalGCInProgress()) {
+ zealSliceBudget = zealFrequency / 2;
+ } else {
+ zealSliceBudget *= 2;
+ }
+ budget = SliceBudget(WorkBudget(zealSliceBudget));
+
+ js::gc::State initialState = incrementalState;
+ if (!isIncrementalGCInProgress()) {
+ setGCOptions(JS::GCOptions::Shrink);
+ }
+ collect(false, budget, JS::GCReason::DEBUG_GC);
+
+ /* Reset the slice size when we get to the sweep or compact phases. */
+ if ((initialState == State::Mark && incrementalState == State::Sweep) ||
+ (initialState == State::Sweep && incrementalState == State::Compact)) {
+ zealSliceBudget = zealFrequency / 2;
+ }
+ } else if (hasIncrementalTwoSliceZealMode()) {
+ // These modes trigger incremental GC that happens in two slices and the
+ // supplied budget is ignored by incrementalSlice.
+ budget = SliceBudget(WorkBudget(1));
+
+ if (!isIncrementalGCInProgress()) {
+ setGCOptions(JS::GCOptions::Normal);
+ }
+ collect(false, budget, JS::GCReason::DEBUG_GC);
+ } else if (hasZealMode(ZealMode::Compact)) {
+ gc(JS::GCOptions::Shrink, JS::GCReason::DEBUG_GC);
+ } else {
+ gc(JS::GCOptions::Normal, JS::GCReason::DEBUG_GC);
+ }
+
+#endif
+}
+
+// Toggle the (debug) full compartment-integrity checking flag. Must not be
+// flipped while a major collection is running.
+void GCRuntime::setFullCompartmentChecks(bool enabled) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
+ fullCompartmentChecks = enabled;
+}
+
+// Record that roots were removed. A shutdown GC consults this flag to decide
+// whether the collection must be repeated (see collect()).
+void GCRuntime::notifyRootsRemoved() {
+ rootsRemoved = true;
+
+#ifdef JS_GC_ZEAL
+ /* Schedule a GC to happen "soon". */
+ if (hasZealMode(ZealMode::RootsChange)) {
+ nextScheduled = 1;
+ }
+#endif
+}
+
+#ifdef JS_GC_ZEAL
+// Add |object| to the set of objects selected for marking checks; returns
+// false on OOM.
+bool GCRuntime::selectForMarking(JSObject* object) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
+ return selectedForMarking.ref().get().append(object);
+}
+
+// Drop all objects previously selected via selectForMarking.
+void GCRuntime::clearSelectedForMarking() {
+ selectedForMarking.ref().get().clearAndFree();
+}
+
+// Enable/disable deterministic mode; see IsDeterministicGCReason.
+void GCRuntime::setDeterministic(bool enabled) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
+ deterministicOnly = enabled;
+}
+#endif
+
+#ifdef DEBUG
+
+// Debug-only RAII guard: nursery allocation is forbidden on this context for
+// the guard's lifetime.
+AutoAssertNoNurseryAlloc::AutoAssertNoNurseryAlloc() {
+ TlsContext.get()->disallowNurseryAlloc();
+}
+
+AutoAssertNoNurseryAlloc::~AutoAssertNoNurseryAlloc() {
+ TlsContext.get()->allowNurseryAlloc();
+}
+
+#endif // DEBUG
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+// Debug validation after a compacting GC: walk every runtime/zone/realm-level
+// hash table and assert none of them still refers to a moved cell.
+void GCRuntime::checkHashTablesAfterMovingGC() {
+ /*
+ * Check that internal hash tables no longer have any pointers to things
+ * that have been moved.
+ */
+ rt->geckoProfiler().checkStringsMapAfterMovingGC();
+ if (rt->hasJitRuntime() && rt->jitRuntime()->hasInterpreterEntryMap()) {
+ rt->jitRuntime()->getInterpreterEntryMap()->checkScriptsAfterMovingGC();
+ }
+ for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
+ zone->checkUniqueIdTableAfterMovingGC();
+ zone->shapeZone().checkTablesAfterMovingGC();
+ zone->checkAllCrossCompartmentWrappersAfterMovingGC();
+ zone->checkScriptMapsAfterMovingGC();
+
+ // Note: CompactPropMaps never have a table.
+ JS::AutoCheckCannotGC nogc;
+ for (auto map = zone->cellIterUnsafe<NormalPropMap>(); !map.done();
+ map.next()) {
+ if (PropMapTable* table = map->asLinked()->maybeTable(nogc)) {
+ table->checkAfterMovingGC();
+ }
+ }
+ for (auto map = zone->cellIterUnsafe<DictionaryPropMap>(); !map.done();
+ map.next()) {
+ if (PropMapTable* table = map->asLinked()->maybeTable(nogc)) {
+ table->checkAfterMovingGC();
+ }
+ }
+ }
+
+ for (CompartmentsIter c(this); !c.done(); c.next()) {
+ for (RealmsInCompartmentIter r(c); !r.done(); r.next()) {
+ r->dtoaCache.checkCacheAfterMovingGC();
+ if (r->debugEnvs()) {
+ r->debugEnvs()->checkHashTablesAfterMovingGC();
+ }
+ }
+ }
+}
+#endif
+
+#ifdef DEBUG
+// Debug helper: returns whether |target| is one of this runtime's zones.
+bool GCRuntime::hasZone(Zone* target) {
+ for (AllZonesIter zone(this); !zone.done(); zone.next()) {
+ if (zone == target) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
+// Assert the nursery is empty, lazily installing the no-nursery-alloc guard
+// on first use so it stays empty for this object's lifetime.
+void AutoAssertEmptyNursery::checkCondition(JSContext* cx) {
+ if (!noAlloc) {
+ noAlloc.emplace();
+ }
+ this->cx = cx;
+ MOZ_ASSERT(cx->nursery().isEmpty());
+}
+
+// Evict the nursery (with stats phases suspended so the eviction is not
+// attributed to the surrounding phase) so it is empty for this guard's
+// lifetime.
+AutoEmptyNursery::AutoEmptyNursery(JSContext* cx) : AutoAssertEmptyNursery() {
+ MOZ_ASSERT(!cx->suppressGC);
+ cx->runtime()->gc.stats().suspendPhases();
+ cx->runtime()->gc.evictNursery(JS::GCReason::EVICT_NURSERY);
+ cx->runtime()->gc.stats().resumePhases();
+ checkCondition(cx);
+}
+
+#ifdef DEBUG
+
+namespace js {
+
+// We don't want jsfriendapi.h to depend on GenericPrinter,
+// so these functions are declared directly in the cpp.
+
+extern JS_PUBLIC_API void DumpString(JSString* str, js::GenericPrinter& out);
+
+} // namespace js
+
+// Print a human-readable description of this cell to |out|. Objects, strings
+// and shapes get type-specific dumps; any other kind prints as
+// "<kind>(<address>)".
+void js::gc::Cell::dump(js::GenericPrinter& out) const {
+ switch (getTraceKind()) {
+ case JS::TraceKind::Object:
+ reinterpret_cast<const JSObject*>(this)->dump(out);
+ break;
+
+ case JS::TraceKind::String:
+ js::DumpString(reinterpret_cast<JSString*>(const_cast<Cell*>(this)), out);
+ break;
+
+ case JS::TraceKind::Shape:
+ reinterpret_cast<const Shape*>(this)->dump(out);
+ break;
+
+ default:
+ out.printf("%s(%p)\n", JS::GCTraceKindToAscii(getTraceKind()),
+ (void*)this);
+ }
+}
+
+// For use in a debugger: same as dump(out) but writes to stderr.
+void js::gc::Cell::dump() const {
+ js::Fprinter out(stderr);
+ dump(out);
+}
+#endif
+
+JS_PUBLIC_API bool js::gc::detail::CanCheckGrayBits(const TenuredCell* cell) {
+ // We do not check the gray marking state of cells in the following cases:
+ //
+ // 1) When OOM has caused us to clear the gcGrayBitsValid_ flag.
+ //
+ // 2) When we are in an incremental GC and examine a cell that is in a zone
+ // that is not being collected. Gray targets of CCWs that are marked black
+ // by a barrier will eventually be marked black in a later GC slice.
+ //
+ // 3) When mark bits are being cleared concurrently by a helper thread.
+
+ MOZ_ASSERT(cell);
+
+ auto runtime = cell->runtimeFromAnyThread();
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime));
+
+ // Case 1 above.
+ if (!runtime->gc.areGrayBitsValid()) {
+ return false;
+ }
+
+ JS::Zone* zone = cell->zone();
+
+ // Case 2 above.
+ if (runtime->gc.isIncrementalGCInProgress() && !zone->wasGCStarted()) {
+ return false;
+ }
+
+ // Case 3 above: a zone in the Prepare state may have its mark bits cleared
+ // off-thread.
+ return !zone->isGCPreparing();
+}
+
+// Returns whether |cell| is marked gray, conservatively returning false in
+// the situations where the gray bits cannot be trusted (see CanCheckGrayBits).
+JS_PUBLIC_API bool js::gc::detail::CellIsMarkedGrayIfKnown(
+ const TenuredCell* cell) {
+ // Permanent shared cells are always black.
+ MOZ_ASSERT_IF(cell->isPermanentAndMayBeShared(), cell->isMarkedBlack());
+ if (!cell->isMarkedGray()) {
+ return false;
+ }
+
+ return CanCheckGrayBits(cell);
+}
+
+#ifdef DEBUG
+
+// Debug-only: assert that |cell| is not marked gray, deferring the check for
+// cells that may still be blackened by in-progress gray marking.
+JS_PUBLIC_API void js::gc::detail::AssertCellIsNotGray(const Cell* cell) {
+ // Nursery cells carry no mark bits to check.
+ if (!cell->isTenured()) {
+ return;
+ }
+
+ // Check that a cell is not marked gray.
+ //
+ // Since this is a debug-only check, take account of the eventual mark state
+ // of cells that will be marked black by the next GC slice in an incremental
+ // GC. For performance reasons we don't do this in CellIsMarkedGrayIfKnown.
+
+ auto tc = &cell->asTenured();
+ if (!tc->isMarkedGray() || !CanCheckGrayBits(tc)) {
+ return;
+ }
+
+ // TODO: I'd like to AssertHeapIsIdle() here, but this ends up getting
+ // called during GC and while iterating the heap for memory reporting.
+ MOZ_ASSERT(!JS::RuntimeHeapIsCycleCollecting());
+
+ if (tc->zone()->isGCMarkingBlackAndGray()) {
+ // We are doing gray marking in the cell's zone. Even if the cell is
+ // currently marked gray it may eventually be marked black. Delay checking
+ // non-black cells until we finish gray marking.
+
+ if (!tc->isMarkedBlack()) {
+ JSRuntime* rt = tc->zone()->runtimeFromMainThread();
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!rt->gc.cellsToAssertNotGray.ref().append(cell)) {
+ oomUnsafe.crash("Can't append to delayed gray checks list");
+ }
+ }
+ return;
+ }
+
+ MOZ_ASSERT(!tc->isMarkedGray());
+}
+
+// Debug-only: report whether |obj| is marked black.
+extern JS_PUBLIC_API bool js::gc::detail::ObjectIsMarkedBlack(
+ const JSObject* obj) {
+ return obj->isMarkedBlack();
+}
+
+#endif
+
+// Tracer that nulls out each edge it visits (see onEdge below); weak map
+// keys and values are both traced.
+js::gc::ClearEdgesTracer::ClearEdgesTracer(JSRuntime* rt)
+ : GenericTracerImpl(rt, JS::TracerKind::ClearEdges,
+ JS::WeakMapTraceAction::TraceKeysAndValues) {}
+
+// Null out the traced edge after firing its pre-barrier. Nursery edges must
+// not reach this tracer.
+template <typename T>
+void js::gc::ClearEdgesTracer::onEdge(T** thingp, const char* name) {
+ // We don't handle removing pointers to nursery edges from the store buffer
+ // with this tracer. Check that this doesn't happen.
+ T* thing = *thingp;
+ MOZ_ASSERT(!IsInsideNursery(thing));
+
+ // Fire the pre-barrier since we're removing an edge from the graph.
+ InternalBarrierMethods<T*>::preBarrier(thing);
+
+ *thingp = nullptr;
+}
+
+// Track the embedder's "in page load" hint with a nesting counter: InPageLoad
+// increments it, any other hint decrements it (and must be balanced).
+void GCRuntime::setPerformanceHint(PerformanceHint hint) {
+ if (hint == PerformanceHint::InPageLoad) {
+ inPageLoadCount++;
+ } else {
+ MOZ_ASSERT(inPageLoadCount);
+ inPageLoadCount--;
+ }
+}
diff --git a/js/src/gc/GC.h b/js/src/gc/GC.h
new file mode 100644
index 0000000000..58475bc74a
--- /dev/null
+++ b/js/src/gc/GC.h
@@ -0,0 +1,243 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * JS engine garbage collector API.
+ */
+
+#ifndef gc_GC_h
+#define gc_GC_h
+
+#include "gc/GCEnum.h"
+#include "js/GCAPI.h"
+#include "js/HeapAPI.h"
+#include "js/RealmIterators.h"
+#include "js/TraceKind.h"
+
+class JSTracer;
+
+namespace JS {
+class RealmOptions;
+}
+
+namespace js {
+
+class Nursery;
+
+namespace gc {
+
+class Arena;
+class TenuredChunk;
+
+} /* namespace gc */
+
+// Define name, key and writability for the GC parameters.
+#define FOR_EACH_GC_PARAM(_) \
+ _("maxBytes", JSGC_MAX_BYTES, true) \
+ _("minNurseryBytes", JSGC_MIN_NURSERY_BYTES, true) \
+ _("maxNurseryBytes", JSGC_MAX_NURSERY_BYTES, true) \
+ _("gcBytes", JSGC_BYTES, false) \
+ _("nurseryBytes", JSGC_NURSERY_BYTES, false) \
+ _("gcNumber", JSGC_NUMBER, false) \
+ _("majorGCNumber", JSGC_MAJOR_GC_NUMBER, false) \
+ _("minorGCNumber", JSGC_MINOR_GC_NUMBER, false) \
+ _("incrementalGCEnabled", JSGC_INCREMENTAL_GC_ENABLED, true) \
+ _("perZoneGCEnabled", JSGC_PER_ZONE_GC_ENABLED, true) \
+ _("unusedChunks", JSGC_UNUSED_CHUNKS, false) \
+ _("totalChunks", JSGC_TOTAL_CHUNKS, false) \
+ _("sliceTimeBudgetMS", JSGC_SLICE_TIME_BUDGET_MS, true) \
+ _("highFrequencyTimeLimit", JSGC_HIGH_FREQUENCY_TIME_LIMIT, true) \
+ _("smallHeapSizeMax", JSGC_SMALL_HEAP_SIZE_MAX, true) \
+ _("largeHeapSizeMin", JSGC_LARGE_HEAP_SIZE_MIN, true) \
+ _("highFrequencySmallHeapGrowth", JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH, \
+ true) \
+ _("highFrequencyLargeHeapGrowth", JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH, \
+ true) \
+ _("lowFrequencyHeapGrowth", JSGC_LOW_FREQUENCY_HEAP_GROWTH, true) \
+ _("balancedHeapLimitsEnabled", JSGC_BALANCED_HEAP_LIMITS_ENABLED, true) \
+ _("heapGrowthFactor", JSGC_HEAP_GROWTH_FACTOR, true) \
+ _("allocationThreshold", JSGC_ALLOCATION_THRESHOLD, true) \
+ _("smallHeapIncrementalLimit", JSGC_SMALL_HEAP_INCREMENTAL_LIMIT, true) \
+ _("largeHeapIncrementalLimit", JSGC_LARGE_HEAP_INCREMENTAL_LIMIT, true) \
+ _("minEmptyChunkCount", JSGC_MIN_EMPTY_CHUNK_COUNT, true) \
+ _("maxEmptyChunkCount", JSGC_MAX_EMPTY_CHUNK_COUNT, true) \
+ _("compactingEnabled", JSGC_COMPACTING_ENABLED, true) \
+ _("parallelMarkingEnabled", JSGC_PARALLEL_MARKING_ENABLED, true) \
+ _("parallelMarkingThresholdKB", JSGC_PARALLEL_MARKING_THRESHOLD_KB, true) \
+ _("minLastDitchGCPeriod", JSGC_MIN_LAST_DITCH_GC_PERIOD, true) \
+ _("nurseryFreeThresholdForIdleCollection", \
+ JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION, true) \
+ _("nurseryFreeThresholdForIdleCollectionPercent", \
+ JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT, true) \
+ _("nurseryTimeoutForIdleCollectionMS", \
+ JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS, true) \
+ _("pretenureThreshold", JSGC_PRETENURE_THRESHOLD, true) \
+ _("zoneAllocDelayKB", JSGC_ZONE_ALLOC_DELAY_KB, true) \
+ _("mallocThresholdBase", JSGC_MALLOC_THRESHOLD_BASE, true) \
+ _("urgentThreshold", JSGC_URGENT_THRESHOLD_MB, true) \
+ _("chunkBytes", JSGC_CHUNK_BYTES, false) \
+ _("helperThreadRatio", JSGC_HELPER_THREAD_RATIO, true) \
+ _("maxHelperThreads", JSGC_MAX_HELPER_THREADS, true) \
+ _("helperThreadCount", JSGC_HELPER_THREAD_COUNT, false) \
+ _("markingThreadCount", JSGC_MARKING_THREAD_COUNT, true) \
+ _("systemPageSizeKB", JSGC_SYSTEM_PAGE_SIZE_KB, false)
+
+// Get the key and writability given a GC parameter name.
+extern bool GetGCParameterInfo(const char* name, JSGCParamKey* keyOut,
+ bool* writableOut);
+
+extern void TraceRuntime(JSTracer* trc);
+
+// Trace roots but don't evict the nursery first; used from DumpHeap.
+extern void TraceRuntimeWithoutEviction(JSTracer* trc);
+
+extern void ReleaseAllJITCode(JS::GCContext* gcx);
+
+extern void PrepareForDebugGC(JSRuntime* rt);
+
+/* Functions for managing cross compartment gray pointers. */
+
+extern void NotifyGCNukeWrapper(JSContext* cx, JSObject* wrapper);
+
+extern unsigned NotifyGCPreSwap(JSObject* a, JSObject* b);
+
+extern void NotifyGCPostSwap(JSObject* a, JSObject* b, unsigned preResult);
+
+using IterateChunkCallback = void (*)(JSRuntime*, void*, gc::TenuredChunk*,
+ const JS::AutoRequireNoGC&);
+using IterateZoneCallback = void (*)(JSRuntime*, void*, JS::Zone*,
+ const JS::AutoRequireNoGC&);
+using IterateArenaCallback = void (*)(JSRuntime*, void*, gc::Arena*,
+ JS::TraceKind, size_t,
+ const JS::AutoRequireNoGC&);
+using IterateCellCallback = void (*)(JSRuntime*, void*, JS::GCCellPtr, size_t,
+ const JS::AutoRequireNoGC&);
+
+/*
+ * This function calls |zoneCallback| on every zone, |realmCallback| on
+ * every realm, |arenaCallback| on every in-use arena, and |cellCallback|
+ * on every in-use cell in the GC heap.
+ *
+ * Note that no read barrier is triggered on the cells passed to cellCallback,
+ * so these pointers must not escape the callback.
+ */
+extern void IterateHeapUnbarriered(JSContext* cx, void* data,
+ IterateZoneCallback zoneCallback,
+ JS::IterateRealmCallback realmCallback,
+ IterateArenaCallback arenaCallback,
+ IterateCellCallback cellCallback);
+
+/*
+ * This function is like IterateHeapUnbarriered, but does it for a single zone.
+ */
+extern void IterateHeapUnbarrieredForZone(
+ JSContext* cx, JS::Zone* zone, void* data, IterateZoneCallback zoneCallback,
+ JS::IterateRealmCallback realmCallback, IterateArenaCallback arenaCallback,
+ IterateCellCallback cellCallback);
+
+/*
+ * Invoke chunkCallback on every in-use chunk.
+ */
+extern void IterateChunks(JSContext* cx, void* data,
+ IterateChunkCallback chunkCallback);
+
+using IterateScriptCallback = void (*)(JSRuntime*, void*, BaseScript*,
+ const JS::AutoRequireNoGC&);
+
+/*
+ * Invoke scriptCallback on every in-use script for the given realm or for all
+ * realms if it is null. The scripts may or may not have bytecode.
+ */
+extern void IterateScripts(JSContext* cx, JS::Realm* realm, void* data,
+ IterateScriptCallback scriptCallback);
+
+JS::Realm* NewRealm(JSContext* cx, JSPrincipals* principals,
+ const JS::RealmOptions& options);
+
+namespace gc {
+
+void FinishGC(JSContext* cx, JS::GCReason = JS::GCReason::FINISH_GC);
+
+void WaitForBackgroundTasks(JSContext* cx);
+
+enum VerifierType { PreBarrierVerifier };
+
+#ifdef JS_GC_ZEAL
+
+extern const char ZealModeHelpText[];
+
+/* Check that write barriers have been used correctly. See gc/Verifier.cpp. */
+void VerifyBarriers(JSRuntime* rt, VerifierType type);
+
+void MaybeVerifyBarriers(JSContext* cx, bool always = false);
+
+void DumpArenaInfo();
+
+#else
+
+static inline void VerifyBarriers(JSRuntime* rt, VerifierType type) {}
+
+static inline void MaybeVerifyBarriers(JSContext* cx, bool always = false) {}
+
+#endif
+
+/*
+ * Instances of this class prevent GC from happening while they are live. If an
+ * allocation causes a heap threshold to be exceeded, no GC will be performed
+ * and the allocation will succeed. Allocation may still fail for other reasons.
+ *
+ * Use of this class is highly discouraged, since without GC system memory can
+ * become exhausted and this can cause crashes at places where we can't handle
+ * allocation failure.
+ *
+ * Use of this is permissible in situations where it would be impossible (or at
+ * least very difficult) to tolerate GC and where only a fixed number of objects
+ * are allocated, such as:
+ *
+ * - error reporting
+ * - JIT bailout handling
+ * - brain transplants (JSObject::swap)
+ * - debugging utilities not exposed to the browser
+ *
+ * This works by updating the |JSContext::suppressGC| counter which is checked
+ * at the start of GC.
+ */
+class MOZ_RAII JS_HAZ_GC_SUPPRESSED AutoSuppressGC
+ : public JS::AutoRequireNoGC {
+ int32_t& suppressGC_;
+
+ public:
+ explicit AutoSuppressGC(JSContext* cx);
+
+ ~AutoSuppressGC() { suppressGC_--; }
+};
+
+const char* StateName(State state);
+
+} /* namespace gc */
+
+/* Use this to avoid assertions when manipulating the wrapper map. */
+class MOZ_RAII AutoDisableProxyCheck {
+ public:
+#ifdef DEBUG
+ AutoDisableProxyCheck();
+ ~AutoDisableProxyCheck();
+#else
+ AutoDisableProxyCheck() {}
+#endif
+};
+
+struct MOZ_RAII AutoDisableCompactingGC {
+ explicit AutoDisableCompactingGC(JSContext* cx);
+ ~AutoDisableCompactingGC();
+
+ private:
+ JSContext* cx;
+};
+
+} /* namespace js */
+
+#endif /* gc_GC_h */
diff --git a/js/src/gc/GCAPI.cpp b/js/src/gc/GCAPI.cpp
new file mode 100644
index 0000000000..7911baaf93
--- /dev/null
+++ b/js/src/gc/GCAPI.cpp
@@ -0,0 +1,798 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * API functions and methods used by the rest of SpiderMonkey and by embeddings.
+ */
+
+#include "mozilla/TimeStamp.h"
+
+#include "jsapi.h"
+#include "jsfriendapi.h"
+
+#include "gc/GC.h"
+#include "gc/PublicIterators.h"
+#include "jit/JitRealm.h"
+#include "js/HeapAPI.h"
+#include "js/Value.h"
+#include "util/DifferentialTesting.h"
+#include "vm/HelperThreads.h"
+#include "vm/Realm.h"
+#include "vm/Scope.h"
+
+#include "gc/Marking-inl.h"
+#include "gc/StableCellHasher-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+#include "vm/JSContext-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::TimeStamp;
+
+// Register the raw Value slot |vp| as a GC root under |name|. Reports
+// out-of-memory on the context if the root table append fails. Must be
+// balanced by a later RemoveRawValueRoot call.
+extern JS_PUBLIC_API bool js::AddRawValueRoot(JSContext* cx, Value* vp,
+                                              const char* name) {
+  MOZ_ASSERT(vp);
+  MOZ_ASSERT(name);
+  bool ok = cx->runtime()->gc.addRoot(vp, name);
+  if (!ok) {
+    JS_ReportOutOfMemory(cx);
+  }
+  return ok;
+}
+
+extern JS_PUBLIC_API void js::RemoveRawValueRoot(JSContext* cx, Value* vp) {
+ cx->runtime()->gc.removeRoot(vp);
+}
+
+JS_PUBLIC_API JS::HeapState JS::RuntimeHeapState() {
+ return TlsContext.get()->runtime()->gc.heapState();
+}
+
+// Disabling generational GC is reference counted: the first instance evicts
+// and disables the nursery; nested instances only bump the counter.
+JS::AutoDisableGenerationalGC::AutoDisableGenerationalGC(JSContext* cx)
+    : cx(cx) {
+  if (!cx->generationalDisabled) {
+    cx->runtime()->gc.evictNursery(JS::GCReason::DISABLE_GENERATIONAL_GC);
+    cx->nursery().disable();
+  }
+  ++cx->generationalDisabled;
+  MOZ_ASSERT(cx->nursery().isEmpty());
+}
+
+// Re-enable the nursery when the last disabler goes away, but only if the
+// runtime is configured with a non-zero maximum nursery size.
+JS::AutoDisableGenerationalGC::~AutoDisableGenerationalGC() {
+  if (--cx->generationalDisabled == 0 &&
+      cx->runtime()->gc.tunables.gcMaxNurseryBytes() > 0) {
+    cx->nursery().enable();
+  }
+}
+
+JS_PUBLIC_API bool JS::IsGenerationalGCEnabled(JSRuntime* rt) {
+ return !rt->mainContextFromOwnThread()->generationalDisabled;
+}
+
+// Disallow compacting GC for the lifetime of this object (counted, so
+// instances may nest). If a compacting GC is already in progress it is
+// finished synchronously before we return.
+AutoDisableCompactingGC::AutoDisableCompactingGC(JSContext* cx) : cx(cx) {
+  ++cx->compactingDisabledCount;
+  if (cx->runtime()->gc.isIncrementalGCInProgress() &&
+      cx->runtime()->gc.isCompactingGc()) {
+    FinishGC(cx);
+  }
+}
+
+AutoDisableCompactingGC::~AutoDisableCompactingGC() {
+  MOZ_ASSERT(cx->compactingDisabledCount > 0);
+  --cx->compactingDisabledCount;
+}
+
+#ifdef DEBUG
+
+/* Should only be called manually under gdb */
+void PreventGCDuringInteractiveDebug() { TlsContext.get()->suppressGC++; }
+
+#endif
+
+// Throw away all JIT code in the runtime: cancel pending off-thread Ion
+// compilations, force-discard each zone's JIT code (atoms zones are skipped),
+// then discard every realm's JIT stubs.
+void js::ReleaseAllJITCode(JS::GCContext* gcx) {
+  js::CancelOffThreadIonCompile(gcx->runtime());
+
+  for (ZonesIter zone(gcx->runtime(), SkipAtoms); !zone.done(); zone.next()) {
+    zone->forceDiscardJitCode(gcx);
+  }
+
+  for (RealmsIter realm(gcx->runtime()); !realm.done(); realm.next()) {
+    if (jit::JitRealm* jitRealm = realm->jitRealm()) {
+      jitRealm->discardStubs();
+    }
+  }
+}
+
+AutoSuppressGC::AutoSuppressGC(JSContext* cx)
+ : suppressGC_(cx->suppressGC.ref()) {
+ suppressGC_++;
+}
+
+#ifdef DEBUG
+AutoDisableProxyCheck::AutoDisableProxyCheck() {
+ TlsContext.get()->disableStrictProxyChecking();
+}
+
+AutoDisableProxyCheck::~AutoDisableProxyCheck() {
+ TlsContext.get()->enableStrictProxyChecking();
+}
+
+JS_PUBLIC_API void JS::AssertGCThingMustBeTenured(JSObject* obj) {
+ MOZ_ASSERT(obj->isTenured());
+ MOZ_ASSERT(obj->getClass()->hasFinalize() &&
+ !(obj->getClass()->flags & JSCLASS_SKIP_NURSERY_FINALIZE));
+}
+
+JS_PUBLIC_API void JS::AssertGCThingIsNotNurseryAllocable(Cell* cell) {
+ MOZ_ASSERT(cell);
+ MOZ_ASSERT(!cell->is<JSObject>() && !cell->is<JSString>() &&
+ !cell->is<JS::BigInt>());
+}
+
+JS_PUBLIC_API void js::gc::AssertGCThingHasType(js::gc::Cell* cell,
+ JS::TraceKind kind) {
+ if (!cell) {
+ MOZ_ASSERT(kind == JS::TraceKind::Null);
+ return;
+ }
+
+ MOZ_ASSERT(IsCellPointerValid(cell));
+ MOZ_ASSERT(cell->getTraceKind() == kind);
+}
+#endif
+
+#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+
+JS::AutoAssertNoGC::AutoAssertNoGC(JSContext* maybecx) {
+ if (maybecx) {
+ cx_ = maybecx;
+ } else if (TlsContext.initialized()) {
+ cx_ = TlsContext.get();
+ } else {
+ cx_ = nullptr;
+ }
+ if (cx_) {
+ cx_->inUnsafeRegion++;
+ }
+}
+
+JS::AutoAssertNoGC::~AutoAssertNoGC() {
+ if (cx_) {
+ MOZ_ASSERT(cx_->inUnsafeRegion > 0);
+ cx_->inUnsafeRegion--;
+ }
+}
+
+#endif // MOZ_DIAGNOSTIC_ASSERT_ENABLED
+
+#ifdef DEBUG
+
+JS::AutoEnterCycleCollection::AutoEnterCycleCollection(JSRuntime* rt)
+ : runtime_(rt) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
+ runtime_->gc.heapState_ = HeapState::CycleCollecting;
+}
+
+JS::AutoEnterCycleCollection::~AutoEnterCycleCollection() {
+ MOZ_ASSERT(JS::RuntimeHeapIsCycleCollecting());
+ runtime_->gc.heapState_ = HeapState::Idle;
+}
+
+JS::AutoAssertGCCallback::AutoAssertGCCallback() : AutoSuppressGCAnalysis() {
+ MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
+}
+
+#endif // DEBUG
+
+// Map a TraceKind to a human-readable name ("JS Object", "JS String", ...).
+// The cases are generated from the JS_FOR_EACH_TRACEKIND master list; unknown
+// values yield "Invalid" rather than crashing.
+JS_PUBLIC_API const char* JS::GCTraceKindToAscii(JS::TraceKind kind) {
+  switch (kind) {
+#define MAP_NAME(name, _0, _1, _2) \
+  case JS::TraceKind::name:        \
+    return "JS " #name;
+    JS_FOR_EACH_TRACEKIND(MAP_NAME);
+#undef MAP_NAME
+    default:
+      return "Invalid";
+  }
+}
+
+// Map a TraceKind to sizeof() its C++ representation, again generated from
+// the master trace-kind list. Unknown values yield 0.
+JS_PUBLIC_API size_t JS::GCTraceKindSize(JS::TraceKind kind) {
+  switch (kind) {
+#define MAP_SIZE(name, type, _0, _1) \
+  case JS::TraceKind::name:          \
+    return sizeof(type);
+    JS_FOR_EACH_TRACEKIND(MAP_SIZE);
+#undef MAP_SIZE
+    default:
+      return 0;
+  }
+}
+
+JS::GCCellPtr::GCCellPtr(const Value& v)
+ : GCCellPtr(v.toGCThing(), v.traceKind()) {}
+
+JS::TraceKind JS::GCCellPtr::outOfLineKind() const {
+ MOZ_ASSERT((ptr & OutOfLineTraceKindMask) == OutOfLineTraceKindMask);
+ MOZ_ASSERT(asCell()->isTenured());
+ return MapAllocToTraceKind(asCell()->asTenured().getAllocKind());
+}
+
+JS_PUBLIC_API void JS::PrepareZoneForGC(JSContext* cx, Zone* zone) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ // If we got the zone from a shared atom, we may have the wrong atoms zone
+ // here.
+ if (zone->isAtomsZone()) {
+ zone = cx->runtime()->atomsZone();
+ }
+
+ MOZ_ASSERT(cx->runtime()->gc.hasZone(zone));
+ zone->scheduleGC();
+}
+
+JS_PUBLIC_API void JS::PrepareForFullGC(JSContext* cx) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ cx->runtime()->gc.fullGCRequested = true;
+ for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
+ zone->scheduleGC();
+ }
+}
+
+JS_PUBLIC_API void JS::PrepareForIncrementalGC(JSContext* cx) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ if (!JS::IsIncrementalGCInProgress(cx)) {
+ return;
+ }
+
+ for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
+ if (zone->wasGCStarted()) {
+ zone->scheduleGC();
+ }
+ }
+}
+
+JS_PUBLIC_API bool JS::IsGCScheduled(JSContext* cx) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
+ if (zone->isGCScheduled()) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+JS_PUBLIC_API void JS::SkipZoneForGC(JSContext* cx, Zone* zone) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_ASSERT(cx->runtime()->gc.hasZone(zone));
+
+ cx->runtime()->gc.fullGCRequested = false;
+ zone->unscheduleGC();
+}
+
+static inline void CheckGCOptions(JS::GCOptions options) {
+ MOZ_ASSERT(options == JS::GCOptions::Normal ||
+ options == JS::GCOptions::Shrink ||
+ options == JS::GCOptions::Shutdown);
+}
+
+JS_PUBLIC_API void JS::NonIncrementalGC(JSContext* cx, JS::GCOptions options,
+ GCReason reason) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ CheckGCOptions(options);
+
+ cx->runtime()->gc.gc(options, reason);
+
+ MOZ_ASSERT(!IsIncrementalGCInProgress(cx));
+}
+
+JS_PUBLIC_API void JS::StartIncrementalGC(JSContext* cx, JS::GCOptions options,
+ GCReason reason,
+ const js::SliceBudget& budget) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ CheckGCOptions(options);
+
+ cx->runtime()->gc.startGC(options, reason, budget);
+}
+
+JS_PUBLIC_API void JS::IncrementalGCSlice(JSContext* cx, GCReason reason,
+ const js::SliceBudget& budget) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ cx->runtime()->gc.gcSlice(reason, budget);
+}
+
+JS_PUBLIC_API bool JS::IncrementalGCHasForegroundWork(JSContext* cx) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ return cx->runtime()->gc.hasForegroundWork();
+}
+
+JS_PUBLIC_API void JS::FinishIncrementalGC(JSContext* cx, GCReason reason) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ cx->runtime()->gc.finishGC(reason);
+}
+
+JS_PUBLIC_API void JS::AbortIncrementalGC(JSContext* cx) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ if (IsIncrementalGCInProgress(cx)) {
+ cx->runtime()->gc.abortGC();
+ }
+}
+
+// Inflate the stats module's compact slice message to a malloc'd char16_t
+// string; the caller owns the result. Returns nullptr on OOM.
+// NOTE(review): this and formatSummaryMessage below are identical apart from
+// which stats formatter they call — a shared helper could remove the
+// duplication.
+char16_t* JS::GCDescription::formatSliceMessage(JSContext* cx) const {
+  UniqueChars cstr = cx->runtime()->gc.stats().formatCompactSliceMessage();
+
+  size_t nchars = strlen(cstr.get());
+  UniqueTwoByteChars out(js_pod_malloc<char16_t>(nchars + 1));
+  if (!out) {
+    return nullptr;
+  }
+  out.get()[nchars] = 0;
+
+  CopyAndInflateChars(out.get(), cstr.get(), nchars);
+  return out.release();
+}
+
+// As formatSliceMessage, but for the compact whole-GC summary message.
+char16_t* JS::GCDescription::formatSummaryMessage(JSContext* cx) const {
+  UniqueChars cstr = cx->runtime()->gc.stats().formatCompactSummaryMessage();
+
+  size_t nchars = strlen(cstr.get());
+  UniqueTwoByteChars out(js_pod_malloc<char16_t>(nchars + 1));
+  if (!out) {
+    return nullptr;
+  }
+  out.get()[nchars] = 0;
+
+  CopyAndInflateChars(out.get(), cstr.get(), nchars);
+  return out.release();
+}
+
+JS::dbg::GarbageCollectionEvent::Ptr JS::GCDescription::toGCEvent(
+ JSContext* cx) const {
+ return JS::dbg::GarbageCollectionEvent::Create(
+ cx->runtime(), cx->runtime()->gc.stats(),
+ cx->runtime()->gc.majorGCCount());
+}
+
+TimeStamp JS::GCDescription::startTime(JSContext* cx) const {
+ return cx->runtime()->gc.stats().start();
+}
+
+TimeStamp JS::GCDescription::endTime(JSContext* cx) const {
+ return cx->runtime()->gc.stats().end();
+}
+
+TimeStamp JS::GCDescription::lastSliceStart(JSContext* cx) const {
+ return cx->runtime()->gc.stats().slices().back().start;
+}
+
+TimeStamp JS::GCDescription::lastSliceEnd(JSContext* cx) const {
+ return cx->runtime()->gc.stats().slices().back().end;
+}
+
+JS::UniqueChars JS::GCDescription::sliceToJSONProfiler(JSContext* cx) const {
+ size_t slices = cx->runtime()->gc.stats().slices().length();
+ MOZ_ASSERT(slices > 0);
+ return cx->runtime()->gc.stats().renderJsonSlice(slices - 1);
+}
+
+JS::UniqueChars JS::GCDescription::formatJSONProfiler(JSContext* cx) const {
+ return cx->runtime()->gc.stats().renderJsonMessage();
+}
+
+JS_PUBLIC_API JS::UniqueChars JS::MinorGcToJSON(JSContext* cx) {
+ JSRuntime* rt = cx->runtime();
+ return rt->gc.stats().renderNurseryJson();
+}
+
+JS_PUBLIC_API JS::GCSliceCallback JS::SetGCSliceCallback(
+ JSContext* cx, GCSliceCallback callback) {
+ return cx->runtime()->gc.setSliceCallback(callback);
+}
+
+JS_PUBLIC_API JS::DoCycleCollectionCallback JS::SetDoCycleCollectionCallback(
+ JSContext* cx, JS::DoCycleCollectionCallback callback) {
+ return cx->runtime()->gc.setDoCycleCollectionCallback(callback);
+}
+
+JS_PUBLIC_API JS::GCNurseryCollectionCallback
+JS::SetGCNurseryCollectionCallback(JSContext* cx,
+ GCNurseryCollectionCallback callback) {
+ return cx->runtime()->gc.setNurseryCollectionCallback(callback);
+}
+
+JS_PUBLIC_API void JS::SetLowMemoryState(JSContext* cx, bool newState) {
+ return cx->runtime()->gc.setLowMemoryState(newState);
+}
+
+JS_PUBLIC_API void JS::DisableIncrementalGC(JSContext* cx) {
+ cx->runtime()->gc.disallowIncrementalGC();
+}
+
+JS_PUBLIC_API bool JS::IsIncrementalGCEnabled(JSContext* cx) {
+ GCRuntime& gc = cx->runtime()->gc;
+ return gc.isIncrementalGCEnabled() && gc.isIncrementalGCAllowed();
+}
+
+JS_PUBLIC_API bool JS::IsIncrementalGCInProgress(JSContext* cx) {
+ return cx->runtime()->gc.isIncrementalGCInProgress();
+}
+
+JS_PUBLIC_API bool JS::IsIncrementalGCInProgress(JSRuntime* rt) {
+ return rt->gc.isIncrementalGCInProgress() &&
+ !rt->gc.isVerifyPreBarriersEnabled();
+}
+
+JS_PUBLIC_API bool JS::IsIncrementalBarrierNeeded(JSContext* cx) {
+ if (JS::RuntimeHeapIsBusy()) {
+ return false;
+ }
+
+ auto state = cx->runtime()->gc.state();
+ return state != gc::State::NotActive && state <= gc::State::Sweep;
+}
+
+JS_PUBLIC_API void JS::IncrementalPreWriteBarrier(JSObject* obj) {
+ if (!obj) {
+ return;
+ }
+
+ AutoGeckoProfilerEntry profilingStackFrame(
+ TlsContext.get(), "IncrementalPreWriteBarrier(JSObject*)",
+ JS::ProfilingCategoryPair::GCCC_Barrier);
+ PreWriteBarrier(obj);
+}
+
+JS_PUBLIC_API void JS::IncrementalPreWriteBarrier(GCCellPtr thing) {
+ if (!thing) {
+ return;
+ }
+
+ AutoGeckoProfilerEntry profilingStackFrame(
+ TlsContext.get(), "IncrementalPreWriteBarrier(GCCellPtr)",
+ JS::ProfilingCategoryPair::GCCC_Barrier);
+ CellPtrPreWriteBarrier(thing);
+}
+
+JS_PUBLIC_API bool JS::WasIncrementalGC(JSRuntime* rt) {
+ return rt->gc.isIncrementalGc();
+}
+
+// Allocate a fresh unique id and store it on a native object via its
+// setUniqueId slot-based path. Returns false on failure.
+bool js::gc::CreateUniqueIdForNativeObject(NativeObject* nobj, uint64_t* uidp) {
+  JSRuntime* runtime = nobj->runtimeFromMainThread();
+  *uidp = NextCellUniqueId(runtime);
+  JSContext* cx = runtime->mainContextFromOwnThread();
+  return nobj->setUniqueId(cx, *uidp);
+}
+
+// Allocate a fresh unique id for a non-native cell and record it in the
+// zone's uniqueIds table at the caller-provided AddPtr position.
+bool js::gc::CreateUniqueIdForNonNativeObject(Cell* cell,
+                                              UniqueIdMap::AddPtr ptr,
+                                              uint64_t* uidp) {
+  // If the cell is in the nursery, hopefully unlikely, then we need to tell the
+  // nursery about it so that it can sweep the uid if the thing does not get
+  // tenured.
+  JSRuntime* runtime = cell->runtimeFromMainThread();
+  if (IsInsideNursery(cell) &&
+      !runtime->gc.nursery().addedUniqueIdToCell(cell)) {
+    return false;
+  }
+
+  // Set a new uid on the cell.
+  *uidp = NextCellUniqueId(runtime);
+  return cell->zone()->uniqueIds().add(ptr, cell, *uidp);
+}
+
+// Hand out the runtime-wide monotonically assigned next cell unique id.
+uint64_t js::gc::NextCellUniqueId(JSRuntime* rt) {
+  return rt->gc.nextCellUniqueId();
+}
+
+namespace js {
+
+// Table of GC parameter metadata (name string, JSGCParamKey, writability),
+// expanded from the FOR_EACH_GC_PARAM master list in gc/GC.h.
+static const struct GCParamInfo {
+  const char* name;
+  JSGCParamKey key;
+  bool writable;
+} GCParameters[] = {
+#define DEFINE_PARAM_INFO(name, key, writable) {name, key, writable},
+    FOR_EACH_GC_PARAM(DEFINE_PARAM_INFO)
+#undef DEFINE_PARAM_INFO
+};
+
+// Look up a GC parameter by its string name. On success fills in |*keyOut|
+// and |*writableOut| and returns true; returns false for unknown names.
+// Linear scan over a small fixed table, so no index is needed.
+bool GetGCParameterInfo(const char* name, JSGCParamKey* keyOut,
+                        bool* writableOut) {
+  MOZ_ASSERT(keyOut);
+  MOZ_ASSERT(writableOut);
+
+  for (const GCParamInfo& info : GCParameters) {
+    if (strcmp(name, info.name) == 0) {
+      *keyOut = info.key;
+      *writableOut = info.writable;
+      return true;
+    }
+  }
+
+  return false;
+}
+
+namespace gc {
+namespace MemInfo {
+
+static bool GCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->runtime()->gc.heapSize.bytes()));
+ return true;
+}
+
+static bool MallocBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ double bytes = 0;
+ for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
+ bytes += zone->mallocHeapSize.bytes();
+ }
+ args.rval().setNumber(bytes);
+ return true;
+}
+
+static bool GCMaxBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->runtime()->gc.tunables.gcMaxBytes()));
+ return true;
+}
+
+static bool GCHighFreqGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setBoolean(
+ cx->runtime()->gc.schedulingState.inHighFrequencyGCMode());
+ return true;
+}
+
+static bool GCNumberGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->runtime()->gc.gcNumber()));
+ return true;
+}
+
+static bool MajorGCCountGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->runtime()->gc.majorGCCount()));
+ return true;
+}
+
+static bool MinorGCCountGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->runtime()->gc.minorGCCount()));
+ return true;
+}
+
+static bool GCSliceCountGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->runtime()->gc.gcSliceCount()));
+ return true;
+}
+
+static bool GCCompartmentCount(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ size_t count = 0;
+ for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
+ count += zone->compartments().length();
+ }
+
+ args.rval().setNumber(double(count));
+ return true;
+}
+
+static bool GCLastStartReason(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ const char* reason = ExplainGCReason(cx->runtime()->gc.lastStartReason());
+ RootedString str(cx, JS_NewStringCopyZ(cx, reason));
+ if (!str) {
+ return false;
+ }
+
+ args.rval().setString(str);
+ return true;
+}
+
+static bool ZoneGCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->zone()->gcHeapSize.bytes()));
+ return true;
+}
+
+static bool ZoneGCTriggerBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->zone()->gcHeapThreshold.startBytes()));
+ return true;
+}
+
+static bool ZoneGCAllocTriggerGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ bool highFrequency =
+ cx->runtime()->gc.schedulingState.inHighFrequencyGCMode();
+ args.rval().setNumber(
+ double(cx->zone()->gcHeapThreshold.eagerAllocTrigger(highFrequency)));
+ return true;
+}
+
+static bool ZoneMallocBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->zone()->mallocHeapSize.bytes()));
+ return true;
+}
+
+static bool ZoneMallocTriggerBytesGetter(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->zone()->mallocHeapThreshold.startBytes()));
+ return true;
+}
+
+static bool ZoneGCNumberGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setNumber(double(cx->runtime()->gc.gcNumber()));
+ return true;
+}
+
+#ifdef DEBUG
+static bool DummyGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setUndefined();
+ return true;
+}
+#endif
+
+} /* namespace MemInfo */
+
+// Build the introspection object exposing GC memory statistics: a plain
+// object with runtime-wide getter properties (gcBytes, gcNumber, ...) plus a
+// nested "zone" object with per-zone getters. Returns nullptr on failure.
+JSObject* NewMemoryInfoObject(JSContext* cx) {
+  RootedObject obj(cx, JS_NewObject(cx, nullptr));
+  if (!obj) {
+    return nullptr;
+  }
+
+  using namespace MemInfo;
+  // Runtime-wide properties and their native getters.
+  struct NamedGetter {
+    const char* name;
+    JSNative getter;
+  } getters[] = {{"gcBytes", GCBytesGetter},
+                 {"gcMaxBytes", GCMaxBytesGetter},
+                 {"mallocBytes", MallocBytesGetter},
+                 {"gcIsHighFrequencyMode", GCHighFreqGetter},
+                 {"gcNumber", GCNumberGetter},
+                 {"majorGCCount", MajorGCCountGetter},
+                 {"minorGCCount", MinorGCCountGetter},
+                 {"sliceCount", GCSliceCountGetter},
+                 {"compartmentCount", GCCompartmentCount},
+                 {"lastStartReason", GCLastStartReason}};
+
+  for (auto pair : getters) {
+    JSNative getter = pair.getter;
+
+#ifdef DEBUG
+    // Under differential testing, replace every getter with one returning
+    // undefined so test output doesn't depend on GC state.
+    if (js::SupportDifferentialTesting()) {
+      getter = DummyGetter;
+    }
+#endif
+
+    if (!JS_DefineProperty(cx, obj, pair.name, getter, nullptr,
+                           JSPROP_ENUMERATE)) {
+      return nullptr;
+    }
+  }
+
+  RootedObject zoneObj(cx, JS_NewObject(cx, nullptr));
+  if (!zoneObj) {
+    return nullptr;
+  }
+
+  if (!JS_DefineProperty(cx, obj, "zone", zoneObj, JSPROP_ENUMERATE)) {
+    return nullptr;
+  }
+
+  // Per-zone properties, hung off the nested "zone" object.
+  struct NamedZoneGetter {
+    const char* name;
+    JSNative getter;
+  } zoneGetters[] = {{"gcBytes", ZoneGCBytesGetter},
+                     {"gcTriggerBytes", ZoneGCTriggerBytesGetter},
+                     {"gcAllocTrigger", ZoneGCAllocTriggerGetter},
+                     {"mallocBytes", ZoneMallocBytesGetter},
+                     {"mallocTriggerBytes", ZoneMallocTriggerBytesGetter},
+                     {"gcNumber", ZoneGCNumberGetter}};
+
+  for (auto pair : zoneGetters) {
+    JSNative getter = pair.getter;
+
+#ifdef DEBUG
+    if (js::SupportDifferentialTesting()) {
+      getter = DummyGetter;
+    }
+#endif
+
+    if (!JS_DefineProperty(cx, zoneObj, pair.name, getter, nullptr,
+                           JSPROP_ENUMERATE)) {
+      return nullptr;
+    }
+  }
+
+  return obj;
+}
+
+// Return the name of a GC state; the cases are generated from the GCSTATES
+// master list. Crashes on an out-of-range value.
+const char* StateName(State state) {
+  switch (state) {
+#define MAKE_CASE(name) \
+  case State::name:     \
+    return #name;
+    GCSTATES(MAKE_CASE)
+#undef MAKE_CASE
+  }
+  MOZ_CRASH("Invalid gc::State enum value");
+}
+
+// Return the name of a per-zone GC state. Kept as an explicit switch (rather
+// than a macro list) and crashes on Limit or any out-of-range value.
+const char* StateName(JS::Zone::GCState state) {
+  switch (state) {
+    case JS::Zone::NoGC:
+      return "NoGC";
+    case JS::Zone::Prepare:
+      return "Prepare";
+    case JS::Zone::MarkBlackOnly:
+      return "MarkBlackOnly";
+    case JS::Zone::MarkBlackAndGray:
+      return "MarkBlackAndGray";
+    case JS::Zone::Sweep:
+      return "Sweep";
+    case JS::Zone::Finished:
+      return "Finished";
+    case JS::Zone::Compact:
+      return "Compact";
+    case JS::Zone::VerifyPreBarriers:
+      return "VerifyPreBarriers";
+    case JS::Zone::Limit:
+      break;
+  }
+  MOZ_CRASH("Invalid Zone::GCState enum value");
+}
+
+} /* namespace gc */
+} /* namespace js */
+
+// Run the class finalize hook for a nursery object that died during the
+// current minor GC. Only valid while a minor collection is in progress, for
+// an unforwarded (i.e. not-tenured) nursery object.
+JS_PUBLIC_API void js::gc::FinalizeDeadNurseryObject(JSContext* cx,
+                                                     JSObject* obj) {
+  CHECK_THREAD(cx);
+  MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());
+
+  MOZ_ASSERT(obj);
+  MOZ_ASSERT(IsInsideNursery(obj));
+  MOZ_ASSERT(!IsForwarded(obj));
+
+  const JSClass* jsClass = JS::GetClass(obj);
+  jsClass->doFinalize(cx->gcContext(), obj);
+}
+
+JS_PUBLIC_API void js::gc::SetPerformanceHint(JSContext* cx,
+ PerformanceHint hint) {
+ CHECK_THREAD(cx);
+ MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
+
+ cx->runtime()->gc.setPerformanceHint(hint);
+}
diff --git a/js/src/gc/GCContext-inl.h b/js/src/gc/GCContext-inl.h
new file mode 100644
index 0000000000..7b53d50b77
--- /dev/null
+++ b/js/src/gc/GCContext-inl.h
@@ -0,0 +1,40 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCContext_inl_h
+#define gc_GCContext_inl_h
+
+#include "gc/GCContext.h"
+
+#include "gc/ZoneAllocator.h"
+
+// Free |p| (nbytes of memory associated with |cell| under |use|) and update
+// the owning zone's memory accounting. Null |p| is a no-op.
+inline void JS::GCContext::free_(Cell* cell, void* p, size_t nbytes,
+                                 MemoryUse use) {
+  if (p) {
+    removeCellMemory(cell, nbytes, use);
+    js_free(p);
+  }
+}
+
+// As free_, but for refcounted objects: calls p->Release() instead of
+// js_free after updating the memory accounting.
+template <class T>
+inline void JS::GCContext::release(Cell* cell, T* p, size_t nbytes,
+                                   MemoryUse use) {
+  if (p) {
+    removeCellMemory(cell, nbytes, use);
+    p->Release();
+  }
+}
+
+// Deduct |nbytes| of |use| memory from the zone owning |cell|. Nursery cells
+// are skipped: only tenured cells have per-zone accounting here.
+inline void JS::GCContext::removeCellMemory(Cell* cell, size_t nbytes,
+                                            MemoryUse use) {
+  // This may or may not be called as part of GC.
+  if (nbytes && cell->isTenured()) {
+    auto zone = js::ZoneAllocator::from(cell->asTenured().zoneFromAnyThread());
+    zone->removeCellMemory(cell, nbytes, use, isFinalizing());
+  }
+}
+
+#endif // gc_GCContext_inl_h
diff --git a/js/src/gc/GCContext.h b/js/src/gc/GCContext.h
new file mode 100644
index 0000000000..8c74a81e0c
--- /dev/null
+++ b/js/src/gc/GCContext.h
@@ -0,0 +1,257 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCContext_h
+#define gc_GCContext_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/ThreadLocal.h"
+
+#include "jspubtd.h"
+#include "jstypes.h" // JS_PUBLIC_API
+#include "gc/GCEnum.h" // js::MemoryUse
+#include "jit/ExecutableAllocator.h" // jit::JitPoisonRangeVector
+#include "js/Utility.h" // js_free
+
+struct JS_PUBLIC_API JSRuntime;
+
+namespace js {
+
+class AutoTouchingGrayThings;
+
+namespace gc {
+
+class AutoSetThreadGCUse;
+class AutoSetThreadIsSweeping;
+
+enum class GCUse {
+ // This thread is not running in the garbage collector.
+ None,
+
+ // This thread is currently collecting. Used when no finer detail is known.
+ Unspecified,
+
+ // This thread is currently marking GC things. This thread could be the main
+ // thread or a helper thread doing sweep-marking.
+ Marking,
+
+ // This thread is currently sweeping GC things. This thread could be the
+ // main thread or a helper thread while the main thread is running the
+ // mutator.
+ Sweeping,
+
+ // Whether this thread is currently finalizing GC things. This thread could
+ // be the main thread or a helper thread doing finalization while the main
+ // thread is running the mutator.
+ Finalizing
+};
+
+} // namespace gc
+} // namespace js
+
+namespace JS {
+
+/*
+ * GCContext is used by GC operations that can run on or off the main thread.
+ *
+ * Its main function is to provide methods to free memory and update memory
+ * accounting. For convenience, it also has delete_ convenience methods that
+ * also call destructors.
+ *
+ * It is passed to finalizers and other sweep-phase hooks as JSContext is not
+ * available off the main thread.
+ */
+class GCContext {
+ using Cell = js::gc::Cell;
+ using MemoryUse = js::MemoryUse;
+
+ JSRuntime* const runtime_;
+
+ js::jit::JitPoisonRangeVector jitPoisonRanges;
+
+ // Which part of the garbage collector this context is running at the moment.
+ js::gc::GCUse gcUse_ = js::gc::GCUse::None;
+ friend class js::gc::AutoSetThreadGCUse;
+ friend class js::gc::AutoSetThreadIsSweeping;
+
+#ifdef DEBUG
+ // The specific zone currently being swept, if any.
+ Zone* gcSweepZone_ = nullptr;
+
+ // Whether this thread is currently manipulating possibly-gray GC things.
+ size_t isTouchingGrayThings_ = false;
+ friend class js::AutoTouchingGrayThings;
+#endif
+
+ public:
+ explicit GCContext(JSRuntime* maybeRuntime);
+ ~GCContext();
+
+ JSRuntime* runtime() const {
+ MOZ_ASSERT(onMainThread());
+ return runtimeFromAnyThread();
+ }
+ JSRuntime* runtimeFromAnyThread() const {
+ MOZ_ASSERT(runtime_);
+ return runtime_;
+ }
+
+ js::gc::GCUse gcUse() const { return gcUse_; }
+ bool isCollecting() const { return gcUse() != js::gc::GCUse::None; }
+ bool isFinalizing() const { return gcUse_ == js::gc::GCUse::Finalizing; }
+
+#ifdef DEBUG
+ bool onMainThread() const {
+ return js::CurrentThreadCanAccessRuntime(runtime_);
+ }
+
+ Zone* gcSweepZone() const { return gcSweepZone_; }
+ bool isTouchingGrayThings() const { return isTouchingGrayThings_; }
+#endif
+
+ // Deprecated. Where possible, memory should be tracked against the owning GC
+ // thing by calling js::AddCellMemory and the memory freed with free_() below.
+ void freeUntracked(void* p) { js_free(p); }
+
+ // Free memory associated with a GC thing and update the memory accounting.
+ //
+ // The memory should have been associated with the GC thing using
+ // js::InitReservedSlot or js::InitObjectPrivate, or possibly
+ // js::AddCellMemory.
+ void free_(Cell* cell, void* p, size_t nbytes, MemoryUse use);
+
+ bool appendJitPoisonRange(const js::jit::JitPoisonRange& range) {
+ return jitPoisonRanges.append(range);
+ }
+ bool hasJitCodeToPoison() const { return !jitPoisonRanges.empty(); }
+ void poisonJitCode();
+
+ // Deprecated. Where possible, memory should be tracked against the owning GC
+ // thing by calling js::AddCellMemory and the memory freed with delete_()
+ // below.
+ template <class T>
+ void deleteUntracked(T* p) {
+ if (p) {
+ p->~T();
+ js_free(p);
+ }
+ }
+
+ // Delete a C++ object that was associated with a GC thing and update the
+ // memory accounting. The size is determined by the type T.
+ //
+ // The memory should have been associated with the GC thing using
+ // js::InitReservedSlot or js::InitObjectPrivate, or possibly
+ // js::AddCellMemory.
+ template <class T>
+ void delete_(Cell* cell, T* p, MemoryUse use) {
+ delete_(cell, p, sizeof(T), use);
+ }
+
+ // Delete a C++ object that was associated with a GC thing and update the
+ // memory accounting.
+ //
+ // The memory should have been associated with the GC thing using
+ // js::InitReservedSlot or js::InitObjectPrivate, or possibly
+ // js::AddCellMemory.
+ template <class T>
+ void delete_(Cell* cell, T* p, size_t nbytes, MemoryUse use) {
+ if (p) {
+ p->~T();
+ free_(cell, p, nbytes, use);
+ }
+ }
+
+ // Release a RefCounted object that was associated with a GC thing and update
+ // the memory accounting.
+ //
+ // The memory should have been associated with the GC thing using
+ // js::InitReservedSlot or js::InitObjectPrivate, or possibly
+ // js::AddCellMemory.
+ //
+ // This counts the memory once per association with a GC thing. It's not
+ // expected that the same object is associated with more than one GC thing in
+ // each zone. If this is the case then some other form of accounting would be
+ // more appropriate.
+ template <class T>
+ void release(Cell* cell, T* p, MemoryUse use) {
+ release(cell, p, sizeof(T), use);
+ }
+
+ // Release a RefCounted object that was associated with a GC thing and
+ // update the memory accounting.
+ //
+ // The memory should have been associated with the GC thing using
+ // js::InitReservedSlot or js::InitObjectPrivate, or possibly
+ // js::AddCellMemory.
+ template <class T>
+ void release(Cell* cell, T* p, size_t nbytes, MemoryUse use);
+
+ // Update the memory accounting for a GC thing for memory freed by some other
+ // method.
+ void removeCellMemory(Cell* cell, size_t nbytes, MemoryUse use);
+};
+
+} // namespace JS
+
+namespace js {
+
+/* Thread Local Storage for storing the GCContext for a thread. */
+extern MOZ_THREAD_LOCAL(JS::GCContext*) TlsGCContext;
+
+inline JS::GCContext* MaybeGetGCContext() {
+ if (!TlsGCContext.init()) {
+ return nullptr;
+ }
+ return TlsGCContext.get();
+}
+
+class MOZ_RAII AutoTouchingGrayThings {
+ public:
+#ifdef DEBUG
+ AutoTouchingGrayThings() { TlsGCContext.get()->isTouchingGrayThings_++; }
+ ~AutoTouchingGrayThings() {
+ JS::GCContext* gcx = TlsGCContext.get();
+ MOZ_ASSERT(gcx->isTouchingGrayThings_);
+ gcx->isTouchingGrayThings_--;
+ }
+#else
+ AutoTouchingGrayThings() {}
+#endif
+};
+
+#ifdef DEBUG
+
+inline bool CurrentThreadIsGCMarking() {
+ JS::GCContext* gcx = MaybeGetGCContext();
+ return gcx && gcx->gcUse() == gc::GCUse::Marking;
+}
+
+inline bool CurrentThreadIsGCSweeping() {
+ JS::GCContext* gcx = MaybeGetGCContext();
+ return gcx && gcx->gcUse() == gc::GCUse::Sweeping;
+}
+
+inline bool CurrentThreadIsGCFinalizing() {
+ JS::GCContext* gcx = MaybeGetGCContext();
+ return gcx && gcx->gcUse() == gc::GCUse::Finalizing;
+}
+
+inline bool CurrentThreadIsTouchingGrayThings() {
+ JS::GCContext* gcx = MaybeGetGCContext();
+ return gcx && gcx->isTouchingGrayThings();
+}
+
+inline bool CurrentThreadIsPerformingGC() {
+ JS::GCContext* gcx = MaybeGetGCContext();
+ return gcx && gcx->isCollecting();
+}
+
+#endif
+
+} // namespace js
+
+#endif // gc_GCContext_h
diff --git a/js/src/gc/GCEnum.h b/js/src/gc/GCEnum.h
new file mode 100644
index 0000000000..7434e34b1c
--- /dev/null
+++ b/js/src/gc/GCEnum.h
@@ -0,0 +1,160 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal enum definitions.
+ */
+
+#ifndef gc_GCEnum_h
+#define gc_GCEnum_h
+
+#include <stdint.h>
+
+#include "js/MemoryFunctions.h" // JS_FOR_EACH_PUBLIC_MEMORY_USE
+
+namespace js {
+namespace gc {
+
+// The phases of an incremental GC.
+#define GCSTATES(D) \
+ D(NotActive) \
+ D(Prepare) \
+ D(MarkRoots) \
+ D(Mark) \
+ D(Sweep) \
+ D(Finalize) \
+ D(Compact) \
+ D(Decommit) \
+ D(Finish)
+enum class State {
+#define MAKE_STATE(name) name,
+ GCSTATES(MAKE_STATE)
+#undef MAKE_STATE
+};
+
+#define JS_FOR_EACH_ZEAL_MODE(D) \
+ D(RootsChange, 1) \
+ D(Alloc, 2) \
+ D(VerifierPre, 4) \
+ D(YieldBeforeRootMarking, 6) \
+ D(GenerationalGC, 7) \
+ D(YieldBeforeMarking, 8) \
+ D(YieldBeforeSweeping, 9) \
+ D(IncrementalMultipleSlices, 10) \
+ D(IncrementalMarkingValidator, 11) \
+ D(ElementsBarrier, 12) \
+ D(CheckHashTablesOnMinorGC, 13) \
+ D(Compact, 14) \
+ D(CheckHeapAfterGC, 15) \
+ D(YieldBeforeSweepingAtoms, 17) \
+ D(CheckGrayMarking, 18) \
+ D(YieldBeforeSweepingCaches, 19) \
+ D(YieldBeforeSweepingObjects, 21) \
+ D(YieldBeforeSweepingNonObjects, 22) \
+ D(YieldBeforeSweepingPropMapTrees, 23) \
+ D(CheckWeakMapMarking, 24) \
+ D(YieldWhileGrayMarking, 25)
+
+enum class ZealMode {
+#define ZEAL_MODE(name, value) name = value,
+ JS_FOR_EACH_ZEAL_MODE(ZEAL_MODE)
+#undef ZEAL_MODE
+ Count,
+ Limit = Count - 1
+};
+
+} /* namespace gc */
+
+// Reasons we reset an ongoing incremental GC or perform a non-incremental GC.
+#define GC_ABORT_REASONS(D) \
+ D(None, 0) \
+ D(NonIncrementalRequested, 1) \
+ D(AbortRequested, 2) \
+ D(Unused1, 3) \
+ D(IncrementalDisabled, 4) \
+ D(ModeChange, 5) \
+ D(MallocBytesTrigger, 6) \
+ D(GCBytesTrigger, 7) \
+ D(ZoneChange, 8) \
+ D(CompartmentRevived, 9) \
+ D(GrayRootBufferingFailed, 10) \
+ D(JitCodeBytesTrigger, 11)
+enum class GCAbortReason {
+#define MAKE_REASON(name, num) name = num,
+ GC_ABORT_REASONS(MAKE_REASON)
+#undef MAKE_REASON
+};
+
+#define JS_FOR_EACH_INTERNAL_MEMORY_USE(_) \
+ _(ArrayBufferContents) \
+ _(StringContents) \
+ _(ObjectElements) \
+ _(ObjectSlots) \
+ _(ScriptPrivateData) \
+ _(MapObjectTable) \
+ _(BigIntDigits) \
+ _(ScopeData) \
+ _(WeakMapObject) \
+ _(ShapeSetForAdd) \
+ _(PropMapChildren) \
+ _(PropMapTable) \
+ _(ModuleBindingMap) \
+ _(ModuleCyclicFields) \
+ _(ModuleExports) \
+ _(BaselineScript) \
+ _(IonScript) \
+ _(ArgumentsData) \
+ _(RareArgumentsData) \
+ _(RegExpSharedBytecode) \
+ _(RegExpSharedNamedCaptureData) \
+ _(TypedArrayElements) \
+ _(NativeIterator) \
+ _(JitScript) \
+ _(ScriptDebugScript) \
+ _(BreakpointSite) \
+ _(Breakpoint) \
+ _(ForOfPIC) \
+ _(ForOfPICStub) \
+ _(WasmInstanceExports) \
+ _(WasmInstanceScopes) \
+ _(WasmInstanceGlobals) \
+ _(WasmInstanceInstance) \
+ _(WasmMemoryObservers) \
+ _(WasmGlobalCell) \
+ _(WasmResolveResponseClosure) \
+ _(WasmModule) \
+ _(WasmTableTable) \
+ _(WasmExceptionData) \
+ _(WasmTagType) \
+ _(FileObjectFile) \
+ _(Debugger) \
+ _(DebuggerFrameGeneratorInfo) \
+ _(DebuggerFrameIterData) \
+ _(DebuggerOnStepHandler) \
+ _(DebuggerOnPopHandler) \
+ _(ICUObject) \
+ _(FinalizationRegistryRecordVector) \
+ _(FinalizationRegistryRegistrations) \
+ _(FinalizationRecordVector) \
+ _(TrackedAllocPolicy) \
+ _(SharedArrayRawBuffer) \
+ _(XDRBufferElements) \
+ _(GlobalObjectData) \
+ _(ProxyExternalValueArray)
+
+#define JS_FOR_EACH_MEMORY_USE(_) \
+ JS_FOR_EACH_PUBLIC_MEMORY_USE(_) \
+ JS_FOR_EACH_INTERNAL_MEMORY_USE(_)
+
+enum class MemoryUse : uint8_t {
+#define DEFINE_MEMORY_USE(Name) Name,
+ JS_FOR_EACH_MEMORY_USE(DEFINE_MEMORY_USE)
+#undef DEFINE_MEMORY_USE
+};
+
+} /* namespace js */
+
+#endif /* gc_GCEnum_h */
diff --git a/js/src/gc/GCInternals.h b/js/src/gc/GCInternals.h
new file mode 100644
index 0000000000..c234ad4b2b
--- /dev/null
+++ b/js/src/gc/GCInternals.h
@@ -0,0 +1,344 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal definitions.
+ */
+
+#ifndef gc_GCInternals_h
+#define gc_GCInternals_h
+
+#include "mozilla/Maybe.h"
+#include "mozilla/TimeStamp.h"
+
+#include "gc/Cell.h"
+#include "gc/GC.h"
+#include "gc/GCContext.h"
+#include "vm/GeckoProfiler.h"
+#include "vm/HelperThreads.h"
+#include "vm/JSContext.h"
+
+namespace js {
+namespace gc {
+
+/*
+ * There are a couple of classes here that serve mostly as "tokens" indicating
+ * that a precondition holds. Some functions force the caller to possess such a
+ * token because they require the precondition to hold, and it is better to make
+ * the precondition explicit at the API entry point than to crash in an
+ * assertion later on when it is relied upon.
+ */
+
+struct MOZ_RAII AutoAssertNoNurseryAlloc {
+#ifdef DEBUG
+ AutoAssertNoNurseryAlloc();
+ ~AutoAssertNoNurseryAlloc();
+#else
+ AutoAssertNoNurseryAlloc() {}
+#endif
+};
+
+/*
+ * A class that serves as a token that the nursery in the current thread's zone
+ * group is empty.
+ */
+class MOZ_RAII AutoAssertEmptyNursery {
+ protected:
+ JSContext* cx;
+
+ mozilla::Maybe<AutoAssertNoNurseryAlloc> noAlloc;
+
+ // Check that the nursery is empty.
+ void checkCondition(JSContext* cx);
+
+ // For subclasses that need to empty the nursery in their constructors.
+ AutoAssertEmptyNursery() : cx(nullptr) {}
+
+ public:
+ explicit AutoAssertEmptyNursery(JSContext* cx) : cx(nullptr) {
+ checkCondition(cx);
+ }
+
+ AutoAssertEmptyNursery(const AutoAssertEmptyNursery& other)
+ : AutoAssertEmptyNursery(other.cx) {}
+};
+
+/*
+ * Evict the nursery upon construction. Serves as a token indicating that the
+ * nursery is empty. (See AutoAssertEmptyNursery, above.)
+ */
+class MOZ_RAII AutoEmptyNursery : public AutoAssertEmptyNursery {
+ public:
+ explicit AutoEmptyNursery(JSContext* cx);
+};
+
+// Abstract base class for exclusive heap access for tracing or GC.
+class MOZ_RAII AutoHeapSession {
+ public:
+ ~AutoHeapSession();
+
+ protected:
+ AutoHeapSession(GCRuntime* gc, JS::HeapState state);
+
+ private:
+ AutoHeapSession(const AutoHeapSession&) = delete;
+ void operator=(const AutoHeapSession&) = delete;
+
+ GCRuntime* gc;
+ JS::HeapState prevState;
+ mozilla::Maybe<AutoGeckoProfilerEntry> profilingStackFrame;
+};
+
+class MOZ_RAII AutoGCSession : public AutoHeapSession {
+ public:
+ explicit AutoGCSession(GCRuntime* gc, JS::HeapState state)
+ : AutoHeapSession(gc, state) {}
+};
+
+class MOZ_RAII AutoMajorGCProfilerEntry : public AutoGeckoProfilerEntry {
+ public:
+ explicit AutoMajorGCProfilerEntry(GCRuntime* gc);
+};
+
+class MOZ_RAII AutoTraceSession : public AutoHeapSession {
+ public:
+ explicit AutoTraceSession(JSRuntime* rt)
+ : AutoHeapSession(&rt->gc, JS::HeapState::Tracing) {}
+};
+
+struct MOZ_RAII AutoFinishGC {
+ explicit AutoFinishGC(JSContext* cx, JS::GCReason reason) {
+ FinishGC(cx, reason);
+ }
+};
+
+// This class should be used by any code that needs exclusive access to the heap
+// in order to trace through it.
+class MOZ_RAII AutoPrepareForTracing : private AutoFinishGC,
+ public AutoTraceSession {
+ public:
+ explicit AutoPrepareForTracing(JSContext* cx)
+ : AutoFinishGC(cx, JS::GCReason::PREPARE_FOR_TRACING),
+ AutoTraceSession(cx->runtime()) {}
+};
+
+// This class should be used by any code that needs exclusive access to the heap
+// in order to trace through it.
+//
+// This version also empties the nursery after finishing any ongoing GC.
+class MOZ_RAII AutoEmptyNurseryAndPrepareForTracing : private AutoFinishGC,
+ public AutoEmptyNursery,
+ public AutoTraceSession {
+ public:
+ explicit AutoEmptyNurseryAndPrepareForTracing(JSContext* cx)
+ : AutoFinishGC(cx, JS::GCReason::PREPARE_FOR_TRACING),
+ AutoEmptyNursery(cx),
+ AutoTraceSession(cx->runtime()) {}
+};
+
+/*
+ * Temporarily disable incremental barriers.
+ */
+class AutoDisableBarriers {
+ public:
+ explicit AutoDisableBarriers(GCRuntime* gc);
+ ~AutoDisableBarriers();
+
+ private:
+ GCRuntime* gc;
+};
+
+// Set compartments' maybeAlive flags if anything is marked while this class is
+// live. This is used while marking roots.
+class AutoUpdateLiveCompartments {
+ GCRuntime* gc;
+
+ public:
+ explicit AutoUpdateLiveCompartments(GCRuntime* gc);
+ ~AutoUpdateLiveCompartments();
+};
+
+class MOZ_RAII AutoRunParallelTask : public GCParallelTask {
+ // This class takes a pointer to a member function of GCRuntime.
+ using TaskFunc = JS_MEMBER_FN_PTR_TYPE(GCRuntime, void);
+
+ TaskFunc func_;
+ AutoLockHelperThreadState& lock_;
+
+ public:
+ AutoRunParallelTask(GCRuntime* gc, TaskFunc func, gcstats::PhaseKind phase,
+ GCUse use, AutoLockHelperThreadState& lock)
+ : GCParallelTask(gc, phase, use), func_(func), lock_(lock) {
+ gc->startTask(*this, lock_);
+ }
+
+ ~AutoRunParallelTask() { gc->joinTask(*this, lock_); }
+
+ void run(AutoLockHelperThreadState& lock) override {
+ AutoUnlockHelperThreadState unlock(lock);
+
+ // The hazard analysis can't tell what the call to func_ will do but it's
+ // not allowed to GC.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ // Call pointer to member function on |gc|.
+ JS_CALL_MEMBER_FN_PTR(gc, func_);
+ }
+};
+
+GCAbortReason IsIncrementalGCUnsafe(JSRuntime* rt);
+
+#ifdef JS_GC_ZEAL
+
+class MOZ_RAII AutoStopVerifyingBarriers {
+ GCRuntime* gc;
+ bool restartPreVerifier;
+
+ public:
+ AutoStopVerifyingBarriers(JSRuntime* rt, bool isShutdown) : gc(&rt->gc) {
+ if (gc->isVerifyPreBarriersEnabled()) {
+ gc->endVerifyPreBarriers();
+ restartPreVerifier = !isShutdown;
+ } else {
+ restartPreVerifier = false;
+ }
+ }
+
+ ~AutoStopVerifyingBarriers() {
+ // Nasty special case: verification runs a minor GC, which *may* nest
+ // inside of an outer minor GC. This is not allowed by the
+ // gc::Statistics phase tree. So we pause the "real" GC, if in fact one
+ // is in progress.
+ gcstats::PhaseKind outer = gc->stats().currentPhaseKind();
+ if (outer != gcstats::PhaseKind::NONE) {
+ gc->stats().endPhase(outer);
+ }
+ MOZ_ASSERT(gc->stats().currentPhaseKind() == gcstats::PhaseKind::NONE);
+
+ if (restartPreVerifier) {
+ gc->startVerifyPreBarriers();
+ }
+
+ if (outer != gcstats::PhaseKind::NONE) {
+ gc->stats().beginPhase(outer);
+ }
+ }
+};
+#else
+struct MOZ_RAII AutoStopVerifyingBarriers {
+ AutoStopVerifyingBarriers(JSRuntime*, bool) {}
+};
+#endif /* JS_GC_ZEAL */
+
+class MOZ_RAII AutoPoisonFreedJitCode {
+ JS::GCContext* const gcx;
+
+ public:
+ explicit AutoPoisonFreedJitCode(JS::GCContext* gcx) : gcx(gcx) {}
+ ~AutoPoisonFreedJitCode() { gcx->poisonJitCode(); }
+};
+
+// Set/restore the GCContext GC use flag for the current thread.
+
+class MOZ_RAII AutoSetThreadGCUse {
+ public:
+ AutoSetThreadGCUse(JS::GCContext* gcx, GCUse use)
+ : gcx(gcx), prevUse(gcx->gcUse_) {
+ gcx->gcUse_ = use;
+ }
+ explicit AutoSetThreadGCUse(GCUse use)
+ : AutoSetThreadGCUse(TlsGCContext.get(), use) {}
+
+ ~AutoSetThreadGCUse() { gcx->gcUse_ = prevUse; }
+
+ protected:
+ JS::GCContext* gcx;
+ GCUse prevUse;
+};
+
+template <GCUse Use>
+class AutoSetThreadGCUseT : public AutoSetThreadGCUse {
+ public:
+ explicit AutoSetThreadGCUseT(JS::GCContext* gcx)
+ : AutoSetThreadGCUse(gcx, Use) {}
+ AutoSetThreadGCUseT() : AutoSetThreadGCUseT(TlsGCContext.get()) {}
+};
+
+using AutoSetThreadIsPerformingGC = AutoSetThreadGCUseT<GCUse::Unspecified>;
+using AutoSetThreadIsMarking = AutoSetThreadGCUseT<GCUse::Marking>;
+using AutoSetThreadIsFinalizing = AutoSetThreadGCUseT<GCUse::Finalizing>;
+
+class AutoSetThreadIsSweeping : public AutoSetThreadGCUseT<GCUse::Sweeping> {
+ public:
+ explicit AutoSetThreadIsSweeping(JS::GCContext* gcx,
+ JS::Zone* sweepZone = nullptr)
+ : AutoSetThreadGCUseT(gcx) {
+#ifdef DEBUG
+ prevZone = gcx->gcSweepZone_;
+ gcx->gcSweepZone_ = sweepZone;
+#endif
+ }
+ explicit AutoSetThreadIsSweeping(JS::Zone* sweepZone = nullptr)
+ : AutoSetThreadIsSweeping(TlsGCContext.get(), sweepZone) {}
+
+ ~AutoSetThreadIsSweeping() {
+#ifdef DEBUG
+ MOZ_ASSERT_IF(prevUse == GCUse::None, !prevZone);
+ gcx->gcSweepZone_ = prevZone;
+#endif
+ }
+
+ private:
+#ifdef DEBUG
+ JS::Zone* prevZone;
+#endif
+};
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void CheckHashTablesAfterMovingGC(JSRuntime* rt);
+void CheckHeapAfterGC(JSRuntime* rt);
+#endif
+
+struct MovingTracer final : public GenericTracerImpl<MovingTracer> {
+ explicit MovingTracer(JSRuntime* rt);
+
+ private:
+ template <typename T>
+ void onEdge(T** thingp, const char* name);
+ friend class GenericTracerImpl<MovingTracer>;
+};
+
+struct MinorSweepingTracer final
+ : public GenericTracerImpl<MinorSweepingTracer> {
+ explicit MinorSweepingTracer(JSRuntime* rt);
+
+ private:
+ template <typename T>
+ void onEdge(T** thingp, const char* name);
+ friend class GenericTracerImpl<MinorSweepingTracer>;
+};
+
+extern void DelayCrossCompartmentGrayMarking(GCMarker* maybeMarker,
+ JSObject* src);
+
+inline bool IsOOMReason(JS::GCReason reason) {
+ return reason == JS::GCReason::LAST_DITCH ||
+ reason == JS::GCReason::MEM_PRESSURE;
+}
+
+void* AllocateCellInGC(JS::Zone* zone, AllocKind thingKind);
+
+void ReadProfileEnv(const char* envName, const char* helpText, bool* enableOut,
+ bool* workersOut, mozilla::TimeDuration* thresholdOut);
+
+bool ShouldPrintProfile(JSRuntime* runtime, bool enable, bool workers,
+ mozilla::TimeDuration threshold,
+ mozilla::TimeDuration duration);
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_GCInternals_h */
diff --git a/js/src/gc/GCLock.h b/js/src/gc/GCLock.h
new file mode 100644
index 0000000000..64c28ac544
--- /dev/null
+++ b/js/src/gc/GCLock.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal classes for acquiring and releasing the GC lock.
+ */
+
+#ifndef gc_GCLock_h
+#define gc_GCLock_h
+
+#include "vm/Runtime.h"
+
+namespace js {
+
+class AutoUnlockGC;
+
+/*
+ * RAII class that takes the GC lock while it is live.
+ *
+ * Usually functions will pass const references of this class. However
+ * non-const references can be used to either temporarily release the lock by
+ * use of AutoUnlockGC or to start background allocation when the lock is
+ * released.
+ */
+class MOZ_RAII AutoLockGC {
+ public:
+ explicit AutoLockGC(gc::GCRuntime* gc) : gc(gc) { lock(); }
+ explicit AutoLockGC(JSRuntime* rt) : AutoLockGC(&rt->gc) {}
+
+ ~AutoLockGC() { lockGuard_.reset(); }
+
+ js::LockGuard<js::Mutex>& guard() { return lockGuard_.ref(); }
+
+ protected:
+ void lock() {
+ MOZ_ASSERT(lockGuard_.isNothing());
+ lockGuard_.emplace(gc->lock);
+ }
+
+ void unlock() {
+ MOZ_ASSERT(lockGuard_.isSome());
+ lockGuard_.reset();
+ }
+
+ gc::GCRuntime* const gc;
+
+ private:
+ mozilla::Maybe<js::LockGuard<js::Mutex>> lockGuard_;
+
+ AutoLockGC(const AutoLockGC&) = delete;
+ AutoLockGC& operator=(const AutoLockGC&) = delete;
+
+ friend class AutoUnlockGC; // For lock/unlock.
+};
+
+/*
+ * Same as AutoLockGC except it can optionally start a background chunk
+ * allocation task when the lock is released.
+ */
+class MOZ_RAII AutoLockGCBgAlloc : public AutoLockGC {
+ public:
+ explicit AutoLockGCBgAlloc(gc::GCRuntime* gc) : AutoLockGC(gc) {}
+ explicit AutoLockGCBgAlloc(JSRuntime* rt) : AutoLockGCBgAlloc(&rt->gc) {}
+
+ ~AutoLockGCBgAlloc() {
+ unlock();
+
+ /*
+ * We have to do this after releasing the lock because it may acquire
+ * the helper lock which could cause lock inversion if we still held
+ * the GC lock.
+ */
+ if (startBgAlloc) {
+ gc->startBackgroundAllocTaskIfIdle();
+ }
+ }
+
+ /*
+ * This can be used to start a background allocation task (if one isn't
+ * already running) that allocates chunks and makes them available in the
+ * free chunks list. This happens after the lock is released in order to
+ * avoid lock inversion.
+ */
+ void tryToStartBackgroundAllocation() { startBgAlloc = true; }
+
+ private:
+ // true if we should start a background chunk allocation task after the
+ // lock is released.
+ bool startBgAlloc = false;
+};
+
+class MOZ_RAII AutoUnlockGC {
+ public:
+ explicit AutoUnlockGC(AutoLockGC& lock) : lock(lock) { lock.unlock(); }
+
+ ~AutoUnlockGC() { lock.lock(); }
+
+ private:
+ AutoLockGC& lock;
+
+ AutoUnlockGC(const AutoUnlockGC&) = delete;
+ AutoUnlockGC& operator=(const AutoUnlockGC&) = delete;
+};
+
+} // namespace js
+
+#endif /* gc_GCLock_h */
diff --git a/js/src/gc/GCMarker.h b/js/src/gc/GCMarker.h
new file mode 100644
index 0000000000..053ba90e18
--- /dev/null
+++ b/js/src/gc/GCMarker.h
@@ -0,0 +1,598 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCMarker_h
+#define gc_GCMarker_h
+
+#include "mozilla/Maybe.h"
+#include "mozilla/Variant.h"
+
+#include "ds/OrderedHashTable.h"
+#include "gc/Barrier.h"
+#include "js/TracingAPI.h"
+#include "js/TypeDecls.h"
+#include "threading/ProtectedData.h"
+
+class JSRope;
+
+namespace js {
+
+class GCMarker;
+class SliceBudget;
+class WeakMapBase;
+
+static const size_t MARK_STACK_BASE_CAPACITY = 4096;
+
+enum class SlotsOrElementsKind {
+ Unused = 0, // Must match SlotsOrElementsRangeTag
+ Elements,
+ FixedSlots,
+ DynamicSlots
+};
+
+namespace gc {
+
+enum IncrementalProgress { NotFinished = 0, Finished };
+
+class AutoSetMarkColor;
+struct Cell;
+class ParallelMarker;
+class UnmarkGrayTracer;
+
+struct EphemeronEdgeTableHashPolicy {
+ using Lookup = Cell*;
+ static HashNumber hash(const Lookup& v,
+ const mozilla::HashCodeScrambler& hcs) {
+ return hcs.scramble(mozilla::HashGeneric(v));
+ }
+ static bool match(Cell* const& k, const Lookup& l) { return k == l; }
+ static bool isEmpty(Cell* const& v) { return !v; }
+ static void makeEmpty(Cell** vp) { *vp = nullptr; }
+};
+
+// Ephemeron edges have two source nodes and one target, and mark the target
+// with the minimum (least-marked) color of the sources. Currently, one of
+// those sources will always be a WeakMapBase, so this will refer to its color
+// at the time the edge is traced through. The other source's color will be
+// given by the current mark color of the GCMarker.
+struct EphemeronEdge {
+ CellColor color;
+ Cell* target;
+
+ EphemeronEdge(CellColor color_, Cell* cell) : color(color_), target(cell) {}
+};
+
+using EphemeronEdgeVector = Vector<EphemeronEdge, 2, js::SystemAllocPolicy>;
+
+using EphemeronEdgeTable =
+ OrderedHashMap<Cell*, EphemeronEdgeVector, EphemeronEdgeTableHashPolicy,
+ js::SystemAllocPolicy>;
+
+/*
+ * The mark stack. Pointers in this stack are "gray" in the GC sense, but
+ * their references may be marked either black or gray (in the CC sense).
+ *
+ * When the mark stack is full, the GC does not call js::TraceChildren to mark
+ * the reachable "children" of the thing. Rather the thing is put aside and
+ * js::TraceChildren is called later when the mark stack is empty.
+ *
+ * To implement such delayed marking of the children with minimal overhead for
+ * the normal case of sufficient stack, we link arenas into a list using
+ * Arena::setNextDelayedMarkingArena(). The head of the list is stored in
+ * GCMarker::delayedMarkingList. GCMarker::delayMarkingChildren() adds arenas
+ * to the list as necessary while markAllDelayedChildren() pops the arenas from
+ * the stack until it is empty.
+ */
+class MarkStack {
+ public:
+ /*
+ * We use a common mark stack to mark GC things of different types and use
+ * the explicit tags to distinguish them when it cannot be deduced from
+ * the context of push or pop operation.
+ */
+ enum Tag {
+ SlotsOrElementsRangeTag = 0, // Must match SlotsOrElementsKind::Unused.
+ ObjectTag,
+ JitCodeTag,
+ ScriptTag,
+ TempRopeTag,
+
+ LastTag = TempRopeTag
+ };
+
+ static const uintptr_t TagMask = 7;
+ static_assert(TagMask >= uintptr_t(LastTag),
+ "The tag mask must subsume the tags.");
+ static_assert(TagMask <= gc::CellAlignMask,
+ "The tag mask must be embeddable in a Cell*.");
+
+ class TaggedPtr {
+ uintptr_t bits;
+
+ Cell* ptr() const;
+
+ public:
+ TaggedPtr() = default;
+ TaggedPtr(Tag tag, Cell* ptr);
+ Tag tag() const;
+ uintptr_t tagUnchecked() const;
+ template <typename T>
+ T* as() const;
+
+ JSObject* asRangeObject() const;
+ JSRope* asTempRope() const;
+
+ void assertValid() const;
+ };
+
+ struct SlotsOrElementsRange {
+ SlotsOrElementsRange(SlotsOrElementsKind kind, JSObject* obj, size_t start);
+ void assertValid() const;
+
+ SlotsOrElementsKind kind() const;
+ size_t start() const;
+ TaggedPtr ptr() const;
+
+ static constexpr size_t StartShift = 2;
+ static constexpr size_t KindMask = (1 << StartShift) - 1;
+
+ private:
+ uintptr_t startAndKind_;
+ TaggedPtr ptr_;
+ };
+
+ MarkStack();
+ ~MarkStack();
+
+ explicit MarkStack(const MarkStack& other);
+ MarkStack& operator=(const MarkStack& other);
+
+ MarkStack(MarkStack&& other);
+ MarkStack& operator=(MarkStack&& other);
+
+ // The unit for MarkStack::capacity() is mark stack words.
+ size_t capacity() { return stack().length(); }
+
+ size_t position() const { return topIndex_; }
+
+ [[nodiscard]] bool init();
+ [[nodiscard]] bool resetStackCapacity();
+
+#ifdef JS_GC_ZEAL
+ void setMaxCapacity(size_t maxCapacity);
+#endif
+
+ template <typename T>
+ [[nodiscard]] bool push(T* ptr);
+
+ [[nodiscard]] bool push(JSObject* obj, SlotsOrElementsKind kind,
+ size_t start);
+ [[nodiscard]] bool push(const TaggedPtr& ptr);
+ [[nodiscard]] bool push(const SlotsOrElementsRange& array);
+ void infalliblePush(const TaggedPtr& ptr);
+ void infalliblePush(const SlotsOrElementsRange& array);
+
+ // GCMarker::eagerlyMarkChildren uses unused marking stack as temporary
+ // storage to hold rope pointers.
+ [[nodiscard]] bool pushTempRope(JSRope* ptr);
+
+ bool isEmpty() const { return position() == 0; }
+ bool hasEntries() const { return !isEmpty(); }
+
+ Tag peekTag() const;
+ TaggedPtr popPtr();
+ SlotsOrElementsRange popSlotsOrElementsRange();
+
+ void clearAndResetCapacity();
+ void clearAndFreeStack();
+
+ void poisonUnused();
+
+ [[nodiscard]] bool ensureSpace(size_t count);
+
+ static void moveWork(MarkStack& dst, MarkStack& src);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ private:
+ using StackVector = Vector<TaggedPtr, 0, SystemAllocPolicy>;
+ const StackVector& stack() const { return stack_.ref(); }
+ StackVector& stack() { return stack_.ref(); }
+
+ /* Grow the stack, ensuring there is space for at least count elements. */
+ [[nodiscard]] bool enlarge(size_t count);
+
+ [[nodiscard]] bool resize(size_t newCapacity);
+
+ TaggedPtr* topPtr();
+
+ const TaggedPtr& peekPtr() const;
+ [[nodiscard]] bool pushTaggedPtr(Tag tag, Cell* ptr);
+
+ bool indexIsEntryBase(size_t index) const;
+
+ // Vector containing allocated stack memory. Unused beyond topIndex_.
+ MainThreadOrGCTaskData<StackVector> stack_;
+
+ // Index of the top of the stack.
+ MainThreadOrGCTaskData<size_t> topIndex_;
+
+#ifdef JS_GC_ZEAL
+ // The maximum stack capacity to grow to.
+ MainThreadOrGCTaskData<size_t> maxCapacity_{SIZE_MAX};
+#endif
+};
+
+static_assert(unsigned(SlotsOrElementsKind::Unused) ==
+ unsigned(MarkStack::SlotsOrElementsRangeTag),
+ "To split the mark stack we depend on being able to tell the "
+ "difference between SlotsOrElementsRange::startAndKind_ and a "
+ "tagged SlotsOrElementsRange");
+
// Bitmask of options to parameterize MarkingTracerT.
namespace MarkingOptions {
enum : uint32_t {
  // Set the compartment's hasMarkedCells flag for roots.
  MarkRootCompartments = 1,

  // The marking tracer is operating in parallel. Use appropriate atomic
  // accesses to update the mark bits correctly.
  ParallelMarking = 2,

  // Mark any implicit edges if we are in weak marking mode.
  MarkImplicitEdges = 4,
};
} // namespace MarkingOptions

// Default options for ordinary (non-root, non-parallel) marking.
constexpr uint32_t NormalMarkingOptions = MarkingOptions::MarkImplicitEdges;
+
// Tracer used during marking, parameterized by a bitmask of MarkingOptions.
// Derives from GenericTracerImpl with itself as the template argument (CRTP)
// and provides access to the GCMarker it belongs to.
template <uint32_t markingOptions>
class MarkingTracerT
    : public GenericTracerImpl<MarkingTracerT<markingOptions>> {
 public:
  MarkingTracerT(JSRuntime* runtime, GCMarker* marker);
  virtual ~MarkingTracerT() = default;

  // Called for each traced edge; |thingp| points at the edge's target cell.
  template <typename T>
  void onEdge(T** thingp, const char* name);
  friend class GenericTracerImpl<MarkingTracerT<markingOptions>>;

  GCMarker* getMarker();
};

// One instantiation per marking mode.
using MarkingTracer = MarkingTracerT<NormalMarkingOptions>;
using RootMarkingTracer = MarkingTracerT<MarkingOptions::MarkRootCompartments>;
using ParallelMarkingTracer = MarkingTracerT<MarkingOptions::ParallelMarking>;
+
// Whether a marking slice should report its timing (e.g. when passed to
// GCMarker::markUntilBudgetExhausted).
enum ShouldReportMarkTime : bool {
  DontReportMarkTime = false,
  ReportMarkTime = true,
};
+
+} /* namespace gc */
+
// Performs GC marking. A GCMarker owns the tracer used to visit edges, two
// mark stacks (one per mark color; see |stack| and |otherStack| below) and
// the MarkingState machine that tracks which marking mode is active. During
// parallel marking several markers run at once and work can be moved between
// them (see moveWork).
class GCMarker {
  enum MarkingState : uint8_t {
    // Have not yet started marking.
    NotActive,

    // Root marking mode. This sets the hasMarkedCells flag on compartments
    // containing objects and scripts, which is used to make sure we clean up
    // dead compartments.
    RootMarking,

    // Main marking mode. Weakmap marking will be populating the
    // gcEphemeronEdges tables but not consulting them. The state will
    // transition to WeakMarking until it is done, then back to RegularMarking.
    RegularMarking,

    // Like RegularMarking but with multiple threads running in parallel.
    ParallelMarking,

    // Same as RegularMarking except now every marked obj/script is immediately
    // looked up in the gcEphemeronEdges table to find edges generated by
    // weakmap keys, and traversing them to their values. Transitions back to
    // RegularMarking when done.
    WeakMarking,
  };

 public:
  explicit GCMarker(JSRuntime* rt);
  [[nodiscard]] bool init();

  JSRuntime* runtime() { return runtime_; }
  JSTracer* tracer() {
    return tracer_.match([](auto& t) -> JSTracer* { return &t; });
  }

#ifdef JS_GC_ZEAL
  void setMaxCapacity(size_t maxCap) { stack.setMaxCapacity(maxCap); }
#endif

  bool isActive() const { return state != NotActive; }
  bool isRegularMarking() const { return state == RegularMarking; }
  bool isParallelMarking() const { return state == ParallelMarking; }
  bool isWeakMarking() const { return state == WeakMarking; }

  gc::MarkColor markColor() const { return markColor_; }

  bool isDrained() const { return stack.isEmpty() && otherStack.isEmpty(); }

  bool hasEntriesForCurrentColor() { return stack.hasEntries(); }
  bool hasBlackEntries() const { return hasEntries(gc::MarkColor::Black); }
  bool hasGrayEntries() const { return hasEntries(gc::MarkColor::Gray); }
  bool hasEntries(gc::MarkColor color) const;

  // Whether this marker has work that could be given to another marker
  // (used during parallel marking; see moveWork).
  bool canDonateWork() const;

  void start();
  void stop();
  void reset();

  [[nodiscard]] bool markUntilBudgetExhausted(
      SliceBudget& budget,
      gc::ShouldReportMarkTime reportTime = gc::ReportMarkTime);

  void setRootMarkingMode(bool newState);

  bool enterWeakMarkingMode();
  void leaveWeakMarkingMode();

  void enterParallelMarkingMode(gc::ParallelMarker* pm);
  void leaveParallelMarkingMode();

  // Do not use linear-time weak marking for the rest of this collection.
  // Currently, this will only be triggered by an OOM when updating needed data
  // structures.
  void abortLinearWeakMarking();

  // 'delegate' is no longer the delegate of 'key'.
  void severWeakDelegate(JSObject* key, JSObject* delegate);

  // 'delegate' is now the delegate of 'key'. Update weakmap marking state.
  void restoreWeakDelegate(JSObject* key, JSObject* delegate);

#ifdef DEBUG
  // We can't check atom marking if the helper thread lock is already held by
  // the current thread. This allows us to disable the check.
  void setCheckAtomMarking(bool check);

  bool shouldCheckCompartments() { return strictCompartmentChecking; }
#endif

  bool markCurrentColorInParallel(SliceBudget& budget);

  template <uint32_t markingOptions, gc::MarkColor>
  bool markOneColor(SliceBudget& budget);

  static void moveWork(GCMarker* dst, GCMarker* src);

  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;

  // Recover the owning GCMarker from one of its tracer variants, by
  // subtracting the tracer_ member's offset from the tracer address.
  static GCMarker* fromTracer(JSTracer* trc) {
    MOZ_ASSERT(trc->isMarkingTracer());
    auto* marker = reinterpret_cast<GCMarker*>(uintptr_t(trc) -
                                               offsetof(GCMarker, tracer_));
    MOZ_ASSERT(marker->tracer() == trc);
    return marker;
  }

  // Internal public methods, for ease of use by the rest of the GC:

  // If |thing| is unmarked, mark it and then traverse its children.
  template <uint32_t, typename T>
  void markAndTraverse(T* thing);

  template <typename T>
  void markImplicitEdges(T* oldThing);

 private:
  /*
   * Care must be taken changing the mark color from gray to black. The cycle
   * collector depends on the invariant that there are no black to gray edges
   * in the GC heap. This invariant lets the CC not trace through black
   * objects. If this invariant is violated, the cycle collector may free
   * objects that are still reachable.
   */
  void setMarkColor(gc::MarkColor newColor);
  friend class js::gc::AutoSetMarkColor;

  template <typename Tracer>
  void setMarkingStateAndTracer(MarkingState prev, MarkingState next);

  template <uint32_t markingOptions>
  bool processMarkStackTop(SliceBudget& budget);
  friend class gc::GCRuntime;

  // Helper methods that coerce their second argument to the base pointer
  // type.
  template <uint32_t markingOptions, typename S>
  void markAndTraverseObjectEdge(S source, JSObject* target) {
    markAndTraverseEdge<markingOptions>(source, target);
  }
  template <uint32_t markingOptions, typename S>
  void markAndTraverseStringEdge(S source, JSString* target) {
    markAndTraverseEdge<markingOptions>(source, target);
  }

  template <uint32_t markingOptions, typename S, typename T>
  void markAndTraverseEdge(S source, T* target);
  template <uint32_t markingOptions, typename S, typename T>
  void markAndTraverseEdge(S source, const T& target);

  template <typename S, typename T>
  void checkTraversedEdge(S source, T* target);

  // Mark the given GC thing, but do not trace its children. Return true
  // if the thing became marked.
  template <uint32_t markingOptions, typename T>
  [[nodiscard]] bool mark(T* thing);

  // Traverse a GC thing's children, using a strategy depending on the type.
  // This can either process them immediately or push them onto the mark
  // stack for later.
#define DEFINE_TRAVERSE_METHOD(_1, Type, _2, _3) \
  template <uint32_t>                            \
  void traverse(Type* thing);
  JS_FOR_EACH_TRACEKIND(DEFINE_TRAVERSE_METHOD)
#undef DEFINE_TRAVERSE_METHOD

  // Process a marked thing's children by calling T::traceChildren().
  template <uint32_t markingOptions, typename T>
  void traceChildren(T* thing);

  // Process a marked thing's children recursively using an iterative loop and
  // manual dispatch, for kinds where this is possible.
  template <uint32_t markingOptions, typename T>
  void scanChildren(T* thing);

  // Push a marked thing onto the mark stack. Its children will be marked later.
  template <uint32_t markingOptions, typename T>
  void pushThing(T* thing);

  template <uint32_t markingOptions>
  void eagerlyMarkChildren(JSLinearString* str);
  template <uint32_t markingOptions>
  void eagerlyMarkChildren(JSRope* rope);
  template <uint32_t markingOptions>
  void eagerlyMarkChildren(JSString* str);
  template <uint32_t markingOptions>
  void eagerlyMarkChildren(Shape* shape);
  template <uint32_t markingOptions>
  void eagerlyMarkChildren(PropMap* map);
  template <uint32_t markingOptions>
  void eagerlyMarkChildren(Scope* scope);

  template <typename T>
  inline void pushTaggedPtr(T* ptr);

  inline void pushValueRange(JSObject* obj, SlotsOrElementsKind kind,
                             size_t start, size_t end);

  // Push an object onto the stack for later tracing and assert that it has
  // already been marked.
  inline void repush(JSObject* obj);

  template <typename T>
  void markImplicitEdgesHelper(T oldThing);

  // Mark through edges whose target color depends on the colors of two source
  // entities (eg a WeakMap and one of its keys), and push the target onto the
  // mark stack.
  void markEphemeronEdges(gc::EphemeronEdgeVector& edges,
                          gc::CellColor srcColor);
  friend class JS::Zone;

#ifdef DEBUG
  void checkZone(void* p);
#else
  void checkZone(void* p) {}
#endif

  template <uint32_t markingOptions>
  bool doMarking(SliceBudget& budget, gc::ShouldReportMarkTime reportTime);

  void delayMarkingChildrenOnOOM(gc::Cell* cell);

  /*
   * The JSTracer used for marking. This can change depending on the current
   * state.
   */
  mozilla::Variant<gc::MarkingTracer, gc::RootMarkingTracer,
                   gc::ParallelMarkingTracer>
      tracer_;

  JSRuntime* const runtime_;

  // The main mark stack, holding entries of color |markColor_|.
  gc::MarkStack stack;

  // The auxiliary mark stack, which may contain entries of the other color.
  gc::MarkStack otherStack;

  // Track whether we're using the main or auxiliary stack.
  MainThreadOrGCTaskData<bool> haveSwappedStacks;

  // The current mark stack color.
  MainThreadOrGCTaskData<gc::MarkColor> markColor_;

  MainThreadOrGCTaskData<gc::ParallelMarker*> parallelMarker_;

  // Stack used when unmarking gray cells (see UnmarkGrayTracer).
  Vector<JS::GCCellPtr, 0, SystemAllocPolicy> unmarkGrayStack;
  friend class gc::UnmarkGrayTracer;

  /* Track the state of marking. */
  MainThreadOrGCTaskData<MarkingState> state;

  /* Whether we successfully added all edges to the implicit edges table. */
  MainThreadOrGCTaskData<bool> haveAllImplicitEdges;

 public:
  /*
   * Whether weakmaps can be marked incrementally.
   *
   * JSGC_INCREMENTAL_WEAKMAP_ENABLED
   * pref: javascript.options.mem.incremental_weakmap
   */
  MainThreadOrGCTaskData<bool> incrementalWeakMapMarkingEnabled;

#ifdef DEBUG
 private:
  /* Assert that start and stop are called with correct ordering. */
  MainThreadOrGCTaskData<bool> started;

  /*
   * Whether to check that atoms traversed are present in atom marking
   * bitmap.
   */
  MainThreadOrGCTaskData<bool> checkAtomMarking;

  /*
   * If this is true, all marked objects must belong to a compartment being
   * GCed. This is used to look for compartment bugs.
   */
  MainThreadOrGCTaskData<bool> strictCompartmentChecking;

 public:
  /*
   * The compartment and zone of the object whose trace hook is currently being
   * called, if any. Used to catch cross-compartment edges traced without use of
   * TraceCrossCompartmentEdge.
   */
  MainThreadOrGCTaskData<Compartment*> tracingCompartment;
  MainThreadOrGCTaskData<Zone*> tracingZone;
#endif // DEBUG
};
+
+namespace gc {
+
+/*
+ * Temporarily change the mark color while this class is on the stack.
+ *
+ * During incremental sweeping this also transitions zones in the
+ * current sweep group into the Mark or MarkGray state as appropriate.
+ */
+class MOZ_RAII AutoSetMarkColor {
+ GCMarker& marker_;
+ MarkColor initialColor_;
+
+ public:
+ AutoSetMarkColor(GCMarker& marker, MarkColor newColor)
+ : marker_(marker), initialColor_(marker.markColor()) {
+ marker_.setMarkColor(newColor);
+ }
+
+ AutoSetMarkColor(GCMarker& marker, CellColor newColor)
+ : AutoSetMarkColor(marker, newColor.asMarkColor()) {}
+
+ ~AutoSetMarkColor() { marker_.setMarkColor(initialColor_); }
+};
+
+} /* namespace gc */
+
+} /* namespace js */
+
+#endif /* gc_GCMarker_h */
diff --git a/js/src/gc/GCParallelTask.cpp b/js/src/gc/GCParallelTask.cpp
new file mode 100644
index 0000000000..029315b119
--- /dev/null
+++ b/js/src/gc/GCParallelTask.cpp
@@ -0,0 +1,231 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/GCParallelTask.h"
+
+#include "mozilla/Maybe.h"
+#include "mozilla/TimeStamp.h"
+
+#include "gc/GCContext.h"
+#include "gc/GCInternals.h"
+#include "gc/ParallelWork.h"
+#include "vm/HelperThreadState.h"
+#include "vm/Runtime.h"
+#include "vm/Time.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::Maybe;
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
// Destructor: verify the task was stopped and unlinked before destruction;
// see the comments below for why joining here would be unsound.
js::GCParallelTask::~GCParallelTask() {
  // The LinkedListElement destructor will remove us from any list we are part
  // of without synchronization, so ensure that doesn't happen.
  MOZ_DIAGNOSTIC_ASSERT(!isInList());

  // Only most-derived classes' destructors may do the join: base class
  // destructors run after those for derived classes' members, so a join in a
  // base class can't ensure that the task is done using the members. All we
  // can do now is check that someone has previously stopped the task.
  assertIdle();
}
+
// Decide whether to record start-delay telemetry for a task. We use many
// tasks during GC, so only a random sample of roughly one in a hundred is
// measured to keep overhead low.
static bool ShouldMeasureTaskStartDelay() {
  constexpr int kSamplePeriod = 100;
  return rand() % kSamplePeriod == 0;
}
+
// Queue this task for execution on a helper thread. The caller must hold the
// helper thread lock, helper threads must be usable, and the task must be
// idle.
void js::GCParallelTask::startWithLockHeld(AutoLockHelperThreadState& lock) {
  MOZ_ASSERT(CanUseExtraThreads());
  MOZ_ASSERT(HelperThreadState().isInitialized(lock));
  assertIdle();

  // Clear any stale queue timestamp, then sample a small fraction of tasks
  // for start-delay telemetry (see ShouldMeasureTaskStartDelay).
  maybeQueueTime_ = TimeStamp();
  if (ShouldMeasureTaskStartDelay()) {
    maybeQueueTime_ = TimeStamp::Now();
  }

  setDispatched(lock);
  HelperThreadState().submitTask(this, lock);
}
+
+void js::GCParallelTask::start() {
+ if (!CanUseExtraThreads()) {
+ runFromMainThread();
+ return;
+ }
+
+ AutoLockHelperThreadState lock;
+ startWithLockHeld(lock);
+}
+
// Start the task unless it is already in flight. Any previous invocation is
// joined first; if helper threads are unavailable the task runs here
// synchronously instead.
void js::GCParallelTask::startOrRunIfIdle(AutoLockHelperThreadState& lock) {
  if (wasStarted(lock)) {
    return;
  }

  // Join the previous invocation of the task. This will return immediately
  // if the thread has never been started.
  joinWithLockHeld(lock);

  if (!CanUseExtraThreads()) {
    runFromMainThread(lock);
    return;
  }

  startWithLockHeld(lock);
}
+
// Request early completion via the cancel flag and wait for the task to
// finish. The flag is cleared afterwards so the task can be started again.
void js::GCParallelTask::cancelAndWait() {
  MOZ_ASSERT(!isCancelled());
  cancel_ = true;
  join();
  cancel_ = false;
}
+
// Wait for the task to finish (or until |deadline|, if given), acquiring the
// helper thread lock first.
void js::GCParallelTask::join(Maybe<TimeStamp> deadline) {
  AutoLockHelperThreadState lock;
  joinWithLockHeld(lock, deadline);
}
+
// Join the task with the helper thread lock already held. Without a deadline,
// a task that was dispatched but never picked up is cancelled and run here
// rather than waiting for a busy helper thread.
void js::GCParallelTask::joinWithLockHeld(AutoLockHelperThreadState& lock,
                                          Maybe<TimeStamp> deadline) {
  // Task has not been started; there's nothing to do.
  if (isIdle(lock)) {
    return;
  }

  if (isDispatched(lock) && deadline.isNothing()) {
    // If the task was dispatched but has not yet started then cancel the task
    // and run it from the main thread. This stops us from blocking here when
    // the helper threads are busy with other tasks.
    cancelDispatchedTask(lock);
    runFromMainThread(lock);
  } else {
    // Otherwise wait for the task to complete.
    joinNonIdleTask(deadline, lock);
  }

  // With a deadline the task may still be running; only record the phase
  // duration once the join actually completed.
  if (isIdle(lock)) {
    recordDuration();
  }
}
+
+void GCParallelTask::recordDuration() {
+ if (phaseKind != gcstats::PhaseKind::NONE) {
+ gc->stats().recordParallelPhase(phaseKind, duration_);
+ }
+}
+
// Block until the task finishes or |deadline| passes. On completion the task
// transitions back to Idle; on timeout it is left in its current state.
void js::GCParallelTask::joinNonIdleTask(Maybe<TimeStamp> deadline,
                                         AutoLockHelperThreadState& lock) {
  MOZ_ASSERT(!isIdle(lock));

  while (!isFinished(lock)) {
    TimeDuration timeout = TimeDuration::Forever();
    if (deadline) {
      TimeStamp now = TimeStamp::Now();
      if (*deadline <= now) {
        break;
      }
      timeout = *deadline - now;
    }

    // The condition variable may be signalled for unrelated helper thread
    // activity, hence the enclosing re-check loop.
    HelperThreadState().wait(lock, timeout);
  }

  if (isFinished(lock)) {
    setIdle(lock);
  }
}
+
// Remove a dispatched-but-not-yet-running task from the pending task list and
// return it to the Idle state.
void js::GCParallelTask::cancelDispatchedTask(AutoLockHelperThreadState& lock) {
  MOZ_ASSERT(isDispatched(lock));
  MOZ_ASSERT(isInList());
  remove();
  setIdle(lock);
}
+
// Run the task synchronously on the main thread. The state is updated
// directly here: the task goes Idle -> Running -> Idle without passing
// through Dispatched or Finished.
void js::GCParallelTask::runFromMainThread(AutoLockHelperThreadState& lock) {
  assertIdle();
  MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(gc->rt));
  state_ = State::Running;
  runTask(gc->rt->gcContext(), lock);
  state_ = State::Idle;
}
+
// Convenience overload that acquires the helper thread lock itself.
void js::GCParallelTask::runFromMainThread() {
  AutoLockHelperThreadState lock;
  runFromMainThread(lock);
}
+
// RAII helper that installs a fresh JS::GCContext in this thread's TLS slot
// and removes it again on destruction. Used while running a task on a helper
// thread.
class MOZ_RAII AutoGCContext {
  JS::GCContext context;

 public:
  explicit AutoGCContext(JSRuntime* runtime) : context(runtime) {
    MOZ_RELEASE_ASSERT(TlsGCContext.init(),
                       "Failed to initialize TLS for GC context");

    // No GC context must already be installed for this thread.
    MOZ_ASSERT(!TlsGCContext.get());
    TlsGCContext.set(&context);
  }

  ~AutoGCContext() {
    MOZ_ASSERT(TlsGCContext.get() == &context);
    TlsGCContext.set(nullptr);
  }

  JS::GCContext* get() { return &context; }
};
+
// HelperThreadTask entry point: run the task on a helper thread, moving the
// state through Running -> Finished.
void js::GCParallelTask::runHelperThreadTask(AutoLockHelperThreadState& lock) {
  setRunning(lock);

  // Install a GC context for this helper thread for the duration of the task.
  AutoGCContext gcContext(gc->rt);

  runTask(gcContext.get(), lock);

  setFinished(lock);
}
+
void GCParallelTask::runTask(JS::GCContext* gcx,
                             AutoLockHelperThreadState& lock) {
  // Run the task from either the main thread or a helper thread.

  AutoSetThreadGCUse setUse(gcx, use);

  // The hazard analysis can't tell what the call to func_ will do but it's not
  // allowed to GC.
  JS::AutoSuppressGCAnalysis nogc;

  // Time the run itself.
  TimeStamp timeStart = TimeStamp::Now();
  run(lock);
  duration_ = TimeSince(timeStart);

  // For the sampled subset of tasks, report how long the task sat queued
  // before it started executing (see startWithLockHeld).
  if (maybeQueueTime_) {
    TimeDuration delay = timeStart - maybeQueueTime_;
    gc->rt->metrics().GC_TASK_START_DELAY_US(delay);
  }
}
+
// Lock-acquiring wrapper around isIdle(lock).
bool js::GCParallelTask::isIdle() const {
  AutoLockHelperThreadState lock;
  return isIdle(lock);
}
+
// Lock-acquiring wrapper around wasStarted(lock).
bool js::GCParallelTask::wasStarted() const {
  AutoLockHelperThreadState lock;
  return wasStarted(lock);
}
+
// Number of workers to use for parallel GC work, capped at
// MaxParallelWorkers. (This is a const member function; the previous
// /* static */ annotation was incorrect.)
size_t js::gc::GCRuntime::parallelWorkerCount() const {
  return std::min(helperThreadCount.ref(), MaxParallelWorkers);
}
diff --git a/js/src/gc/GCParallelTask.h b/js/src/gc/GCParallelTask.h
new file mode 100644
index 0000000000..4784382ab5
--- /dev/null
+++ b/js/src/gc/GCParallelTask.h
@@ -0,0 +1,246 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCParallelTask_h
+#define gc_GCParallelTask_h
+
+#include "mozilla/LinkedList.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/TimeStamp.h"
+
+#include <utility>
+
+#include "gc/GCContext.h"
+#include "js/Utility.h"
+#include "threading/ProtectedData.h"
+#include "vm/HelperThreads.h"
+#include "vm/HelperThreadTask.h"
+
// Helper macros for spelling pointer-to-member-function types and invoking a
// member function through such a pointer.
#define JS_MEMBER_FN_PTR_TYPE(ClassT, ReturnT, /* ArgTs */...) \
  ReturnT (ClassT::*)(__VA_ARGS__)

#define JS_CALL_MEMBER_FN_PTR(Receiver, Ptr, /* Args */...) \
  ((Receiver)->*(Ptr))(__VA_ARGS__)
+
+namespace js {
+
+namespace gcstats {
+enum class PhaseKind : uint8_t;
+}
+
+namespace gc {
+
+class GCRuntime;
+
+static inline mozilla::TimeDuration TimeSince(mozilla::TimeStamp prev) {
+ mozilla::TimeStamp now = mozilla::TimeStamp::Now();
+ // Sadly this happens sometimes.
+ MOZ_ASSERT(now >= prev);
+ if (now < prev) {
+ now = prev;
+ }
+ return now - prev;
+}
+
+} // namespace gc
+
+class AutoLockHelperThreadState;
+class GCParallelTask;
+class HelperThread;
+
+// A wrapper around a linked list to enforce synchronization.
class GCParallelTaskList {
  mozilla::LinkedList<GCParallelTask> tasks;

 public:
  // Each method takes the helper thread lock token as proof of ownership and
  // additionally asserts that the current thread really holds the lock.
  bool isEmpty(const AutoLockHelperThreadState& lock) {
    gHelperThreadLock.assertOwnedByCurrentThread();
    return tasks.isEmpty();
  }

  void insertBack(GCParallelTask* task, const AutoLockHelperThreadState& lock) {
    gHelperThreadLock.assertOwnedByCurrentThread();
    tasks.insertBack(task);
  }

  GCParallelTask* popFirst(const AutoLockHelperThreadState& lock) {
    gHelperThreadLock.assertOwnedByCurrentThread();
    return tasks.popFirst();
  }

  size_t sizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf,
                             const AutoLockHelperThreadState& lock) const {
    gHelperThreadLock.assertOwnedByCurrentThread();
    return tasks.sizeOfExcludingThis(aMallocSizeOf);
  }
};
+
+// A generic task used to dispatch work to the helper thread system.
+// Users override the pure-virtual run() method.
class GCParallelTask : private mozilla::LinkedListElement<GCParallelTask>,
                       public HelperThreadTask {
  friend class mozilla::LinkedList<GCParallelTask>;
  friend class mozilla::LinkedListElement<GCParallelTask>;

 public:
  gc::GCRuntime* const gc;

  // This can be PhaseKind::NONE for tasks that take place outside a GC.
  const gcstats::PhaseKind phaseKind;

  // How this task uses the GC heap; installed on the GCContext for the
  // duration of the run (see GCParallelTask::runTask).
  gc::GCUse use;

 private:
  // The state of the parallel computation.
  enum class State {
    // The task is idle. Either start() has not been called or join() has
    // returned.
    Idle,

    // The task has been started but has not yet begun running on a helper
    // thread.
    Dispatched,

    // The task is currently running on a helper thread.
    Running,

    // The task has finished running but has not yet been joined by the main
    // thread.
    Finished
  };

  UnprotectedData<State> state_;

  // May be set to the time this task was queued to collect telemetry.
  mozilla::TimeStamp maybeQueueTime_;

  // Amount of time this task took to execute.
  MainThreadOrGCTaskData<mozilla::TimeDuration> duration_;

  explicit GCParallelTask(const GCParallelTask&) = delete;

 protected:
  // A flag to signal a request for early completion of the off-thread task.
  mozilla::Atomic<bool, mozilla::MemoryOrdering::ReleaseAcquire> cancel_;

 public:
  explicit GCParallelTask(gc::GCRuntime* gc, gcstats::PhaseKind phaseKind,
                          gc::GCUse use = gc::GCUse::Unspecified)
      : gc(gc),
        phaseKind(phaseKind),
        use(use),
        state_(State::Idle),
        cancel_(false) {}
  // NOTE(review): the moved-from task's state_ is copied but not reset, so
  // only idle tasks should be moved — confirm against callers.
  GCParallelTask(GCParallelTask&& other)
      : gc(other.gc),
        phaseKind(other.phaseKind),
        use(other.use),
        state_(other.state_),
        cancel_(false) {}

  // Derived classes must override this to ensure that join() gets called
  // before members get destructed.
  virtual ~GCParallelTask();

  // Time spent in the most recent invocation of this task.
  mozilla::TimeDuration duration() const { return duration_; }

  // The simple interface to a parallel task works exactly like pthreads.
  void start();
  void join(mozilla::Maybe<mozilla::TimeStamp> deadline = mozilla::Nothing());

  // If multiple tasks are to be started or joined at once, it is more
  // efficient to take the helper thread lock once and use these methods.
  void startWithLockHeld(AutoLockHelperThreadState& lock);
  void joinWithLockHeld(
      AutoLockHelperThreadState& lock,
      mozilla::Maybe<mozilla::TimeStamp> deadline = mozilla::Nothing());
  void joinNonIdleTask(mozilla::Maybe<mozilla::TimeStamp> deadline,
                       AutoLockHelperThreadState& lock);

  // Instead of dispatching to a helper, run the task on the current thread.
  void runFromMainThread();
  void runFromMainThread(AutoLockHelperThreadState& lock);

  // If the task is not already running, either start it or run it on the main
  // thread if that fails.
  void startOrRunIfIdle(AutoLockHelperThreadState& lock);

  // Cancel a dispatched task before it started executing.
  void cancelDispatchedTask(AutoLockHelperThreadState& lock);

  // Set the cancel flag and wait for the task to finish.
  void cancelAndWait();

  // Report whether the task is idle. This means either before start() has been
  // called or after join() has been called.
  bool isIdle() const;
  bool isIdle(const AutoLockHelperThreadState& lock) const {
    return state_ == State::Idle;
  }

  // Report whether the task has been started. This means after start() has been
  // called but before the task has run to completion. The task may not yet have
  // started running.
  bool wasStarted() const;
  bool wasStarted(const AutoLockHelperThreadState& lock) const {
    return isDispatched(lock) || isRunning(lock);
  }

  bool isDispatched(const AutoLockHelperThreadState& lock) const {
    return state_ == State::Dispatched;
  }

 protected:
  // Override this method to provide the task's functionality.
  virtual void run(AutoLockHelperThreadState& lock) = 0;

  // Attribute duration_ to the task's GC phase, if any; called after a join.
  virtual void recordDuration();

  bool isCancelled() const { return cancel_; }

 private:
  void assertIdle() const {
    // Don't lock here because that adds extra synchronization in debug
    // builds that may hide bugs. There's no race if the assertion passes.
    MOZ_ASSERT(state_ == State::Idle);
  }

  bool isRunning(const AutoLockHelperThreadState& lock) const {
    return state_ == State::Running;
  }
  bool isFinished(const AutoLockHelperThreadState& lock) const {
    return state_ == State::Finished;
  }

  // State transitions, each asserting the expected predecessor state.
  void setDispatched(const AutoLockHelperThreadState& lock) {
    MOZ_ASSERT(isIdle(lock));
    state_ = State::Dispatched;
  }
  void setRunning(const AutoLockHelperThreadState& lock) {
    MOZ_ASSERT(isDispatched(lock));
    state_ = State::Running;
  }
  void setFinished(const AutoLockHelperThreadState& lock) {
    MOZ_ASSERT(isRunning(lock));
    state_ = State::Finished;
  }
  void setIdle(const AutoLockHelperThreadState& lock) {
    MOZ_ASSERT(isDispatched(lock) || isFinished(lock));
    state_ = State::Idle;
  }

  void runTask(JS::GCContext* gcx, AutoLockHelperThreadState& lock);

  // Implement the HelperThreadTask interface.
  ThreadType threadType() override {
    return ThreadType::THREAD_TYPE_GCPARALLEL;
  }
  void runHelperThreadTask(AutoLockHelperThreadState& locked) override;
};
+
+} /* namespace js */
+#endif /* gc_GCParallelTask_h */
diff --git a/js/src/gc/GCProbes.h b/js/src/gc/GCProbes.h
new file mode 100644
index 0000000000..6942610493
--- /dev/null
+++ b/js/src/gc/GCProbes.h
@@ -0,0 +1,49 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCProbes_h
+#define gc_GCProbes_h
+
+/*
+ * This interface can be used to insert probes for GC related events.
+ *
+ * The code must be built with JS_GC_PROBES for these probes to be called
+ * from JIT code.
+ */
+
+#include <stddef.h>
+
+#include "gc/AllocKind.h"
+#include "js/TraceKind.h"
+
+class JSObject;
+
+namespace js {
+namespace gc {
+
+class GCRuntime;
+struct Cell;
+
+namespace gcprobes {
+
+inline void Init(gc::GCRuntime* gc) {}
+inline void Finish(gc::GCRuntime* gc) {}
+inline void NurseryAlloc(void* ptr, JS::TraceKind kind) {}
+inline void TenuredAlloc(void* ptr, gc::AllocKind kind) {}
+inline void CreateObject(JSObject* object) {}
+inline void MinorGCStart() {}
+inline void PromoteToTenured(gc::Cell* src, gc::Cell* dst) {}
+inline void MinorGCEnd() {}
+inline void MajorGCStart() {}
+inline void TenuredFinalize(gc::Cell* thing) {
+} // May be called off main thread.
+inline void MajorGCEnd() {}
+
+} // namespace gcprobes
+} // namespace gc
+} // namespace js
+
+#endif // gc_GCProbes_h
diff --git a/js/src/gc/GCRuntime.h b/js/src/gc/GCRuntime.h
new file mode 100644
index 0000000000..5336114020
--- /dev/null
+++ b/js/src/gc/GCRuntime.h
@@ -0,0 +1,1444 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCRuntime_h
+#define gc_GCRuntime_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/DoublyLinkedList.h"
+#include "mozilla/EnumSet.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/TimeStamp.h"
+
+#include "gc/ArenaList.h"
+#include "gc/AtomMarking.h"
+#include "gc/GCContext.h"
+#include "gc/GCMarker.h"
+#include "gc/GCParallelTask.h"
+#include "gc/IteratorUtils.h"
+#include "gc/Nursery.h"
+#include "gc/Scheduling.h"
+#include "gc/Statistics.h"
+#include "gc/StoreBuffer.h"
+#include "js/friend/PerformanceHint.h"
+#include "js/GCAnnotations.h"
+#include "js/UniquePtr.h"
+#include "vm/AtomsTable.h"
+
+namespace js {
+
+class AutoLockGC;
+class AutoLockGCBgAlloc;
+class AutoLockHelperThreadState;
+class FinalizationRegistryObject;
+class FinalizationRecordObject;
+class FinalizationQueueObject;
+class GlobalObject;
+class VerifyPreTracer;
+class WeakRefObject;
+
+namespace gc {
+
+using BlackGrayEdgeVector = Vector<TenuredCell*, 0, SystemAllocPolicy>;
+using ZoneVector = Vector<JS::Zone*, 4, SystemAllocPolicy>;
+
+class AutoCallGCCallbacks;
+class AutoGCSession;
+class AutoHeapSession;
+class AutoTraceSession;
+struct FinalizePhase;
+class MarkingValidator;
+struct MovingTracer;
+class ParallelMarkTask;
+enum class ShouldCheckThresholds;
+class SweepGroupsIter;
+
+// Interface to a sweep action.
+struct SweepAction {
+ // The arguments passed to each action.
+ struct Args {
+ GCRuntime* gc;
+ JS::GCContext* gcx;
+ SliceBudget& budget;
+ };
+
+ virtual ~SweepAction() = default;
+ virtual IncrementalProgress run(Args& state) = 0;
+ virtual void assertFinished() const = 0;
+ virtual bool shouldSkip() { return false; }
+};
+
+class ChunkPool {
+ TenuredChunk* head_;
+ size_t count_;
+
+ public:
+ ChunkPool() : head_(nullptr), count_(0) {}
+ ChunkPool(const ChunkPool& other) = delete;
+ ChunkPool(ChunkPool&& other) { *this = std::move(other); }
+
+ ~ChunkPool() {
+ MOZ_ASSERT(!head_);
+ MOZ_ASSERT(count_ == 0);
+ }
+
+ ChunkPool& operator=(const ChunkPool& other) = delete;
+ ChunkPool& operator=(ChunkPool&& other) {
+ head_ = other.head_;
+ other.head_ = nullptr;
+ count_ = other.count_;
+ other.count_ = 0;
+ return *this;
+ }
+
+ bool empty() const { return !head_; }
+ size_t count() const { return count_; }
+
+ TenuredChunk* head() {
+ MOZ_ASSERT(head_);
+ return head_;
+ }
+ TenuredChunk* pop();
+ void push(TenuredChunk* chunk);
+ TenuredChunk* remove(TenuredChunk* chunk);
+
+ void sort();
+
+ private:
+ TenuredChunk* mergeSort(TenuredChunk* list, size_t count);
+ bool isSorted() const;
+
+#ifdef DEBUG
+ public:
+ bool contains(TenuredChunk* chunk) const;
+ bool verify() const;
+ void verifyChunks() const;
+#endif
+
+ public:
+ // Pool mutation does not invalidate an Iter unless the mutation
+ // is of the TenuredChunk currently being visited by the Iter.
+ class Iter {
+ public:
+ explicit Iter(ChunkPool& pool) : current_(pool.head_) {}
+ bool done() const { return !current_; }
+ void next();
+ TenuredChunk* get() const { return current_; }
+ operator TenuredChunk*() const { return get(); }
+ TenuredChunk* operator->() const { return get(); }
+
+ private:
+ TenuredChunk* current_;
+ };
+};
+
+class BackgroundMarkTask : public GCParallelTask {
+ public:
+ explicit BackgroundMarkTask(GCRuntime* gc);
+ void setBudget(const SliceBudget& budget) { this->budget = budget; }
+ void run(AutoLockHelperThreadState& lock) override;
+
+ private:
+ SliceBudget budget;
+};
+
+class BackgroundUnmarkTask : public GCParallelTask {
+ public:
+ explicit BackgroundUnmarkTask(GCRuntime* gc);
+ void initZones();
+ void run(AutoLockHelperThreadState& lock) override;
+
+ ZoneVector zones;
+};
+
+class BackgroundSweepTask : public GCParallelTask {
+ public:
+ explicit BackgroundSweepTask(GCRuntime* gc);
+ void run(AutoLockHelperThreadState& lock) override;
+};
+
+class BackgroundFreeTask : public GCParallelTask {
+ public:
+ explicit BackgroundFreeTask(GCRuntime* gc);
+ void run(AutoLockHelperThreadState& lock) override;
+};
+
+// Performs extra allocation off thread so that when memory is required on the
+// main thread it will already be available and waiting.
+class BackgroundAllocTask : public GCParallelTask {
+ // Guarded by the GC lock.
+ GCLockData<ChunkPool&> chunkPool_;
+
+ const bool enabled_;
+
+ public:
+ BackgroundAllocTask(GCRuntime* gc, ChunkPool& pool);
+ bool enabled() const { return enabled_; }
+
+ void run(AutoLockHelperThreadState& lock) override;
+};
+
+// Search the provided chunks for free arenas and decommit them.
+class BackgroundDecommitTask : public GCParallelTask {
+ public:
+ explicit BackgroundDecommitTask(GCRuntime* gc);
+ void run(AutoLockHelperThreadState& lock) override;
+};
+
+template <typename F>
+struct Callback {
+ F op;
+ void* data;
+
+ Callback() : op(nullptr), data(nullptr) {}
+ Callback(F op, void* data) : op(op), data(data) {}
+};
+
+template <typename F>
+using CallbackVector = Vector<Callback<F>, 4, SystemAllocPolicy>;
+
+typedef HashMap<Value*, const char*, DefaultHasher<Value*>, SystemAllocPolicy>
+ RootedValueMap;
+
+using AllocKinds = mozilla::EnumSet<AllocKind, uint64_t>;
+
+// A singly linked list of zones.
+class ZoneList {
+ static Zone* const End;
+
+ Zone* head;
+ Zone* tail;
+
+ public:
+ ZoneList();
+ ~ZoneList();
+
+ bool isEmpty() const;
+ Zone* front() const;
+
+ void prepend(Zone* zone);
+ void append(Zone* zone);
+ void prependList(ZoneList&& other);
+ void appendList(ZoneList&& other);
+ Zone* removeFront();
+ void clear();
+
+ private:
+ explicit ZoneList(Zone* singleZone);
+ void check() const;
+
+ ZoneList(const ZoneList& other) = delete;
+ ZoneList& operator=(const ZoneList& other) = delete;
+};
+
+struct WeakCacheToSweep {
+ JS::detail::WeakCacheBase* cache;
+ JS::Zone* zone;
+};
+
+class WeakCacheSweepIterator {
+ using WeakCacheBase = JS::detail::WeakCacheBase;
+
+ JS::Zone* sweepZone;
+ WeakCacheBase* sweepCache;
+
+ public:
+ explicit WeakCacheSweepIterator(JS::Zone* sweepGroup);
+
+ bool done() const;
+ WeakCacheToSweep get() const;
+ void next();
+
+ private:
+ void settle();
+};
+
+struct SweepingTracer final : public GenericTracerImpl<SweepingTracer> {
+ explicit SweepingTracer(JSRuntime* rt);
+
+ private:
+ template <typename T>
+ void onEdge(T** thingp, const char* name);
+ friend class GenericTracerImpl<SweepingTracer>;
+};
+
+class GCRuntime {
+ public:
+ explicit GCRuntime(JSRuntime* rt);
+ [[nodiscard]] bool init(uint32_t maxbytes);
+ bool wasInitialized() const { return initialized; }
+ void finishRoots();
+ void finish();
+
+ Zone* atomsZone() {
+ Zone* zone = zones()[0];
+ MOZ_ASSERT(JS::shadow::Zone::from(zone)->isAtomsZone());
+ return zone;
+ }
+ Zone* maybeSharedAtomsZone() { return sharedAtomsZone_; }
+
+ [[nodiscard]] bool freezeSharedAtomsZone();
+ void restoreSharedAtomsZone();
+
+ JS::HeapState heapState() const { return heapState_; }
+
+ inline bool hasZealMode(ZealMode mode);
+ inline void clearZealMode(ZealMode mode);
+ inline bool needZealousGC();
+ inline bool hasIncrementalTwoSliceZealMode();
+
+ [[nodiscard]] bool addRoot(Value* vp, const char* name);
+ void removeRoot(Value* vp);
+
+ [[nodiscard]] bool setParameter(JSContext* cx, JSGCParamKey key,
+ uint32_t value);
+ void resetParameter(JSContext* cx, JSGCParamKey key);
+ uint32_t getParameter(JSGCParamKey key);
+
+ void setPerformanceHint(PerformanceHint hint);
+ bool isInPageLoad() const { return inPageLoadCount != 0; }
+
+ [[nodiscard]] bool triggerGC(JS::GCReason reason);
+ // Check whether to trigger a zone GC after allocating GC cells.
+ void maybeTriggerGCAfterAlloc(Zone* zone);
+ // Check whether to trigger a zone GC after malloc memory.
+ void maybeTriggerGCAfterMalloc(Zone* zone);
+ bool maybeTriggerGCAfterMalloc(Zone* zone, const HeapSize& heap,
+ const HeapThreshold& threshold,
+ JS::GCReason reason);
+ // The return value indicates if we were able to do the GC.
+ bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes,
+ size_t thresholdBytes);
+
+ void maybeGC();
+
+ // Return whether we want to run a major GC. If eagerOk is true, include eager
+ // triggers (eg EAGER_ALLOC_TRIGGER) in this determination, and schedule all
+ // zones that exceed the eager thresholds.
+ JS::GCReason wantMajorGC(bool eagerOk);
+ bool checkEagerAllocTrigger(const HeapSize& size,
+ const HeapThreshold& threshold);
+
+ // Do a minor GC if requested, followed by a major GC if requested. The return
+ // value indicates whether a major GC was performed.
+ bool gcIfRequested() { return gcIfRequestedImpl(false); }
+
+ // Internal function to do a GC if previously requested. But if not and
+ // eagerOk, do an eager GC for all Zones that have exceeded the eager
+ // thresholds.
+ //
+ // Return whether a major GC was performed or started.
+ bool gcIfRequestedImpl(bool eagerOk);
+
+ void gc(JS::GCOptions options, JS::GCReason reason);
+ void startGC(JS::GCOptions options, JS::GCReason reason,
+ const SliceBudget& budget);
+ void gcSlice(JS::GCReason reason, const SliceBudget& budget);
+ void finishGC(JS::GCReason reason);
+ void abortGC();
+ void startDebugGC(JS::GCOptions options, const SliceBudget& budget);
+ void debugGCSlice(const SliceBudget& budget);
+
+ void runDebugGC();
+ void notifyRootsRemoved();
+
+ enum TraceOrMarkRuntime { TraceRuntime, MarkRuntime };
+ void traceRuntime(JSTracer* trc, AutoTraceSession& session);
+ void traceRuntimeForMinorGC(JSTracer* trc, AutoGCSession& session);
+
+ void purgeRuntimeForMinorGC();
+
+ void shrinkBuffers();
+ void onOutOfMallocMemory();
+ void onOutOfMallocMemory(const AutoLockGC& lock);
+
+ Nursery& nursery() { return nursery_.ref(); }
+ gc::StoreBuffer& storeBuffer() { return storeBuffer_.ref(); }
+
+ void minorGC(JS::GCReason reason,
+ gcstats::PhaseKind phase = gcstats::PhaseKind::MINOR_GC)
+ JS_HAZ_GC_CALL;
+ void evictNursery(JS::GCReason reason = JS::GCReason::EVICT_NURSERY) {
+ minorGC(reason, gcstats::PhaseKind::EVICT_NURSERY);
+ }
+
+ void* addressOfNurseryPosition() {
+ return nursery_.refNoCheck().addressOfPosition();
+ }
+
+ const void* addressOfLastBufferedWholeCell() {
+ return storeBuffer_.refNoCheck().addressOfLastBufferedWholeCell();
+ }
+
+#ifdef JS_GC_ZEAL
+ const uint32_t* addressOfZealModeBits() { return &zealModeBits.refNoCheck(); }
+ void getZealBits(uint32_t* zealBits, uint32_t* frequency,
+ uint32_t* nextScheduled);
+ void setZeal(uint8_t zeal, uint32_t frequency);
+ void unsetZeal(uint8_t zeal);
+ bool parseAndSetZeal(const char* str);
+ void setNextScheduled(uint32_t count);
+ void verifyPreBarriers();
+ void maybeVerifyPreBarriers(bool always);
+ bool selectForMarking(JSObject* object);
+ void clearSelectedForMarking();
+ void setDeterministic(bool enable);
+ void setMarkStackLimit(size_t limit, AutoLockGC& lock);
+#endif
+
+ uint64_t nextCellUniqueId() {
+ MOZ_ASSERT(nextCellUniqueId_ > 0);
+ uint64_t uid = ++nextCellUniqueId_;
+ return uid;
+ }
+
+ void setLowMemoryState(bool newState) { lowMemoryState = newState; }
+ bool systemHasLowMemory() const { return lowMemoryState; }
+
+ public:
+ // Internal public interface
+ ZoneVector& zones() { return zones_.ref(); }
+ gcstats::Statistics& stats() { return stats_.ref(); }
+ const gcstats::Statistics& stats() const { return stats_.ref(); }
+ State state() const { return incrementalState; }
+ bool isHeapCompacting() const { return state() == State::Compact; }
+ bool isForegroundSweeping() const { return state() == State::Sweep; }
+ bool isBackgroundSweeping() const { return sweepTask.wasStarted(); }
+ bool isBackgroundMarking() const { return markTask.wasStarted(); }
+ void waitBackgroundSweepEnd();
+ void waitBackgroundAllocEnd() { allocTask.cancelAndWait(); }
+ void waitBackgroundFreeEnd();
+ void waitForBackgroundTasks();
+ bool isWaitingOnBackgroundTask() const;
+
+ void lockGC() { lock.lock(); }
+ bool tryLockGC() { return lock.tryLock(); }
+ void unlockGC() { lock.unlock(); }
+
+#ifdef DEBUG
+ void assertCurrentThreadHasLockedGC() const {
+ lock.assertOwnedByCurrentThread();
+ }
+#endif // DEBUG
+
+ void setAlwaysPreserveCode() { alwaysPreserveCode = true; }
+
+ bool isIncrementalGCAllowed() const { return incrementalAllowed; }
+ void disallowIncrementalGC() { incrementalAllowed = false; }
+
+ void setIncrementalGCEnabled(bool enabled);
+
+ bool isIncrementalGCEnabled() const { return incrementalGCEnabled; }
+ bool isPerZoneGCEnabled() const { return perZoneGCEnabled; }
+ bool isCompactingGCEnabled() const;
+ bool isParallelMarkingEnabled() const { return parallelMarkingEnabled; }
+
+ bool isIncrementalGCInProgress() const {
+ return state() != State::NotActive && !isVerifyPreBarriersEnabled();
+ }
+
+ bool hasForegroundWork() const;
+
+ bool isShrinkingGC() const { return gcOptions() == JS::GCOptions::Shrink; }
+
+ bool isShutdownGC() const { return gcOptions() == JS::GCOptions::Shutdown; }
+
+#ifdef DEBUG
+ bool isShuttingDown() const { return hadShutdownGC; }
+#endif
+
+ bool initSweepActions();
+
+ void setGrayRootsTracer(JSGrayRootsTracer traceOp, void* data);
+ [[nodiscard]] bool addBlackRootsTracer(JSTraceDataOp traceOp, void* data);
+ void removeBlackRootsTracer(JSTraceDataOp traceOp, void* data);
+ void clearBlackAndGrayRootTracers();
+
+ void setGCCallback(JSGCCallback callback, void* data);
+ void callGCCallback(JSGCStatus status, JS::GCReason reason) const;
+ void setObjectsTenuredCallback(JSObjectsTenuredCallback callback, void* data);
+ void callObjectsTenuredCallback();
+ [[nodiscard]] bool addFinalizeCallback(JSFinalizeCallback callback,
+ void* data);
+ void removeFinalizeCallback(JSFinalizeCallback func);
+ void setHostCleanupFinalizationRegistryCallback(
+ JSHostCleanupFinalizationRegistryCallback callback, void* data);
+ void callHostCleanupFinalizationRegistryCallback(
+ JSFunction* doCleanup, GlobalObject* incumbentGlobal);
+ [[nodiscard]] bool addWeakPointerZonesCallback(
+ JSWeakPointerZonesCallback callback, void* data);
+ void removeWeakPointerZonesCallback(JSWeakPointerZonesCallback callback);
+ [[nodiscard]] bool addWeakPointerCompartmentCallback(
+ JSWeakPointerCompartmentCallback callback, void* data);
+ void removeWeakPointerCompartmentCallback(
+ JSWeakPointerCompartmentCallback callback);
+ JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback);
+ JS::GCNurseryCollectionCallback setNurseryCollectionCallback(
+ JS::GCNurseryCollectionCallback callback);
+ JS::DoCycleCollectionCallback setDoCycleCollectionCallback(
+ JS::DoCycleCollectionCallback callback);
+
+ bool addFinalizationRegistry(JSContext* cx,
+ Handle<FinalizationRegistryObject*> registry);
+ bool registerWithFinalizationRegistry(JSContext* cx, HandleObject target,
+ HandleObject record);
+ void queueFinalizationRegistryForCleanup(FinalizationQueueObject* queue);
+
+ void nukeFinalizationRecordWrapper(JSObject* wrapper,
+ FinalizationRecordObject* record);
+ void nukeWeakRefWrapper(JSObject* wrapper, WeakRefObject* record);
+
+ void setFullCompartmentChecks(bool enable);
+
+ // Get the main marking tracer.
+ GCMarker& marker() { return *markers[0]; }
+
+ JS::Zone* getCurrentSweepGroup() { return currentSweepGroup; }
+ unsigned getCurrentSweepGroupIndex() {
+ MOZ_ASSERT_IF(unsigned(state()) < unsigned(State::Sweep),
+ sweepGroupIndex == 0);
+ return sweepGroupIndex;
+ }
+
+ uint64_t gcNumber() const { return number; }
+ void incGcNumber() { ++number; }
+
+ uint64_t minorGCCount() const { return minorGCNumber; }
+ void incMinorGcNumber() { ++minorGCNumber; }
+
+ uint64_t majorGCCount() const { return majorGCNumber; }
+ void incMajorGcNumber() { ++majorGCNumber; }
+
+ uint64_t gcSliceCount() const { return sliceNumber; }
+ void incGcSliceNumber() { ++sliceNumber; }
+
+ int64_t defaultSliceBudgetMS() const { return defaultTimeBudgetMS_; }
+
+ bool isIncrementalGc() const { return isIncremental; }
+ bool isFullGc() const { return isFull; }
+ bool isCompactingGc() const { return isCompacting; }
+ bool didCompactZones() const { return isCompacting && zonesCompacted; }
+
+ bool areGrayBitsValid() const { return grayBitsValid; }
+ void setGrayBitsInvalid() { grayBitsValid = false; }
+
+ mozilla::TimeStamp lastGCStartTime() const { return lastGCStartTime_; }
+ mozilla::TimeStamp lastGCEndTime() const { return lastGCEndTime_; }
+
+ bool majorGCRequested() const {
+ return majorGCTriggerReason != JS::GCReason::NO_REASON;
+ }
+
+ double computeHeapGrowthFactor(size_t lastBytes);
+ size_t computeTriggerBytes(double growthFactor, size_t lastBytes);
+
+ inline void updateOnFreeArenaAlloc(const TenuredChunkInfo& info);
+ void updateOnArenaFree() { ++numArenasFreeCommitted; }
+
+ ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_.ref(); }
+ ChunkPool& availableChunks(const AutoLockGC& lock) {
+ return availableChunks_.ref();
+ }
+ ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_.ref(); }
+ const ChunkPool& fullChunks(const AutoLockGC& lock) const {
+ return fullChunks_.ref();
+ }
+ const ChunkPool& availableChunks(const AutoLockGC& lock) const {
+ return availableChunks_.ref();
+ }
+ const ChunkPool& emptyChunks(const AutoLockGC& lock) const {
+ return emptyChunks_.ref();
+ }
+ using NonEmptyChunksIter = ChainedIterator<ChunkPool::Iter, 2>;
+ NonEmptyChunksIter allNonEmptyChunks(const AutoLockGC& lock) {
+ return NonEmptyChunksIter(availableChunks(lock), fullChunks(lock));
+ }
+ uint32_t minEmptyChunkCount(const AutoLockGC& lock) const {
+ return minEmptyChunkCount_;
+ }
+ uint32_t maxEmptyChunkCount(const AutoLockGC& lock) const {
+ return maxEmptyChunkCount_;
+ }
+#ifdef DEBUG
+ void verifyAllChunks();
+#endif
+
+ TenuredChunk* getOrAllocChunk(AutoLockGCBgAlloc& lock);
+ void recycleChunk(TenuredChunk* chunk, const AutoLockGC& lock);
+
+#ifdef JS_GC_ZEAL
+ void startVerifyPreBarriers();
+ void endVerifyPreBarriers();
+ void finishVerifier();
+ bool isVerifyPreBarriersEnabled() const { return verifyPreData.refNoCheck(); }
+ bool shouldYieldForZeal(ZealMode mode);
+#else
+ bool isVerifyPreBarriersEnabled() const { return false; }
+#endif
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkHashTablesAfterMovingGC();
+#endif
+
+#ifdef DEBUG
+  // Crawl the heap to check whether an arbitrary pointer is within a cell of
+ // the given kind.
+ bool isPointerWithinTenuredCell(void* ptr, JS::TraceKind traceKind);
+
+ bool hasZone(Zone* target);
+#endif
+
+  // Queue memory to be freed on a background thread if possible.
+ void queueUnusedLifoBlocksForFree(LifoAlloc* lifo);
+ void queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc* lifo);
+ void queueBuffersForFreeAfterMinorGC(Nursery::BufferSet& buffers);
+
+ // Public here for ReleaseArenaLists and FinalizeTypedArenas.
+ void releaseArena(Arena* arena, const AutoLockGC& lock);
+
+ // Allocator
+ template <AllowGC allowGC>
+ [[nodiscard]] bool checkAllocatorState(JSContext* cx, AllocKind kind);
+ template <JS::TraceKind kind, AllowGC allowGC>
+ void* tryNewNurseryCell(JSContext* cx, size_t thingSize, AllocSite* site);
+ template <AllowGC allowGC>
+ static void* tryNewTenuredThing(JSContext* cx, AllocKind kind,
+ size_t thingSize);
+ static void* refillFreeListInGC(Zone* zone, AllocKind thingKind);
+
+ // Delayed marking.
+ void delayMarkingChildren(gc::Cell* cell, MarkColor color);
+ bool hasDelayedMarking() const;
+ void markAllDelayedChildren(ShouldReportMarkTime reportTime);
+
+ /*
+ * Concurrent sweep infrastructure.
+ */
+ void startTask(GCParallelTask& task, AutoLockHelperThreadState& lock);
+ void joinTask(GCParallelTask& task, AutoLockHelperThreadState& lock);
+ void updateHelperThreadCount();
+ bool updateMarkersVector();
+ size_t parallelWorkerCount() const;
+ size_t markingWorkerCount() const;
+
+ // WeakRefs
+ bool registerWeakRef(HandleObject target, HandleObject weakRef);
+ void traceKeptObjects(JSTracer* trc);
+
+ JS::GCReason lastStartReason() const { return initialReason; }
+
+ void updateAllocationRates();
+
+#ifdef DEBUG
+ const GCVector<HeapPtr<JS::Value>, 0, SystemAllocPolicy>& getTestMarkQueue()
+ const;
+ [[nodiscard]] bool appendTestMarkQueue(const JS::Value& value);
+ void clearTestMarkQueue();
+ size_t testMarkQueuePos() const;
+#endif
+
+ private:
+ enum IncrementalResult { ResetIncremental = 0, Ok };
+
+ [[nodiscard]] bool setParameter(JSGCParamKey key, uint32_t value,
+ AutoLockGC& lock);
+ void resetParameter(JSGCParamKey key, AutoLockGC& lock);
+ uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);
+ bool setThreadParameter(JSGCParamKey key, uint32_t value, AutoLockGC& lock);
+ void resetThreadParameter(JSGCParamKey key, AutoLockGC& lock);
+ void updateThreadDataStructures(AutoLockGC& lock);
+
+ JS::GCOptions gcOptions() const { return maybeGcOptions.ref().ref(); }
+
+ TriggerResult checkHeapThreshold(Zone* zone, const HeapSize& heapSize,
+ const HeapThreshold& heapThreshold);
+
+ void updateSchedulingStateOnGCStart();
+ void updateSchedulingStateAfterCollection(mozilla::TimeStamp currentTime);
+ void updateAllGCStartThresholds();
+
+ // For ArenaLists::allocateFromArena()
+ friend class ArenaLists;
+ TenuredChunk* pickChunk(AutoLockGCBgAlloc& lock);
+ Arena* allocateArena(TenuredChunk* chunk, Zone* zone, AllocKind kind,
+ ShouldCheckThresholds checkThresholds,
+ const AutoLockGC& lock);
+
+ // Allocator internals
+ void gcIfNeededAtAllocation(JSContext* cx);
+ static void* refillFreeList(JSContext* cx, AllocKind thingKind);
+ void attemptLastDitchGC(JSContext* cx);
+#ifdef DEBUG
+ static void checkIncrementalZoneState(JSContext* cx, void* ptr);
+#endif
+
+ /*
+ * Return the list of chunks that can be released outside the GC lock.
+ * Must be called either during the GC or with the GC lock taken.
+ */
+ friend class BackgroundDecommitTask;
+ bool tooManyEmptyChunks(const AutoLockGC& lock);
+ ChunkPool expireEmptyChunkPool(const AutoLockGC& lock);
+ void freeEmptyChunks(const AutoLockGC& lock);
+ void prepareToFreeChunk(TenuredChunkInfo& info);
+ void setMinEmptyChunkCount(uint32_t value, const AutoLockGC& lock);
+ void setMaxEmptyChunkCount(uint32_t value, const AutoLockGC& lock);
+
+ friend class BackgroundAllocTask;
+ bool wantBackgroundAllocation(const AutoLockGC& lock) const;
+ void startBackgroundAllocTaskIfIdle();
+
+ void requestMajorGC(JS::GCReason reason);
+ SliceBudget defaultBudget(JS::GCReason reason, int64_t millis);
+ bool maybeIncreaseSliceBudget(SliceBudget& budget);
+ bool maybeIncreaseSliceBudgetForLongCollections(SliceBudget& budget);
+ bool maybeIncreaseSliceBudgetForUrgentCollections(SliceBudget& budget);
+ IncrementalResult budgetIncrementalGC(bool nonincrementalByAPI,
+ JS::GCReason reason,
+ SliceBudget& budget);
+ void checkZoneIsScheduled(Zone* zone, JS::GCReason reason,
+ const char* trigger);
+ IncrementalResult resetIncrementalGC(GCAbortReason reason);
+
+ // Assert if the system state is such that we should never
+ // receive a request to do GC work.
+ void checkCanCallAPI();
+
+  // Check if the system state is such that GC has been suppressed
+ // or otherwise delayed.
+ [[nodiscard]] bool checkIfGCAllowedInCurrentState(JS::GCReason reason);
+
+ gcstats::ZoneGCStats scanZonesBeforeGC();
+
+ void setGCOptions(JS::GCOptions options);
+
+ void collect(bool nonincrementalByAPI, const SliceBudget& budget,
+ JS::GCReason reason) JS_HAZ_GC_CALL;
+
+ /*
+ * Run one GC "cycle" (either a slice of incremental GC or an entire
+ * non-incremental GC).
+ *
+ * Returns:
+ * * ResetIncremental if we "reset" an existing incremental GC, which would
+ * force us to run another cycle or
+ * * Ok otherwise.
+ */
+ [[nodiscard]] IncrementalResult gcCycle(bool nonincrementalByAPI,
+ const SliceBudget& budgetArg,
+ JS::GCReason reason);
+ bool shouldRepeatForDeadZone(JS::GCReason reason);
+
+ void incrementalSlice(SliceBudget& budget, JS::GCReason reason,
+ bool budgetWasIncreased);
+
+ bool mightSweepInThisSlice(bool nonIncremental);
+ void collectNurseryFromMajorGC(JS::GCReason reason);
+ void collectNursery(JS::GCOptions options, JS::GCReason reason,
+ gcstats::PhaseKind phase);
+
+ friend class AutoCallGCCallbacks;
+ void maybeCallGCCallback(JSGCStatus status, JS::GCReason reason);
+
+ void startCollection(JS::GCReason reason);
+
+ void purgeRuntime();
+ [[nodiscard]] bool beginPreparePhase(JS::GCReason reason,
+ AutoGCSession& session);
+ bool prepareZonesForCollection(JS::GCReason reason, bool* isFullOut);
+ void unmarkWeakMaps();
+ void endPreparePhase(JS::GCReason reason);
+ void beginMarkPhase(AutoGCSession& session);
+ bool shouldPreserveJITCode(JS::Realm* realm,
+ const mozilla::TimeStamp& currentTime,
+ JS::GCReason reason, bool canAllocateMoreCode,
+ bool isActiveCompartment);
+ void discardJITCodeForGC();
+ void startBackgroundFreeAfterMinorGC();
+ void relazifyFunctionsForShrinkingGC();
+ void purgePropMapTablesForShrinkingGC();
+ void purgeSourceURLsForShrinkingGC();
+ void traceRuntimeForMajorGC(JSTracer* trc, AutoGCSession& session);
+ void traceRuntimeAtoms(JSTracer* trc);
+ void traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark);
+ void traceEmbeddingBlackRoots(JSTracer* trc);
+ void traceEmbeddingGrayRoots(JSTracer* trc);
+ IncrementalProgress traceEmbeddingGrayRoots(JSTracer* trc,
+ SliceBudget& budget);
+ void checkNoRuntimeRoots(AutoGCSession& session);
+ void maybeDoCycleCollection();
+ void findDeadCompartments();
+
+ friend class BackgroundMarkTask;
+ enum ParallelMarking : bool {
+ SingleThreadedMarking = false,
+ AllowParallelMarking = true
+ };
+ IncrementalProgress markUntilBudgetExhausted(
+ SliceBudget& sliceBudget,
+ ParallelMarking allowParallelMarking = SingleThreadedMarking,
+ ShouldReportMarkTime reportTime = ReportMarkTime);
+ bool canMarkInParallel() const;
+
+ bool hasMarkingWork(MarkColor color) const;
+
+ void drainMarkStack();
+
+#ifdef DEBUG
+ void assertNoMarkingWork() const;
+#else
+ void assertNoMarkingWork() const {}
+#endif
+
+ void markDelayedChildren(gc::Arena* arena, MarkColor color);
+ void processDelayedMarkingList(gc::MarkColor color);
+ void rebuildDelayedMarkingList();
+ void appendToDelayedMarkingList(gc::Arena** listTail, gc::Arena* arena);
+ void resetDelayedMarking();
+ template <typename F>
+ void forEachDelayedMarkingArena(F&& f);
+
+ template <class ZoneIterT>
+ IncrementalProgress markWeakReferences(SliceBudget& budget);
+ IncrementalProgress markWeakReferencesInCurrentGroup(SliceBudget& budget);
+ template <class ZoneIterT>
+ IncrementalProgress markGrayRoots(SliceBudget& budget,
+ gcstats::PhaseKind phase);
+ void markBufferedGrayRoots(JS::Zone* zone);
+ IncrementalProgress markAllWeakReferences();
+ void markAllGrayReferences(gcstats::PhaseKind phase);
+
+ // The mark queue is a testing-only feature for controlling mark ordering and
+ // yield timing.
+ enum MarkQueueProgress {
+ QueueYielded, // End this incremental GC slice, if possible
+ QueueComplete, // Done with the queue
+ QueueSuspended // Continue the GC without ending the slice
+ };
+ MarkQueueProgress processTestMarkQueue();
+
+ // GC Sweeping. Implemented in Sweeping.cpp.
+ void beginSweepPhase(JS::GCReason reason, AutoGCSession& session);
+ void dropStringWrappers();
+ void groupZonesForSweeping(JS::GCReason reason);
+ [[nodiscard]] bool findSweepGroupEdges();
+ [[nodiscard]] bool addEdgesForMarkQueue();
+ void getNextSweepGroup();
+ void resetGrayList(Compartment* comp);
+ IncrementalProgress beginMarkingSweepGroup(JS::GCContext* gcx,
+ SliceBudget& budget);
+ IncrementalProgress markGrayRootsInCurrentGroup(JS::GCContext* gcx,
+ SliceBudget& budget);
+ IncrementalProgress markGray(JS::GCContext* gcx, SliceBudget& budget);
+ IncrementalProgress endMarkingSweepGroup(JS::GCContext* gcx,
+ SliceBudget& budget);
+ void markIncomingGrayCrossCompartmentPointers();
+ IncrementalProgress beginSweepingSweepGroup(JS::GCContext* gcx,
+ SliceBudget& budget);
+ void initBackgroundSweep(Zone* zone, JS::GCContext* gcx,
+ const FinalizePhase& phase);
+ IncrementalProgress markDuringSweeping(JS::GCContext* gcx,
+ SliceBudget& budget);
+ void updateAtomsBitmap();
+ void sweepCCWrappers();
+ void sweepRealmGlobals();
+ void sweepEmbeddingWeakPointers(JS::GCContext* gcx);
+ void sweepMisc();
+ void sweepCompressionTasks();
+ void sweepWeakMaps();
+ void sweepUniqueIds();
+ void sweepDebuggerOnMainThread(JS::GCContext* gcx);
+ void sweepJitDataOnMainThread(JS::GCContext* gcx);
+ void sweepFinalizationObserversOnMainThread();
+ void traceWeakFinalizationObserverEdges(JSTracer* trc, Zone* zone);
+ void sweepWeakRefs();
+ IncrementalProgress endSweepingSweepGroup(JS::GCContext* gcx,
+ SliceBudget& budget);
+ IncrementalProgress performSweepActions(SliceBudget& sliceBudget);
+ void startSweepingAtomsTable();
+ IncrementalProgress sweepAtomsTable(JS::GCContext* gcx, SliceBudget& budget);
+ IncrementalProgress sweepWeakCaches(JS::GCContext* gcx, SliceBudget& budget);
+ IncrementalProgress finalizeAllocKind(JS::GCContext* gcx,
+ SliceBudget& budget);
+ bool foregroundFinalize(JS::GCContext* gcx, Zone* zone, AllocKind thingKind,
+ js::SliceBudget& sliceBudget,
+ SortedArenaList& sweepList);
+ IncrementalProgress sweepPropMapTree(JS::GCContext* gcx, SliceBudget& budget);
+ void endSweepPhase(bool lastGC);
+ void queueZonesAndStartBackgroundSweep(ZoneList&& zones);
+ void sweepFromBackgroundThread(AutoLockHelperThreadState& lock);
+ void startBackgroundFree();
+ void freeFromBackgroundThread(AutoLockHelperThreadState& lock);
+ void sweepBackgroundThings(ZoneList& zones);
+ void backgroundFinalize(JS::GCContext* gcx, Zone* zone, AllocKind kind,
+ Arena** empty);
+ void assertBackgroundSweepingFinished();
+
+ bool allCCVisibleZonesWereCollected();
+ void sweepZones(JS::GCContext* gcx, bool destroyingRuntime);
+ bool shouldDecommit() const;
+ void startDecommit();
+ void decommitEmptyChunks(const bool& cancel, AutoLockGC& lock);
+ void decommitFreeArenas(const bool& cancel, AutoLockGC& lock);
+ void decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock);
+
+ // Compacting GC. Implemented in Compacting.cpp.
+ bool shouldCompact();
+ void beginCompactPhase();
+ IncrementalProgress compactPhase(JS::GCReason reason,
+ SliceBudget& sliceBudget,
+ AutoGCSession& session);
+ void endCompactPhase();
+ void sweepZoneAfterCompacting(MovingTracer* trc, Zone* zone);
+ bool canRelocateZone(Zone* zone) const;
+ [[nodiscard]] bool relocateArenas(Zone* zone, JS::GCReason reason,
+ Arena*& relocatedListOut,
+ SliceBudget& sliceBudget);
+ void updateCellPointers(Zone* zone, AllocKinds kinds);
+ void updateAllCellPointers(MovingTracer* trc, Zone* zone);
+ void updateZonePointersToRelocatedCells(Zone* zone);
+ void updateRuntimePointersToRelocatedCells(AutoGCSession& session);
+ void clearRelocatedArenas(Arena* arenaList, JS::GCReason reason);
+ void clearRelocatedArenasWithoutUnlocking(Arena* arenaList,
+ JS::GCReason reason,
+ const AutoLockGC& lock);
+ void releaseRelocatedArenas(Arena* arenaList);
+ void releaseRelocatedArenasWithoutUnlocking(Arena* arenaList,
+ const AutoLockGC& lock);
+#ifdef DEBUG
+ void protectOrReleaseRelocatedArenas(Arena* arenaList, JS::GCReason reason);
+ void protectAndHoldArenas(Arena* arenaList);
+ void unprotectHeldRelocatedArenas(const AutoLockGC& lock);
+ void releaseHeldRelocatedArenas();
+ void releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock);
+#endif
+
+ /*
+ * Whether to immediately trigger a slice after a background task
+ * finishes. This may not happen at a convenient time, so the consideration is
+ * whether the slice will run quickly or may take a long time.
+ */
+ enum ShouldTriggerSliceWhenFinished : bool {
+ DontTriggerSliceWhenFinished = false,
+ TriggerSliceWhenFinished = true
+ };
+
+ IncrementalProgress waitForBackgroundTask(
+ GCParallelTask& task, const SliceBudget& budget, bool shouldPauseMutator,
+ ShouldTriggerSliceWhenFinished triggerSlice);
+
+ void maybeRequestGCAfterBackgroundTask(const AutoLockHelperThreadState& lock);
+ void cancelRequestedGCAfterBackgroundTask();
+ void finishCollection(JS::GCReason reason);
+ void maybeStopPretenuring();
+ void checkGCStateNotInUse();
+ IncrementalProgress joinBackgroundMarkTask();
+
+#ifdef JS_GC_ZEAL
+ void computeNonIncrementalMarkingForValidation(AutoGCSession& session);
+ void validateIncrementalMarking();
+ void finishMarkingValidation();
+#endif
+
+#ifdef DEBUG
+ void checkForCompartmentMismatches();
+#endif
+
+ void callFinalizeCallbacks(JS::GCContext* gcx, JSFinalizeStatus status) const;
+ void callWeakPointerZonesCallbacks(JSTracer* trc) const;
+ void callWeakPointerCompartmentCallbacks(JSTracer* trc,
+ JS::Compartment* comp) const;
+ void callDoCycleCollectionCallback(JSContext* cx);
+
+ public:
+ JSRuntime* const rt;
+
+ // Embedders can use this zone however they wish.
+ MainThreadData<JS::Zone*> systemZone;
+
+ MainThreadData<JS::GCContext> mainThreadContext;
+
+ private:
+ // For parent runtimes, a zone containing atoms that is shared by child
+ // runtimes.
+ MainThreadData<Zone*> sharedAtomsZone_;
+
+ // All zones in the runtime. The first element is always the atoms zone.
+ MainThreadOrGCTaskData<ZoneVector> zones_;
+
+ // Any activity affecting the heap.
+ MainThreadOrGCTaskData<JS::HeapState> heapState_;
+ friend class AutoHeapSession;
+ friend class JS::AutoEnterCycleCollection;
+
+ UnprotectedData<gcstats::Statistics> stats_;
+
+ public:
+ js::StringStats stringStats;
+
+ Vector<UniquePtr<GCMarker>, 1, SystemAllocPolicy> markers;
+
+ // Delayed marking support in case we OOM pushing work onto the mark stack.
+ MainThreadOrGCTaskData<js::gc::Arena*> delayedMarkingList;
+ MainThreadOrGCTaskData<bool> delayedMarkingWorkAdded;
+#ifdef DEBUG
+ /* Count of arenas that are currently in the stack. */
+ MainThreadOrGCTaskData<size_t> markLaterArenas;
+#endif
+
+ SweepingTracer sweepingTracer;
+
+ /* Track total GC heap size for this runtime. */
+ HeapSize heapSize;
+
+ /* GC scheduling state and parameters. */
+ GCSchedulingTunables tunables;
+ GCSchedulingState schedulingState;
+ MainThreadData<bool> fullGCRequested;
+
+ // Helper thread configuration.
+ MainThreadData<double> helperThreadRatio;
+ MainThreadData<size_t> maxHelperThreads;
+ MainThreadOrGCTaskData<size_t> helperThreadCount;
+ MainThreadData<size_t> markingThreadCount;
+
+ // State used for managing atom mark bitmaps in each zone.
+ AtomMarkingRuntime atomMarking;
+
+ /*
+ * Pointer to a callback that, if set, will be used to create a
+ * budget for internally-triggered GCs.
+ */
+ MainThreadData<JS::CreateSliceBudgetCallback> createBudgetCallback;
+
+ private:
+ // Arenas used for permanent things created at startup and shared by child
+ // runtimes.
+ MainThreadData<ArenaList> permanentAtoms;
+ MainThreadData<ArenaList> permanentFatInlineAtoms;
+ MainThreadData<ArenaList> permanentWellKnownSymbols;
+
+ // When chunks are empty, they reside in the emptyChunks pool and are
+ // re-used as needed or eventually expired if not re-used. The emptyChunks
+ // pool gets refilled from the background allocation task heuristically so
+ // that empty chunks should always be available for immediate allocation
+ // without syscalls.
+ GCLockData<ChunkPool> emptyChunks_;
+
+ // Chunks which have had some, but not all, of their arenas allocated live
+ // in the available chunk lists. When all available arenas in a chunk have
+ // been allocated, the chunk is removed from the available list and moved
+ // to the fullChunks pool. During a GC, if all arenas are free, the chunk
+ // is moved back to the emptyChunks pool and scheduled for eventual
+ // release.
+ GCLockData<ChunkPool> availableChunks_;
+
+ // When all arenas in a chunk are used, it is moved to the fullChunks pool
+ // so as to reduce the cost of operations on the available lists.
+ GCLockData<ChunkPool> fullChunks_;
+
+ /*
+ * JSGC_MIN_EMPTY_CHUNK_COUNT
+ * JSGC_MAX_EMPTY_CHUNK_COUNT
+ *
+ * Controls the number of empty chunks reserved for future allocation.
+ *
+ * They can be read off main thread by the background allocation task and the
+ * background decommit task.
+ */
+ GCLockData<uint32_t> minEmptyChunkCount_;
+ GCLockData<uint32_t> maxEmptyChunkCount_;
+
+ MainThreadData<RootedValueMap> rootsHash;
+
+ // An incrementing id used to assign unique ids to cells that require one.
+ MainThreadData<uint64_t> nextCellUniqueId_;
+
+ /*
+ * Number of the committed arenas in all GC chunks including empty chunks.
+ */
+ mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> numArenasFreeCommitted;
+ MainThreadData<VerifyPreTracer*> verifyPreData;
+
+ MainThreadData<mozilla::TimeStamp> lastGCStartTime_;
+ MainThreadData<mozilla::TimeStamp> lastGCEndTime_;
+
+ WriteOnceData<bool> initialized;
+ MainThreadData<bool> incrementalGCEnabled;
+ MainThreadData<bool> perZoneGCEnabled;
+
+ mozilla::Atomic<size_t, mozilla::ReleaseAcquire> numActiveZoneIters;
+
+ /* During shutdown, the GC needs to clean up every possible object. */
+ MainThreadData<bool> cleanUpEverything;
+
+ /*
+ * The gray bits can become invalid if UnmarkGray overflows the stack. A
+ * full GC will reset this bit, since it fills in all the gray bits.
+ */
+ UnprotectedData<bool> grayBitsValid;
+
+ mozilla::Atomic<JS::GCReason, mozilla::ReleaseAcquire> majorGCTriggerReason;
+
+ /* Incremented at the start of every minor GC. */
+ MainThreadData<uint64_t> minorGCNumber;
+
+ /* Incremented at the start of every major GC. */
+ MainThreadData<uint64_t> majorGCNumber;
+
+ /* Incremented on every GC slice or minor collection. */
+ MainThreadData<uint64_t> number;
+
+ /* Incremented on every GC slice. */
+ MainThreadData<uint64_t> sliceNumber;
+
+ /* Whether the currently running GC can finish in multiple slices. */
+ MainThreadOrGCTaskData<bool> isIncremental;
+
+ /* Whether all zones are being collected in first GC slice. */
+ MainThreadData<bool> isFull;
+
+ /* Whether the heap will be compacted at the end of GC. */
+ MainThreadData<bool> isCompacting;
+
+ /* The invocation kind of the current GC, set at the start of collection. */
+ MainThreadOrGCTaskData<mozilla::Maybe<JS::GCOptions>> maybeGcOptions;
+
+ /* The initial GC reason, taken from the first slice. */
+ MainThreadData<JS::GCReason> initialReason;
+
+ /*
+ * The current incremental GC phase. This is also used internally in
+ * non-incremental GC.
+ */
+ MainThreadOrGCTaskData<State> incrementalState;
+
+ /* The incremental state at the start of this slice. */
+ MainThreadOrGCTaskData<State> initialState;
+
+  /* Whether to pay attention to the zeal settings in this incremental slice. */
+#ifdef JS_GC_ZEAL
+ MainThreadData<bool> useZeal;
+#else
+ const bool useZeal;
+#endif
+
+ /* Indicates that the last incremental slice exhausted the mark stack. */
+ MainThreadData<bool> lastMarkSlice;
+
+ // Whether it's currently safe to yield to the mutator in an incremental GC.
+ MainThreadData<bool> safeToYield;
+
+ // Whether to do any marking caused by barriers on a background thread during
+ // incremental sweeping, while also sweeping zones which have finished
+ // marking.
+ MainThreadData<bool> markOnBackgroundThreadDuringSweeping;
+
+ // Whether any sweeping and decommitting will run on a separate GC helper
+ // thread.
+ MainThreadData<bool> useBackgroundThreads;
+
+#ifdef DEBUG
+ /* Shutdown has started. Further collections must be shutdown collections. */
+ MainThreadData<bool> hadShutdownGC;
+#endif
+
+ /* Singly linked list of zones to be swept in the background. */
+ HelperThreadLockData<ZoneList> backgroundSweepZones;
+
+ /*
+ * Whether to trigger a GC slice after a background task is complete, so that
+   * the collector can continue or finish collecting. This is only used for the
+ * tasks that run concurrently with the mutator, which are background
+ * finalization and background decommit.
+ */
+ HelperThreadLockData<bool> requestSliceAfterBackgroundTask;
+
+ /*
+ * Free LIFO blocks are transferred to these allocators before being freed on
+ * a background thread.
+ */
+ HelperThreadLockData<LifoAlloc> lifoBlocksToFree;
+ MainThreadData<LifoAlloc> lifoBlocksToFreeAfterMinorGC;
+ HelperThreadLockData<Nursery::BufferSet> buffersToFreeAfterMinorGC;
+
+ /* Index of current sweep group (for stats). */
+ MainThreadData<unsigned> sweepGroupIndex;
+
+ /*
+ * Incremental sweep state.
+ */
+ MainThreadData<JS::Zone*> sweepGroups;
+ MainThreadOrGCTaskData<JS::Zone*> currentSweepGroup;
+ MainThreadData<UniquePtr<SweepAction>> sweepActions;
+ MainThreadOrGCTaskData<JS::Zone*> sweepZone;
+ MainThreadOrGCTaskData<AllocKind> sweepAllocKind;
+ MainThreadData<mozilla::Maybe<AtomsTable::SweepIterator>> maybeAtomsToSweep;
+ MainThreadOrGCTaskData<mozilla::Maybe<WeakCacheSweepIterator>>
+ weakCachesToSweep;
+ MainThreadData<bool> abortSweepAfterCurrentGroup;
+ MainThreadOrGCTaskData<IncrementalProgress> sweepMarkResult;
+
+#ifdef DEBUG
+ /*
+ * List of objects to mark at the beginning of a GC for testing purposes. May
+ * also contain string directives to change mark color or wait until different
+ * phases of the GC.
+ *
+ * This is a WeakCache because not everything in this list is guaranteed to
+ * end up marked (eg if you insert an object from an already-processed sweep
+ * group in the middle of an incremental GC). Also, the mark queue is not
+ * used during shutdown GCs. In either case, unmarked objects may need to be
+ * discarded.
+ */
+ JS::WeakCache<GCVector<HeapPtr<JS::Value>, 0, SystemAllocPolicy>>
+ testMarkQueue;
+
+ /* Position within the test mark queue. */
+ size_t queuePos;
+
+ /* The test marking queue might want to be marking a particular color. */
+ mozilla::Maybe<js::gc::MarkColor> queueMarkColor;
+
+ // During gray marking, delay AssertCellIsNotGray checks by
+ // recording the cell pointers here and checking after marking has
+ // finished.
+ MainThreadData<Vector<const Cell*, 0, SystemAllocPolicy>>
+ cellsToAssertNotGray;
+ friend void js::gc::detail::AssertCellIsNotGray(const Cell*);
+#endif
+
+ friend class SweepGroupsIter;
+
+ /*
+ * Incremental compacting state.
+ */
+ MainThreadData<bool> startedCompacting;
+ MainThreadData<ZoneList> zonesToMaybeCompact;
+ MainThreadData<size_t> zonesCompacted;
+#ifdef DEBUG
+ GCLockData<Arena*> relocatedArenasToRelease;
+#endif
+
+#ifdef JS_GC_ZEAL
+ MainThreadData<MarkingValidator*> markingValidator;
+#endif
+
+ /*
+ * Default budget for incremental GC slice. See js/SliceBudget.h.
+ *
+ * JSGC_SLICE_TIME_BUDGET_MS
+ * pref: javascript.options.mem.gc_incremental_slice_ms,
+ */
+ MainThreadData<int64_t> defaultTimeBudgetMS_;
+
+ /*
+ * We disable incremental GC if we encounter a Class with a trace hook
+ * that does not implement write barriers.
+ */
+ MainThreadData<bool> incrementalAllowed;
+
+ /*
+   * Whether compacting GC is enabled globally.
+ *
+ * JSGC_COMPACTING_ENABLED
+ * pref: javascript.options.mem.gc_compacting
+ */
+ MainThreadData<bool> compactingEnabled;
+
+ /*
+ * Whether parallel marking is enabled globally.
+ *
+ * JSGC_PARALLEL_MARKING_ENABLED
+ * pref: javascript.options.mem.gc_parallel_marking
+ */
+ MainThreadData<bool> parallelMarkingEnabled;
+
+ MainThreadData<bool> rootsRemoved;
+
+ /*
+ * These options control the zealousness of the GC. At every allocation,
+ * nextScheduled is decremented. When it reaches zero we do a full GC.
+ *
+ * At this point, if zeal_ is one of the types that trigger periodic
+ * collection, then nextScheduled is reset to the value of zealFrequency.
+ * Otherwise, no additional GCs take place.
+ *
+ * You can control these values in several ways:
+ * - Set the JS_GC_ZEAL environment variable
+ * - Call gczeal() or schedulegc() from inside shell-executed JS code
+ * (see the help for details)
+ *
+ * If gcZeal_ == 1 then we perform GCs in select places (during MaybeGC and
+ * whenever we are notified that GC roots have been removed). This option is
+ * mainly useful to embedders.
+ *
+ * We use zeal_ == 4 to enable write barrier verification. See the comment
+ * in gc/Verifier.cpp for more information about this.
+ *
+ * zeal_ values from 8 to 10 periodically run different types of
+ * incremental GC.
+ *
+ * zeal_ value 14 performs periodic shrinking collections.
+ */
+#ifdef JS_GC_ZEAL
+ static_assert(size_t(ZealMode::Count) <= 32,
+ "Too many zeal modes to store in a uint32_t");
+ MainThreadData<uint32_t> zealModeBits;
+ MainThreadData<int> zealFrequency;
+ MainThreadData<int> nextScheduled;
+ MainThreadData<bool> deterministicOnly;
+ MainThreadData<int> zealSliceBudget;
+ MainThreadData<size_t> maybeMarkStackLimit;
+
+ MainThreadData<PersistentRooted<GCVector<JSObject*, 0, SystemAllocPolicy>>>
+ selectedForMarking;
+#endif
+
+ MainThreadData<bool> fullCompartmentChecks;
+
+ MainThreadData<uint32_t> gcCallbackDepth;
+
+ MainThreadData<Callback<JSGCCallback>> gcCallback;
+ MainThreadData<Callback<JS::DoCycleCollectionCallback>>
+ gcDoCycleCollectionCallback;
+ MainThreadData<Callback<JSObjectsTenuredCallback>> tenuredCallback;
+ MainThreadData<CallbackVector<JSFinalizeCallback>> finalizeCallbacks;
+ MainThreadOrGCTaskData<Callback<JSHostCleanupFinalizationRegistryCallback>>
+ hostCleanupFinalizationRegistryCallback;
+ MainThreadData<CallbackVector<JSWeakPointerZonesCallback>>
+ updateWeakPointerZonesCallbacks;
+ MainThreadData<CallbackVector<JSWeakPointerCompartmentCallback>>
+ updateWeakPointerCompartmentCallbacks;
+
+ /*
+ * The trace operations to trace embedding-specific GC roots. One is for
+ * tracing through black roots and the other is for tracing through gray
+ * roots. The black/gray distinction is only relevant to the cycle
+ * collector.
+ */
+ MainThreadData<CallbackVector<JSTraceDataOp>> blackRootTracers;
+ MainThreadOrGCTaskData<Callback<JSGrayRootsTracer>> grayRootTracer;
+
+ /* Always preserve JIT code during GCs, for testing. */
+ MainThreadData<bool> alwaysPreserveCode;
+
+ /* Count of the number of zones that are currently in page load. */
+ MainThreadData<size_t> inPageLoadCount;
+
+ MainThreadData<bool> lowMemoryState;
+
+ /*
+ * General purpose GC lock, used for synchronising operations on
+ * arenas and during parallel marking.
+ */
+ friend class js::AutoLockGC;
+ friend class js::AutoLockGCBgAlloc;
+ js::Mutex lock MOZ_UNANNOTATED;
+
+ /* Lock used to synchronise access to delayed marking state. */
+ js::Mutex delayedMarkingLock MOZ_UNANNOTATED;
+
+ friend class BackgroundSweepTask;
+ friend class BackgroundFreeTask;
+
+ BackgroundAllocTask allocTask;
+ BackgroundUnmarkTask unmarkTask;
+ BackgroundMarkTask markTask;
+ BackgroundSweepTask sweepTask;
+ BackgroundFreeTask freeTask;
+ BackgroundDecommitTask decommitTask;
+
+ /*
+ * During incremental sweeping, this field temporarily holds the arenas of
+ * the current AllocKind being swept in order of increasing free space.
+ */
+ MainThreadData<SortedArenaList> incrementalSweepList;
+
+ MainThreadData<Nursery> nursery_;
+
+ // The store buffer used to track tenured to nursery edges for generational
+ // GC. This is accessed off main thread when sweeping WeakCaches.
+ MainThreadOrGCTaskData<gc::StoreBuffer> storeBuffer_;
+
+ mozilla::TimeStamp lastLastDitchTime;
+
+ // The last time per-zone allocation rates were updated.
+ MainThreadData<mozilla::TimeStamp> lastAllocRateUpdateTime;
+
+ // Total collector time since per-zone allocation rates were last updated.
+ MainThreadData<mozilla::TimeDuration> collectorTimeSinceAllocRateUpdate;
+
+ friend class MarkingValidator;
+ friend class AutoEnterIteration;
+};
+
+/* Prevent compartments and zones from being collected during iteration. */
+class MOZ_RAII AutoEnterIteration {
+  GCRuntime* gc;
+
+ public:
+  // RAII: increment the runtime's count of active zone iterators for the
+  // lifetime of this object, and decrement it on destruction.
+  explicit AutoEnterIteration(GCRuntime* gc_) : gc(gc_) {
+    ++gc->numActiveZoneIters;
+  }
+
+  ~AutoEnterIteration() {
+    MOZ_ASSERT(gc->numActiveZoneIters);
+    --gc->numActiveZoneIters;
+  }
+};
+
+#ifdef JS_GC_ZEAL
+
+// Return whether the bit for |mode| is set in zealModeBits.
+inline bool GCRuntime::hasZealMode(ZealMode mode) {
+  static_assert(size_t(ZealMode::Limit) < sizeof(zealModeBits) * 8,
+                "Zeal modes must fit in zealModeBits");
+  return zealModeBits & (1 << uint32_t(mode));
+}
+
+// Clear the bit for |mode| in zealModeBits.
+inline void GCRuntime::clearZealMode(ZealMode mode) {
+  zealModeBits &= ~(1 << uint32_t(mode));
+  MOZ_ASSERT(!hasZealMode(mode));
+}
+
+// Decrement the zeal countdown and report whether a zeal-triggered GC is due.
+// For modes that collect periodically the countdown is re-armed from
+// zealFrequency; otherwise it stays at zero and no further GCs are scheduled.
+inline bool GCRuntime::needZealousGC() {
+  if (nextScheduled > 0 && --nextScheduled == 0) {
+    if (hasZealMode(ZealMode::Alloc) || hasZealMode(ZealMode::GenerationalGC) ||
+        hasZealMode(ZealMode::IncrementalMultipleSlices) ||
+        hasZealMode(ZealMode::Compact) || hasIncrementalTwoSliceZealMode()) {
+      nextScheduled = zealFrequency;
+    }
+    return true;
+  }
+  return false;
+}
+
+// Return whether any of the YieldBefore*/YieldWhile* zeal modes is active.
+// These modes force an incremental GC to yield at a particular point,
+// producing a two-slice collection.
+inline bool GCRuntime::hasIncrementalTwoSliceZealMode() {
+  return hasZealMode(ZealMode::YieldBeforeRootMarking) ||
+         hasZealMode(ZealMode::YieldBeforeMarking) ||
+         hasZealMode(ZealMode::YieldBeforeSweeping) ||
+         hasZealMode(ZealMode::YieldBeforeSweepingAtoms) ||
+         hasZealMode(ZealMode::YieldBeforeSweepingCaches) ||
+         hasZealMode(ZealMode::YieldBeforeSweepingObjects) ||
+         hasZealMode(ZealMode::YieldBeforeSweepingNonObjects) ||
+         hasZealMode(ZealMode::YieldBeforeSweepingPropMapTrees) ||
+         hasZealMode(ZealMode::YieldWhileGrayMarking);
+}
+
+#else
+// Non-zeal builds: zeal modes are never active and zealous GC is never needed.
+inline bool GCRuntime::hasZealMode(ZealMode mode) { return false; }
+inline void GCRuntime::clearZealMode(ZealMode mode) {}
+inline bool GCRuntime::needZealousGC() { return false; }
+inline bool GCRuntime::hasIncrementalTwoSliceZealMode() { return false; }
+#endif
+
+bool IsCurrentlyAnimating(const mozilla::TimeStamp& lastAnimationTime,
+ const mozilla::TimeStamp& currentTime);
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif
diff --git a/js/src/gc/GenerateStatsPhases.py b/js/src/gc/GenerateStatsPhases.py
new file mode 100644
index 0000000000..355abaf09d
--- /dev/null
+++ b/js/src/gc/GenerateStatsPhases.py
@@ -0,0 +1,404 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# flake8: noqa: F821
+
+# Generate graph structures for GC statistics recording.
+#
+# Stats phases are nested and form a directed acyclic graph starting
+# from a set of root phases. Importantly, a phase may appear under more
+# than one parent phase.
+#
+# For example, the following arrangement is possible:
+#
+# +---+
+# | A |
+# +---+
+# |
+# +-------+-------+
+# | | |
+# v v v
+# +---+ +---+ +---+
+# | B | | C | | D |
+# +---+ +---+ +---+
+# | |
+# +---+---+
+# |
+# v
+# +---+
+# | E |
+# +---+
+#
+# This graph is expanded into a tree (or really a forest) and phases
+# with multiple parents are duplicated.
+#
+# For example, the input example above would be expanded to:
+#
+# +---+
+# | A |
+# +---+
+# |
+# +-------+-------+
+# | | |
+# v v v
+# +---+ +---+ +---+
+# | B | | C | | D |
+# +---+ +---+ +---+
+# | |
+# v v
+# +---+ +---+
+# | E | | E'|
+# +---+ +---+
+
+# NOTE: If you add new phases here the current next phase kind number can be
+# found at the end of js/src/gc/StatsPhasesGenerated.inc
+
+import collections
+import re
+
+
+class PhaseKind:
+    """A GC statistics phase kind: one node of the phase DAG.
+
+    A phase kind may appear under more than one parent; expandPhases()
+    later duplicates such nodes to turn the DAG into a forest.
+    """
+
+    def __init__(self, name, descr, bucket, children=[]):
+        # NOTE(review): the default [] is shared between calls; it is never
+        # mutated here, but confirm before ever appending to self.children.
+        self.name = name
+        self.descr = descr
+        # For telemetry
+        self.bucket = bucket
+        self.children = children
+
+
+# Registry of every PhaseKind in creation order, plus an index by name.
+AllPhaseKinds = []
+PhaseKindsByName = dict()
+
+
+def addPhaseKind(name, descr, bucket, children=[]):
+    """Create a PhaseKind, register it in the global tables and return it.
+
+    Names must be unique; |bucket| is the telemetry bucket number.
+    """
+    assert name not in PhaseKindsByName
+    phaseKind = PhaseKind(name, descr, bucket, children)
+    AllPhaseKinds.append(phaseKind)
+    PhaseKindsByName[name] = phaseKind
+    return phaseKind
+
+
+def getPhaseKind(name):
+    """Look up a previously registered PhaseKind by name."""
+    return PhaseKindsByName[name]
+
+
+# The roots of the phase kind DAG. Nested addPhaseKind() calls define new
+# nodes and edges; getPhaseKind() references give an existing phase kind an
+# additional parent. The integer argument is the telemetry bucket number (see
+# the note at the top of this file about the next available number).
+PhaseKindGraphRoots = [
+    addPhaseKind("MUTATOR", "Mutator Running", 0),
+    addPhaseKind("GC_BEGIN", "Begin Callback", 1),
+    addPhaseKind(
+        "EVICT_NURSERY_FOR_MAJOR_GC",
+        "Evict Nursery For Major GC",
+        70,
+        [
+            addPhaseKind(
+                "MARK_ROOTS",
+                "Mark Roots",
+                48,
+                [
+                    addPhaseKind("MARK_CCWS", "Mark Cross Compartment Wrappers", 50),
+                    addPhaseKind("MARK_STACK", "Mark C and JS stacks", 51),
+                    addPhaseKind("MARK_RUNTIME_DATA", "Mark Runtime-wide Data", 52),
+                    addPhaseKind("MARK_EMBEDDING", "Mark Embedding", 53),
+                ],
+            )
+        ],
+    ),
+    addPhaseKind("WAIT_BACKGROUND_THREAD", "Wait Background Thread", 2),
+    addPhaseKind(
+        "PREPARE",
+        "Prepare For Collection",
+        69,
+        [
+            addPhaseKind("UNMARK", "Unmark", 7),
+            addPhaseKind("UNMARK_WEAKMAPS", "Unmark WeakMaps", 76),
+            addPhaseKind("MARK_DISCARD_CODE", "Mark Discard Code", 3),
+            addPhaseKind("RELAZIFY_FUNCTIONS", "Relazify Functions", 4),
+            addPhaseKind("PURGE", "Purge", 5),
+            addPhaseKind("PURGE_PROP_MAP_TABLES", "Purge PropMapTables", 60),
+            addPhaseKind("PURGE_SOURCE_URLS", "Purge Source URLs", 73),
+            addPhaseKind("JOIN_PARALLEL_TASKS", "Join Parallel Tasks", 67),
+        ],
+    ),
+    addPhaseKind(
+        "MARK",
+        "Mark",
+        6,
+        [
+            getPhaseKind("MARK_ROOTS"),
+            addPhaseKind("MARK_DELAYED", "Mark Delayed", 8),
+            addPhaseKind(
+                "MARK_WEAK",
+                "Mark Weak",
+                13,
+                [
+                    getPhaseKind("MARK_DELAYED"),
+                    addPhaseKind("MARK_GRAY_WEAK", "Mark Gray and Weak", 16),
+                ],
+            ),
+            addPhaseKind("MARK_INCOMING_GRAY", "Mark Incoming Gray Pointers", 14),
+            addPhaseKind("MARK_GRAY", "Mark Gray", 15),
+            addPhaseKind(
+                "PARALLEL_MARK",
+                "Parallel marking",
+                78,
+                [
+                    getPhaseKind("JOIN_PARALLEL_TASKS"),
+                    # The following are only used for parallel phase times:
+                    addPhaseKind("PARALLEL_MARK_MARK", "Parallel marking work", 79),
+                    addPhaseKind("PARALLEL_MARK_WAIT", "Waiting for work", 80),
+                ],
+            ),
+        ],
+    ),
+    addPhaseKind(
+        "SWEEP",
+        "Sweep",
+        9,
+        [
+            getPhaseKind("MARK"),
+            addPhaseKind(
+                "FINALIZE_START",
+                "Finalize Start Callbacks",
+                17,
+                [
+                    addPhaseKind("WEAK_ZONES_CALLBACK", "Per-Slice Weak Callback", 57),
+                    addPhaseKind(
+                        "WEAK_COMPARTMENT_CALLBACK", "Per-Compartment Weak Callback", 58
+                    ),
+                ],
+            ),
+            addPhaseKind("UPDATE_ATOMS_BITMAP", "Sweep Atoms Bitmap", 68),
+            addPhaseKind("SWEEP_ATOMS_TABLE", "Sweep Atoms Table", 18),
+            addPhaseKind(
+                "SWEEP_COMPARTMENTS",
+                "Sweep Compartments",
+                20,
+                [
+                    addPhaseKind("SWEEP_DISCARD_CODE", "Sweep Discard Code", 21),
+                    addPhaseKind("SWEEP_INNER_VIEWS", "Sweep Inner Views", 22),
+                    addPhaseKind(
+                        "SWEEP_CC_WRAPPER", "Sweep Cross Compartment Wrappers", 23
+                    ),
+                    addPhaseKind("SWEEP_BASE_SHAPE", "Sweep Base Shapes", 24),
+                    addPhaseKind("SWEEP_INITIAL_SHAPE", "Sweep Initial Shapes", 25),
+                    addPhaseKind("SWEEP_REGEXP", "Sweep Regexps", 28),
+                    addPhaseKind("SWEEP_COMPRESSION", "Sweep Compression Tasks", 62),
+                    addPhaseKind("SWEEP_WEAKMAPS", "Sweep WeakMaps", 63),
+                    addPhaseKind("SWEEP_UNIQUEIDS", "Sweep Unique IDs", 64),
+                    addPhaseKind(
+                        "SWEEP_FINALIZATION_OBSERVERS",
+                        "Sweep FinalizationRegistries and WeakRefs",
+                        74,
+                    ),
+                    addPhaseKind("SWEEP_JIT_DATA", "Sweep JIT Data", 65),
+                    addPhaseKind("SWEEP_WEAK_CACHES", "Sweep Weak Caches", 66),
+                    addPhaseKind("SWEEP_MISC", "Sweep Miscellaneous", 29),
+                    getPhaseKind("JOIN_PARALLEL_TASKS"),
+                ],
+            ),
+            addPhaseKind("FINALIZE_OBJECT", "Finalize Objects", 33),
+            addPhaseKind("FINALIZE_NON_OBJECT", "Finalize Non-objects", 34),
+            addPhaseKind("SWEEP_PROP_MAP", "Sweep PropMap Tree", 77),
+            addPhaseKind("FINALIZE_END", "Finalize End Callback", 38),
+            addPhaseKind("DESTROY", "Deallocate", 39),
+            getPhaseKind("JOIN_PARALLEL_TASKS"),
+            addPhaseKind("FIND_DEAD_COMPARTMENTS", "Find Dead Compartments", 54),
+        ],
+    ),
+    addPhaseKind(
+        "COMPACT",
+        "Compact",
+        40,
+        [
+            addPhaseKind("COMPACT_MOVE", "Compact Move", 41),
+            addPhaseKind(
+                "COMPACT_UPDATE",
+                "Compact Update",
+                42,
+                [
+                    getPhaseKind("MARK_ROOTS"),
+                    addPhaseKind("COMPACT_UPDATE_CELLS", "Compact Update Cells", 43),
+                    getPhaseKind("JOIN_PARALLEL_TASKS"),
+                ],
+            ),
+        ],
+    ),
+    addPhaseKind("DECOMMIT", "Decommit", 72),
+    addPhaseKind("GC_END", "End Callback", 44),
+    addPhaseKind(
+        "MINOR_GC",
+        "All Minor GCs",
+        45,
+        [
+            getPhaseKind("MARK_ROOTS"),
+        ],
+    ),
+    addPhaseKind(
+        "EVICT_NURSERY",
+        "Minor GCs to Evict Nursery",
+        46,
+        [
+            getPhaseKind("MARK_ROOTS"),
+        ],
+    ),
+    addPhaseKind(
+        "TRACE_HEAP",
+        "Trace Heap",
+        47,
+        [
+            getPhaseKind("MARK_ROOTS"),
+        ],
+    ),
+]
+
+
+class Phase:
+    # Expand the DAG into a tree, duplicating phases which have more than
+    # one parent.
+    def __init__(self, phaseKind, parent):
+        """One expanded (tree) node for |phaseKind| under |parent| (or None)."""
+        self.phaseKind = phaseKind
+        self.parent = parent
+        self.depth = parent.depth + 1 if parent else 0
+        self.children = []
+        # Sibling and same-kind successor links are filled in later by
+        # expandPhases().
+        self.nextSibling = None
+        self.nextInPhaseKind = None
+
+        # Dotted path of sanitized lower-case phase kind names from the root.
+        self.path = re.sub(r"\W+", "_", phaseKind.name.lower())
+        if parent is not None:
+            self.path = parent.path + "." + self.path
+
+
+def expandPhases():
+    """Expand the PhaseKind DAG into a forest of Phase objects.
+
+    Returns (phases, phasesForKind): |phases| lists every expanded phase in
+    depth-first preorder, and |phasesForKind| maps each PhaseKind to the list
+    of Phase objects expanded from it (more than one when it has multiple
+    parents).
+    """
+    phases = []
+    phasesForKind = collections.defaultdict(list)
+
+    def traverse(phaseKind, parent):
+        ep = Phase(phaseKind, parent)
+        phases.append(ep)
+
+        # Update list of expanded phases for this phase kind.
+        if phasesForKind[phaseKind]:
+            phasesForKind[phaseKind][-1].nextInPhaseKind = ep
+        phasesForKind[phaseKind].append(ep)
+
+        # Recurse over children, linking each new child to its predecessor.
+        for child in phaseKind.children:
+            child_ep = traverse(child, ep)
+            if ep.children:
+                ep.children[-1].nextSibling = child_ep
+            ep.children.append(child_ep)
+        return ep
+
+    for phaseKind in PhaseKindGraphRoots:
+        traverse(phaseKind, None)
+
+    return phases, phasesForKind
+
+
+AllPhases, PhasesForPhaseKind = expandPhases()
+
+# Name phases based on phase kind name and index if there are multiple phases
+# corresponding to a single phase kind.
+
+for phaseKind in AllPhaseKinds:
+    phases = PhasesForPhaseKind[phaseKind]
+    if len(phases) == 1:
+        phases[0].name = "%s" % phaseKind.name
+    else:
+        # Duplicated phases get a 1-based suffix, e.g. NAME_1, NAME_2.
+        for index, phase in enumerate(phases):
+            phase.name = "%s_%d" % (phaseKind.name, index + 1)
+
+# Find the maximum phase nesting.
+MaxPhaseNesting = max(phase.depth for phase in AllPhases) + 1
+
+# And the maximum bucket number.
+MaxBucket = max(kind.bucket for kind in AllPhaseKinds)
+
+# Generate code.
+
+
+def writeList(out, items):
+    """Write |items| one per line, comma-separated, with a leading indent."""
+    if items:
+        out.write(",\n".join(" " + item for item in items) + "\n")
+
+
+def writeEnumClass(out, name, type, items, extraItems):
+    """Write a C++ 'enum class |name| : |type|' definition to |out|.
+
+    The enum starts with FIRST; the first real item is defined equal to
+    FIRST, LIMIT follows the last item, and |extraItems| come after LIMIT.
+    """
+    items = ["FIRST"] + list(items) + ["LIMIT"] + list(extraItems)
+    items[1] += " = " + items[0]
+    out.write("enum class %s : %s {\n" % (name, type))
+    writeList(out, items)
+    out.write("};\n")
+
+
+def generateHeader(out):
+    """Write the PhaseKind and Phase enums and MAX_PHASE_NESTING to |out|."""
+    #
+    # Generate PhaseKind enum.
+    #
+    phaseKindNames = map(lambda phaseKind: phaseKind.name, AllPhaseKinds)
+    extraPhaseKinds = [
+        "NONE = LIMIT",
+        "EXPLICIT_SUSPENSION = LIMIT",
+        "IMPLICIT_SUSPENSION",
+    ]
+    writeEnumClass(out, "PhaseKind", "uint8_t", phaseKindNames, extraPhaseKinds)
+    out.write("\n")
+
+    #
+    # Generate Phase enum.
+    #
+    phaseNames = map(lambda phase: phase.name, AllPhases)
+    extraPhases = ["NONE = LIMIT", "EXPLICIT_SUSPENSION = LIMIT", "IMPLICIT_SUSPENSION"]
+    writeEnumClass(out, "Phase", "uint8_t", phaseNames, extraPhases)
+    out.write("\n")
+
+    #
+    # Generate MAX_PHASE_NESTING constant.
+    #
+    out.write("static const size_t MAX_PHASE_NESTING = %d;\n" % MaxPhaseNesting)
+
+
+def generateCpp(out):
+    """Write the constexpr phaseKinds and phases tables to |out|."""
+    #
+    # Generate the PhaseKindInfo table.
+    #
+    out.write("static constexpr PhaseKindTable phaseKinds = {\n")
+    for phaseKind in AllPhaseKinds:
+        # Each kind records its first expanded phase as the representative.
+        phase = PhasesForPhaseKind[phaseKind][0]
+        out.write(
+            ' /* PhaseKind::%s */ PhaseKindInfo { Phase::%s, %d, "%s" },\n'
+            % (phaseKind.name, phase.name, phaseKind.bucket, phaseKind.name)
+        )
+    out.write("};\n")
+    out.write("\n")
+
+    #
+    # Generate the PhaseInfo tree.
+    #
+    def name(phase):
+        # Render a phase reference, using Phase::NONE for absent links.
+        return "Phase::" + phase.name if phase else "Phase::NONE"
+
+    out.write("static constexpr PhaseTable phases = {\n")
+    for phase in AllPhases:
+        firstChild = phase.children[0] if phase.children else None
+        phaseKind = phase.phaseKind
+        out.write(
+            ' /* %s */ PhaseInfo { %s, %s, %s, %s, PhaseKind::%s, %d, "%s", "%s" },\n'
+            % (  # NOQA: E501
+                name(phase),
+                name(phase.parent),
+                name(firstChild),
+                name(phase.nextSibling),
+                name(phase.nextInPhaseKind),
+                phaseKind.name,
+                phase.depth,
+                phaseKind.descr,
+                phase.path,
+            )
+        )
+    out.write("};\n")
+
+    #
+    # Print in a comment the next available phase kind number.
+    #
+    out.write("// The next available phase kind number is: %d\n" % (MaxBucket + 1))
diff --git a/js/src/gc/HashUtil.h b/js/src/gc/HashUtil.h
new file mode 100644
index 0000000000..52a3058f8a
--- /dev/null
+++ b/js/src/gc/HashUtil.h
@@ -0,0 +1,84 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_HashUtil_h
+#define gc_HashUtil_h
+
+#include <type_traits>
+
+#include "vm/JSContext.h"
+
+namespace js {
+
+/*
+ * Used to add entries to a js::HashMap or HashSet where the key depends on a GC
+ * thing that may be moved by generational or compacting GC between the call to
+ * lookupForAdd() and relookupOrAdd().
+ */
+template <class T>
+struct DependentAddPtr {
+  typedef typename T::AddPtr AddPtr;
+  typedef typename T::Entry Entry;
+
+  // Capture the add pointer and the GC number at lookup time; the number is
+  // compared later to detect whether a GC may have invalidated the pointer.
+  template <class Lookup>
+  DependentAddPtr(const JSContext* cx, T& table, const Lookup& lookup)
+      : addPtr(table.lookupForAdd(lookup)),
+        originalGcNumber(cx->runtime()->gc.gcNumber()) {}
+
+  DependentAddPtr(DependentAddPtr&& other)
+      : addPtr(other.addPtr), originalGcNumber(other.originalGcNumber) {}
+
+  // Add |key| -> |value|, refreshing the add pointer first if a GC has
+  // happened. Reports OOM and returns false on failure.
+  template <class KeyInput, class ValueInput>
+  bool add(JSContext* cx, T& table, const KeyInput& key,
+           const ValueInput& value) {
+    refreshAddPtr(cx, table, key);
+    if (!table.relookupOrAdd(addPtr, key, value)) {
+      ReportOutOfMemory(cx);
+      return false;
+    }
+    return true;
+  }
+
+  // Remove the entry for |key| if present, refreshing the pointer first.
+  template <class KeyInput>
+  void remove(JSContext* cx, T& table, const KeyInput& key) {
+    refreshAddPtr(cx, table, key);
+    if (addPtr) {
+      table.remove(addPtr);
+    }
+  }
+
+  bool found() const { return addPtr.found(); }
+  explicit operator bool() const { return found(); }
+  const Entry& operator*() const { return *addPtr; }
+  const Entry* operator->() const { return &*addPtr; }
+
+ private:
+  AddPtr addPtr;
+  const uint64_t originalGcNumber;
+
+  // Redo the lookup if the GC number has changed since construction, since
+  // the cached add pointer may no longer be valid.
+  template <class KeyInput>
+  void refreshAddPtr(JSContext* cx, T& table, const KeyInput& key) {
+    bool gcHappened = originalGcNumber != cx->runtime()->gc.gcNumber();
+    if (gcHappened) {
+      addPtr = table.lookupForAdd(key);
+    }
+  }
+
+  DependentAddPtr() = delete;
+  DependentAddPtr(const DependentAddPtr&) = delete;
+  DependentAddPtr& operator=(const DependentAddPtr&) = delete;
+};
+
+// Helper that deduces the table type and constructs a DependentAddPtr for it.
+template <typename T, typename Lookup>
+inline auto MakeDependentAddPtr(const JSContext* cx, T& table,
+                                const Lookup& lookup) {
+  using Ptr = DependentAddPtr<std::remove_reference_t<decltype(table)>>;
+  return Ptr(cx, table, lookup);
+}
+
+} // namespace js
+
+#endif
diff --git a/js/src/gc/Heap-inl.h b/js/src/gc/Heap-inl.h
new file mode 100644
index 0000000000..87dcdc900c
--- /dev/null
+++ b/js/src/gc/Heap-inl.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Heap_inl_h
+#define gc_Heap_inl_h
+
+#include "gc/Heap.h"
+
+#include "gc/StoreBuffer.h"
+#include "gc/Zone.h"
+#include "util/Poison.h"
+#include "vm/Runtime.h"
+
+// Initialize a newly allocated arena for |zoneArg| and allocation kind |kind|,
+// and mark all of its cells as free.
+inline void js::gc::Arena::init(JS::Zone* zoneArg, AllocKind kind,
+                                const AutoLockGC& lock) {
+#ifdef DEBUG
+  // The zone word should still carry the freed-arena poison pattern.
+  MOZ_MAKE_MEM_DEFINED(&zone, sizeof(zone));
+  MOZ_ASSERT((uintptr_t(zone) & 0xff) == JS_FREED_ARENA_PATTERN);
+#endif
+
+  MOZ_ASSERT(firstFreeSpan.isEmpty());
+  MOZ_ASSERT(!allocated());
+  MOZ_ASSERT(!onDelayedMarkingList_);
+  MOZ_ASSERT(!hasDelayedBlackMarking_);
+  MOZ_ASSERT(!hasDelayedGrayMarking_);
+  MOZ_ASSERT(!nextDelayedMarkingArena_);
+
+  MOZ_MAKE_MEM_UNDEFINED(this, ArenaSize);
+
+  zone = zoneArg;
+  allocKind = kind;
+  isNewlyCreated_ = 1;
+  onDelayedMarkingList_ = 0;
+  hasDelayedBlackMarking_ = 0;
+  hasDelayedGrayMarking_ = 0;
+  nextDelayedMarkingArena_ = 0;
+  // Atoms-zone arenas register with the atom marking bitmap; all others get
+  // an empty buffered cell set (see bufferedCells()).
+  if (zone->isAtomsZone()) {
+    zone->runtimeFromAnyThread()->gc.atomMarking.registerArena(this, lock);
+  } else {
+    bufferedCells() = &ArenaCellSet::Empty;
+  }
+
+  setAsFullyUnused();
+}
+
+// Return the arena to the unallocated state, unregistering it from the atom
+// marking bitmap if it belonged to the atoms zone.
+inline void js::gc::Arena::release(const AutoLockGC& lock) {
+  if (zone->isAtomsZone()) {
+    zone->runtimeFromAnyThread()->gc.atomMarking.unregisterArena(this, lock);
+  }
+  setAsNotAllocated();
+}
+
+// Accessor for the buffered cell set; only valid for arenas outside the atoms
+// zone (atoms arenas use atomBitmapStart() instead).
+inline js::gc::ArenaCellSet*& js::gc::Arena::bufferedCells() {
+  MOZ_ASSERT(zone && !zone->isAtomsZone());
+  return bufferedCells_;
+}
+
+// Accessor for the atom marking bitmap start index; only valid for arenas in
+// the atoms zone (other arenas use bufferedCells() instead).
+inline size_t& js::gc::Arena::atomBitmapStart() {
+  MOZ_ASSERT(zone && zone->isAtomsZone());
+  return atomBitmapStart_;
+}
+
+// Pack the allocation site and trace kind into the nursery cell header word.
+inline js::gc::NurseryCellHeader::NurseryCellHeader(AllocSite* site,
+                                                    JS::TraceKind kind)
+    : allocSiteAndTraceKind(MakeValue(site, kind)) {}
+
+#endif
diff --git a/js/src/gc/Heap.cpp b/js/src/gc/Heap.cpp
new file mode 100644
index 0000000000..fdf24fbb7c
--- /dev/null
+++ b/js/src/gc/Heap.cpp
@@ -0,0 +1,635 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Tenured heap management.
+ *
+ * This file contains method definitions for the following classes for code that
+ * is not specific to a particular phase of GC:
+ *
+ * - Arena
+ * - ArenaList
+ * - FreeLists
+ * - ArenaLists
+ * - TenuredChunk
+ * - ChunkPool
+ */
+
+#include "gc/Heap-inl.h"
+
+#include "gc/GCLock.h"
+#include "gc/Memory.h"
+#include "jit/Assembler.h"
+#include "vm/BigIntType.h"
+#include "vm/RegExpShared.h"
+#include "vm/Scope.h"
+
+#include "gc/ArenaList-inl.h"
+#include "gc/PrivateIterators-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
// Check that reserved bits of a Cell are compatible with our typical allocators
// since most derived classes will store a pointer in the first word.
static const size_t MinFirstWordAlignment = 1u << CellFlagBitsReservedForGC;
static_assert(js::detail::LIFO_ALLOC_ALIGN >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support LifoAlloc");
static_assert(CellAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support gc::Cell");
static_assert(js::jit::CodeAlignment >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support JIT code");
static_assert(js::gc::JSClassAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support JSClass pointers");
static_assert(js::ScopeDataAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support scope data pointers");

// Statically validate every sized type that can be stored in an arena: it must
// be big enough to hold a FreeSpan (free cells double as span links), aligned
// to CellAlignBytes, and no smaller than the minimum cell size.
#define CHECK_THING_SIZE(allocKind, traceKind, type, sizedType, bgFinal, \
                         nursery, compact)                               \
  static_assert(sizeof(sizedType) >= SortedArenaList::MinThingSize,      \
                #sizedType " is smaller than SortedArenaList::MinThingSize!"); \
  static_assert(sizeof(sizedType) >= sizeof(FreeSpan),                   \
                #sizedType " is smaller than FreeSpan");                 \
  static_assert(sizeof(sizedType) % CellAlignBytes == 0,                 \
                "Size of " #sizedType " is not a multiple of CellAlignBytes"); \
  static_assert(sizeof(sizedType) >= MinCellSize,                        \
                "Size of " #sizedType " is smaller than the minimum size");
FOR_EACH_ALLOCKIND(CHECK_THING_SIZE);
#undef CHECK_THING_SIZE

// Shared sentinel that every empty free list points at.
FreeSpan FreeLists::emptySentinel;
+
// Compile-time layout of an arena holding things of type T: how big each thing
// is, how many fit after the header, and where the first one starts. The
// padding between the header and the first thing ensures the thing array ends
// exactly at the end of the arena.
template <typename T>
struct ArenaLayout {
  static constexpr size_t thingSize() { return sizeof(T); }
  static constexpr size_t thingsPerArena() {
    return (ArenaSize - ArenaHeaderSize) / thingSize();
  }
  static constexpr size_t firstThingOffset() {
    return ArenaSize - thingSize() * thingsPerArena();
  }
};
+
// Per-AllocKind lookup tables, indexed by size_t(AllocKind). Values fit in a
// uint8_t (checked in Arena::staticAsserts / checkLookupTables).
const uint8_t Arena::ThingSizes[] = {
#define EXPAND_THING_SIZE(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::thingSize(),
    FOR_EACH_ALLOCKIND(EXPAND_THING_SIZE)
#undef EXPAND_THING_SIZE
};

const uint8_t Arena::FirstThingOffsets[] = {
#define EXPAND_FIRST_THING_OFFSET(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::firstThingOffset(),
    FOR_EACH_ALLOCKIND(EXPAND_FIRST_THING_OFFSET)
#undef EXPAND_FIRST_THING_OFFSET
};

const uint8_t Arena::ThingsPerArena[] = {
#define EXPAND_THINGS_PER_ARENA(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::thingsPerArena(),
    FOR_EACH_ALLOCKIND(EXPAND_THINGS_PER_ARENA)
#undef EXPAND_THINGS_PER_ARENA
};
+
+void Arena::unmarkAll() {
+ MarkBitmapWord* arenaBits = chunk()->markBits.arenaBits(this);
+ for (size_t i = 0; i < ArenaBitmapWords; i++) {
+ arenaBits[i] = 0;
+ }
+}
+
// Clear the black mark bit on every free cell in this arena. Each free cell is
// expected to be marked black on entry (the "pre-marked" state asserted here).
void Arena::unmarkPreMarkedFreeCells() {
  for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(cell->isMarkedBlack());
    cell->unmark();
  }
}
+
#ifdef DEBUG

// Assert that no cell on this arena's free list carries any mark bit.
void Arena::checkNoMarkedFreeCells() {
  for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(!cell->isMarkedAny());
  }
}

// Assert that every allocated cell in this arena is marked black.
void Arena::checkAllCellsMarkedBlack() {
  for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(cell->isMarkedBlack());
  }
}

#endif
+
#if defined(DEBUG) || defined(JS_GC_ZEAL)
// Assert that no allocated cell in this arena carries any mark bit.
void Arena::checkNoMarkedCells() {
  for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(!cell->isMarkedAny());
  }
}
#endif
+
/* static */
// Compile-time checks that the per-kind lookup tables above cover every
// AllocKind and that kinds fit in the uint8_t table elements.
void Arena::staticAsserts() {
  static_assert(size_t(AllocKind::LIMIT) <= 255,
                "All AllocKinds and AllocKind::LIMIT must fit in a uint8_t.");
  static_assert(std::size(ThingSizes) == AllocKindCount,
                "We haven't defined all thing sizes.");
  static_assert(std::size(FirstThingOffsets) == AllocKindCount,
                "We haven't defined all offsets.");
  static_assert(std::size(ThingsPerArena) == AllocKindCount,
                "We haven't defined all counts.");
}
+
+/* static */
+void Arena::checkLookupTables() {
+#ifdef DEBUG
+ for (size_t i = 0; i < AllocKindCount; i++) {
+ MOZ_ASSERT(
+ FirstThingOffsets[i] + ThingsPerArena[i] * ThingSizes[i] == ArenaSize,
+ "Inconsistent arena lookup table data");
+ }
+#endif
+}
+
#ifdef DEBUG
// Print the list to stderr, with a '*' marking the cursor position.
void js::gc::ArenaList::dump() {
  fprintf(stderr, "ArenaList %p:", this);
  Arena** cursor = cursorp_;
  if (cursor == &head_) {
    fprintf(stderr, " *");
  }
  for (Arena* a = head(); a; a = a->next) {
    fprintf(stderr, " %p", a);
    if (cursor == &a->next) {
      fprintf(stderr, " *");
    }
  }
  fprintf(stderr, "\n");
}
#endif
+
+Arena* ArenaList::removeRemainingArenas(Arena** arenap) {
+ // This is only ever called to remove arenas that are after the cursor, so
+ // we don't need to update it.
+#ifdef DEBUG
+ for (Arena* arena = *arenap; arena; arena = arena->next) {
+ MOZ_ASSERT(cursorp_ != &arena->next);
+ }
+#endif
+ Arena* remainingArenas = *arenap;
+ *arenap = nullptr;
+ check();
+ return remainingArenas;
+}
+
+FreeLists::FreeLists() {
+ for (auto i : AllAllocKinds()) {
+ freeLists_[i] = &emptySentinel;
+ }
+}
+
+ArenaLists::ArenaLists(Zone* zone)
+ : zone_(zone),
+ incrementalSweptArenaKind(AllocKind::LIMIT),
+ gcCompactPropMapArenasToUpdate(nullptr),
+ gcNormalPropMapArenasToUpdate(nullptr),
+ savedEmptyArenas(nullptr) {
+ for (auto i : AllAllocKinds()) {
+ concurrentUse(i) = ConcurrentUse::None;
+ }
+}
+
+void ReleaseArenas(JSRuntime* rt, Arena* arena, const AutoLockGC& lock) {
+ Arena* next;
+ for (; arena; arena = next) {
+ next = arena->next;
+ rt->gc.releaseArena(arena, lock);
+ }
+}
+
// Release all arenas in |arenaList| back to the GC and leave the list empty.
void ReleaseArenaList(JSRuntime* rt, ArenaList& arenaList,
                      const AutoLockGC& lock) {
  ReleaseArenas(rt, arenaList.head(), lock);
  arenaList.clear();
}
+
// Release every arena still owned by this zone's lists. Only safe at shutdown:
// see the assertion on concurrentUse below.
ArenaLists::~ArenaLists() {
  AutoLockGC lock(runtime());

  for (auto i : AllAllocKinds()) {
    /*
     * We can only call this during the shutdown after the last GC when
     * the background finalization is disabled.
     */
    MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
    ReleaseArenaList(runtime(), arenaList(i), lock);
  }
  ReleaseArenaList(runtime(), incrementalSweptArenas.ref(), lock);

  ReleaseArenas(runtime(), savedEmptyArenas, lock);
}
+
// Move each kind's main arena list into the corresponding collecting list,
// leaving the main lists empty. Free lists must already be empty.
void ArenaLists::moveArenasToCollectingLists() {
  checkEmptyFreeLists();
  for (AllocKind kind : AllAllocKinds()) {
    MOZ_ASSERT(collectingArenaList(kind).isEmpty());
    collectingArenaList(kind) = std::move(arenaList(kind));
    MOZ_ASSERT(arenaList(kind).isEmpty());
  }
}
+
// Merge each kind's main list into its collecting list (inserting at the
// cursor-at-end position), then move the combined list back to the main list,
// leaving the collecting lists empty.
void ArenaLists::mergeArenasFromCollectingLists() {
  for (AllocKind kind : AllAllocKinds()) {
    collectingArenaList(kind).insertListWithCursorAtEnd(arenaList(kind));
    arenaList(kind) = std::move(collectingArenaList(kind));
    MOZ_ASSERT(collectingArenaList(kind).isEmpty());
  }
}
+
+Arena* ArenaLists::takeSweptEmptyArenas() {
+ Arena* arenas = savedEmptyArenas;
+ savedEmptyArenas = nullptr;
+ return arenas;
+}
+
// Record the arenas swept so far this incremental slice (and their kind),
// replacing any previously recorded set.
void ArenaLists::setIncrementalSweptArenas(AllocKind kind,
                                           SortedArenaList& arenas) {
  incrementalSweptArenaKind = kind;
  incrementalSweptArenas.ref().clear();
  incrementalSweptArenas = arenas.toArenaList();
}
+
// Reset the incremental sweep record; AllocKind::LIMIT means "no kind".
void ArenaLists::clearIncrementalSweptArenas() {
  incrementalSweptArenaKind = AllocKind::LIMIT;
  incrementalSweptArenas.ref().clear();
}
+
+void ArenaLists::checkGCStateNotInUse() {
+ // Called before and after collection to check the state is as expected.
+#ifdef DEBUG
+ checkSweepStateNotInUse();
+ for (auto i : AllAllocKinds()) {
+ MOZ_ASSERT(collectingArenaList(i).isEmpty());
+ }
+#endif
+}
+
+void ArenaLists::checkSweepStateNotInUse() {
+#ifdef DEBUG
+ checkNoArenasToUpdate();
+ MOZ_ASSERT(incrementalSweptArenaKind == AllocKind::LIMIT);
+ MOZ_ASSERT(incrementalSweptArenas.ref().isEmpty());
+ MOZ_ASSERT(!savedEmptyArenas);
+ for (auto i : AllAllocKinds()) {
+ MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
+ }
+#endif
+}
+
// Assert that neither property-map kind has arenas queued for pointer update.
void ArenaLists::checkNoArenasToUpdate() {
  MOZ_ASSERT(!gcCompactPropMapArenasToUpdate);
  MOZ_ASSERT(!gcNormalPropMapArenasToUpdate);
}
+
+void ArenaLists::checkNoArenasToUpdateForKind(AllocKind kind) {
+#ifdef DEBUG
+ switch (kind) {
+ case AllocKind::COMPACT_PROP_MAP:
+ MOZ_ASSERT(!gcCompactPropMapArenasToUpdate);
+ break;
+ case AllocKind::NORMAL_PROP_MAP:
+ MOZ_ASSERT(!gcNormalPropMapArenasToUpdate);
+ break;
+ default:
+ break;
+ }
+#endif
+}
+
+inline bool TenuredChunk::canDecommitPage(size_t pageIndex) const {
+ if (decommittedPages[pageIndex]) {
+ return false;
+ }
+
+ size_t arenaIndex = pageIndex * ArenasPerPage;
+ for (size_t i = 0; i < ArenasPerPage; i++) {
+ if (!freeCommittedArenas[arenaIndex + i]) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void TenuredChunk::decommitFreeArenas(GCRuntime* gc, const bool& cancel,
+ AutoLockGC& lock) {
+ MOZ_ASSERT(DecommitEnabled());
+
+ for (size_t i = 0; i < PagesPerChunk; i++) {
+ if (cancel) {
+ break;
+ }
+
+ if (canDecommitPage(i) && !decommitOneFreePage(gc, i, lock)) {
+ break;
+ }
+ }
+}
+
// Reset |arena| to a fully-unused state and insert it into |dest| bucketed by
// |thingsPerArena| (i.e. as a completely free arena).
void TenuredChunk::recycleArena(Arena* arena, SortedArenaList& dest,
                                size_t thingsPerArena) {
  arena->setAsFullyUnused();
  dest.insertAt(arena, thingsPerArena);
}
+
// Return |arena| to this chunk's free set, updating the free counters and
// possibly moving the chunk between the GC's chunk lists.
void TenuredChunk::releaseArena(GCRuntime* gc, Arena* arena,
                                const AutoLockGC& lock) {
  MOZ_ASSERT(!arena->allocated());
  MOZ_ASSERT(!freeCommittedArenas[arenaIndex(arena)]);

  freeCommittedArenas[arenaIndex(arena)] = true;
  ++info.numArenasFreeCommitted;
  ++info.numArenasFree;
  gc->updateOnArenaFree();

  verify();

  // One more arena is free: the chunk may move full -> available, or be
  // recycled if it became completely unused.
  updateChunkListAfterFree(gc, 1, lock);
}
+
// Try to decommit one fully-free page. The GC lock is dropped around the
// actual decommit system call, so the page's arenas are temporarily marked as
// allocated to stop concurrent allocation from handing them out. Returns
// false if the decommit failed (including simulated OOM), in which case the
// original free/committed state is restored.
bool TenuredChunk::decommitOneFreePage(GCRuntime* gc, size_t pageIndex,
                                       AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());
  MOZ_ASSERT(canDecommitPage(pageIndex));
  MOZ_ASSERT(info.numArenasFreeCommitted >= ArenasPerPage);

  // Temporarily mark the page as allocated while we decommit.
  for (size_t i = 0; i < ArenasPerPage; i++) {
    size_t arenaIndex = pageIndex * ArenasPerPage + i;
    MOZ_ASSERT(freeCommittedArenas[arenaIndex]);
    freeCommittedArenas[arenaIndex] = false;
  }
  info.numArenasFreeCommitted -= ArenasPerPage;
  info.numArenasFree -= ArenasPerPage;
  updateChunkListAfterAlloc(gc, lock);

  verify();

  bool ok;
  {
    // Drop the lock for the system call; the counters above keep this page
    // reserved in the meantime.
    AutoUnlockGC unlock(lock);
    ok = !oom::ShouldFailWithOOM() &&
         MarkPagesUnusedSoft(pageAddress(pageIndex), PageSize);
  }

  // Mark the page as decommitted if successful or restore the original free
  // state.
  if (ok) {
    decommittedPages[pageIndex] = true;
  } else {
    for (size_t i = 0; i < ArenasPerPage; i++) {
      size_t arenaIndex = pageIndex * ArenasPerPage + i;
      MOZ_ASSERT(!freeCommittedArenas[arenaIndex]);
      freeCommittedArenas[arenaIndex] = true;
    }
    info.numArenasFreeCommitted += ArenasPerPage;
  }

  // Either way the page's arenas count as free again (decommitted arenas are
  // free but not committed).
  info.numArenasFree += ArenasPerPage;
  updateChunkListAfterFree(gc, ArenasPerPage, lock);

  verify();

  return ok;
}
+
// Decommit as many fully-free pages as possible without dropping the GC lock
// (unlike decommitOneFreePage). Stops at the first failure. Note that
// numArenasFree is unchanged here: decommitted arenas still count as free,
// just not committed.
void TenuredChunk::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());

  for (size_t i = 0; i < PagesPerChunk; i++) {
    if (!canDecommitPage(i)) {
      continue;
    }

    MOZ_ASSERT(!decommittedPages[i]);
    MOZ_ASSERT(info.numArenasFreeCommitted >= ArenasPerPage);

    // NOTE(review): decommitOneFreePage passes PageSize here, this path uses
    // SystemPageSize() — confirm the two are intended to differ.
    if (js::oom::ShouldFailWithOOM() ||
        !MarkPagesUnusedSoft(pageAddress(i), SystemPageSize())) {
      break;
    }

    decommittedPages[i] = true;
    for (size_t j = 0; j < ArenasPerPage; ++j) {
      size_t arenaIndex = i * ArenasPerPage + j;
      MOZ_ASSERT(freeCommittedArenas[arenaIndex]);
      freeCommittedArenas[arenaIndex] = false;
    }
    info.numArenasFreeCommitted -= ArenasPerPage;
  }

  verify();
}
+
+void TenuredChunk::updateChunkListAfterAlloc(GCRuntime* gc,
+ const AutoLockGC& lock) {
+ if (MOZ_UNLIKELY(!hasAvailableArenas())) {
+ gc->availableChunks(lock).remove(this);
+ gc->fullChunks(lock).push(this);
+ }
+}
+
// After freeing |numArenasFree| arenas: move the chunk full -> available if
// those were its first free arenas, recycle it if it became completely
// unused, otherwise leave it on the available list.
void TenuredChunk::updateChunkListAfterFree(GCRuntime* gc, size_t numArenasFree,
                                            const AutoLockGC& lock) {
  if (info.numArenasFree == numArenasFree) {
    // These were the chunk's only free arenas, so it was on the full list.
    gc->fullChunks(lock).remove(this);
    gc->availableChunks(lock).push(this);
  } else if (!unused()) {
    MOZ_ASSERT(gc->availableChunks(lock).contains(this));
  } else {
    MOZ_ASSERT(unused());
    gc->availableChunks(lock).remove(this);
    gc->recycleChunk(this, lock);
  }
}
+
+TenuredChunk* ChunkPool::pop() {
+ MOZ_ASSERT(bool(head_) == bool(count_));
+ if (!count_) {
+ return nullptr;
+ }
+ return remove(head_);
+}
+
+void ChunkPool::push(TenuredChunk* chunk) {
+ MOZ_ASSERT(!chunk->info.next);
+ MOZ_ASSERT(!chunk->info.prev);
+
+ chunk->info.next = head_;
+ if (head_) {
+ head_->info.prev = chunk;
+ }
+ head_ = chunk;
+ ++count_;
+}
+
// Unlink |chunk| from this doubly-linked pool and return it with its link
// fields cleared. The chunk must be present in the pool.
TenuredChunk* ChunkPool::remove(TenuredChunk* chunk) {
  MOZ_ASSERT(count_ > 0);
  MOZ_ASSERT(contains(chunk));

  if (head_ == chunk) {
    head_ = chunk->info.next;
  }
  if (chunk->info.prev) {
    chunk->info.prev->info.next = chunk->info.next;
  }
  if (chunk->info.next) {
    chunk->info.next->info.prev = chunk->info.prev;
  }
  chunk->info.next = chunk->info.prev = nullptr;
  --count_;

  return chunk;
}
+
+// We could keep the chunk pool sorted, but that's likely to be more expensive.
+// This sort is nlogn, but keeping it sorted is likely to be m*n, with m being
+// the number of operations (likely higher than n).
+void ChunkPool::sort() {
+ // Only sort if the list isn't already sorted.
+ if (!isSorted()) {
+ head_ = mergeSort(head(), count());
+
+ // Fixup prev pointers.
+ TenuredChunk* prev = nullptr;
+ for (TenuredChunk* cur = head_; cur; cur = cur->info.next) {
+ cur->info.prev = prev;
+ prev = cur;
+ }
+ }
+
+ MOZ_ASSERT(verify());
+ MOZ_ASSERT(isSorted());
+}
+
// Recursive merge sort over the singly-linked |next| chain, ordering chunks by
// ascending numArenasFree. Only |next| links are maintained; the caller
// (sort()) rebuilds the |prev| links afterwards.
TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {
  MOZ_ASSERT(bool(list) == bool(count));

  if (count < 2) {
    return list;
  }

  size_t half = count / 2;

  // Split the list into a front half of |half| chunks and the remainder.
  TenuredChunk* front = list;
  TenuredChunk* back;
  {
    TenuredChunk* cur = list;
    for (size_t i = 0; i < half - 1; i++) {
      MOZ_ASSERT(cur);
      cur = cur->info.next;
    }
    back = cur->info.next;
    cur->info.next = nullptr;
  }

  front = mergeSort(front, half);
  back = mergeSort(back, count - half);

  // Merge the two sorted halves, appending through |cur|.
  list = nullptr;
  TenuredChunk** cur = &list;
  while (front || back) {
    if (!front) {
      *cur = back;
      break;
    }
    if (!back) {
      *cur = front;
      break;
    }

    // Note that the sort is stable due to the <= here. Nothing depends on
    // this but it could.
    if (front->info.numArenasFree <= back->info.numArenasFree) {
      *cur = front;
      front = front->info.next;
      cur = &(*cur)->info.next;
    } else {
      *cur = back;
      back = back->info.next;
      cur = &(*cur)->info.next;
    }
  }

  return list;
}
+
+bool ChunkPool::isSorted() const {
+ uint32_t last = 1;
+ for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
+ if (cursor->info.numArenasFree < last) {
+ return false;
+ }
+ last = cursor->info.numArenasFree;
+ }
+ return true;
+}
+
+#ifdef DEBUG
+
+bool ChunkPool::contains(TenuredChunk* chunk) const {
+ verify();
+ for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
+ if (cursor == chunk) {
+ return true;
+ }
+ }
+ return false;
+}
+
// Check the pool's doubly-linked structure and that count_ matches the actual
// list length. Returns true so it can be used inside MOZ_ASSERT.
bool ChunkPool::verify() const {
  MOZ_ASSERT(bool(head_) == bool(count_));
  uint32_t count = 0;
  for (TenuredChunk* cursor = head_; cursor;
       cursor = cursor->info.next, ++count) {
    MOZ_ASSERT_IF(cursor->info.prev, cursor->info.prev->info.next == cursor);
    MOZ_ASSERT_IF(cursor->info.next, cursor->info.next->info.prev == cursor);
  }
  MOZ_ASSERT(count_ == count);
  return true;
}
+
+void ChunkPool::verifyChunks() const {
+ for (TenuredChunk* chunk = head_; chunk; chunk = chunk->info.next) {
+ chunk->verify();
+ }
+}
+
// Check this chunk's free-arena bookkeeping: the counters must agree with the
// decommittedPages and freeCommittedArenas bitmaps, and no arena may be both
// on a decommitted page and marked free-committed (or free-committed while
// allocated).
void TenuredChunk::verify() const {
  MOZ_ASSERT(info.numArenasFree <= ArenasPerChunk);
  MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);

  size_t decommittedCount = decommittedPages.Count() * ArenasPerPage;
  size_t freeCommittedCount = freeCommittedArenas.Count();
  size_t freeCount = freeCommittedCount + decommittedCount;

  MOZ_ASSERT(freeCount == info.numArenasFree);
  MOZ_ASSERT(freeCommittedCount == info.numArenasFreeCommitted);

  for (size_t i = 0; i < ArenasPerChunk; ++i) {
    MOZ_ASSERT(!(decommittedPages[pageIndex(i)] && freeCommittedArenas[i]));
    MOZ_ASSERT_IF(freeCommittedArenas[i], !arenas[i].allocated());
  }
}
+
+#endif
+
// Advance the iterator to the next chunk in the pool.
void ChunkPool::Iter::next() {
  MOZ_ASSERT(!done());
  current_ = current_->info.next;
}
diff --git a/js/src/gc/Heap.h b/js/src/gc/Heap.h
new file mode 100644
index 0000000000..9a1b25d508
--- /dev/null
+++ b/js/src/gc/Heap.h
@@ -0,0 +1,846 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Heap_h
+#define gc_Heap_h
+
+#include "mozilla/DebugOnly.h"
+
+#include "gc/AllocKind.h"
+#include "gc/Pretenuring.h"
+#include "js/HeapAPI.h"
+#include "js/TypeDecls.h"
+#include "util/Poison.h"
+
+namespace js {
+
+class AutoLockGC;
+class AutoLockGCBgAlloc;
+class Nursery;
+
+// To prevent false sharing, some data structures are aligned to a typical cache
+// line size.
+static constexpr size_t TypicalCacheLineSize = 64;
+
+namespace gc {
+
+class Arena;
+class ArenaCellSet;
+class ArenaList;
+class GCRuntime;
+class MarkingValidator;
+class SortedArenaList;
+class TenuredCell;
+
// Cells are aligned to CellAlignShift, so the largest tagged null pointer is:
// (all the low bits below the alignment set, with a zero pointer part).
const uintptr_t LargestTaggedNullCellPointer = (1 << CellAlignShift) - 1;

/*
 * The minimum cell size ends up as twice the cell alignment because the mark
 * bitmap contains one bit per CellBytesPerMarkBit bytes (which is equal to
 * CellAlignBytes) and we need two mark bits per cell.
 */
const size_t MinCellSize = CellBytesPerMarkBit * MarkBitsPerCell;

static_assert(ArenaSize % CellAlignBytes == 0,
              "Arena size must be a multiple of cell alignment");
+
+/*
+ * A FreeSpan represents a contiguous sequence of free cells in an Arena. It
+ * can take two forms.
+ *
+ * - In an empty span, |first| and |last| are both zero.
+ *
+ * - In a non-empty span, |first| is the address of the first free thing in the
+ * span, and |last| is the address of the last free thing in the span.
+ * Furthermore, the memory pointed to by |last| holds a FreeSpan structure
+ * that points to the next span (which may be empty); this works because
+ * sizeof(FreeSpan) is less than the smallest thingSize.
+ */
class FreeSpan {
  friend class Arena;
  friend class ArenaCellIter;
  friend class ArenaFreeCellIter;

  // Byte offsets from the owning arena's address. 0/0 encodes an empty span
  // (offset 0 is inside the arena header, so it can never be a real thing).
  uint16_t first;
  uint16_t last;

 public:
  // This inits just |first| and |last|; if the span is non-empty it doesn't
  // do anything with the next span stored at |last|.
  void initBounds(uintptr_t firstArg, uintptr_t lastArg, const Arena* arena) {
    checkRange(firstArg, lastArg, arena);
    first = firstArg;
    last = lastArg;
  }

  void initAsEmpty() {
    first = 0;
    last = 0;
  }

  // This sets |first| and |last|, and also sets the next span stored at
  // |last| as empty. (As a result, |firstArg| and |lastArg| cannot represent
  // an empty span.)
  void initFinal(uintptr_t firstArg, uintptr_t lastArg, const Arena* arena) {
    initBounds(firstArg, lastArg, arena);
    FreeSpan* last = nextSpanUnchecked(arena);
    last->initAsEmpty();
    checkSpan(arena);
  }

  bool isEmpty() const { return !first; }

  // Valid only for the span embedded at the start of an Arena (its address is
  // the arena's address); see Arena::firstFreeSpan.
  Arena* getArenaUnchecked() { return reinterpret_cast<Arena*>(this); }
  inline Arena* getArena();

  static size_t offsetOfFirst() { return offsetof(FreeSpan, first); }

  static size_t offsetOfLast() { return offsetof(FreeSpan, last); }

  // Like nextSpan(), but no checking of the following span is done.
  FreeSpan* nextSpanUnchecked(const Arena* arena) const {
    MOZ_ASSERT(arena && !isEmpty());
    return reinterpret_cast<FreeSpan*>(uintptr_t(arena) + last);
  }

  const FreeSpan* nextSpan(const Arena* arena) const {
    checkSpan(arena);
    return nextSpanUnchecked(arena);
  }

  // Allocate one thing of |thingSize| bytes from this span, or return null if
  // the span is empty. This is the tenured-allocation fast path.
  MOZ_ALWAYS_INLINE TenuredCell* allocate(size_t thingSize) {
    // Eschew the usual checks, because this might be the placeholder span.
    // If this is somehow an invalid, non-empty span, checkSpan() will catch it.
    Arena* arena = getArenaUnchecked();
    checkSpan(arena);
    uintptr_t thing = uintptr_t(arena) + first;
    if (first < last) {
      // We have space for at least two more things, so do a simple
      // bump-allocate.
      first += thingSize;
    } else if (MOZ_LIKELY(first)) {
      // The last space points to the next free span (which may be empty).
      const FreeSpan* next = nextSpan(arena);
      first = next->first;
      last = next->last;
    } else {
      return nullptr;  // The span is empty.
    }
    checkSpan(arena);
    DebugOnlyPoison(reinterpret_cast<void*>(thing),
                    JS_ALLOCATED_TENURED_PATTERN, thingSize,
                    MemCheckKind::MakeUndefined);
    return reinterpret_cast<TenuredCell*>(thing);
  }

  inline void checkSpan(const Arena* arena) const;
  inline void checkRange(uintptr_t first, uintptr_t last,
                         const Arena* arena) const;
};
+
+/*
+ * Arenas are the allocation units of the tenured heap in the GC. An arena
+ * is 4kiB in size and 4kiB-aligned. It starts with several header fields
+ * followed by some bytes of padding. The remainder of the arena is filled
+ * with GC things of a particular AllocKind. The padding ensures that the
+ * GC thing array ends exactly at the end of the arena:
+ *
+ * <----------------------------------------------> = ArenaSize bytes
+ * +---------------+---------+----+----+-----+----+
+ * | header fields | padding | T0 | T1 | ... | Tn |
+ * +---------------+---------+----+----+-----+----+
+ * <-------------------------> = first thing offset
+ */
class alignas(ArenaSize) Arena {
  // Per-AllocKind layout tables, indexed by size_t(AllocKind); defined in
  // Heap.cpp from ArenaLayout.
  static JS_PUBLIC_DATA const uint8_t ThingSizes[];
  static JS_PUBLIC_DATA const uint8_t FirstThingOffsets[];
  static JS_PUBLIC_DATA const uint8_t ThingsPerArena[];
  /*
   * The first span of free things in the arena. Most of these spans are
   * stored as offsets in free regions of the data array, and most operations
   * on FreeSpans take an Arena pointer for safety. However, the FreeSpans
   * used for allocation are stored here, at the start of an Arena, and use
   * their own address to grab the next span within the same Arena.
   */
  FreeSpan firstFreeSpan;

 public:
  /*
   * One of the AllocKind constants or AllocKind::LIMIT when the arena does
   * not contain any GC things and is on the list of empty arenas in the GC
   * chunk.
   */
  AllocKind allocKind;

  /*
   * The zone that this Arena is contained within, when allocated. The offset
   * of this field must match the ArenaZoneOffset stored in js/HeapAPI.h,
   * as is statically asserted below.
   */
  JS::Zone* zone;

  /*
   * Arena::next has two purposes: when unallocated, it points to the next
   * available Arena. When allocated, it points to the next Arena in the same
   * zone and with the same alloc kind.
   */
  Arena* next;

 private:
  static const size_t ARENA_FLAG_BITS = 4;
  static const size_t DELAYED_MARKING_ARENA_BITS =
      JS_BITS_PER_WORD - ArenaShift;
  static_assert(
      ARENA_FLAG_BITS + DELAYED_MARKING_ARENA_BITS <= JS_BITS_PER_WORD,
      "Not enough space to pack flags and nextDelayedMarkingArena_ pointer "
      "into a single word.");

  /*
   * True until the arena is swept for the first time.
   */
  size_t isNewlyCreated_ : 1;

  /*
   * When recursive marking uses too much stack we delay marking of arenas and
   * link them into a list for later processing. This uses the following fields.
   */
  size_t onDelayedMarkingList_ : 1;
  size_t hasDelayedBlackMarking_ : 1;
  size_t hasDelayedGrayMarking_ : 1;
  // Arena-aligned pointer to the next delayed-marking arena, stored as an
  // arena index (address >> ArenaShift) so it fits in the remaining bits.
  size_t nextDelayedMarkingArena_ : DELAYED_MARKING_ARENA_BITS;

  union {
    /*
     * For arenas in zones other than the atoms zone, if non-null, points
     * to an ArenaCellSet that represents the set of cells in this arena
     * that are in the nursery's store buffer.
     */
    ArenaCellSet* bufferedCells_;

    /*
     * For arenas in the atoms zone, the starting index into zone atom
     * marking bitmaps (see AtomMarking.h) of the things in this zone.
     * Atoms never refer to nursery things, so no store buffer index is
     * needed.
     */
    size_t atomBitmapStart_;
  };

 public:
  /*
   * The size of data should be |ArenaSize - offsetof(data)|, but the offset
   * is not yet known to the compiler, so we do it by hand. |firstFreeSpan|
   * takes up 8 bytes on 64-bit due to alignment requirements; the rest are
   * obvious. This constant is stored in js/HeapAPI.h.
   */
  uint8_t data[ArenaSize - ArenaHeaderSize];

  void init(JS::Zone* zoneArg, AllocKind kind, const AutoLockGC& lock);

  // Sets |firstFreeSpan| to the Arena's entire valid range, and
  // also sets the next span stored at |firstFreeSpan.last| as empty.
  void setAsFullyUnused() {
    AllocKind kind = getAllocKind();
    firstFreeSpan.first = firstThingOffset(kind);
    firstFreeSpan.last = lastThingOffset(kind);
    FreeSpan* last = firstFreeSpan.nextSpanUnchecked(this);
    last->initAsEmpty();
  }

  // Initialize an arena to its unallocated state. For arenas that were
  // previously allocated for some zone, use release() instead.
  void setAsNotAllocated() {
    firstFreeSpan.initAsEmpty();

    // Poison zone pointer to highlight UAF on released arenas in crash data.
    AlwaysPoison(&zone, JS_FREED_ARENA_PATTERN, sizeof(zone),
                 MemCheckKind::MakeNoAccess);

    allocKind = AllocKind::LIMIT;
    onDelayedMarkingList_ = 0;
    hasDelayedBlackMarking_ = 0;
    hasDelayedGrayMarking_ = 0;
    nextDelayedMarkingArena_ = 0;
    bufferedCells_ = nullptr;

    MOZ_ASSERT(!allocated());
  }

  // Return an allocated arena to its unallocated state.
  inline void release(const AutoLockGC& lock);

  uintptr_t address() const {
    checkAddress();
    return uintptr_t(this);
  }

  inline void checkAddress() const;

  inline TenuredChunk* chunk() const;

  // Allocated iff allocKind is a valid kind; setAsNotAllocated() stores
  // AllocKind::LIMIT.
  bool allocated() const {
    MOZ_ASSERT(IsAllocKind(AllocKind(allocKind)));
    return IsValidAllocKind(AllocKind(allocKind));
  }

  AllocKind getAllocKind() const {
    MOZ_ASSERT(allocated());
    return allocKind;
  }

  FreeSpan* getFirstFreeSpan() { return &firstFreeSpan; }

  // Static layout queries, backed by the per-kind lookup tables.
  static size_t thingSize(AllocKind kind) { return ThingSizes[size_t(kind)]; }
  static size_t thingsPerArena(AllocKind kind) {
    return ThingsPerArena[size_t(kind)];
  }
  static size_t thingsSpan(AllocKind kind) {
    return thingsPerArena(kind) * thingSize(kind);
  }

  static size_t firstThingOffset(AllocKind kind) {
    return FirstThingOffsets[size_t(kind)];
  }
  static size_t lastThingOffset(AllocKind kind) {
    return ArenaSize - thingSize(kind);
  }

  size_t getThingSize() const { return thingSize(getAllocKind()); }
  size_t getThingsPerArena() const { return thingsPerArena(getAllocKind()); }
  size_t getThingsSpan() const { return getThingsPerArena() * getThingSize(); }
  size_t getFirstThingOffset() const {
    return firstThingOffset(getAllocKind());
  }

  uintptr_t thingsStart() const { return address() + getFirstThingOffset(); }
  uintptr_t thingsEnd() const { return address() + ArenaSize; }

  bool isEmpty() const {
    // Arena is empty if its first span covers the whole arena.
    firstFreeSpan.checkSpan(this);
    AllocKind kind = getAllocKind();
    return firstFreeSpan.first == firstThingOffset(kind) &&
           firstFreeSpan.last == lastThingOffset(kind);
  }

  bool hasFreeThings() const { return !firstFreeSpan.isEmpty(); }

  // Count free things by walking the free span list.
  size_t numFreeThings(size_t thingSize) const {
    firstFreeSpan.checkSpan(this);
    size_t numFree = 0;
    const FreeSpan* span = &firstFreeSpan;
    for (; !span->isEmpty(); span = span->nextSpan(this)) {
      numFree += (span->last - span->first) / thingSize + 1;
    }
    return numFree;
  }

  size_t countFreeCells() { return numFreeThings(getThingSize()); }
  size_t countUsedCells() { return getThingsPerArena() - countFreeCells(); }

#ifdef DEBUG
  // Whether |thing| (an address within this arena) lies in some free span.
  // Relies on the span list being ordered by increasing address.
  bool inFreeList(uintptr_t thing) {
    uintptr_t base = address();
    const FreeSpan* span = &firstFreeSpan;
    for (; !span->isEmpty(); span = span->nextSpan(this)) {
      // If the thing comes before the current span, it's not free.
      if (thing < base + span->first) {
        return false;
      }

      // If we find it before the end of the span, it's free.
      if (thing <= base + span->last) {
        return true;
      }
    }
    return false;
  }
#endif

  static bool isAligned(uintptr_t thing, size_t thingSize) {
    /* Things ends at the arena end. */
    uintptr_t tailOffset = ArenaSize - (thing & ArenaMask);
    return tailOffset % thingSize == 0;
  }

  bool isNewlyCreated() const { return isNewlyCreated_; }

  bool onDelayedMarkingList() const { return onDelayedMarkingList_; }

  Arena* getNextDelayedMarking() const {
    MOZ_ASSERT(onDelayedMarkingList_);
    return reinterpret_cast<Arena*>(nextDelayedMarkingArena_ << ArenaShift);
  }

  // Put this arena on the delayed marking list, linking to |arena| (which may
  // be null for the end of the list).
  void setNextDelayedMarkingArena(Arena* arena) {
    MOZ_ASSERT(!(uintptr_t(arena) & ArenaMask));
    MOZ_ASSERT(!onDelayedMarkingList_);
    MOZ_ASSERT(!hasDelayedBlackMarking_);
    MOZ_ASSERT(!hasDelayedGrayMarking_);
    MOZ_ASSERT(!nextDelayedMarkingArena_);
    onDelayedMarkingList_ = 1;
    if (arena) {
      nextDelayedMarkingArena_ = arena->address() >> ArenaShift;
    }
  }

  void updateNextDelayedMarkingArena(Arena* arena) {
    MOZ_ASSERT(!(uintptr_t(arena) & ArenaMask));
    MOZ_ASSERT(onDelayedMarkingList_);
    nextDelayedMarkingArena_ = arena ? arena->address() >> ArenaShift : 0;
  }

  bool hasDelayedMarking(MarkColor color) const {
    MOZ_ASSERT(onDelayedMarkingList_);
    return color == MarkColor::Black ? hasDelayedBlackMarking_
                                     : hasDelayedGrayMarking_;
  }

  bool hasAnyDelayedMarking() const {
    MOZ_ASSERT(onDelayedMarkingList_);
    return hasDelayedBlackMarking_ || hasDelayedGrayMarking_;
  }

  void setHasDelayedMarking(MarkColor color, bool value) {
    MOZ_ASSERT(onDelayedMarkingList_);
    if (color == MarkColor::Black) {
      hasDelayedBlackMarking_ = value;
    } else {
      hasDelayedGrayMarking_ = value;
    }
  }

  void clearDelayedMarkingState() {
    MOZ_ASSERT(onDelayedMarkingList_);
    onDelayedMarkingList_ = 0;
    hasDelayedBlackMarking_ = 0;
    hasDelayedGrayMarking_ = 0;
    nextDelayedMarkingArena_ = 0;
  }

  inline ArenaCellSet*& bufferedCells();
  inline size_t& atomBitmapStart();

  // Sweep this arena's cells of type T; defined in GC implementation code,
  // not in this file.
  template <typename T>
  size_t finalize(JS::GCContext* gcx, AllocKind thingKind, size_t thingSize);

  static void staticAsserts();
  static void checkLookupTables();

  void unmarkAll();
  void unmarkPreMarkedFreeCells();

  void arenaAllocatedDuringGC();

#ifdef DEBUG
  void checkNoMarkedFreeCells();
  void checkAllCellsMarkedBlack();
#endif

#if defined(DEBUG) || defined(JS_GC_ZEAL)
  void checkNoMarkedCells();
#endif
};
+
// Validate that the layout constants published in js/HeapAPI.h match the real
// Arena layout defined above.
static_assert(ArenaZoneOffset == offsetof(Arena, zone),
              "The hardcoded API zone offset must match the actual offset.");

static_assert(sizeof(Arena) == ArenaSize,
              "ArenaSize must match the actual size of the Arena structure.");

static_assert(
    offsetof(Arena, data) == ArenaHeaderSize,
    "ArenaHeaderSize must match the actual size of the header fields.");
+
// Like getArenaUnchecked(), but verifies the resulting arena address.
inline Arena* FreeSpan::getArena() {
  Arena* arena = getArenaUnchecked();
  arena->checkAddress();
  return arena;
}
+
// Debug check of this span's invariants within |arena|; no-op in release
// builds.
inline void FreeSpan::checkSpan(const Arena* arena) const {
#ifdef DEBUG
  if (!first) {
    // Empty spans are encoded as first == last == 0.
    MOZ_ASSERT(!first && !last);
    return;
  }

  arena->checkAddress();
  checkRange(first, last, arena);

  // If there's a following span, it must have a higher address,
  // and the gap must be at least 2 * thingSize.
  const FreeSpan* next = nextSpanUnchecked(arena);
  if (next->first) {
    checkRange(next->first, next->last, arena);
    size_t thingSize = arena->getThingSize();
    MOZ_ASSERT(last + 2 * thingSize <= next->first);
  }
#endif
}
+
// Debug-only check that [first, last] is a well-formed range of thing
// offsets for |arena|: within the arena's first/last thing offsets for its
// alloc kind, and with a length that is a whole number of things.
inline void FreeSpan::checkRange(uintptr_t first, uintptr_t last,
                                 const Arena* arena) const {
#ifdef DEBUG
  MOZ_ASSERT(arena);
  MOZ_ASSERT(first <= last);
  AllocKind thingKind = arena->getAllocKind();
  MOZ_ASSERT(first >= Arena::firstThingOffset(thingKind));
  MOZ_ASSERT(last <= Arena::lastThingOffset(thingKind));
  MOZ_ASSERT((last - first) % Arena::thingSize(thingKind) == 0);
#endif
}
+
+// Mark bitmap API:
+
+MOZ_ALWAYS_INLINE bool MarkBitmap::markBit(const TenuredCell* cell,
+ ColorBit colorBit) {
+ MarkBitmapWord* word;
+ uintptr_t mask;
+ getMarkWordAndMask(cell, colorBit, &word, &mask);
+ return *word & mask;
+}
+
+MOZ_ALWAYS_INLINE bool MarkBitmap::isMarkedAny(const TenuredCell* cell) {
+ return markBit(cell, ColorBit::BlackBit) ||
+ markBit(cell, ColorBit::GrayOrBlackBit);
+}
+
// A cell is black when its black bit is set, independent of the
// gray-or-black bit.
MOZ_ALWAYS_INLINE bool MarkBitmap::isMarkedBlack(const TenuredCell* cell) {
  return markBit(cell, ColorBit::BlackBit);
}
+
+MOZ_ALWAYS_INLINE bool MarkBitmap::isMarkedGray(const TenuredCell* cell) {
+ return !markBit(cell, ColorBit::BlackBit) &&
+ markBit(cell, ColorBit::GrayOrBlackBit);
+}
+
+// The following methods that update the mark bits are not thread safe and must
+// not be called in parallel with each other.
+//
+// They use separate read and write operations to avoid an unnecessarily strict
+// atomic update on the marking bitmap.
+//
+// They may be called in parallel with read operations on the mark bitmap where
+// there is no required ordering between the operations. This happens when gray
+// unmarking occurs in parallel with background sweeping.
+
+// The return value indicates if the cell went from unmarked to marked.
// The return value indicates if the cell went from unmarked to marked.
//
// A cell already marked black is never re-marked (black subsumes gray), and
// marking gray when gray is already set also returns false. The separate
// read-then-write of the bitmap word (rather than |*word |= mask|) is
// deliberate; see the comment block above about avoiding strict atomic
// updates. This variant is not safe against concurrent writers.
MOZ_ALWAYS_INLINE bool MarkBitmap::markIfUnmarked(const TenuredCell* cell,
                                                  MarkColor color) {
  MarkBitmapWord* word;
  uintptr_t mask;
  getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
  if (*word & mask) {
    // Already black: nothing to do for either color.
    return false;
  }
  if (color == MarkColor::Black) {
    uintptr_t bits = *word;
    *word = bits | mask;
  } else {
    // We use getMarkWordAndMask to recalculate both mask and word as doing
    // just mask << color may overflow the mask.
    getMarkWordAndMask(cell, ColorBit::GrayOrBlackBit, &word, &mask);
    if (*word & mask) {
      return false;
    }
    uintptr_t bits = *word;
    *word = bits | mask;
  }
  return true;
}
+
// As markIfUnmarked, but using atomic read-modify-write (|*word |= mask|)
// so it may be called while other threads are also setting mark bits.
MOZ_ALWAYS_INLINE bool MarkBitmap::markIfUnmarkedAtomic(const TenuredCell* cell,
                                                        MarkColor color) {
  // This version of the method is safe in the face of concurrent writes to
  // the mark bitmap but may return false positives. The extra
  // synchronisation necessary to avoid this resulted in worse performance
  // overall.

  MarkBitmapWord* word;
  uintptr_t mask;
  getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
  if (*word & mask) {
    return false;
  }
  if (color == MarkColor::Black) {
    *word |= mask;
  } else {
    // We use getMarkWordAndMask to recalculate both mask and word as doing
    // just mask << color may overflow the mask.
    getMarkWordAndMask(cell, ColorBit::GrayOrBlackBit, &word, &mask);
    if (*word & mask) {
      return false;
    }
    *word |= mask;
  }
  return true;
}
+
// Unconditionally set the black bit for |cell|. The explicit read then write
// (instead of a read-modify-write on the bitmap word) is deliberate; see the
// comment block above. Not safe against concurrent writers.
MOZ_ALWAYS_INLINE void MarkBitmap::markBlack(const TenuredCell* cell) {
  MarkBitmapWord* word;
  uintptr_t mask;
  getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
  uintptr_t bits = *word;
  *word = bits | mask;
}
+
// As markBlack, but uses |*word |= mask| so the update is an atomic
// read-modify-write on the bitmap word, making it safe to run in parallel
// with other writers.
MOZ_ALWAYS_INLINE void MarkBitmap::markBlackAtomic(const TenuredCell* cell) {
  MarkBitmapWord* word;
  uintptr_t mask;
  getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
  *word |= mask;
}
+
// Copy the given color bit from |src| (read via its own chunk's bitmap) to
// |dst| in this bitmap, overwriting dst's previous value of that bit. The
// whole destination word is read, modified and written back in one store,
// per the non-atomic update scheme described above.
MOZ_ALWAYS_INLINE void MarkBitmap::copyMarkBit(TenuredCell* dst,
                                               const TenuredCell* src,
                                               ColorBit colorBit) {
  // The source cell may live in a different chunk, so look up its bitmap.
  TenuredChunkBase* srcChunk = detail::GetCellChunkBase(src);
  MarkBitmapWord* srcWord;
  uintptr_t srcMask;
  srcChunk->markBits.getMarkWordAndMask(src, colorBit, &srcWord, &srcMask);

  MarkBitmapWord* dstWord;
  uintptr_t dstMask;
  getMarkWordAndMask(dst, colorBit, &dstWord, &dstMask);

  uintptr_t bits = *dstWord;
  bits &= ~dstMask;
  if (*srcWord & srcMask) {
    bits |= dstMask;
  }
  *dstWord = bits;
}
+
+MOZ_ALWAYS_INLINE void MarkBitmap::unmark(const TenuredCell* cell) {
+ MarkBitmapWord* word;
+ uintptr_t mask;
+ uintptr_t bits;
+ getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
+ bits = *word;
+ *word = bits & ~mask;
+ getMarkWordAndMask(cell, ColorBit::GrayOrBlackBit, &word, &mask);
+ bits = *word;
+ *word = bits & ~mask;
+}
+
// Return a pointer to the first bitmap word covering |arena|. The
// static_assert guarantees the arena's bits occupy whole words, so callers
// may treat the result as an array of ArenaBitmapWords words.
inline MarkBitmapWord* MarkBitmap::arenaBits(Arena* arena) {
  static_assert(
      ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
      "We assume that the part of the bitmap corresponding to the arena "
      "has the exact number of words so we do not need to deal with a word "
      "that covers bits from two arenas.");

  // The arena's first cell maps to its first bitmap word; the mask output is
  // not needed here.
  MarkBitmapWord* word;
  uintptr_t unused;
  getMarkWordAndMask(reinterpret_cast<TenuredCell*>(arena->address()),
                     ColorBit::BlackBit, &word, &unused);
  return word;
}
+
+/*
+ * A chunk in the tenured heap. TenuredChunks contain arenas and associated data
+ * structures (mark bitmap, delayed marking state).
+ */
class TenuredChunk : public TenuredChunkBase {
  // The arenas making up the usable portion of the chunk; the inherited
  // TenuredChunkBase holds the header (mark bitmap, delayed marking state).
  Arena arenas[ArenasPerChunk];

  friend class GCRuntime;
  friend class MarkingValidator;

 public:
  // Map any address within a chunk to the chunk itself by masking off the
  // low bits.
  static TenuredChunk* fromAddress(uintptr_t addr) {
    addr &= ~ChunkMask;
    return reinterpret_cast<TenuredChunk*>(addr);
  }

  // Check that |addr| points past the chunk header into the data area.
  // Nursery chunks have a smaller header (just ChunkBase).
  static bool withinValidRange(uintptr_t addr) {
    uintptr_t offset = addr & ChunkMask;
    if (TenuredChunk::fromAddress(addr)->isNurseryChunk()) {
      return offset >= sizeof(ChunkBase) && offset < ChunkSize;
    }
    return offset >= offsetof(TenuredChunk, arenas) && offset < ChunkSize;
  }

  // Index of |arena| within its chunk's arenas array.
  static size_t arenaIndex(const Arena* arena) {
    uintptr_t addr = arena->address();
    MOZ_ASSERT(!TenuredChunk::fromAddress(addr)->isNurseryChunk());
    MOZ_ASSERT(withinValidRange(addr));
    uintptr_t offset = addr & ChunkMask;
    return (offset - offsetof(TenuredChunk, arenas)) >> ArenaShift;
  }

  explicit TenuredChunk(JSRuntime* runtime) : TenuredChunkBase(runtime) {}

  // The chunk's (chunk-aligned) base address.
  uintptr_t address() const {
    uintptr_t addr = reinterpret_cast<uintptr_t>(this);
    MOZ_ASSERT(!(addr & ChunkMask));
    return addr;
  }

  // True when every arena in the chunk is free.
  bool unused() const { return info.numArenasFree == ArenasPerChunk; }

  bool hasAvailableArenas() const { return info.numArenasFree != 0; }

  // Nursery chunks are distinguished by a non-null store buffer pointer.
  bool isNurseryChunk() const { return storeBuffer; }

  Arena* allocateArena(GCRuntime* gc, JS::Zone* zone, AllocKind kind,
                       const AutoLockGC& lock);

  void releaseArena(GCRuntime* gc, Arena* arena, const AutoLockGC& lock);
  void recycleArena(Arena* arena, SortedArenaList& dest, size_t thingsPerArena);

  void decommitFreeArenas(GCRuntime* gc, const bool& cancel, AutoLockGC& lock);
  [[nodiscard]] bool decommitOneFreePage(GCRuntime* gc, size_t pageIndex,
                                         AutoLockGC& lock);
  void decommitAllArenas();

  // This will decommit each unused not-already decommitted arena. It performs
  // a system call for each arena but is only used during OOM.
  void decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock);

  static void* allocate(GCRuntime* gc);
  static TenuredChunk* emplace(void* ptr, GCRuntime* gc,
                               bool allMemoryCommitted);

  /* Unlink and return the freeArenasHead. */
  Arena* fetchNextFreeArena(GCRuntime* gc);

#ifdef DEBUG
  void verify() const;
#else
  void verify() const {}
#endif

 private:
  void commitOnePage(GCRuntime* gc);

  void updateChunkListAfterAlloc(GCRuntime* gc, const AutoLockGC& lock);
  void updateChunkListAfterFree(GCRuntime* gc, size_t numArenasFree,
                                const AutoLockGC& lock);

  // Check if all arenas in a page are free.
  bool canDecommitPage(size_t pageIndex) const;

  // Check that an arena taken from the free arenas list is located in a free
  // page. Unlike isPageFree(size_t), this also walks the following arenas on
  // the freeArenasHead list to confirm they are in the same page, so that in
  // debug builds we do not touch arenas that were mprotect'ed during
  // compaction.
  bool isPageFree(const Arena* arena) const;

  // Get the page index of the arena.
  size_t pageIndex(const Arena* arena) const {
    return pageIndex(arenaIndex(arena));
  }
  size_t pageIndex(size_t arenaIndex) const {
    return arenaIndex / ArenasPerPage;
  }

  // Address of the first arena in the given page.
  Arena* pageAddress(size_t pageIndex) {
    return &arenas[pageIndex * ArenasPerPage];
  }
};
+
// Debug-only check that this Arena's address is non-null, arena-aligned and
// inside the valid data area of a tenured chunk.
inline void Arena::checkAddress() const {
  mozilla::DebugOnly<uintptr_t> addr = uintptr_t(this);
  MOZ_ASSERT(addr);
  MOZ_ASSERT(!(addr & ArenaMask));
  MOZ_ASSERT(TenuredChunk::withinValidRange(addr));
}
+
+inline TenuredChunk* Arena::chunk() const {
+ return TenuredChunk::fromAddress(address());
+}
+
+// Cell header stored before all nursery cells.
struct alignas(gc::CellAlignBytes) NurseryCellHeader {
  // The AllocSite pointer with the cell's trace kind packed into the low
  // bits. The zone is reached indirectly via allocSite()->zone().
  const uintptr_t allocSiteAndTraceKind;

  // We only need to store a subset of trace kinds so this doesn't cover the
  // full range. The mask covers the two low bits, which AllocSite alignment
  // leaves free.
  static const uintptr_t TraceKindMask = 3;

  // Pack a site pointer and trace kind into one word.
  // NOTE(review): this asserts kind < TraceKindMask (kinds 0..2), while the
  // static_asserts after this struct only require kind <= TraceKindMask —
  // confirm the strict inequality is intentional.
  static uintptr_t MakeValue(AllocSite* const site, JS::TraceKind kind) {
    MOZ_ASSERT(uintptr_t(kind) < TraceKindMask);
    MOZ_ASSERT((uintptr_t(site) & TraceKindMask) == 0);
    return uintptr_t(site) | uintptr_t(kind);
  }

  inline NurseryCellHeader(AllocSite* site, JS::TraceKind kind);

  // Recover the AllocSite pointer by masking off the trace kind bits.
  AllocSite* allocSite() const {
    return reinterpret_cast<AllocSite*>(allocSiteAndTraceKind & ~TraceKindMask);
  }

  JS::Zone* zone() const { return allocSite()->zone(); }

  JS::TraceKind traceKind() const {
    return JS::TraceKind(allocSiteAndTraceKind & TraceKindMask);
  }

  // Map a nursery cell to its header, which is stored immediately before the
  // cell.
  static const NurseryCellHeader* from(const Cell* cell) {
    MOZ_ASSERT(IsInsideNursery(cell));
    return reinterpret_cast<const NurseryCellHeader*>(
        uintptr_t(cell) - sizeof(NurseryCellHeader));
  }
};
+
+static_assert(uintptr_t(JS::TraceKind::Object) <=
+ NurseryCellHeader::TraceKindMask);
+static_assert(uintptr_t(JS::TraceKind::String) <=
+ NurseryCellHeader::TraceKindMask);
+static_assert(uintptr_t(JS::TraceKind::BigInt) <=
+ NurseryCellHeader::TraceKindMask);
+
+} /* namespace gc */
+
+namespace debug {
+
+// Utility functions meant to be called from an interactive debugger.
// Mark state of a cell, in a form convenient for debugger display. Negative
// values indicate the cell has no set mark bits or lives in the nursery.
enum class MarkInfo : int {
  BLACK = 0,
  GRAY = 1,
  UNMARKED = -1,
  NURSERY = -2,
};
+
+// Get the mark color for a cell, in a way easily usable from a debugger.
+MOZ_NEVER_INLINE MarkInfo GetMarkInfo(js::gc::Cell* cell);
+
+// Sample usage from gdb:
+//
+// (gdb) p $word = js::debug::GetMarkWordAddress(obj)
+// $1 = (uintptr_t *) 0x7fa56d5fe360
+// (gdb) p/x $mask = js::debug::GetMarkMask(obj, js::gc::GRAY)
+// $2 = 0x200000000
+// (gdb) watch *$word
+// Hardware watchpoint 7: *$word
+// (gdb) cond 7 *$word & $mask
+// (gdb) cont
+//
+// Note that this is *not* a watchpoint on a single bit. It is a watchpoint on
+// the whole word, which will trigger whenever the word changes and the
+// selected bit is set after the change.
+//
+// So if the bit changing is the desired one, this is exactly what you want.
+// But if a different bit changes (either set or cleared), you may still stop
+// execution if the $mask bit happened to already be set. gdb does not expose
+// enough information to restrict the watchpoint to just a single bit.
+
+// Return the address of the word containing the mark bits for the given cell,
+// or nullptr if the cell is in the nursery.
+MOZ_NEVER_INLINE uintptr_t* GetMarkWordAddress(js::gc::Cell* cell);
+
+// Return the mask for the given cell and color bit, or 0 if the cell is in the
+// nursery.
+MOZ_NEVER_INLINE uintptr_t GetMarkMask(js::gc::Cell* cell, uint32_t colorBit);
+
+} /* namespace debug */
+} /* namespace js */
+
+#endif /* gc_Heap_h */
diff --git a/js/src/gc/IteratorUtils.h b/js/src/gc/IteratorUtils.h
new file mode 100644
index 0000000000..614fd12100
--- /dev/null
+++ b/js/src/gc/IteratorUtils.h
@@ -0,0 +1,121 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_IteratorUtils_h
+#define gc_IteratorUtils_h
+
+#include "mozilla/Array.h"
+#include "mozilla/Maybe.h"
+
+#include <initializer_list>
+
+namespace js {
+
+/*
+ * Create an iterator that yields the values from IteratorB(a) for all a in
+ * IteratorA(). Equivalent to nested for loops over IteratorA and IteratorB
+ * where IteratorB is constructed with a value from IteratorA.
+ */
+template <typename IteratorA, typename IteratorB>
+class NestedIterator {
+ using T = decltype(std::declval<IteratorB>().get());
+
+ IteratorA a;
+ mozilla::Maybe<IteratorB> b;
+
+ public:
+ template <typename... Args>
+ explicit NestedIterator(Args&&... args) : a(std::forward<Args>(args)...) {
+ settle();
+ }
+
+ bool done() const { return b.isNothing(); }
+
+ T get() const {
+ MOZ_ASSERT(!done());
+ return b.ref().get();
+ }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ b->next();
+ if (b->done()) {
+ b.reset();
+ a.next();
+ settle();
+ }
+ }
+
+ const IteratorB& ref() const { return *b; }
+
+ operator T() const { return get(); }
+
+ T operator->() const { return get(); }
+
+ private:
+ void settle() {
+ MOZ_ASSERT(b.isNothing());
+ while (!a.done()) {
+ b.emplace(a.get());
+ if (!b->done()) {
+ break;
+ }
+ b.reset();
+ a.next();
+ }
+ }
+};
+
/*
 * An iterator that yields values from each of N instances of Iterator in
 * sequence.
 */
template <typename Iterator, size_t N>
class ChainedIterator {
  using T = decltype(std::declval<Iterator>().get());

  // The N sub-iterators, visited in order. |index| identifies the currently
  // active one; index == N means the whole chain is exhausted.
  mozilla::Array<Iterator, N> iterators;
  size_t index = 0;

 public:
  // Constructs one sub-iterator per argument (so callers pass N arguments),
  // then skips past any initially-empty iterators.
  template <typename... Args>
  MOZ_IMPLICIT ChainedIterator(Args&&... args)
      : iterators(Iterator(std::forward<Args>(args))...) {
    static_assert(N > 1);
    settle();
  }

  bool done() const { return index == N; }

  void next() {
    MOZ_ASSERT(!done());
    iterators[index].next();
    settle();
  }

  T get() const {
    MOZ_ASSERT(!done());
    return iterators[index].get();
  }

  operator T() const { return get(); }
  T operator->() const { return get(); }

 private:
  // Advance |index| past exhausted iterators until one has a value or the
  // chain ends. Must be called with index < N (asserted below).
  void settle() {
    MOZ_ASSERT(!done());
    while (iterators[index].done()) {
      index++;
      if (done()) {
        break;
      }
    }
  }
};
+
+} /* namespace js */
+
+#endif // gc_IteratorUtils_h
diff --git a/js/src/gc/MallocedBlockCache.cpp b/js/src/gc/MallocedBlockCache.cpp
new file mode 100644
index 0000000000..602d1a80b0
--- /dev/null
+++ b/js/src/gc/MallocedBlockCache.cpp
@@ -0,0 +1,144 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sw=2 et tw=80:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/MallocedBlockCache.h"
+#include "mozilla/MemoryChecking.h"
+
+using js::PointerAndUint7;
+using js::gc::MallocedBlockCache;
+
// Hand any cached blocks back to js_free before the cache is destroyed.
MallocedBlockCache::~MallocedBlockCache() { clear(); }
+
+PointerAndUint7 MallocedBlockCache::alloc(size_t size) {
+ // Figure out which free list can give us a block of size `size`, after it
+ // has been rounded up to a multiple of `step`.
+ //
+ // Example mapping for STEP = 16 and NUM_LISTS = 8, after rounding up:
+ // 0 never holds any blocks (denotes "too large")
+ // 1 holds blocks of size 16
+ // 2 holds blocks of size 32
+ // 3 holds blocks of size 48
+ // 4 holds blocks of size 64
+ // 5 holds blocks of size 80
+ // 6 holds blocks of size 96
+ // 7 holds blocks of size 112
+ //
+ // For a request of size n:
+ // * if n == 0, fail
+ // * else
+ // round n up to a multiple of STEP
+ // let i = n / STEP
+ // if i >= NUM_LISTS
+ // alloc direct from js_malloc, and return listID = 0
+ // if lists[i] is nonempty, use lists[i] and return listID = i.
+ // else
+ // let p = js_malloc(n)
+ // return p and listID = i.
+
+ // We're never expected to handle zero-sized blocks.
+ MOZ_ASSERT(size > 0);
+
+ size = js::RoundUp(size, STEP);
+ size_t i = size / STEP;
+
+ // Too large to cache; go straight to js_malloc.
+ if (MOZ_UNLIKELY(i >= NUM_LISTS)) {
+ void* p = js_malloc(size);
+ // If p is nullptr, that fact is carried into the PointerAndUint7, and the
+ // caller is expected to check that.
+ return PointerAndUint7(p, OVERSIZE_BLOCK_LIST_ID);
+ }
+
+ // The case we hope is common. First, see if we can pull a block from the
+ // relevant list.
+ MOZ_ASSERT(i >= 1 && i < NUM_LISTS);
+ // Check that i is the right list
+ MOZ_ASSERT(i * STEP == size);
+ if (MOZ_LIKELY(!lists[i].empty())) {
+ void* block = lists[i].popCopy();
+ return PointerAndUint7(block, i);
+ }
+
+ // No luck.
+ void* p = js_malloc(size);
+ if (MOZ_UNLIKELY(!p)) {
+ return PointerAndUint7(nullptr, 0); // OOM
+ }
+ return PointerAndUint7(p, i);
+}
+
+void MallocedBlockCache::free(PointerAndUint7 blockAndListID) {
+ // This is a whole lot simpler than the ::alloc case, since we are given the
+ // listId and don't have to compute it (not that we have any way to).
+ void* block = blockAndListID.pointer();
+ uint32_t listID = blockAndListID.uint7();
+ MOZ_ASSERT(block);
+ MOZ_ASSERT(listID < NUM_LISTS);
+ if (MOZ_UNLIKELY(listID == OVERSIZE_BLOCK_LIST_ID)) {
+ // It was too large for recycling; go straight to js_free.
+ js_free(block);
+ return;
+ }
+
+ // Put it back on list `listId`, first poisoning it for safety.
+ memset(block, JS_NOTINUSE_TRAILER_PATTERN, listID * STEP);
+ MOZ_MAKE_MEM_UNDEFINED(block, listID * STEP);
+ if (MOZ_UNLIKELY(!lists[listID].append(block))) {
+ // OOM'd while doing admin. Hand it off to js_free and forget about the
+ // OOM.
+ js_free(block);
+ }
+}
+
+void MallocedBlockCache::preen(float percentOfBlocksToDiscard) {
+ MOZ_ASSERT(percentOfBlocksToDiscard >= 0.0 &&
+ percentOfBlocksToDiscard <= 100.0);
+ MOZ_ASSERT(lists[OVERSIZE_BLOCK_LIST_ID].empty());
+ for (size_t listID = 1; listID < NUM_LISTS; listID++) {
+ MallocedBlockVector& list = lists[listID];
+ size_t numToFree =
+ size_t(float(list.length()) * (percentOfBlocksToDiscard / 100.0));
+ MOZ_RELEASE_ASSERT(numToFree <= list.length());
+ while (numToFree > 0) {
+ void* block = list.popCopy();
+ MOZ_ASSERT(block);
+ js_free(block);
+ numToFree--;
+ }
+ }
+}
+
+void MallocedBlockCache::clear() {
+ MOZ_ASSERT(lists[OVERSIZE_BLOCK_LIST_ID].empty());
+ for (size_t i = 1; i < NUM_LISTS; i++) {
+ MallocedBlockVector& list = lists[i];
+ for (size_t j = 0; j < list.length(); j++) {
+ MOZ_ASSERT(list[j]);
+ js_free(list[j]);
+ list[j] = nullptr; // for safety
+ }
+ list.clear();
+ }
+}
+
+size_t MallocedBlockCache::sizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ MOZ_ASSERT(lists[OVERSIZE_BLOCK_LIST_ID].empty());
+ size_t nBytes = 0;
+ for (size_t listID = 0; listID < NUM_LISTS; listID++) {
+ const MallocedBlockVector& list = lists[listID];
+ nBytes += list.sizeOfExcludingThis(mallocSizeOf);
+ // The payload size of each block in `list` is the same. Hence, we could
+ // possibly do better here (measure once and multiply by the length) if we
+ // believe that the metadata size for each block is also the same.
+ for (size_t i = 0; i < list.length(); i++) {
+ MOZ_ASSERT(list[i]);
+ nBytes += mallocSizeOf(list[i]);
+ }
+ }
+ return nBytes;
+}
diff --git a/js/src/gc/MallocedBlockCache.h b/js/src/gc/MallocedBlockCache.h
new file mode 100644
index 0000000000..546d7e50af
--- /dev/null
+++ b/js/src/gc/MallocedBlockCache.h
@@ -0,0 +1,91 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sw=2 et tw=80:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_MallocedBlockCache_h
+#define gc_MallocedBlockCache_h
+
+#include "ds/PointerAndUint7.h"
+#include "js/AllocPolicy.h"
+#include "js/Vector.h"
+#include "util/Poison.h"
+
+namespace js {
+namespace gc {
+
+// MallocedBlockCache implements a lightweight wrapper around js_malloc/js_free.
+//
+// Blocks are requested by ::alloc and must be returned with ::free, at which
+// point the cache may decide to hold on to the block rather than hand it back
+// to js_free. Subsequent ::alloc calls may be satisfied from the cached
+// blocks rather than calling js_malloc. The mechanism is designed to be much
+// cheaper than calling js_malloc/js_free directly. One consequence is that
+// there is no locking; it is essential therefore to use each cache only from
+// a single thread.
+//
+// The intended use is for lightweight management of OOL (malloc'd) storage
+// associated with WasmStructObject and WasmArrayObject. The mechanism is
+// general and potentially has other uses. Blocks of size STEP * NUM_LISTS
+// and larger are never cached, though.
+//
+// Request sizes are rounded up to a multiple of STEP. There are NUM_LISTS-1
+// free lists, with a "List ID" indicating the multiple of STEP stored on the
+// list. So for example, blocks of size 3 * STEP (after rounding up) are
+// stored on the list with ID 3. List ID 0 indicates blocks which are too
+// large to live on any freelist. With the default settings, this gives
+// separate freelists for blocks of size 16, 32, 48, .. 496. Blocks of size
+// zero are not supported, and `lists[0]` will always be empty.
+//
+// Allocation of a block produces not only the block's address but also its
+// List ID. When freeing, both values must be presented, because there is
+// otherwise no way for ::free to know the size of the original allocation,
+// and hence which freelist it should go on. For this reason, the ::alloc and
+// ::free methods produce and take a `PointerAndUint7`, not a `void*`.
+//
+// Resizing of blocks is not supported.
+
class MallocedBlockCache {
 public:
  // Granularity of cached block sizes: all requests are rounded up to a
  // multiple of STEP bytes.
  static const size_t STEP = 16;

  // Number of free lists (including the always-empty oversize list 0), so
  // blocks up to STEP * (NUM_LISTS - 1) bytes can be cached.
  static const size_t NUM_LISTS = 32;
  // This limitation exists because allocation returns a PointerAndUint7, and
  // a List-ID value (viz, 0 .. NUM_LISTS-1) is stored in the Uint7 part.
  static_assert(NUM_LISTS <= (1 << 7));

  // list[0] must always remain empty. List ID 0 indicates a block which
  // cannot participate in the freelist machinery because it is too large.
  //
  // list[i], for 1 <= i < NUM_LISTS, holds blocks of size i * STEP only.
  // All requests are rounded up to a multiple of STEP.
  //
  // We do not expect to be required to issue or accept blocks of size zero.
  static const size_t OVERSIZE_BLOCK_LIST_ID = 0;
  using MallocedBlockVector = Vector<void*, 0, SystemAllocPolicy>;

  MallocedBlockVector lists[NUM_LISTS];

  // Returns all cached blocks to js_free (via clear()).
  ~MallocedBlockCache();

  // Allocation and freeing. The Uint7 part of the result/argument carries
  // the block's list ID, which ::free needs to locate the right list.
  [[nodiscard]] PointerAndUint7 alloc(size_t size);
  void free(PointerAndUint7 blockAndListID);

  // Allows users to gradually hand blocks back to js_free, so as to avoid
  // space leaks in long-running scenarios. The specified percentage of
  // blocks in each list is discarded.
  void preen(float percentOfBlocksToDiscard);

  // Return all blocks in the cache to js_free.
  void clear();

  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
};
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_MallocedBlockCache_h
diff --git a/js/src/gc/Marking-inl.h b/js/src/gc/Marking-inl.h
new file mode 100644
index 0000000000..07cfffb8da
--- /dev/null
+++ b/js/src/gc/Marking-inl.h
@@ -0,0 +1,196 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Marking_inl_h
+#define gc_Marking_inl_h
+
+#include "gc/Marking.h"
+
+#include <type_traits>
+
+#include "gc/RelocationOverlay.h"
+#include "js/Id.h"
+#include "js/Value.h"
+#include "vm/StringType.h"
+#include "vm/TaggedProto.h"
+
+#include "gc/Nursery-inl.h"
+
+namespace js {
+namespace gc {
+
+// An abstraction to re-wrap any kind of typed pointer back to the tagged
+// pointer it came from with |TaggedPtr<TargetType>::wrap(sourcePtr)|.
// Primary template is intentionally empty: only the specializations below
// are usable, so wrapping an unsupported tagged type fails to compile.
template <typename T>
struct TaggedPtr {};
+
template <>
struct TaggedPtr<JS::Value> {
  // A null object pointer maps back to the null value, not an object value.
  static JS::Value wrap(JSObject* obj) {
    if (!obj) {
      return JS::NullValue();
    }
#ifdef ENABLE_RECORD_TUPLE
    // Records and tuples are objects internally but are re-wrapped as
    // extended primitive values.
    if (MaybeForwardedIsExtendedPrimitive(*obj)) {
      return JS::ExtendedPrimitiveValue(*obj);
    }
#endif
    return JS::ObjectValue(*obj);
  }
  static JS::Value wrap(JSString* str) { return JS::StringValue(str); }
  static JS::Value wrap(JS::Symbol* sym) { return JS::SymbolValue(sym); }
  static JS::Value wrap(JS::BigInt* bi) { return JS::BigIntValue(bi); }
  // Any other cell type is stored as an opaque private GC thing value.
  template <typename T>
  static JS::Value wrap(T* priv) {
    static_assert(std::is_base_of_v<Cell, T>,
                  "Type must be a GC thing derived from js::gc::Cell");
    return JS::PrivateGCThingValue(priv);
  }
  static JS::Value empty() { return JS::UndefinedValue(); }
};
+
template <>
struct TaggedPtr<jsid> {
  // Strings stored in ids must be non-integer atoms (see
  // PropertyKey::NonIntAtom); only strings and symbols can be re-wrapped as
  // property keys.
  static jsid wrap(JSString* str) { return JS::PropertyKey::NonIntAtom(str); }
  static jsid wrap(JS::Symbol* sym) { return PropertyKey::Symbol(sym); }
  static jsid empty() { return JS::PropertyKey::Void(); }
};
+
template <>
struct TaggedPtr<TaggedProto> {
  static TaggedProto wrap(JSObject* obj) { return TaggedProto(obj); }
  // A default-constructed TaggedProto serves as the empty value.
  static TaggedProto empty() { return TaggedProto(); }
};
+
// Compile-time predicate: can a cell of concrete type T have been relocated
// (and so hold a forwarding pointer)? The macro expands FOR_EACH_ALLOCKIND
// into a chain of nested conditional expressions, selecting the canCompact
// flag of the first alloc-kind type that T derives from.
template <typename T>
struct MightBeForwarded {
  static_assert(std::is_base_of_v<Cell, T>);
  // T must be a concrete cell type; the generic base classes are ambiguous.
  static_assert(!std::is_same_v<Cell, T> && !std::is_same_v<TenuredCell, T>);

#define CAN_FORWARD_KIND_OR(_1, _2, Type, _3, _4, _5, canCompact) \
  std::is_base_of_v<Type, T> ? canCompact:

  // FOR_EACH_ALLOCKIND doesn't cover every possible type: make sure
  // to default to `true` for unknown types.
  static constexpr bool value = FOR_EACH_ALLOCKIND(CAN_FORWARD_KIND_OR) true;
#undef CAN_FORWARD_KIND_OR
};
+
+template <typename T>
+inline bool IsForwarded(const T* t) {
+ if (!MightBeForwarded<T>::value) {
+ MOZ_ASSERT(!t->isForwarded());
+ return false;
+ }
+
+ return t->isForwarded();
+}
+
// Follow the forwarding pointer of a cell known to have been moved,
// returning its new location. The old cell's storage has been overlaid with
// a RelocationOverlay holding the destination address.
template <typename T>
inline T* Forwarded(const T* t) {
  const RelocationOverlay* overlay = RelocationOverlay::fromCell(t);
  MOZ_ASSERT(overlay->isForwarded());
  return reinterpret_cast<T*>(overlay->forwardingAddress());
}
+
// Resolve |t| through at most one level of forwarding: return the new
// location if it was moved, otherwise |t| itself.
template <typename T>
inline T MaybeForwarded(T t) {
  if (!IsForwarded(t)) {
    return t;
  }
  T moved = Forwarded(t);
  // Forwarding is single-level: the destination is never itself forwarded.
  MOZ_ASSERT(!IsForwarded(moved));
  return moved;
}
+
// Get an object's class, tolerating the shape and base shape themselves
// having been forwarded during compaction.
inline const JSClass* MaybeForwardedObjectClass(const JSObject* obj) {
  Shape* shape = MaybeForwarded(obj->shapeMaybeForwarded());
  BaseShape* baseShape = MaybeForwarded(shape->base());
  return baseShape->clasp();
}
+
+template <typename T>
+inline bool MaybeForwardedObjectIs(const JSObject* obj) {
+ MOZ_ASSERT(!obj->isForwarded());
+ return MaybeForwardedObjectClass(obj) == &T::class_;
+}
+
// Downcast |obj| to T, asserting (via the forwarding-tolerant class check)
// that the cast is valid.
template <typename T>
inline T& MaybeForwardedObjectAs(JSObject* obj) {
  MOZ_ASSERT(MaybeForwardedObjectIs<T>(obj));
  return *static_cast<T*>(obj);
}
+
// Construct an overlay recording that the cell it is written over has moved
// to |dst|. The destination must not have any header flags set, since its
// address is stored directly in the header word.
inline RelocationOverlay::RelocationOverlay(Cell* dst) {
  MOZ_ASSERT(dst->flags() == 0);
  uintptr_t ptr = uintptr_t(dst);
  header_.setForwardingAddress(ptr);
}
+
/* static */
// Overwrite |src|'s storage (via placement new) with an overlay that
// forwards to |dst|. Neither cell may already be forwarded.
inline RelocationOverlay* RelocationOverlay::forwardCell(Cell* src, Cell* dst) {
  MOZ_ASSERT(!src->isForwarded());
  MOZ_ASSERT(!dst->isForwarded());
  return new (src) RelocationOverlay(dst);
}
+
+inline bool IsAboutToBeFinalizedDuringMinorSweep(Cell** cellp) {
+ MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());
+
+ if ((*cellp)->isTenured()) {
+ return false;
+ }
+
+ return !Nursery::getForwardedPointer(cellp);
+}
+
// Special case pre-write barrier for strings used during rope flattening.
// This avoids eager marking of ropes which does not immediately mark the
// cells if we hit OOM. This does not traverse ropes and is instead called on
// every node in a rope during flattening.
inline void PreWriteBarrierDuringFlattening(JSString* str) {
  MOZ_ASSERT(str);
  MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());

  // Nursery cells are never subject to the incremental pre-write barrier.
  if (IsInsideNursery(str)) {
    return;
  }

  // The barrier only applies while the zone requires incremental barriers.
  auto* cell = reinterpret_cast<TenuredCell*>(str);
  JS::shadow::Zone* zone = cell->shadowZoneFromAnyThread();
  if (!zone->needsIncrementalBarrier()) {
    return;
  }

  MOZ_ASSERT(!str->isPermanentAndMayBeShared());
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(zone->runtimeFromAnyThread()));
  PerformIncrementalBarrierDuringFlattening(str);
}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+
// After a moving GC has finished, a valid pointer must be tenured and must
// not still carry a forwarding address.
template <typename T>
inline bool IsGCThingValidAfterMovingGC(T* t) {
  return !IsInsideNursery(t) && !t->isForwarded();
}
+
// Release-assert that a (possibly null) pointer is still valid after a
// moving GC; null pointers are trivially fine.
template <typename T>
inline void CheckGCThingAfterMovingGC(T* t) {
  if (!t) {
    return;
  }
  MOZ_RELEASE_ASSERT(IsGCThingValidAfterMovingGC(t));
}
+
// Convenience overload: check the target of a weak pointer, reading it
// without triggering read barriers.
template <typename T>
inline void CheckGCThingAfterMovingGC(const WeakHeapPtr<T*>& t) {
  CheckGCThingAfterMovingGC(t.unbarrieredGet());
}
+
+#endif // JSGC_HASH_TABLE_CHECKS
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif // gc_Marking_inl_h
diff --git a/js/src/gc/Marking.cpp b/js/src/gc/Marking.cpp
new file mode 100644
index 0000000000..b92cd5f3ac
--- /dev/null
+++ b/js/src/gc/Marking.cpp
@@ -0,0 +1,2774 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Marking-inl.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/IntegerRange.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/ScopeExit.h"
+
+#include <algorithm>
+#include <type_traits>
+
+#include "gc/GCInternals.h"
+#include "gc/ParallelMarking.h"
+#include "gc/TraceKind.h"
+#include "jit/JitCode.h"
+#include "js/GCTypeMacros.h" // JS_FOR_EACH_PUBLIC_{,TAGGED_}GC_POINTER_TYPE
+#include "js/SliceBudget.h"
+#include "util/Poison.h"
+#include "vm/GeneratorObject.h"
+
+#include "gc/GC-inl.h"
+#include "gc/PrivateIterators-inl.h"
+#include "gc/TraceMethods-inl.h"
+#include "gc/WeakMap-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+using JS::MapTypeToTraceKind;
+
+using mozilla::DebugOnly;
+using mozilla::IntegerRange;
+using mozilla::PodCopy;
+
+// [SMDOC] GC Tracing
+//
+// Tracing Overview
+// ================
+//
+// Tracing, in this context, refers to an abstract visitation of some or all of
+// the GC-controlled heap. The effect of tracing an edge of the graph depends
+// on the subclass of the JSTracer on whose behalf we are tracing.
+//
+// Marking
+// -------
+//
+// The primary JSTracer is the GCMarker. The marking tracer causes the target
+// of each traversed edge to be marked black and the target edge's children to
+// be marked either gray (in the gc algorithm sense) or immediately black.
+//
+// Callback
+// --------
+//
+// The secondary JSTracer is the CallbackTracer. This simply invokes a callback
+// on each edge in a child.
+//
+// The following is a rough outline of the general structure of the tracing
+// internals.
+//
+/* clang-format off */
+//
+// +-------------------+ ......................
+// | | : :
+// | v v +---+---+
+// | TraceRoot TraceEdge TraceRange GCMarker:: | |
+// | | | | processMarkStackTop | Mark |
+// | +-----------------------+ | | Stack |
+// | | | | |
+// | v | +---+---+
+// | TraceEdgeInternal | ^
+// | | +<-------------+ :
+// | | | | :
+// | v v | :
+// | CallbackTracer:: markAndTraverseEdge | :
+// | onSomeEdge | | :
+// | | | | :
+// | | | | :
+// | +-------------+---------------+ | :
+// | | | :
+// | v | :
+// | markAndTraverse | :
+// | | | :
+// | | | :
+// | traverse | :
+// | | | :
+// | +--------------------------------------+ | :
+// | | | | | :
+// | v v v | :
+// | markAndTraceChildren markAndPush eagerlyMarkChildren | :
+// | | : | | :
+// | v : +-----------+ :
+// | T::traceChildren : :
+// | | : :
+// +-------------+ ......................................
+//
+// Legend:
+// ------- Direct calls
+// ....... Data flow
+//
+/* clang-format on */
+
+/*** Tracing Invariants *****************************************************/
+
+// Return whether |thing| belongs to a runtime other than |rt|. Only permanent
+// shared things may be owned by another runtime.
+template <typename T>
+static inline bool IsOwnedByOtherRuntime(JSRuntime* rt, T thing) {
+  bool other = thing->runtimeFromAnyThread() != rt;
+  MOZ_ASSERT_IF(other, thing->isPermanentAndMayBeShared());
+  return other;
+}
+
+#ifdef DEBUG
+
+// Debug-only: return whether |cell|'s address lies on its arena's free list,
+// i.e. the cell is not currently allocated.
+static inline bool IsInFreeList(TenuredCell* cell) {
+  Arena* arena = cell->arena();
+  uintptr_t addr = reinterpret_cast<uintptr_t>(cell);
+  MOZ_ASSERT(Arena::isAligned(addr, arena->getThingSize()));
+  return arena->inFreeList(addr);
+}
+
+// Debug-only sanity checks applied to every thing reached by a tracer:
+// forwarding is only legal for moving tracers, runtime/zone ownership and
+// thread access are consistent, the trace kind matches the static type, and
+// (while marking) the cell is not sitting on a free list.
+template <typename T>
+void js::CheckTracedThing(JSTracer* trc, T* thing) {
+  MOZ_ASSERT(trc);
+  MOZ_ASSERT(thing);
+
+  // Only tracers that run while things move may see forwarded cells.
+  if (IsForwarded(thing)) {
+    JS::TracerKind kind = trc->kind();
+    MOZ_ASSERT(kind == JS::TracerKind::Tenuring ||
+               kind == JS::TracerKind::MinorSweeping ||
+               kind == JS::TracerKind::Moving);
+    thing = Forwarded(thing);
+  }
+
+  /* This function uses data that's not available in the nursery. */
+  if (IsInsideNursery(thing)) {
+    return;
+  }
+
+  /*
+   * Permanent shared things that are not associated with this runtime will be
+   * ignored during marking.
+   */
+  Zone* zone = thing->zoneFromAnyThread();
+  if (IsOwnedByOtherRuntime(trc->runtime(), thing)) {
+    MOZ_ASSERT(!zone->wasGCStarted());
+    MOZ_ASSERT(thing->isMarkedBlack());
+    return;
+  }
+
+  JSRuntime* rt = trc->runtime();
+  MOZ_ASSERT(zone->runtimeFromAnyThread() == rt);
+
+  bool isGcMarkingTracer = trc->isMarkingTracer();
+  bool isUnmarkGrayTracer = IsTracerKind(trc, JS::TracerKind::UnmarkGray);
+  bool isClearEdgesTracer = IsTracerKind(trc, JS::TracerKind::ClearEdges);
+
+  if (TlsContext.get()) {
+    // If we're on the main thread we must have access to the runtime and zone.
+    MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+    MOZ_ASSERT(CurrentThreadCanAccessZone(zone));
+  } else {
+    // Off-thread tracing is only legal for this fixed set of tracer kinds.
+    MOZ_ASSERT(isGcMarkingTracer || isUnmarkGrayTracer || isClearEdgesTracer ||
+               IsTracerKind(trc, JS::TracerKind::Moving) ||
+               IsTracerKind(trc, JS::TracerKind::Sweeping));
+    MOZ_ASSERT_IF(!isClearEdgesTracer, CurrentThreadIsPerformingGC());
+  }
+
+  MOZ_ASSERT(thing->isAligned());
+  MOZ_ASSERT(MapTypeToTraceKind<std::remove_pointer_t<T>>::kind ==
+             thing->getTraceKind());
+
+  /*
+   * Check that we only mark allocated cells.
+   *
+   * This check is restricted to marking for two reasons: Firstly, if background
+   * sweeping is running and concurrently modifying the free list then it is not
+   * safe. Secondly, it was thought to be slow so this is a compromise so as to
+   * not affect test times too much.
+   */
+  MOZ_ASSERT_IF(zone->isGCMarking(), !IsInFreeList(&thing->asTenured()));
+}
+
+// Tagged-value overload: dispatch to the pointer overload for whatever GC
+// thing |thing| holds (no-op for non-GC-thing values).
+template <typename T>
+void js::CheckTracedThing(JSTracer* trc, const T& thing) {
+  ApplyGCThingTyped(thing, [trc](auto t) { CheckTracedThing(trc, t); });
+}
+
+// Debug-only checks specific to marking (beyond CheckTracedThing): the zone
+// must be eligible for the marker's current color, must not already be past
+// marking (sweeping/finished/compacting), and cross-compartment edges must go
+// through TraceCrossCompartmentEdge.
+template <typename T>
+static void CheckMarkedThing(GCMarker* gcMarker, T* thing) {
+  Zone* zone = thing->zoneFromAnyThread();
+
+  MOZ_ASSERT(zone->shouldMarkInZone(gcMarker->markColor()));
+
+  MOZ_ASSERT_IF(gcMarker->shouldCheckCompartments(),
+                zone->isCollectingFromAnyThread() || zone->isAtomsZone());
+
+  // Gray marking is only valid once a zone has moved past black-only marking.
+  MOZ_ASSERT_IF(gcMarker->markColor() == MarkColor::Gray,
+                !zone->isGCMarkingBlackOnly() || zone->isAtomsZone());
+
+  MOZ_ASSERT(!(zone->isGCSweeping() || zone->isGCFinished() ||
+               zone->isGCCompacting()));
+
+  // Check that we don't stray from the current compartment and zone without
+  // using TraceCrossCompartmentEdge.
+  Compartment* comp = thing->maybeCompartment();
+  MOZ_ASSERT_IF(gcMarker->tracingCompartment && comp,
+                gcMarker->tracingCompartment == comp);
+  MOZ_ASSERT_IF(gcMarker->tracingZone,
+                gcMarker->tracingZone == zone || zone->isAtomsZone());
+}
+
+namespace js {
+
+# define IMPL_CHECK_TRACED_THING(_, type, _1, _2) \
+ template void CheckTracedThing<type>(JSTracer*, type*);
+JS_FOR_EACH_TRACEKIND(IMPL_CHECK_TRACED_THING);
+# undef IMPL_CHECK_TRACED_THING
+
+template void CheckTracedThing<Value>(JSTracer*, const Value&);
+
+} // namespace js
+
+#endif
+
+// Decide whether a cross-compartment edge from |src| to |dstCell| should be
+// marked with the marker's current color, enforcing the no-black-to-gray
+// invariant required by the cycle collector and our sweep-group ordering.
+static inline bool ShouldMarkCrossCompartment(GCMarker* marker, JSObject* src,
+                                              Cell* dstCell) {
+  MarkColor color = marker->markColor();
+
+  if (!dstCell->isTenured()) {
+#ifdef DEBUG
+    // Bug 1743098: This shouldn't be possible but it does seem to happen. Log
+    // some useful information in debug builds.
+    if (color != MarkColor::Black) {
+      fprintf(stderr,
+              "ShouldMarkCrossCompartment: cross compartment edge from gray "
+              "object to nursery thing\n");
+      fprintf(stderr, "src: ");
+      src->dump();
+      fprintf(stderr, "dst: ");
+      dstCell->dump();
+    }
+#endif
+    MOZ_ASSERT(color == MarkColor::Black);
+    return false;
+  }
+  TenuredCell& dst = dstCell->asTenured();
+
+  JS::Zone* dstZone = dst.zone();
+  // If neither end's zone is being marked there is nothing to do.
+  if (!src->zone()->isGCMarking() && !dstZone->isGCMarking()) {
+    return false;
+  }
+
+  if (color == MarkColor::Black) {
+    // Check our sweep groups are correct: we should never have to
+    // mark something in a zone that we have started sweeping.
+    MOZ_ASSERT_IF(!dst.isMarkedBlack(), !dstZone->isGCSweeping());
+
+    /*
+     * Having black->gray edges violates our promise to the cycle collector so
+     * we ensure that gray things we encounter when marking black end up getting
+     * marked black.
+     *
+     * This can happen for two reasons:
+     *
+     * 1) If we're collecting a compartment and it has an edge to an uncollected
+     * compartment it's possible that the source and destination of the
+     * cross-compartment edge should be gray, but the source was marked black by
+     * the write barrier.
+     *
+     * 2) If we yield during gray marking and the write barrier marks a gray
+     * thing black.
+     *
+     * We handle the first case before returning whereas the second case happens
+     * as part of normal marking.
+     */
+    if (dst.isMarkedGray() && !dstZone->isGCMarking()) {
+      UnmarkGrayGCThingUnchecked(marker,
+                                 JS::GCCellPtr(&dst, dst.getTraceKind()));
+      return false;
+    }
+
+    return dstZone->isGCMarking();
+  } else {
+    // Check our sweep groups are correct as above.
+    MOZ_ASSERT_IF(!dst.isMarkedAny(), !dstZone->isGCSweeping());
+
+    if (dstZone->isGCMarkingBlackOnly()) {
+      /*
+       * The destination compartment is not being marked gray now, but it
+       * will be later, so record the cell so it can be marked gray at the
+       * appropriate time.
+       */
+      if (!dst.isMarkedAny()) {
+        DelayCrossCompartmentGrayMarking(marker, src);
+      }
+      return false;
+    }
+
+    return dstZone->isGCMarkingBlackAndGray();
+  }
+}
+
+// Non-marking tracers always traverse cross-compartment edges; marking
+// tracers defer to ShouldMarkCrossCompartment above.
+static bool ShouldTraceCrossCompartment(JSTracer* trc, JSObject* src,
+                                        Cell* dstCell) {
+  if (!trc->isMarkingTracer()) {
+    return true;
+  }
+
+  return ShouldMarkCrossCompartment(GCMarker::fromTracer(trc), src, dstCell);
+}
+
+// Value overload: non-GC-thing values never need tracing.
+static bool ShouldTraceCrossCompartment(JSTracer* trc, JSObject* src,
+                                        const Value& val) {
+  return val.isGCThing() &&
+         ShouldTraceCrossCompartment(trc, src, val.toGCThing());
+}
+
+#ifdef DEBUG
+
+// Debug-only: assert that an unmarked |thing| lives in a zone the marker is
+// allowed to mark with its current color (the atoms zone is always allowed).
+inline void js::gc::AssertShouldMarkInZone(GCMarker* marker, Cell* thing) {
+  if (!thing->isMarkedBlack()) {
+    Zone* zone = thing->zone();
+    MOZ_ASSERT(zone->isAtomsZone() ||
+               zone->shouldMarkInZone(marker->markColor()));
+  }
+}
+
+// Debug-only: root tracing by a marking tracer may only happen before the GC
+// starts or during the MarkRoots state.
+void js::gc::AssertRootMarkingPhase(JSTracer* trc) {
+  MOZ_ASSERT_IF(trc->isMarkingTracer(),
+                trc->runtime()->gc.state() == State::NotActive ||
+                    trc->runtime()->gc.state() == State::MarkRoots);
+}
+
+#endif // DEBUG
+
+/*** Tracing Interface ******************************************************/
+
+// Shared implementation behind the public TraceExternalEdge entry points;
+// callers must pass an edge holding a markable value.
+template <typename T>
+static void TraceExternalEdgeHelper(JSTracer* trc, T* thingp,
+                                    const char* name) {
+  MOZ_ASSERT(InternalBarrierMethods<T>::isMarkable(*thingp));
+  TraceEdgeInternal(trc, ConvertToBase(thingp), name);
+}
+
+// Public API: trace a JSObject* edge whose barriers the caller manages
+// manually ("unsafe" because no write barrier bookkeeping is done here).
+JS_PUBLIC_API void js::UnsafeTraceManuallyBarrieredEdge(JSTracer* trc,
+                                                        JSObject** thingp,
+                                                        const char* name) {
+  TraceEdgeInternal(trc, ConvertToBase(thingp), name);
+}
+
+// Shared implementation behind the public JS::TraceRoot entry points; the
+// root slot itself must be non-null (a null *thingp is handled by
+// TraceNullableRoot).
+template <typename T>
+static void TraceRootHelper(JSTracer* trc, T* thingp, const char* name) {
+  MOZ_ASSERT(thingp);
+  js::TraceNullableRoot(trc, thingp, name);
+}
+
+namespace js {
+class AbstractGeneratorObject;
+class SavedFrame;
+} // namespace js
+
+#define DEFINE_TRACE_EXTERNAL_EDGE_FUNCTION(type) \
+ JS_PUBLIC_API void js::gc::TraceExternalEdge(JSTracer* trc, type* thingp, \
+ const char* name) { \
+ TraceExternalEdgeHelper(trc, thingp, name); \
+ }
+
+// Define TraceExternalEdge for each public GC pointer type.
+JS_FOR_EACH_PUBLIC_GC_POINTER_TYPE(DEFINE_TRACE_EXTERNAL_EDGE_FUNCTION)
+JS_FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(DEFINE_TRACE_EXTERNAL_EDGE_FUNCTION)
+
+#undef DEFINE_TRACE_EXTERNAL_EDGE_FUNCTION
+
+#define DEFINE_UNSAFE_TRACE_ROOT_FUNCTION(type) \
+ JS_PUBLIC_API void JS::TraceRoot(JSTracer* trc, type* thingp, \
+ const char* name) { \
+ TraceRootHelper(trc, thingp, name); \
+ }
+
+// Define TraceRoot for each public GC pointer type.
+JS_FOR_EACH_PUBLIC_GC_POINTER_TYPE(DEFINE_UNSAFE_TRACE_ROOT_FUNCTION)
+JS_FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(DEFINE_UNSAFE_TRACE_ROOT_FUNCTION)
+
+// Also, for the moment, define TraceRoot for internal GC pointer types.
+DEFINE_UNSAFE_TRACE_ROOT_FUNCTION(AbstractGeneratorObject*)
+DEFINE_UNSAFE_TRACE_ROOT_FUNCTION(SavedFrame*)
+
+#undef DEFINE_UNSAFE_TRACE_ROOT_FUNCTION
+
+namespace js {
+namespace gc {
+
+#define INSTANTIATE_INTERNAL_TRACE_FUNCTIONS(type) \
+ template void TraceRangeInternal<type>(JSTracer*, size_t len, type*, \
+ const char*);
+
+#define INSTANTIATE_INTERNAL_TRACE_FUNCTIONS_FROM_TRACEKIND(_1, type, _2, _3) \
+ INSTANTIATE_INTERNAL_TRACE_FUNCTIONS(type*)
+
+JS_FOR_EACH_TRACEKIND(INSTANTIATE_INTERNAL_TRACE_FUNCTIONS_FROM_TRACEKIND)
+JS_FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(INSTANTIATE_INTERNAL_TRACE_FUNCTIONS)
+INSTANTIATE_INTERNAL_TRACE_FUNCTIONS(TaggedProto)
+
+#undef INSTANTIATE_INTERNAL_TRACE_FUNCTIONS_FROM_TRACEKIND
+#undef INSTANTIATE_INTERNAL_TRACE_FUNCTIONS
+
+} // namespace gc
+} // namespace js
+
+// In debug builds, makes a note of the current compartment before calling a
+// trace hook or traceChildren() method on a GC thing.
+class MOZ_RAII AutoSetTracingSource {
+#ifndef DEBUG
+ public:
+  // No-op in release builds.
+  template <typename T>
+  AutoSetTracingSource(JSTracer* trc, T* thing) {}
+  ~AutoSetTracingSource() {}
+#else
+  // Marker whose tracingZone/tracingCompartment we set, or null when the
+  // tracer is not a marking tracer (then the destructor does nothing).
+  GCMarker* marker = nullptr;
+
+ public:
+  template <typename T>
+  AutoSetTracingSource(JSTracer* trc, T* thing) {
+    if (trc->isMarkingTracer() && thing) {
+      marker = GCMarker::fromTracer(trc);
+      MOZ_ASSERT(!marker->tracingZone);
+      marker->tracingZone = thing->asTenured().zone();
+      MOZ_ASSERT(!marker->tracingCompartment);
+      marker->tracingCompartment = thing->maybeCompartment();
+    }
+  }
+
+  ~AutoSetTracingSource() {
+    if (marker) {
+      marker->tracingZone = nullptr;
+      marker->tracingCompartment = nullptr;
+    }
+  }
+#endif
+};
+
+// In debug builds, clear the trace hook compartment. This happens after the
+// trace hook has called back into one of our trace APIs and we've checked the
+// traced thing.
+class MOZ_RAII AutoClearTracingSource {
+#ifndef DEBUG
+ public:
+  // No-op in release builds.
+  explicit AutoClearTracingSource(GCMarker* marker) {}
+  explicit AutoClearTracingSource(JSTracer* trc) {}
+  ~AutoClearTracingSource() {}
+#else
+  // Saved state, restored by the destructor. |marker| is null when the
+  // tracer is not a marking tracer.
+  GCMarker* marker = nullptr;
+  JS::Zone* prevZone = nullptr;
+  Compartment* prevCompartment = nullptr;
+
+ public:
+  explicit AutoClearTracingSource(JSTracer* trc) {
+    if (trc->isMarkingTracer()) {
+      marker = GCMarker::fromTracer(trc);
+      prevZone = marker->tracingZone;
+      marker->tracingZone = nullptr;
+      prevCompartment = marker->tracingCompartment;
+      marker->tracingCompartment = nullptr;
+    }
+  }
+  ~AutoClearTracingSource() {
+    if (marker) {
+      marker->tracingZone = prevZone;
+      marker->tracingCompartment = prevCompartment;
+    }
+  }
+#endif
+};
+
+// Trace an edge from |src| into another compartment. The compartment check
+// is suppressed (cross-compartment is expected) and marking-specific rules
+// are applied by ShouldTraceCrossCompartment.
+template <typename T>
+void js::TraceManuallyBarrieredCrossCompartmentEdge(JSTracer* trc,
+                                                    JSObject* src, T* dst,
+                                                    const char* name) {
+  // Clear expected compartment for cross-compartment edge.
+  AutoClearTracingSource acts(trc);
+
+  if (ShouldTraceCrossCompartment(trc, src, *dst)) {
+    TraceEdgeInternal(trc, dst, name);
+  }
+}
+template void js::TraceManuallyBarrieredCrossCompartmentEdge<Value>(
+ JSTracer*, JSObject*, Value*, const char*);
+template void js::TraceManuallyBarrieredCrossCompartmentEdge<JSObject*>(
+ JSTracer*, JSObject*, JSObject**, const char*);
+template void js::TraceManuallyBarrieredCrossCompartmentEdge<BaseScript*>(
+ JSTracer*, JSObject*, BaseScript**, const char*);
+
+// Trace an edge that crosses compartments but stays within one zone, so the
+// usual cross-compartment marking rules (gray unmarking, delayed marking) are
+// unnecessary; only the compartment expectation needs clearing.
+template <typename T>
+void js::TraceSameZoneCrossCompartmentEdge(JSTracer* trc,
+                                           const WriteBarriered<T>* dst,
+                                           const char* name) {
+#ifdef DEBUG
+  if (trc->isMarkingTracer()) {
+    MOZ_ASSERT((*dst)->maybeCompartment(),
+               "Use TraceEdge for GC things without a compartment");
+
+    GCMarker* gcMarker = GCMarker::fromTracer(trc);
+    MOZ_ASSERT_IF(gcMarker->tracingZone,
+                  (*dst)->zone() == gcMarker->tracingZone);
+  }
+
+  // Skip compartment checks for this edge.
+  if (trc->kind() == JS::TracerKind::CompartmentCheck) {
+    return;
+  }
+#endif
+
+  // Clear expected compartment for cross-compartment edge.
+  AutoClearTracingSource acts(trc);
+  TraceEdgeInternal(trc, ConvertToBase(dst->unbarrieredAddress()), name);
+}
+template void js::TraceSameZoneCrossCompartmentEdge(
+ JSTracer*, const WriteBarriered<Shape*>*, const char*);
+
+// Trace the edge from a weak map to one of its keys, which may live in a
+// different compartment than the map.
+template <typename T>
+void js::TraceWeakMapKeyEdgeInternal(JSTracer* trc, Zone* weakMapZone,
+                                     T** thingp, const char* name) {
+  // We can't use ShouldTraceCrossCompartment here because that assumes the
+  // source of the edge is a CCW object which could be used to delay gray
+  // marking. Instead, assert that the weak map zone is in the same marking
+  // state as the target thing's zone and therefore we can go ahead and mark it.
+#ifdef DEBUG
+  auto thing = *thingp;
+  if (trc->isMarkingTracer()) {
+    MOZ_ASSERT(weakMapZone->isGCMarking());
+    MOZ_ASSERT(weakMapZone->gcState() == thing->zone()->gcState());
+  }
+#endif
+
+  // Clear expected compartment for cross-compartment edge.
+  AutoClearTracingSource acts(trc);
+
+  TraceEdgeInternal(trc, thingp, name);
+}
+
+template void js::TraceWeakMapKeyEdgeInternal<JSObject>(JSTracer*, Zone*,
+ JSObject**,
+ const char*);
+template void js::TraceWeakMapKeyEdgeInternal<BaseScript>(JSTracer*, Zone*,
+ BaseScript**,
+ const char*);
+
+// Dispatch on |kind| to trace an untyped Cell* root with its concrete type.
+// Returns the (possibly updated) cell pointer after tracing.
+static Cell* TraceGenericPointerRootAndType(JSTracer* trc, Cell* thing,
+                                            JS::TraceKind kind,
+                                            const char* name) {
+  return MapGCThingTyped(thing, kind, [trc, name](auto t) -> Cell* {
+    TraceRoot(trc, &t, name);
+    return t;
+  });
+}
+
+// Trace a root holding an untyped (possibly null) Cell*, writing back the new
+// location only when tracing moved the cell.
+void js::TraceGenericPointerRoot(JSTracer* trc, Cell** thingp,
+                                 const char* name) {
+  MOZ_ASSERT(thingp);
+  Cell* thing = *thingp;
+  if (!thing) {
+    return;
+  }
+
+  Cell* traced =
+      TraceGenericPointerRootAndType(trc, thing, thing->getTraceKind(), name);
+  // Only write back on change to avoid redundant stores.
+  if (traced != thing) {
+    *thingp = traced;
+  }
+}
+
+// Trace an untyped (possibly null) Cell* edge whose barriers the caller
+// manages, writing back the new location only when the cell moved.
+void js::TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, Cell** thingp,
+                                                  const char* name) {
+  MOZ_ASSERT(thingp);
+  Cell* thing = *thingp;
+  if (!*thingp) {
+    return;
+  }
+
+  auto traced = MapGCThingTyped(thing, thing->getTraceKind(),
+                                [trc, name](auto t) -> Cell* {
+                                  TraceManuallyBarrieredEdge(trc, &t, name);
+                                  return t;
+                                });
+  if (traced != thing) {
+    *thingp = traced;
+  }
+}
+
+// Trace a root holding a tagged GCCellPtr. A cleared result (e.g. from a
+// ClearEdgesTracer) resets the slot to the empty GCCellPtr; a moved cell is
+// rewrapped with the original kind.
+void js::TraceGCCellPtrRoot(JSTracer* trc, JS::GCCellPtr* thingp,
+                            const char* name) {
+  Cell* thing = thingp->asCell();
+  if (!thing) {
+    return;
+  }
+
+  Cell* traced =
+      TraceGenericPointerRootAndType(trc, thing, thingp->kind(), name);
+
+  if (!traced) {
+    *thingp = JS::GCCellPtr();
+  } else if (traced != thingp->asCell()) {
+    *thingp = JS::GCCellPtr(traced, thingp->kind());
+  }
+}
+
+// Edge (non-root) analogue of TraceGCCellPtrRoot for manually barriered
+// tagged GCCellPtr slots.
+void js::TraceManuallyBarrieredGCCellPtr(JSTracer* trc, JS::GCCellPtr* thingp,
+                                         const char* name) {
+  Cell* thing = thingp->asCell();
+  if (!thing) {
+    return;
+  }
+
+  Cell* traced = MapGCThingTyped(thing, thing->getTraceKind(),
+                                 [trc, name](auto t) -> Cell* {
+                                   TraceManuallyBarrieredEdge(trc, &t, name);
+                                   return t;
+                                 });
+
+  if (!traced) {
+    // If we are clearing edges, also erase the type. This happens when using
+    // ClearEdgesTracer.
+    *thingp = JS::GCCellPtr();
+  } else if (traced != thingp->asCell()) {
+    *thingp = JS::GCCellPtr(traced, thingp->kind());
+  }
+}
+
+// Trace an edge holding a tagged pointer type (Value, jsid, TaggedProto).
+// Returns false iff the inner trace cleared the edge.
+template <typename T>
+inline bool TraceTaggedPtrEdge(JSTracer* trc, T* thingp, const char* name) {
+  // Return true by default. For some types the lambda below won't be called.
+  bool ret = true;
+  auto thing = MapGCThingTyped(*thingp, [&](auto thing) {
+    if (!TraceEdgeInternal(trc, &thing, name)) {
+      ret = false;
+      return TaggedPtr<T>::empty();
+    }
+
+    return TaggedPtr<T>::wrap(thing);
+  });
+
+  // Only update *thingp if the value changed, to avoid TSan false positives for
+  // template objects when using DumpHeapTracer or UbiNode tracers while Ion
+  // compiling off-thread.
+  if (thing.isSome() && thing.value() != *thingp) {
+    *thingp = thing.value();
+  }
+
+  return ret;
+}
+
+// Tagged-pointer overloads of TraceEdgeInternal; all forward to
+// TraceTaggedPtrEdge above.
+bool js::gc::TraceEdgeInternal(JSTracer* trc, Value* thingp, const char* name) {
+  return TraceTaggedPtrEdge(trc, thingp, name);
+}
+bool js::gc::TraceEdgeInternal(JSTracer* trc, jsid* thingp, const char* name) {
+  return TraceTaggedPtrEdge(trc, thingp, name);
+}
+bool js::gc::TraceEdgeInternal(JSTracer* trc, TaggedProto* thingp,
+                               const char* name) {
+  return TraceTaggedPtrEdge(trc, thingp, name);
+}
+
+// Trace a contiguous range of |len| edges, skipping non-markable entries.
+// The AutoTracingIndex reports the element index to tracer callbacks.
+template <typename T>
+void js::gc::TraceRangeInternal(JSTracer* trc, size_t len, T* vec,
+                                const char* name) {
+  JS::AutoTracingIndex index(trc);
+  for (auto i : IntegerRange(len)) {
+    if (InternalBarrierMethods<T>::isMarkable(vec[i])) {
+      TraceEdgeInternal(trc, &vec[i], name);
+    }
+    ++index;
+  }
+}
+
+/*** GC Marking Interface ***************************************************/
+
+namespace js {
+
+using HasNoImplicitEdgesType = bool;
+
+template <typename T>
+struct ImplicitEdgeHolderType {
+ using Type = HasNoImplicitEdgesType;
+};
+
+// For now, we only handle JSObject* and BaseScript* keys, but the linear time
+// algorithm can be easily extended by adding in more types here, then making
+// GCMarker::traverse<T> call markImplicitEdges.
+template <>
+struct ImplicitEdgeHolderType<JSObject*> {
+ using Type = JSObject*;
+};
+
+template <>
+struct ImplicitEdgeHolderType<BaseScript*> {
+ using Type = BaseScript*;
+};
+
+// Mark the targets of a vector of ephemeron (weak map) edges whose source has
+// color |srcColor|. Each target gets the minimum of the source color and the
+// edge color, and is only marked when that matches the marker's current color.
+void GCMarker::markEphemeronEdges(EphemeronEdgeVector& edges,
+                                  gc::CellColor srcColor) {
+  // This is called as part of GC weak marking or by barriers outside of GC.
+  MOZ_ASSERT_IF(CurrentThreadIsPerformingGC(),
+                state == MarkingState::WeakMarking);
+
+  DebugOnly<size_t> initialLength = edges.length();
+
+  for (auto& edge : edges) {
+    CellColor targetColor = std::min(srcColor, edge.color);
+    MOZ_ASSERT(CellColor(markColor()) >= targetColor);
+    if (targetColor == markColor()) {
+      ApplyGCThingTyped(
+          edge.target, edge.target->getTraceKind(),
+          [this](auto t) { markAndTraverse<NormalMarkingOptions>(t); });
+    }
+  }
+
+  // The above marking always goes through markAndPush, which will not cause
+  // 'edges' to be appended to while iterating.
+  MOZ_ASSERT(edges.length() == initialLength);
+
+  // This is not just an optimization. When nuking a CCW, we conservatively
+  // mark through the related edges and then lose the CCW->target connection
+  // that induces a sweep group edge. As a result, it is possible for the
+  // delegate zone to get marked later, look up an edge in this table, and
+  // then try to mark something in a Zone that is no longer marking.
+  if (srcColor == CellColor::Black && markColor() == MarkColor::Black) {
+    edges.eraseIf([](auto& edge) { return edge.color == MarkColor::Black; });
+  }
+}
+
+// 'delegate' is no longer the delegate of 'key'. Conservatively mark through
+// the ephemeron edges recorded for the delegate so snapshot-at-beginning is
+// (mostly) preserved; see the caveats below.
+void GCMarker::severWeakDelegate(JSObject* key, JSObject* delegate) {
+  MOZ_ASSERT(CurrentThreadIsMainThread());
+
+  JS::Zone* zone = delegate->zone();
+  if (!zone->needsIncrementalBarrier()) {
+    MOZ_ASSERT(
+        !zone->gcEphemeronEdges(delegate).get(delegate),
+        "non-collecting zone should not have populated gcEphemeronEdges");
+    return;
+  }
+  auto* p = zone->gcEphemeronEdges(delegate).get(delegate);
+  if (!p) {
+    return;
+  }
+
+  // We are losing 3 edges here: key -> delegate, delegate -> key, and
+  // <delegate, map> -> value. Maintain snapshot-at-beginning (hereafter,
+  // S-A-B) by conservatively assuming the delegate will end up black and
+  // marking through the latter 2 edges.
+  //
+  // Note that this does not fully give S-A-B:
+  //
+  // 1. If the map is gray, then the value will only be marked gray here even
+  // though the map could later be discovered to be black.
+  //
+  // 2. If the map has not yet been marked, we won't have any entries to mark
+  // here in the first place.
+  //
+  // 3. We're not marking the delegate, since that would cause eg nukeAllCCWs
+  // to keep everything alive for another collection.
+  //
+  // We can't even assume that the delegate passed in here is live, because we
+  // could have gotten here from nukeAllCCWs, which iterates over all CCWs
+  // including dead ones.
+  //
+  // This is ok because S-A-B is only needed to prevent the case where an
+  // unmarked object is removed from the graph and then re-inserted where it is
+  // reachable only by things that have already been marked. None of the 3
+  // target objects will be re-inserted anywhere as a result of this action.
+
+  EphemeronEdgeVector& edges = p->value;
+  MOZ_ASSERT(markColor() == MarkColor::Black);
+  markEphemeronEdges(edges, MarkColor::Black);
+}
+
+// 'delegate' is now the delegate of 'key'. Update weakmap marking state by
+// marking through any ephemeron edges already recorded under 'key'.
+void GCMarker::restoreWeakDelegate(JSObject* key, JSObject* delegate) {
+  MOZ_ASSERT(CurrentThreadIsMainThread());
+
+  if (!key->zone()->needsIncrementalBarrier()) {
+    // Temporary diagnostic printouts for when this would have asserted.
+    if (key->zone()->gcEphemeronEdges(key).has(key)) {
+      fprintf(stderr, "key zone: %d\n", int(key->zone()->gcState()));
+#ifdef DEBUG
+      key->dump();
+#endif
+      fprintf(stderr, "delegate zone: %d\n", int(delegate->zone()->gcState()));
+#ifdef DEBUG
+      delegate->dump();
+#endif
+    }
+    MOZ_ASSERT(
+        !key->zone()->gcEphemeronEdges(key).has(key),
+        "non-collecting zone should not have populated gcEphemeronEdges");
+    return;
+  }
+  if (!delegate->zone()->needsIncrementalBarrier()) {
+    // Normally we should not have added the key -> value edge if the delegate
+    // zone is not marking (because the delegate would have been seen as black,
+    // so we would mark the key immediately instead). But if there wasn't a
+    // delegate (the key was nuked), then we won't have consulted it. So we
+    // can't do the same assertion as above.
+    //
+    // Specifically, the sequence would be:
+    // 1. Nuke the key.
+    // 2. Start the incremental GC.
+    // 3. Mark the WeakMap. Insert a key->value edge with a DeadObjectProxy key.
+    // 4. Un-nuke the key with a delegate in a nonmarking Zone.
+    //
+    // The result is an ephemeron edge (from <map,key> to value, but stored
+    // as key to value) involving a key with a delegate in a nonmarking Zone,
+    // something that ordinarily would not happen.
+    return;
+  }
+  auto* p = key->zone()->gcEphemeronEdges(key).get(key);
+  if (!p) {
+    return;
+  }
+
+  // Similar to severWeakDelegate above, mark through the key -> value edge.
+  EphemeronEdgeVector& edges = p->value;
+  MOZ_ASSERT(markColor() == MarkColor::Black);
+  markEphemeronEdges(edges, MarkColor::Black);
+}
+
+// During weak marking, mark the ephemeron edges implied by |markedThing|
+// being a weak map key (or delegate). No-op outside weak marking.
+template <typename T>
+void GCMarker::markImplicitEdgesHelper(T markedThing) {
+  if (!isWeakMarking()) {
+    return;
+  }
+
+  Zone* zone = markedThing->asTenured().zone();
+  MOZ_ASSERT(zone->isGCMarking());
+  MOZ_ASSERT(!zone->isGCSweeping());
+
+  auto p = zone->gcEphemeronEdges().get(markedThing);
+  if (!p) {
+    return;
+  }
+  EphemeronEdgeVector& edges = p->value;
+
+  // markedThing might be a key in a debugger weakmap, which can end up marking
+  // values that are in a different compartment.
+  AutoClearTracingSource acts(tracer());
+
+  CellColor thingColor = gc::detail::GetEffectiveColor(this, markedThing);
+  markEphemeronEdges(edges, thingColor);
+}
+
+// Types with no implicit edges (see ImplicitEdgeHolderType) compile to a
+// no-op.
+template <>
+void GCMarker::markImplicitEdgesHelper(HasNoImplicitEdgesType) {}
+
+template <typename T>
+void GCMarker::markImplicitEdges(T* thing) {
+  markImplicitEdgesHelper<typename ImplicitEdgeHolderType<T*>::Type>(thing);
+}
+
+template void GCMarker::markImplicitEdges(JSObject*);
+template void GCMarker::markImplicitEdges(BaseScript*);
+
+template void GCMarker::markImplicitEdges(JSObject*);
+template void GCMarker::markImplicitEdges(BaseScript*);
+
+} // namespace js
+
+// Return whether the marker should mark |thing| at all: nursery things and
+// zones not participating in the current collection are skipped.
+template <typename T>
+static inline bool ShouldMark(GCMarker* gcmarker, T* thing) {
+  // We may encounter nursery things during normal marking since we don't
+  // collect the nursery at the start of every GC slice.
+  if (!thing->isTenured()) {
+    return false;
+  }
+
+  // Don't mark things outside a zone if we are in a per-zone GC. Don't mark
+  // permanent shared things owned by other runtimes (we will never observe
+  // their zone being collected).
+  Zone* zone = thing->asTenured().zoneFromAnyThread();
+  return zone->shouldMarkInZone(gcmarker->markColor());
+}
+
+template <uint32_t opts>
+MarkingTracerT<opts>::MarkingTracerT(JSRuntime* runtime, GCMarker* marker)
+    : GenericTracerImpl<MarkingTracerT<opts>>(
+          runtime, JS::TracerKind::Marking,
+          JS::TraceOptions(JS::WeakMapTraceAction::Expand,
+                           JS::WeakEdgeTraceAction::Skip)) {
+  // Marking tracers are owned by (and part of) a GCMarker.
+  MOZ_ASSERT(this == marker->tracer());
+  MOZ_ASSERT(getMarker() == marker);
+}
+
+// Recover the owning GCMarker from this embedded tracer.
+template <uint32_t opts>
+MOZ_ALWAYS_INLINE GCMarker* MarkingTracerT<opts>::getMarker() {
+  return GCMarker::fromTracer(this);
+}
+
+// Tracer callback for each edge visited while marking: filter with
+// ShouldMark, then mark the target and traverse its children.
+template <uint32_t opts>
+template <typename T>
+void MarkingTracerT<opts>::onEdge(T** thingp, const char* name) {
+  T* thing = *thingp;
+
+  // Do per-type marking precondition checks.
+  GCMarker* marker = getMarker();
+  if (!ShouldMark(marker, thing)) {
+    // Skipped things must already be effectively black.
+    MOZ_ASSERT(gc::detail::GetEffectiveColor(marker, thing) ==
+               js::gc::CellColor::Black);
+    return;
+  }
+
+  MOZ_ASSERT(!IsOwnedByOtherRuntime(this->runtime(), thing));
+
+#ifdef DEBUG
+  CheckMarkedThing(marker, thing);
+#endif
+
+  AutoClearTracingSource acts(this);
+  marker->markAndTraverse<opts>(thing);
+}
+
+#define INSTANTIATE_ONEDGE_METHOD(name, type, _1, _2) \
+ template void MarkingTracerT<NormalMarkingOptions>::onEdge<type>( \
+ type * *thingp, const char* name); \
+ template void \
+ MarkingTracerT<MarkingOptions::MarkRootCompartments>::onEdge<type>( \
+ type * *thingp, const char* name);
+JS_FOR_EACH_TRACEKIND(INSTANTIATE_ONEDGE_METHOD)
+#undef INSTANTIATE_ONEDGE_METHOD
+
+// Barrier fast path: mark |thing| (of dynamic trace kind |kind|) directly,
+// asserting rather than re-checking the ShouldMark preconditions.
+static void TraceEdgeForBarrier(GCMarker* gcmarker, TenuredCell* thing,
+                                JS::TraceKind kind) {
+  // Dispatch to markAndTraverse without checking ShouldMark.
+  ApplyGCThingTyped(thing, kind, [gcmarker](auto thing) {
+    MOZ_ASSERT(ShouldMark(gcmarker, thing));
+    CheckTracedThing(gcmarker->tracer(), thing);
+    AutoClearTracingSource acts(gcmarker->tracer());
+    gcmarker->markAndTraverse<NormalMarkingOptions>(thing);
+  });
+}
+
+JS_PUBLIC_API void js::gc::PerformIncrementalReadBarrier(JS::GCCellPtr thing) {
+  // Optimized marking for read barriers. This is called from
+  // ExposeGCThingToActiveJS which has already checked the prerequisites for
+  // performing a read barrier. This means we can skip a bunch of checks and
+  // call into the tracer directly.
+
+  MOZ_ASSERT(thing);
+  MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
+
+  TenuredCell* cell = &thing.asCell()->asTenured();
+  // The caller guarantees the cell is not already marked black.
+  MOZ_ASSERT(!cell->isMarkedBlack());
+
+  Zone* zone = cell->zone();
+  MOZ_ASSERT(zone->needsIncrementalBarrier());
+
+  // Skip dispatching on known tracer type.
+  GCMarker* gcmarker = GCMarker::fromTracer(zone->barrierTracer());
+  TraceEdgeForBarrier(gcmarker, cell, thing.kind());
+}
+
+void js::gc::PerformIncrementalReadBarrier(TenuredCell* cell) {
+  // Internal version of previous function. Unlike the public entry point this
+  // tolerates an already-black cell and returns early.
+
+  MOZ_ASSERT(cell);
+  MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
+
+  if (cell->isMarkedBlack()) {
+    return;
+  }
+
+  Zone* zone = cell->zone();
+  MOZ_ASSERT(zone->needsIncrementalBarrier());
+
+  // Skip dispatching on known tracer type.
+  GCMarker* gcmarker = GCMarker::fromTracer(zone->barrierTracer());
+  TraceEdgeForBarrier(gcmarker, cell, cell->getTraceKind());
+}
+
+void js::gc::PerformIncrementalPreWriteBarrier(TenuredCell* cell) {
+  // The same as PerformIncrementalReadBarrier except for an extra check on the
+  // runtime for cells in atoms zone.
+
+  Zone* zone = cell->zoneFromAnyThread();
+  MOZ_ASSERT(zone->needsIncrementalBarrier());
+
+  MOZ_ASSERT(cell);
+  if (cell->isMarkedBlack()) {
+    return;
+  }
+
+  // Barriers can be triggered off the main thread by background finalization of
+  // HeapPtrs to the atoms zone. We don't want to trigger the barrier in this
+  // case.
+  bool checkThread = zone->isAtomsZone();
+  JSRuntime* runtime = cell->runtimeFromAnyThread();
+  if (checkThread && !CurrentThreadCanAccessRuntime(runtime)) {
+    MOZ_ASSERT(CurrentThreadIsGCFinalizing());
+    return;
+  }
+
+  MOZ_ASSERT(CurrentThreadIsMainThread());
+  MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
+
+  // Skip dispatching on known tracer type.
+  GCMarker* gcmarker = GCMarker::fromTracer(zone->barrierTracer());
+  TraceEdgeForBarrier(gcmarker, cell, cell->getTraceKind());
+}
+
+// Pre-write barrier used while flattening ropes; see
+// PreWriteBarrierDuringFlattening in Marking-inl.h for the caller-side checks.
+void js::gc::PerformIncrementalBarrierDuringFlattening(JSString* str) {
+  TenuredCell* cell = &str->asTenured();
+
+  // Skip eager marking of ropes during flattening. Their children will also be
+  // barriered by flattening process so we don't need to traverse them.
+  if (str->isRope()) {
+    cell->markBlack();
+    return;
+  }
+
+  PerformIncrementalPreWriteBarrier(cell);
+}
+
+// Mark |thing| if currently unmarked and, if this marked it, visit its
+// children using the per-type strategy chosen by the traverse() overloads.
+template <uint32_t opts, typename T>
+void js::GCMarker::markAndTraverse(T* thing) {
+  if (mark<opts>(thing)) {
+    // We only mark permanent things during initialization.
+    MOZ_ASSERT_IF(thing->isPermanentAndMayBeShared(),
+                  !runtime()->permanentAtomsPopulated());
+
+    // We don't need to pass MarkRootCompartments options on to children.
+    constexpr uint32_t traverseOpts =
+        opts & ~MarkingOptions::MarkRootCompartments;
+
+    traverse<traverseOpts>(thing);
+
+    if constexpr (bool(opts & MarkingOptions::MarkRootCompartments)) {
+      // Mark the compartment as live.
+      SetCompartmentHasMarkedCells(thing);
+    }
+  }
+}
+
+// The |traverse| method overloads select the traversal strategy for each kind.
+//
+// There are three possible strategies:
+//
+// 1. traceChildren
+//
+// The simplest traversal calls out to the fully generic traceChildren
+// function to visit the child edges. In the absence of other traversal
+// mechanisms, this function will rapidly grow the stack past its bounds and
+// crash the process. Thus, this generic tracing should only be used in cases
+// where subsequent tracing will not recurse.
+//
+// 2. scanChildren
+//
+// Strings, Shapes, and Scopes are extremely common, but have simple patterns
+// of recursion. We traverse trees of these edges immediately, with
+// aggressive, manual inlining, implemented by eagerlyTraceChildren.
+//
+// 3. pushThing
+//
+// Objects are extremely common and can contain arbitrarily nested graphs, so
+// are not trivially inlined. In this case we use the mark stack to control
+// recursion. JitCode shares none of these properties, but is included for
+// historical reasons. JSScript normally cannot recurse, but may be used as a
+// weakmap key and thereby recurse into weakmapped values.
+
+// Strategy 1 (traceChildren): kinds whose tracing does not recurse deeply.
+template <uint32_t opts>
+void GCMarker::traverse(BaseShape* thing) {
+  traceChildren<opts>(thing);
+}
+template <uint32_t opts>
+void GCMarker::traverse(GetterSetter* thing) {
+  traceChildren<opts>(thing);
+}
+template <uint32_t opts>
+void GCMarker::traverse(JS::Symbol* thing) {
+  traceChildren<opts>(thing);
+}
+template <uint32_t opts>
+void GCMarker::traverse(JS::BigInt* thing) {
+  traceChildren<opts>(thing);
+}
+template <uint32_t opts>
+void GCMarker::traverse(RegExpShared* thing) {
+  traceChildren<opts>(thing);
+}
+// Strategy 2 (scanChildren): common kinds with simple recursion patterns,
+// traced eagerly and inline.
+template <uint32_t opts>
+void GCMarker::traverse(JSString* thing) {
+  scanChildren<opts>(thing);
+}
+template <uint32_t opts>
+void GCMarker::traverse(Shape* thing) {
+  scanChildren<opts>(thing);
+}
+template <uint32_t opts>
+void GCMarker::traverse(PropMap* thing) {
+  scanChildren<opts>(thing);
+}
+template <uint32_t opts>
+void GCMarker::traverse(js::Scope* thing) {
+  scanChildren<opts>(thing);
+}
+// Strategy 3 (pushThing): kinds that may recurse arbitrarily; deferred via
+// the mark stack.
+template <uint32_t opts>
+void GCMarker::traverse(JSObject* thing) {
+  pushThing<opts>(thing);
+}
+template <uint32_t opts>
+void GCMarker::traverse(jit::JitCode* thing) {
+  pushThing<opts>(thing);
+}
+template <uint32_t opts>
+void GCMarker::traverse(BaseScript* thing) {
+  pushThing<opts>(thing);
+}
+
+// Strategy 1 implementation: dispatch to the type's generic trace hook.
+template <uint32_t opts, typename T>
+void js::GCMarker::traceChildren(T* thing) {
+  MOZ_ASSERT(!thing->isPermanentAndMayBeShared());
+  MOZ_ASSERT(thing->isMarkedAny());
+  AutoSetTracingSource asts(tracer(), thing);
+  thing->traceChildren(tracer());
+}
+
+// Strategy 2 implementation: eagerly trace children with manual inlining.
+template <uint32_t opts, typename T>
+void js::GCMarker::scanChildren(T* thing) {
+  MOZ_ASSERT(!thing->isPermanentAndMayBeShared());
+  MOZ_ASSERT(thing->isMarkedAny());
+  eagerlyMarkChildren<opts>(thing);
+}
+
+// Strategy 3 implementation: defer by pushing the thing onto the mark stack.
+template <uint32_t opts, typename T>
+void js::GCMarker::pushThing(T* thing) {
+  MOZ_ASSERT(!thing->isPermanentAndMayBeShared());
+  MOZ_ASSERT(thing->isMarkedAny());
+  pushTaggedPtr(thing);
+}
+
+// Explicit instantiations for the JSObject entry points used elsewhere.
+template void js::GCMarker::markAndTraverse<NormalMarkingOptions, JSObject>(
+    JSObject* thing);
+template void js::GCMarker::markAndTraverse<
+    MarkingOptions::MarkRootCompartments, JSObject>(JSObject* thing);
+
+#ifdef DEBUG
+// Toggle the debug-only atom marking check; must actually change the state.
+void GCMarker::setCheckAtomMarking(bool check) {
+  MOZ_ASSERT(check != checkAtomMarking);
+  checkAtomMarking = check;
+}
+#endif
+
+// Debug-only validation of a source->target edge about to be traversed:
+// checks permanent-thing, zone, atom-bitmap and compartment invariants.
+// Compiles to nothing in release builds.
+template <typename S, typename T>
+inline void GCMarker::checkTraversedEdge(S source, T* target) {
+#ifdef DEBUG
+  // Atoms and Symbols do not have or mark their internal pointers,
+  // respectively.
+  MOZ_ASSERT(!source->isPermanentAndMayBeShared());
+
+  // Shared things are already black so we will not mark them.
+  if (target->isPermanentAndMayBeShared()) {
+    Zone* zone = target->zoneFromAnyThread();
+    MOZ_ASSERT(!zone->wasGCStarted());
+    MOZ_ASSERT(!zone->needsIncrementalBarrier());
+    MOZ_ASSERT(target->isMarkedBlack());
+    MOZ_ASSERT(!target->maybeCompartment());
+    return;
+  }
+
+  Zone* sourceZone = source->zone();
+  Zone* targetZone = target->zone();
+
+  // Atoms and Symbols do not have access to a compartment pointer, or we'd need
+  // to adjust the subsequent check to catch that case.
+  MOZ_ASSERT_IF(targetZone->isAtomsZone(), !target->maybeCompartment());
+
+  // The Zones must match, unless the target is an atom.
+  MOZ_ASSERT(targetZone == sourceZone || targetZone->isAtomsZone());
+
+  // If we are marking an atom, that atom must be marked in the source zone's
+  // atom bitmap.
+  if (checkAtomMarking && !sourceZone->isAtomsZone() &&
+      targetZone->isAtomsZone()) {
+    MOZ_ASSERT(target->runtimeFromAnyThread()->gc.atomMarking.atomIsMarked(
+        sourceZone, reinterpret_cast<TenuredCell*>(target)));
+  }
+
+  // If we have access to a compartment pointer for both things, they must
+  // match.
+  MOZ_ASSERT_IF(source->maybeCompartment() && target->maybeCompartment(),
+                source->maybeCompartment() == target->maybeCompartment());
+#endif
+}
+
+// Validate then mark-and-traverse an edge to a typed target.
+template <uint32_t opts, typename S, typename T>
+void js::GCMarker::markAndTraverseEdge(S source, T* target) {
+  checkTraversedEdge(source, target);
+  markAndTraverse<opts>(target);
+}
+
+// Overload for tagged GC thing edges: dispatch on the runtime type and
+// forward to the typed overload above.
+template <uint32_t opts, typename S, typename T>
+void js::GCMarker::markAndTraverseEdge(S source, const T& thing) {
+  ApplyGCThingTyped(thing, [this, source](auto t) {
+    this->markAndTraverseEdge<opts>(source, t);
+  });
+}
+
+// Attempt to mark |thing| in the current mark color. Returns true if this
+// call transitioned it from unmarked to marked (i.e. the caller should
+// traverse its children); false if it was nursery-allocated or already
+// marked.
+template <uint32_t opts, typename T>
+bool js::GCMarker::mark(T* thing) {
+  // Nursery things are handled by minor GC, not by this marker.
+  if (!thing->isTenured()) {
+    return false;
+  }
+
+  AssertShouldMarkInZone(this, thing);
+
+  // Kinds that can never be gray are always marked black regardless of the
+  // marker's current color.
+  MarkColor color =
+      TraceKindCanBeGray<T>::value ? markColor() : MarkColor::Black;
+
+  if constexpr (bool(opts & MarkingOptions::ParallelMarking)) {
+    // Parallel marking requires an atomic update of the mark bits.
+    return thing->asTenured().markIfUnmarkedAtomic(color);
+  }
+
+  return thing->asTenured().markIfUnmarked(color);
+}
+
+/*** Mark-stack Marking *****************************************************/
+
+// Invoke the JSClass trace hook on |obj| if its class defines one, recording
+// |obj| as the tracing source for the duration of the call.
+static inline void CallTraceHook(JSTracer* trc, JSObject* obj) {
+  const JSClass* clasp = obj->getClass();
+  MOZ_ASSERT(clasp);
+
+  if (!clasp->hasTrace()) {
+    return;
+  }
+
+  AutoSetTracingSource asts(trc, obj);
+  clasp->doTrace(trc, obj);
+}
+
+// Map the current black-marking stats phase onto its gray-marking
+// counterpart. Only MARK and MARK_WEAK may be active when gray marking
+// starts; anything else is a bug.
+static gcstats::PhaseKind GrayMarkingPhaseForCurrentPhase(
+    const gcstats::Statistics& stats) {
+  using namespace gcstats;
+  PhaseKind current = stats.currentPhaseKind();
+  if (current == PhaseKind::MARK) {
+    return PhaseKind::MARK_GRAY;
+  }
+  if (current == PhaseKind::MARK_WEAK) {
+    return PhaseKind::MARK_GRAY_WEAK;
+  }
+  MOZ_CRASH("Unexpected current phase");
+}
+
+// Donate part of |src|'s mark stack to the idle marker |dst| (parallel
+// marking work stealing). |dst| must be empty and |src| must have enough
+// entries to be worth donating.
+void GCMarker::moveWork(GCMarker* dst, GCMarker* src) {
+  MOZ_ASSERT(dst->stack.isEmpty());
+  MOZ_ASSERT(src->canDonateWork());
+
+  MarkStack::moveWork(dst->stack, src->stack);
+}
+
+// Drain the mark stack until done or the slice budget runs out. Returns true
+// if marking finished, false if interrupted by the budget.
+bool GCMarker::markUntilBudgetExhausted(SliceBudget& budget,
+                                        ShouldReportMarkTime reportTime) {
+#ifdef DEBUG
+  // Enable strict compartment checking for the duration of this call only.
+  MOZ_ASSERT(!strictCompartmentChecking);
+  strictCompartmentChecking = true;
+  auto acc = mozilla::MakeScopeExit([&] { strictCompartmentChecking = false; });
+#endif
+
+  if (budget.isOverBudget()) {
+    return false;
+  }
+
+  return doMarking<NormalMarkingOptions>(budget, reportTime);
+}
+
+// Core marking loop: drain black entries, then gray entries (reporting gray
+// time under its own stats phase), then process arenas whose children were
+// delayed due to OOM. Returns false if the budget interrupted marking.
+template <uint32_t opts>
+bool GCMarker::doMarking(SliceBudget& budget, ShouldReportMarkTime reportTime) {
+  GCRuntime& gc = runtime()->gc;
+
+  // This method leaves the mark color as it found it.
+
+  if (hasBlackEntries() && !markOneColor<opts, MarkColor::Black>(budget)) {
+    return false;
+  }
+
+  if (hasGrayEntries()) {
+    mozilla::Maybe<gcstats::AutoPhase> ap;
+    if (reportTime) {
+      auto& stats = runtime()->gc.stats();
+      ap.emplace(stats, GrayMarkingPhaseForCurrentPhase(stats));
+    }
+
+    if (!markOneColor<opts, MarkColor::Gray>(budget)) {
+      return false;
+    }
+  }
+
+  // Mark children of things that caused too deep recursion during the above
+  // tracing. All normal marking happens before any delayed marking.
+  if (gc.hasDelayedMarking()) {
+    gc.markAllDelayedChildren(reportTime);
+  }
+
+  MOZ_ASSERT(!gc.hasDelayedMarking());
+  MOZ_ASSERT(isDrained());
+
+  return true;
+}
+
+// Drain the mark stack for a single color. Returns true when the stack for
+// that color is empty, false if the budget was exhausted first.
+template <uint32_t opts, MarkColor color>
+bool GCMarker::markOneColor(SliceBudget& budget) {
+  // Temporarily switch to |color|; restored on scope exit.
+  AutoSetMarkColor setColor(*this, color);
+
+  while (processMarkStackTop<opts>(budget)) {
+    if (stack.isEmpty()) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// Parallel-marking variant of markOneColor: drain this marker's stack in the
+// current color, periodically donating work to waiting tasks.
+bool GCMarker::markCurrentColorInParallel(SliceBudget& budget) {
+  ParallelMarker::AtomicCount& waitingTaskCount =
+      parallelMarker_->waitingTaskCountRef();
+
+  while (processMarkStackTop<MarkingOptions::ParallelMarking>(budget)) {
+    if (stack.isEmpty()) {
+      return true;
+    }
+
+    // TODO: It might be better to only check this occasionally, possibly
+    // combined with the slice budget check. Experiments with giving this its
+    // own counter resulted in worse performance.
+    if (waitingTaskCount && canDonateWork()) {
+      parallelMarker_->donateWorkFrom(this);
+    }
+  }
+
+  return false;
+}
+
+// Debug-only check that a slot edge between two objects does not cross
+// compartments; crashes with a diagnostic message if it does.
+static inline void CheckForCompartmentMismatch(JSObject* obj, JSObject* obj2) {
+#ifdef DEBUG
+  if (MOZ_LIKELY(obj->compartment() == obj2->compartment())) {
+    return;
+  }
+  fprintf(
+      stderr,
+      "Compartment mismatch in pointer from %s object slot to %s object\n",
+      obj->getClass()->name, obj2->getClass()->name);
+  MOZ_CRASH("Compartment mismatch");
+#endif
+}
+
+// Number of fixed slots actually in use: the slot span may be shorter than
+// the fixed-slot capacity.
+static inline size_t NumUsedFixedSlots(NativeObject* obj) {
+  size_t nfixed = obj->numFixedSlots();
+  size_t span = obj->slotSpan();
+  return span < nfixed ? span : nfixed;
+}
+
+// Number of dynamic slots in use: slots beyond the fixed ones, or zero when
+// the slot span fits entirely in fixed storage.
+static inline size_t NumUsedDynamicSlots(NativeObject* obj) {
+  size_t nfixed = obj->numFixedSlots();
+  size_t nslots = obj->slotSpan();
+  return nslots > nfixed ? nslots - nfixed : 0;
+}
+
+// Process one entry from the top of the mark stack. Returns true to continue
+// processing, false if the slice budget was exhausted (in which case any
+// partially-scanned slot range has been pushed back onto the stack).
+template <uint32_t opts>
+inline bool GCMarker::processMarkStackTop(SliceBudget& budget) {
+  /*
+   * This function uses explicit goto and scans objects directly. This allows us
+   * to eliminate tail recursion and significantly improve the marking
+   * performance, see bug 641025.
+   *
+   * Note that the mutator can change the size and layout of objects between
+   * marking slices, so we must check slots and element ranges read from the
+   * stack.
+   */
+
+  MOZ_ASSERT(!stack.isEmpty());
+  MOZ_ASSERT_IF(markColor() == MarkColor::Gray, !hasBlackEntries());
+
+  JSObject* obj;             // The object being scanned.
+  SlotsOrElementsKind kind;  // The kind of slot range being scanned, if any.
+  HeapSlot* base;            // Slot range base pointer.
+  size_t index;              // Index of the next slot to mark.
+  size_t end;                // End of slot range to mark.
+
+  // Resume a previously-interrupted slot or element range, re-reading the
+  // object's current layout in case the mutator changed it.
+  if (stack.peekTag() == MarkStack::SlotsOrElementsRangeTag) {
+    auto range = stack.popSlotsOrElementsRange();
+    obj = range.ptr().asRangeObject();
+    NativeObject* nobj = &obj->as<NativeObject>();
+    kind = range.kind();
+    index = range.start();
+
+    switch (kind) {
+      case SlotsOrElementsKind::FixedSlots: {
+        base = nobj->fixedSlots();
+        end = NumUsedFixedSlots(nobj);
+        break;
+      }
+
+      case SlotsOrElementsKind::DynamicSlots: {
+        base = nobj->slots_;
+        end = NumUsedDynamicSlots(nobj);
+        break;
+      }
+
+      case SlotsOrElementsKind::Elements: {
+        base = nobj->getDenseElements();
+
+        // Account for shifted elements.
+        size_t numShifted = nobj->getElementsHeader()->numShiftedElements();
+        size_t initlen = nobj->getDenseInitializedLength();
+        index = std::max(index, numShifted) - numShifted;
+        end = initlen;
+        break;
+      }
+
+      case SlotsOrElementsKind::Unused: {
+        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unused SlotsOrElementsKind");
+      }
+    }
+
+    goto scan_value_range;
+  }
+
+  budget.step();
+  if (budget.isOverBudget()) {
+    return false;
+  }
+
+  // Otherwise the top of the stack is a single tagged cell pointer.
+  {
+    MarkStack::TaggedPtr ptr = stack.popPtr();
+    switch (ptr.tag()) {
+      case MarkStack::ObjectTag: {
+        obj = ptr.as<JSObject>();
+        AssertShouldMarkInZone(this, obj);
+        goto scan_obj;
+      }
+
+      case MarkStack::JitCodeTag: {
+        auto* code = ptr.as<jit::JitCode>();
+        AutoSetTracingSource asts(tracer(), code);
+        code->traceChildren(tracer());
+        return true;
+      }
+
+      case MarkStack::ScriptTag: {
+        auto* script = ptr.as<BaseScript>();
+        if constexpr (bool(opts & MarkingOptions::MarkImplicitEdges)) {
+          markImplicitEdges(script);
+        }
+        AutoSetTracingSource asts(tracer(), script);
+        script->traceChildren(tracer());
+        return true;
+      }
+
+      default:
+        MOZ_CRASH("Invalid tag in mark stack");
+    }
+  }
+
+  return true;
+
+scan_value_range:
+  // Scan Values in [index, end) of |base|, marking any GC things they hold.
+  while (index < end) {
+    budget.step();
+    if (budget.isOverBudget()) {
+      // Save the unscanned remainder of the range for the next slice.
+      pushValueRange(obj, kind, index, end);
+      return false;
+    }
+
+    const Value& v = base[index];
+    index++;
+
+    if (v.isString()) {
+      markAndTraverseEdge<opts>(obj, v.toString());
+    } else if (v.hasObjectPayload()) {
+      JSObject* obj2 = &v.getObjectPayload();
+#ifdef DEBUG
+      if (!obj2) {
+        fprintf(stderr,
+                "processMarkStackTop found ObjectValue(nullptr) "
+                "at %zu Values from end of range in object:\n",
+                size_t(end - (index - 1)));
+        obj->dump();
+      }
+#endif
+      CheckForCompartmentMismatch(obj, obj2);
+      if (mark<opts>(obj2)) {
+        // Save the rest of this value range for later and start scanning obj2's
+        // children.
+        pushValueRange(obj, kind, index, end);
+        obj = obj2;
+        goto scan_obj;
+      }
+    } else if (v.isSymbol()) {
+      markAndTraverseEdge<opts>(obj, v.toSymbol());
+    } else if (v.isBigInt()) {
+      markAndTraverseEdge<opts>(obj, v.toBigInt());
+    } else if (v.isPrivateGCThing()) {
+      // v.toGCCellPtr cannot be inlined, so construct one manually.
+      Cell* cell = v.toGCThing();
+      markAndTraverseEdge<opts>(obj, JS::GCCellPtr(cell, cell->getTraceKind()));
+    }
+  }
+
+  return true;
+
+scan_obj : {
+  // Trace a newly-marked object: shape, class hook, then its slots/elements
+  // ranges (which fall through into scan_value_range above).
+  AssertShouldMarkInZone(this, obj);
+
+  if constexpr (bool(opts & MarkingOptions::MarkImplicitEdges)) {
+    markImplicitEdges(obj);
+  }
+  markAndTraverseEdge<opts>(obj, obj->shape());
+
+  CallTraceHook(tracer(), obj);
+
+  if (!obj->is<NativeObject>()) {
+    return true;
+  }
+
+  NativeObject* nobj = &obj->as<NativeObject>();
+
+  unsigned nslots = nobj->slotSpan();
+
+  do {
+    if (nobj->hasEmptyElements()) {
+      break;
+    }
+
+    base = nobj->getDenseElements();
+    kind = SlotsOrElementsKind::Elements;
+    index = 0;
+    end = nobj->getDenseInitializedLength();
+
+    if (!nslots) {
+      // No slots at all: scan the elements inline.
+      goto scan_value_range;
+    }
+    // Defer elements; scan the slots first.
+    pushValueRange(nobj, kind, index, end);
+  } while (false);
+
+  unsigned nfixed = nobj->numFixedSlots();
+
+  base = nobj->fixedSlots();
+  kind = SlotsOrElementsKind::FixedSlots;
+  index = 0;
+
+  if (nslots > nfixed) {
+    // Defer the fixed slots and scan the dynamic slots inline.
+    pushValueRange(nobj, kind, index, nfixed);
+    kind = SlotsOrElementsKind::DynamicSlots;
+    base = nobj->slots_;
+    end = nslots - nfixed;
+    goto scan_value_range;
+  }
+
+  MOZ_ASSERT(nslots <= nobj->numFixedSlots());
+  end = nslots;
+  goto scan_value_range;
+}
+}
+
+/*** Mark Stack *************************************************************/
+
+static_assert(sizeof(MarkStack::TaggedPtr) == sizeof(uintptr_t),
+              "A TaggedPtr should be the same size as a pointer");
+static_assert((sizeof(MarkStack::SlotsOrElementsRange) % sizeof(uintptr_t)) ==
+                  0,
+              "SlotsOrElementsRange size should be a multiple of "
+              "the pointer size");
+
+// Number of stack words occupied by a SlotsOrElementsRange entry.
+static const size_t ValueRangeWords =
+    sizeof(MarkStack::SlotsOrElementsRange) / sizeof(uintptr_t);
+
+// Compile-time mapping from a markable C++ pointer type to its mark stack
+// tag; used by the typed push/as<T> helpers below.
+template <typename T>
+struct MapTypeToMarkStackTag {};
+template <>
+struct MapTypeToMarkStackTag<JSObject*> {
+  static const auto value = MarkStack::ObjectTag;
+};
+template <>
+struct MapTypeToMarkStackTag<jit::JitCode*> {
+  static const auto value = MarkStack::JitCodeTag;
+};
+template <>
+struct MapTypeToMarkStackTag<BaseScript*> {
+  static const auto value = MarkStack::ScriptTag;
+};
+
+#ifdef DEBUG
+// True if |tag| marks a two-word SlotsOrElementsRange entry.
+static inline bool TagIsRangeTag(MarkStack::Tag tag) {
+  return tag == MarkStack::SlotsOrElementsRangeTag;
+}
+#endif
+
+// Pack a tag into the low bits of a cell pointer.
+inline MarkStack::TaggedPtr::TaggedPtr(Tag tag, Cell* ptr)
+    : bits(tag | uintptr_t(ptr)) {
+  assertValid();
+}
+
+// Raw tag bits without validity assertions (used by indexIsEntryBase, which
+// may look at poisoned or mid-range words).
+inline uintptr_t MarkStack::TaggedPtr::tagUnchecked() const {
+  return bits & TagMask;
+}
+
+inline MarkStack::Tag MarkStack::TaggedPtr::tag() const {
+  auto tag = Tag(bits & TagMask);
+  MOZ_ASSERT(tag <= LastTag);
+  return tag;
+}
+
+// The cell pointer with the tag bits stripped.
+inline Cell* MarkStack::TaggedPtr::ptr() const {
+  return reinterpret_cast<Cell*>(bits & ~TagMask);
+}
+
+inline void MarkStack::TaggedPtr::assertValid() const {
+  (void)tag();
+  MOZ_ASSERT(IsCellPointerValid(ptr()));
+}
+
+// Checked downcast to the concrete type implied by the tag.
+template <typename T>
+inline T* MarkStack::TaggedPtr::as() const {
+  MOZ_ASSERT(tag() == MapTypeToMarkStackTag<T*>::value);
+  MOZ_ASSERT(ptr()->isTenured());
+  MOZ_ASSERT(ptr()->is<T>());
+  return static_cast<T*>(ptr());
+}
+
+// The object of a SlotsOrElementsRange entry.
+inline JSObject* MarkStack::TaggedPtr::asRangeObject() const {
+  MOZ_ASSERT(TagIsRangeTag(tag()));
+  MOZ_ASSERT(ptr()->isTenured());
+  return ptr()->as<JSObject>();
+}
+
+// A rope pushed temporarily during eager string marking.
+inline JSRope* MarkStack::TaggedPtr::asTempRope() const {
+  MOZ_ASSERT(tag() == TempRopeTag);
+  return &ptr()->as<JSString>()->asRope();
+}
+
+// Two-word mark stack entry describing a partially-scanned slot/element
+// range: the start index and kind packed into one word, plus a tagged object
+// pointer.
+inline MarkStack::SlotsOrElementsRange::SlotsOrElementsRange(
+    SlotsOrElementsKind kindArg, JSObject* obj, size_t startArg)
+    : startAndKind_((startArg << StartShift) | size_t(kindArg)),
+      ptr_(SlotsOrElementsRangeTag, obj) {
+  assertValid();
+  MOZ_ASSERT(kind() == kindArg);
+  MOZ_ASSERT(start() == startArg);
+}
+
+inline void MarkStack::SlotsOrElementsRange::assertValid() const {
+  ptr_.assertValid();
+  MOZ_ASSERT(TagIsRangeTag(ptr_.tag()));
+}
+
+inline SlotsOrElementsKind MarkStack::SlotsOrElementsRange::kind() const {
+  return SlotsOrElementsKind(startAndKind_ & KindMask);
+}
+
+inline size_t MarkStack::SlotsOrElementsRange::start() const {
+  return startAndKind_ >> StartShift;
+}
+
+inline MarkStack::TaggedPtr MarkStack::SlotsOrElementsRange::ptr() const {
+  return ptr_;
+}
+
+MarkStack::MarkStack() { MOZ_ASSERT(isEmpty()); }
+
+MarkStack::~MarkStack() { MOZ_ASSERT(isEmpty()); }
+
+// Copying a mark stack is never meaningful; the copy operations exist only
+// to satisfy the compiler and crash if ever invoked at runtime.
+MarkStack::MarkStack(const MarkStack& other) {
+  MOZ_CRASH("Compiler requires this but doesn't call it");
+}
+
+MarkStack& MarkStack::operator=(const MarkStack& other) {
+  new (this) MarkStack(other);
+  return *this;
+}
+
+// Move transfers the buffer and top index, leaving |other| empty.
+MarkStack::MarkStack(MarkStack&& other)
+    : stack_(std::move(other.stack_.ref())),
+      topIndex_(other.topIndex_.ref())
+#ifdef JS_GC_ZEAL
+      ,
+      maxCapacity_(other.maxCapacity_)
+#endif
+{
+  other.topIndex_ = 0;
+}
+
+// Move-assignment via destroy-and-placement-new of the move constructor.
+MarkStack& MarkStack::operator=(MarkStack&& other) {
+  new (this) MarkStack(std::move(other));
+  return *this;
+}
+
+bool MarkStack::init() { return resetStackCapacity(); }
+
+// Shrink (or grow) the empty stack back to its base capacity, clamped by the
+// zeal-configured maximum.
+bool MarkStack::resetStackCapacity() {
+  MOZ_ASSERT(isEmpty());
+
+  size_t capacity = MARK_STACK_BASE_CAPACITY;
+
+#ifdef JS_GC_ZEAL
+  capacity = std::min(capacity, maxCapacity_.ref());
+#endif
+
+  return resize(capacity);
+}
+
+#ifdef JS_GC_ZEAL
+// Zeal-testing hook: cap the stack capacity to exercise the OOM/delayed
+// marking paths.
+void MarkStack::setMaxCapacity(size_t maxCapacity) {
+  MOZ_ASSERT(maxCapacity != 0);
+  MOZ_ASSERT(isEmpty());
+
+  maxCapacity_ = maxCapacity;
+  if (capacity() > maxCapacity_) {
+    // If the realloc fails, just keep using the existing stack; it's
+    // not ideal but better than failing.
+    (void)resize(maxCapacity_);
+  }
+}
+#endif
+
+MOZ_ALWAYS_INLINE bool MarkStack::indexIsEntryBase(size_t index) const {
+  // The mark stack holds both TaggedPtr and SlotsOrElementsRange entries, which
+  // are one or two words long respectively. Determine whether |index| points to
+  // the base of an entry (i.e. the lowest word in memory).
+  //
+  // The possible cases are that |index| points to:
+  // 1. a single word TaggedPtr entry => true
+  // 2. the startAndKind_ word of SlotsOrElementsRange => true
+  //    (startAndKind_ is a uintptr_t tagged with SlotsOrElementsKind)
+  // 3. the ptr_ word of SlotsOrElementsRange (itself a TaggedPtr) => false
+  //
+  // To check for case 3, interpret the word as a TaggedPtr: if it is tagged as
+  // a SlotsOrElementsRange tagged pointer then we are inside such a range and
+  // |index| does not point to the base of an entry. This requires that no
+  // startAndKind_ word can be interpreted as such, which is arranged by making
+  // SlotsOrElementsRangeTag zero and all SlotsOrElementsKind tags non-zero.
+
+  // Use tagUnchecked: the word may legitimately be the middle of an entry.
+  MOZ_ASSERT(index < position());
+  return stack()[index].tagUnchecked() != SlotsOrElementsRangeTag;
+}
+
+/* static */
+void MarkStack::moveWork(MarkStack& dst, MarkStack& src) {
+  // Move some work from |src| to |dst|. Assumes |dst| is empty.
+  //
+  // When this method runs during parallel marking, we are on the thread that
+  // owns |src|, and the thread that owns |dst| is blocked waiting on the
+  // ParallelMarkTask::resumed condition variable.
+
+  // Limit the size of moves to stop threads with work spending too much time
+  // donating.
+  static const size_t MaxWordsToMove = 4096;
+
+  size_t totalWords = src.position();
+  size_t wordsToMove = std::min(totalWords / 2, MaxWordsToMove);
+
+  size_t targetPos = src.position() - wordsToMove;
+
+  // Adjust the target position in case it points to the middle of a two word
+  // entry.
+  if (!src.indexIsEntryBase(targetPos)) {
+    targetPos--;
+    wordsToMove++;
+  }
+  MOZ_ASSERT(src.indexIsEntryBase(targetPos));
+  MOZ_ASSERT(targetPos < src.position());
+  MOZ_ASSERT(targetPos > 0);
+  MOZ_ASSERT(wordsToMove == src.position() - targetPos);
+
+  // Best-effort: if |dst| cannot grow, skip donating rather than failing.
+  if (!dst.ensureSpace(wordsToMove)) {
+    return;
+  }
+
+  // TODO: This doesn't have good cache behaviour when moving work between
+  // threads. It might be better if the original thread ended up with the top
+  // part of the stack, in other words if this method stole from the bottom of
+  // the stack rather than the top.
+
+  mozilla::PodCopy(dst.topPtr(), src.stack().begin() + targetPos, wordsToMove);
+  dst.topIndex_ += wordsToMove;
+  dst.peekPtr().assertValid();
+
+  src.topIndex_ = targetPos;
+#ifdef DEBUG
+  src.poisonUnused();
+#endif
+  src.peekPtr().assertValid();
+}
+
+void MarkStack::clearAndResetCapacity() {
+  // Fall back to the smaller initial capacity so we don't hold on to excess
+  // memory between GCs.
+  stack().clear();
+  topIndex_ = 0;
+  (void)resetStackCapacity();
+}
+
+void MarkStack::clearAndFreeStack() {
+  // Free all stack memory so we don't hold on to excess memory between GCs.
+  stack().clearAndFree();
+  topIndex_ = 0;
+}
+
+// Pointer to the first free word on the stack.
+inline MarkStack::TaggedPtr* MarkStack::topPtr() { return &stack()[topIndex_]; }
+
+// Typed push: the tag is derived from T at compile time. Returns false on
+// OOM (callers then fall back to delayed marking).
+template <typename T>
+inline bool MarkStack::push(T* ptr) {
+  return push(TaggedPtr(MapTypeToMarkStackTag<T*>::value, ptr));
+}
+
+inline bool MarkStack::pushTempRope(JSRope* rope) {
+  return push(TaggedPtr(TempRopeTag, rope));
+}
+
+inline bool MarkStack::push(const TaggedPtr& ptr) {
+  if (!ensureSpace(1)) {
+    return false;
+  }
+
+  infalliblePush(ptr);
+  return true;
+}
+
+// Push without a capacity check; caller must have called ensureSpace.
+inline void MarkStack::infalliblePush(const TaggedPtr& ptr) {
+  *topPtr() = ptr;
+  topIndex_++;
+  MOZ_ASSERT(position() <= capacity());
+}
+
+// Push a (partially scanned) slot/element range for |obj| starting at
+// |start|.
+inline bool MarkStack::push(JSObject* obj, SlotsOrElementsKind kind,
+                            size_t start) {
+  return push(SlotsOrElementsRange(kind, obj, start));
+}
+
+inline bool MarkStack::push(const SlotsOrElementsRange& array) {
+  array.assertValid();
+
+  // Range entries occupy ValueRangeWords words.
+  if (!ensureSpace(ValueRangeWords)) {
+    return false;
+  }
+
+  infalliblePush(array);
+  return true;
+}
+
+inline void MarkStack::infalliblePush(const SlotsOrElementsRange& array) {
+  *reinterpret_cast<SlotsOrElementsRange*>(topPtr()) = array;
+  topIndex_ += ValueRangeWords;
+  MOZ_ASSERT(position() <= capacity());
+  MOZ_ASSERT(TagIsRangeTag(peekTag()));
+}
+
+// Top word of the stack, without popping.
+inline const MarkStack::TaggedPtr& MarkStack::peekPtr() const {
+  MOZ_ASSERT(!isEmpty());
+  return stack()[topIndex_ - 1];
+}
+
+inline MarkStack::Tag MarkStack::peekTag() const {
+  MOZ_ASSERT(!isEmpty());
+  return peekPtr().tag();
+}
+
+// Pop a single-word entry; the top must not be a range entry.
+inline MarkStack::TaggedPtr MarkStack::popPtr() {
+  MOZ_ASSERT(!isEmpty());
+  MOZ_ASSERT(!TagIsRangeTag(peekTag()));
+  peekPtr().assertValid();
+  topIndex_--;
+  return *topPtr();
+}
+
+// Pop a two-word SlotsOrElementsRange entry; the top must be a range entry.
+inline MarkStack::SlotsOrElementsRange MarkStack::popSlotsOrElementsRange() {
+  MOZ_ASSERT(!isEmpty());
+  MOZ_ASSERT(TagIsRangeTag(peekTag()));
+  MOZ_ASSERT(position() >= ValueRangeWords);
+
+  topIndex_ -= ValueRangeWords;
+  const auto& array = *reinterpret_cast<SlotsOrElementsRange*>(topPtr());
+  array.assertValid();
+  return array;
+}
+
+// Ensure capacity for |count| more words. Note that even the fast path can
+// report failure under simulated-OOM testing (ShouldFailWithOOM), which
+// exercises the delayed-marking fallback.
+inline bool MarkStack::ensureSpace(size_t count) {
+  if (MOZ_LIKELY((topIndex_ + count) <= capacity())) {
+    return !js::oom::ShouldFailWithOOM();
+  }
+
+  return enlarge(count);
+}
+
+// Slow path of ensureSpace: grow to the next power of two that fits,
+// respecting the zeal-configured maximum capacity.
+MOZ_NEVER_INLINE bool MarkStack::enlarge(size_t count) {
+  size_t required = capacity() + count;
+  size_t newCapacity = mozilla::RoundUpPow2(required);
+
+#ifdef JS_GC_ZEAL
+  newCapacity = std::min(newCapacity, maxCapacity_.ref());
+  if (newCapacity < required) {
+    return false;
+  }
+#endif
+
+  return resize(newCapacity);
+}
+
+// Resize the backing vector and re-poison the unused tail.
+bool MarkStack::resize(size_t newCapacity) {
+  MOZ_ASSERT(newCapacity != 0);
+  MOZ_ASSERT(newCapacity >= position());
+
+  if (!stack().resize(newCapacity)) {
+    return false;
+  }
+
+  poisonUnused();
+  return true;
+}
+
+// Fill words above the top of stack with a recognizable poison pattern so
+// stale entries are caught by assertValid().
+inline void MarkStack::poisonUnused() {
+  static_assert((JS_FRESH_MARK_STACK_PATTERN & TagMask) > LastTag,
+                "The mark stack poison pattern must not look like a valid "
+                "tagged pointer");
+
+  AlwaysPoison(stack().begin() + topIndex_, JS_FRESH_MARK_STACK_PATTERN,
+               stack().capacity() - topIndex_, MemCheckKind::MakeUndefined);
+}
+
+size_t MarkStack::sizeOfExcludingThis(
+    mozilla::MallocSizeOf mallocSizeOf) const {
+  return stack().sizeOfExcludingThis(mallocSizeOf);
+}
+
+/*** GCMarker ***************************************************************/
+
+/*
+ * WeakMapTraceAction::Expand: the GC is recomputing the liveness of WeakMap
+ * entries by expanding each live WeakMap into its constituent key->value edges,
+ * a table of which will be consulted in a later phase whenever marking a
+ * potential key.
+ */
+GCMarker::GCMarker(JSRuntime* rt)
+    : tracer_(mozilla::VariantType<MarkingTracer>(), rt, this),
+      runtime_(rt),
+      haveSwappedStacks(false),
+      markColor_(MarkColor::Black),
+      state(NotActive),
+      incrementalWeakMapMarkingEnabled(
+          TuningDefaults::IncrementalWeakMapMarkingEnabled)
+#ifdef DEBUG
+      ,
+      checkAtomMarking(true),
+      strictCompartmentChecking(false)
+#endif
+{
+}
+
+bool GCMarker::init() { return stack.init(); }
+
+// Begin a marking session: transition from NotActive to RegularMarking with
+// an empty stack and black mark color.
+void GCMarker::start() {
+  MOZ_ASSERT(state == NotActive);
+  MOZ_ASSERT(stack.isEmpty());
+  state = RegularMarking;
+  haveAllImplicitEdges = true;
+  setMarkColor(MarkColor::Black);
+}
+
+// Clear the per-zone ephemeron (weak key) edge tables. Clearing can allocate
+// internally, so failures are treated as unrecoverable OOM.
+static void ClearEphemeronEdges(JSRuntime* rt) {
+  AutoEnterOOMUnsafeRegion oomUnsafe;
+  for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
+    if (!zone->gcEphemeronEdges().clear()) {
+      oomUnsafe.crash("clearing weak keys in GCMarker::stop()");
+    }
+    if (!zone->gcNurseryEphemeronEdges().clear()) {
+      oomUnsafe.crash("clearing (nursery) weak keys in GCMarker::stop()");
+    }
+  }
+}
+
+// End a marking session normally: release excess stack memory and clear
+// weak-marking state. No-op if not active.
+void GCMarker::stop() {
+  MOZ_ASSERT(isDrained());
+  MOZ_ASSERT(markColor() == MarkColor::Black);
+  MOZ_ASSERT(!haveSwappedStacks);
+
+  if (state == NotActive) {
+    return;
+  }
+  state = NotActive;
+
+  (void)stack.resetStackCapacity();
+  otherStack.clearAndFreeStack();
+  ClearEphemeronEdges(runtime());
+  unmarkGrayStack.clearAndFree();
+}
+
+// Abandon in-progress marking (e.g. on GC abort): discard all queued work
+// and return to the initial black-color state.
+void GCMarker::reset() {
+  stack.clearAndResetCapacity();
+  otherStack.clearAndFreeStack();
+  ClearEphemeronEdges(runtime());
+  MOZ_ASSERT(isDrained());
+
+  setMarkColor(MarkColor::Black);
+  MOZ_ASSERT(!haveSwappedStacks);
+
+  unmarkGrayStack.clearAndFree();
+}
+
+// Switch the current mark color. Black and gray entries live on separate
+// stacks; changing color swaps which stack is active.
+void GCMarker::setMarkColor(gc::MarkColor newColor) {
+  if (markColor_ == newColor) {
+    return;
+  }
+
+  // We don't support gray marking while there is black marking work to do.
+  MOZ_ASSERT(!hasBlackEntries());
+
+  markColor_ = newColor;
+
+  // Switch stacks. We only need to do this if there are any stack entries (as
+  // empty stacks are interchangeable) or to switch back to the original stack.
+  if (!isDrained() || haveSwappedStacks) {
+    std::swap(stack, otherStack);
+    haveSwappedStacks = !haveSwappedStacks;
+  }
+}
+
+// Whether there is queued work for |color|, accounting for which of the two
+// stacks currently holds that color's entries.
+bool GCMarker::hasEntries(MarkColor color) const {
+  const MarkStack& stackForColor = color == markColor() ? stack : otherStack;
+  return stackForColor.hasEntries();
+}
+
+// Push a cell for deferred traversal; on stack OOM fall back to delayed
+// marking via the cell's arena.
+template <typename T>
+inline void GCMarker::pushTaggedPtr(T* ptr) {
+  checkZone(ptr);
+  if (!stack.push(ptr)) {
+    delayMarkingChildrenOnOOM(ptr);
+  }
+}
+
+// Push a slot/element range [start, end) of |obj| for deferred scanning.
+// Empty ranges are skipped; stack OOM falls back to delayed marking.
+inline void GCMarker::pushValueRange(JSObject* obj, SlotsOrElementsKind kind,
+                                     size_t start, size_t end) {
+  checkZone(obj);
+  MOZ_ASSERT(obj->is<NativeObject>());
+  MOZ_ASSERT(start <= end);
+
+  if (start == end) {
+    return;
+  }
+
+  if (MOZ_UNLIKELY(!stack.push(obj, kind, start))) {
+    delayMarkingChildrenOnOOM(obj);
+  }
+}
+
+// Re-queue an already-marked object so its children are traced again.
+void GCMarker::repush(JSObject* obj) {
+  MOZ_ASSERT(obj->asTenured().isMarkedAtLeast(markColor()));
+  pushTaggedPtr(obj);
+}
+
+// Toggle root-marking mode, swapping in the matching tracer variant.
+void GCMarker::setRootMarkingMode(bool newState) {
+  if (newState) {
+    setMarkingStateAndTracer<RootMarkingTracer>(RegularMarking, RootMarking);
+  } else {
+    setMarkingStateAndTracer<MarkingTracer>(RootMarking, RegularMarking);
+  }
+}
+
+// Enter parallel marking: install the parallel tracer and remember the
+// coordinating ParallelMarker.
+void GCMarker::enterParallelMarkingMode(ParallelMarker* pm) {
+  MOZ_ASSERT(pm);
+  MOZ_ASSERT(!parallelMarker_);
+  setMarkingStateAndTracer<ParallelMarkingTracer>(RegularMarking,
+                                                  ParallelMarking);
+  parallelMarker_ = pm;
+}
+
+void GCMarker::leaveParallelMarkingMode() {
+  MOZ_ASSERT(parallelMarker_);
+  setMarkingStateAndTracer<MarkingTracer>(ParallelMarking, RegularMarking);
+  parallelMarker_ = nullptr;
+}
+
+bool GCMarker::canDonateWork() const {
+  // It's not worth the overhead of donating very few entries. For some
+  // (non-parallelizable) workloads this can lead to constantly interrupting
+  // marking work and makes parallel marking slower than single threaded.
+  constexpr size_t MinWordCount = 12;
+
+  static_assert(MinWordCount >= ValueRangeWords,
+                "We must always leave at least one stack entry.");
+
+  return stack.position() > MinWordCount;
+}
+
+// Transition the marking state, asserting the expected previous state, and
+// replace the tracer with the variant matching the new state.
+template <typename Tracer>
+void GCMarker::setMarkingStateAndTracer(MarkingState prev, MarkingState next) {
+  MOZ_ASSERT(state == prev);
+  state = next;
+  tracer_.emplace<Tracer>(runtime(), this);
+}
+
+// Try to enter weak (ephemeron) marking mode. Returns false if implicit
+// edges are incomplete (see abortLinearWeakMarking), in which case the
+// caller must fall back to iterative weakmap marking.
+bool GCMarker::enterWeakMarkingMode() {
+  MOZ_ASSERT(tracer()->weakMapAction() == JS::WeakMapTraceAction::Expand);
+  MOZ_ASSERT(state == RegularMarking);
+  if (!haveAllImplicitEdges) {
+    return false;
+  }
+
+  // During weak marking mode, we maintain a table mapping weak keys to
+  // entries in known-live weakmaps. Initialize it with the keys of marked
+  // weakmaps -- or more precisely, the keys of marked weakmaps that are
+  // mapped to not yet live values. (Once bug 1167452 implements incremental
+  // weakmap marking, this initialization step will become unnecessary, as
+  // the table will already hold all such keys.)
+
+  // Set state before doing anything else, so any new key that is marked
+  // during the following gcEphemeronEdges scan will itself be looked up in
+  // gcEphemeronEdges and marked according to ephemeron rules.
+  state = WeakMarking;
+
+  return true;
+}
+
+// Per-zone part of entering weak marking mode: mark values whose keys are
+// already marked, under the slice budget. Returns NotFinished if the budget
+// was exhausted before the zone's edge table was fully processed.
+IncrementalProgress JS::Zone::enterWeakMarkingMode(GCMarker* marker,
+                                                   SliceBudget& budget) {
+  MOZ_ASSERT(marker->isWeakMarking());
+
+  // Non-incremental fallback: process every marked weakmap in full.
+  if (!marker->incrementalWeakMapMarkingEnabled) {
+    for (WeakMapBase* m : gcWeakMapList()) {
+      if (m->mapColor) {
+        (void)m->markEntries(marker);
+      }
+    }
+    return IncrementalProgress::Finished;
+  }
+
+  // gcEphemeronEdges contains the keys from all weakmaps marked so far, or at
+  // least the keys that might still need to be marked through. Scan through
+  // gcEphemeronEdges and mark all values whose keys are marked. This marking
+  // may recursively mark through other weakmap entries (immediately since we
+  // are now in WeakMarking mode). The end result is a consistent state where
+  // all values are marked if both their map and key are marked -- though note
+  // that we may later leave weak marking mode, do some more marking, and then
+  // enter back in.
+  if (!isGCMarking()) {
+    return IncrementalProgress::Finished;
+  }
+
+  MOZ_ASSERT(gcNurseryEphemeronEdges().count() == 0);
+
+  // An OrderedHashMap::MutableRange stays valid even when the underlying table
+  // (zone->gcEphemeronEdges) is mutated, which is useful here since we may add
+  // additional entries while iterating over the Range.
+  EphemeronEdgeTable::MutableRange r = gcEphemeronEdges().mutableAll();
+  while (!r.empty()) {
+    Cell* src = r.front().key;
+    CellColor srcColor = gc::detail::GetEffectiveColor(marker, src);
+    auto& edges = r.front().value;
+    r.popFront();  // Pop before any mutations happen.
+
+    if (edges.length() > 0) {
+      uint32_t steps = edges.length();
+      marker->markEphemeronEdges(edges, srcColor);
+      budget.step(steps);
+      if (budget.isOverBudget()) {
+        return NotFinished;
+      }
+    }
+  }
+
+  return IncrementalProgress::Finished;
+}
+
+// Return to regular marking. Safe to call when already in RegularMarking
+// (see abortLinearWeakMarking's guard for the WeakMarking-only path).
+void GCMarker::leaveWeakMarkingMode() {
+  MOZ_ASSERT(state == WeakMarking || state == RegularMarking);
+  state = RegularMarking;
+
+  // The gcEphemeronEdges table is still populated and may be used during a
+  // future weak marking mode within this GC.
+}
+
+// Give up on linear-time weak marking for this GC: clear haveAllImplicitEdges
+// so enterWeakMarkingMode() will refuse, and leave weak marking mode if it is
+// currently active.
+void GCMarker::abortLinearWeakMarking() {
+  haveAllImplicitEdges = false;
+  if (state == WeakMarking) {
+    leaveWeakMarkingMode();
+  }
+}
+
+// Out-of-line OOM path: queue |cell|'s arena so its children are marked later
+// instead of now (see GCRuntime::delayMarkingChildren).
+MOZ_NEVER_INLINE void GCMarker::delayMarkingChildrenOnOOM(Cell* cell) {
+  runtime()->gc.delayMarkingChildren(cell, markColor());
+}
+
+// Whether any arenas are queued for delayed marking. Debug builds also check
+// that the list and the markLaterArenas counter agree.
+bool GCRuntime::hasDelayedMarking() const {
+  bool result = delayedMarkingList;
+  MOZ_ASSERT(result == (markLaterArenas != 0));
+  return result;
+}
+
+// Record that children of cells marked |color| in |cell|'s arena must be
+// marked later (called on OOM while marking). Sets delayedMarkingWorkAdded so
+// an in-progress processDelayedMarkingList() iteration will re-scan the list.
+void GCRuntime::delayMarkingChildren(Cell* cell, MarkColor color) {
+  // Synchronize access to delayed marking state during parallel marking.
+  LockGuard<Mutex> lock(delayedMarkingLock);
+
+  Arena* arena = cell->asTenured().arena();
+  if (!arena->onDelayedMarkingList()) {
+    arena->setNextDelayedMarkingArena(delayedMarkingList);
+    delayedMarkingList = arena;
+#ifdef DEBUG
+    markLaterArenas++;
+#endif
+  }
+
+  if (!arena->hasDelayedMarking(color)) {
+    arena->setHasDelayedMarking(color, true);
+    delayedMarkingWorkAdded = true;
+  }
+}
+
+// Mark the children (including implicit/ephemeron edges) of every cell in
+// |arena| that is marked the relevant color. Trace kinds that can never be
+// gray are checked against black regardless of |color|.
+void GCRuntime::markDelayedChildren(Arena* arena, MarkColor color) {
+  JSTracer* trc = marker().tracer();
+  JS::TraceKind kind = MapAllocToTraceKind(arena->getAllocKind());
+  MarkColor colorToCheck =
+      TraceKindCanBeMarkedGray(kind) ? color : MarkColor::Black;
+
+  for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
+    if (cell->isMarked(colorToCheck)) {
+      ApplyGCThingTyped(cell, kind, [trc, this](auto t) {
+        t->traceChildren(trc);
+        marker().markImplicitEdges(t);
+      });
+    }
+  }
+}
+
+/*
+ * Process arenas from |delayedMarkingList| by marking the unmarked children of
+ * marked cells of color |color|.
+ *
+ * This is called twice, first to mark gray children and then to mark black
+ * children.
+ */
+void GCRuntime::processDelayedMarkingList(MarkColor color) {
+  // Marking delayed children may add more arenas to the list, including arenas
+  // we are currently processing or have previously processed. Handle this by
+  // clearing a flag on each arena before marking its children. This flag will
+  // be set again if the arena is re-added. Iterate the list until no new arenas
+  // were added.
+
+  AutoSetMarkColor setColor(marker(), color);
+
+  do {
+    delayedMarkingWorkAdded = false;
+    for (Arena* arena = delayedMarkingList; arena;
+         arena = arena->getNextDelayedMarking()) {
+      if (arena->hasDelayedMarking(color)) {
+        arena->setHasDelayedMarking(color, false);
+        markDelayedChildren(arena, color);
+      }
+    }
+    // Fully drain the mark stack for this color before re-checking whether
+    // marking added more delayed work.
+    while (marker().hasEntriesForCurrentColor()) {
+      SliceBudget budget = SliceBudget::unlimited();
+      MOZ_ALWAYS_TRUE(
+          marker().processMarkStackTop<NormalMarkingOptions>(budget));
+    }
+  } while (delayedMarkingWorkAdded);
+
+  MOZ_ASSERT(marker().isDrained());
+}
+
+// Mark all children recorded via delayMarkingChildren(). |reportTime| selects
+// whether this work is attributed to the MARK_DELAYED phase in GC stats.
+void GCRuntime::markAllDelayedChildren(ShouldReportMarkTime reportTime) {
+  MOZ_ASSERT(CurrentThreadIsMainThread() || CurrentThreadIsPerformingGC());
+  MOZ_ASSERT(marker().isDrained());
+  MOZ_ASSERT(hasDelayedMarking());
+
+  mozilla::Maybe<gcstats::AutoPhase> ap;
+  if (reportTime) {
+    ap.emplace(stats(), gcstats::PhaseKind::MARK_DELAYED);
+  }
+
+  // We have a list of arenas containing marked cells with unmarked children
+  // where we ran out of stack space during marking. Both black and gray cells
+  // in these arenas may have unmarked children. Mark black children first.
+
+  const MarkColor colors[] = {MarkColor::Black, MarkColor::Gray};
+  for (MarkColor color : colors) {
+    processDelayedMarkingList(color);
+    rebuildDelayedMarkingList();
+  }
+
+  MOZ_ASSERT(!hasDelayedMarking());
+}
+
+void GCRuntime::rebuildDelayedMarkingList() {
+  // Rebuild the delayed marking list, removing arenas which do not need further
+  // marking.
+
+  Arena* listTail = nullptr;
+  forEachDelayedMarkingArena([&](Arena* arena) {
+    if (!arena->hasAnyDelayedMarking()) {
+      // No remaining delayed work for this arena: drop it from the list.
+      arena->clearDelayedMarkingState();
+#ifdef DEBUG
+      MOZ_ASSERT(markLaterArenas);
+      markLaterArenas--;
+#endif
+      return;
+    }
+
+    appendToDelayedMarkingList(&listTail, arena);
+  });
+  // Terminate the rebuilt list; if nothing was kept this also resets
+  // delayedMarkingList to nullptr.
+  appendToDelayedMarkingList(&listTail, nullptr);
+}
+
+// Discard all delayed-marking state without performing the deferred marking:
+// clear each queued arena's flags and empty the list.
+void GCRuntime::resetDelayedMarking() {
+  MOZ_ASSERT(CurrentThreadIsMainThread());
+
+  forEachDelayedMarkingArena([&](Arena* arena) {
+    MOZ_ASSERT(arena->onDelayedMarkingList());
+    arena->clearDelayedMarkingState();
+#ifdef DEBUG
+    MOZ_ASSERT(markLaterArenas);
+    markLaterArenas--;
+#endif
+  });
+  delayedMarkingList = nullptr;
+  MOZ_ASSERT(!markLaterArenas);
+}
+
+// Append |arena| to the list whose current last node is *listTail, updating
+// *listTail. Passing nullptr terminates the list (and clears the list head if
+// nothing was appended).
+inline void GCRuntime::appendToDelayedMarkingList(Arena** listTail,
+                                                  Arena* arena) {
+  if (*listTail) {
+    (*listTail)->updateNextDelayedMarkingArena(arena);
+  } else {
+    delayedMarkingList = arena;
+  }
+  *listTail = arena;
+}
+
+// Apply |f| to each arena on the delayed marking list. The next pointer is
+// fetched before invoking |f|, so |f| may unlink or modify the current arena.
+template <typename F>
+inline void GCRuntime::forEachDelayedMarkingArena(F&& f) {
+  Arena* arena = delayedMarkingList;
+  Arena* next;
+  while (arena) {
+    next = arena->getNextDelayedMarking();
+    f(arena);
+    arena = next;
+  }
+}
+
+#ifdef DEBUG
+// Debug check that |p| is a cell the marker may touch: the marker is active
+// and, for tenured cells, the zone is being collected.
+void GCMarker::checkZone(void* p) {
+  MOZ_ASSERT(state != NotActive);
+  DebugOnly<Cell*> cell = static_cast<Cell*>(p);
+  MOZ_ASSERT_IF(cell->isTenured(),
+                cell->asTenured().zone()->isCollectingFromAnyThread());
+}
+#endif
+
+// Memory reporting: this object plus both mark stacks.
+size_t GCMarker::sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+  return mallocSizeOf(this) + stack.sizeOfExcludingThis(mallocSizeOf) +
+         otherStack.sizeOfExcludingThis(mallocSizeOf);
+}
+
+/*** IsMarked / IsAboutToBeFinalized ****************************************/
+
+// Validate that |thing| is a legal argument for the marking internals below:
+// a compile-time check that T is one of the base trace-kind types, plus
+// debug-only checks that it is non-null and that the current thread is
+// permitted to touch its zone.
+template <typename T>
+static inline void CheckIsMarkedThing(T* thing) {
+#define IS_SAME_TYPE_OR(name, type, _, _1) std::is_same_v<type, T> ||
+  static_assert(JS_FOR_EACH_TRACEKIND(IS_SAME_TYPE_OR) false,
+                "Only the base cell layout types are allowed into "
+                "marking/tracing internals");
+#undef IS_SAME_TYPE_OR
+
+#ifdef DEBUG
+  MOZ_ASSERT(thing);
+
+  // Allow any thread access to uncollected things.
+  Zone* zone = thing->zoneFromAnyThread();
+  if (thing->isPermanentAndMayBeShared()) {
+    MOZ_ASSERT(!zone->wasGCStarted());
+    MOZ_ASSERT(!zone->needsIncrementalBarrier());
+    MOZ_ASSERT(thing->isMarkedBlack());
+    return;
+  }
+
+  // Allow the current thread access if it is sweeping or in sweep-marking, but
+  // try to check the zone. Some threads have access to all zones when sweeping.
+  JS::GCContext* gcx = TlsGCContext.get();
+  MOZ_ASSERT(gcx->gcUse() != GCUse::Finalizing);
+  if (gcx->gcUse() == GCUse::Sweeping || gcx->gcUse() == GCUse::Marking) {
+    MOZ_ASSERT_IF(gcx->gcSweepZone(),
+                  gcx->gcSweepZone() == zone || zone->isAtomsZone());
+    return;
+  }
+
+  // Otherwise only allow access from the main thread or this zone's associated
+  // thread.
+  MOZ_ASSERT(CurrentThreadCanAccessRuntime(thing->runtimeFromAnyThread()) ||
+             CurrentThreadCanAccessZone(thing->zoneFromAnyThread()));
+#endif
+}
+
+// Return whether |thing| is marked (any color). Things in zones that are not
+// currently marking are reported as marked; permanent things owned by another
+// runtime are asserted to be black.
+template <typename T>
+bool js::gc::IsMarkedInternal(JSRuntime* rt, T* thing) {
+  // Don't depend on the mark state of other cells during finalization.
+  MOZ_ASSERT(!CurrentThreadIsGCFinalizing());
+  MOZ_ASSERT(rt->heapState() != JS::HeapState::MinorCollecting);
+  MOZ_ASSERT(thing);
+  CheckIsMarkedThing(thing);
+
+  // This is not used during minor sweeping nor used to update moved GC things.
+  MOZ_ASSERT(!IsForwarded(thing));
+
+  // Permanent things are never marked by non-owning runtimes.
+  TenuredCell* cell = &thing->asTenured();
+  Zone* zone = cell->zoneFromAnyThread();
+#ifdef DEBUG
+  if (IsOwnedByOtherRuntime(rt, thing)) {
+    MOZ_ASSERT(!zone->wasGCStarted());
+    MOZ_ASSERT(thing->isMarkedBlack());
+  }
+#endif
+
+  return !zone->isGCMarking() || TenuredThingIsMarkedAny(thing);
+}
+
+// Return whether |thing| is dead and will be finalized in the current sweep
+// group: its zone is sweeping and it is not marked. Nursery things, and
+// things in zones that are not sweeping, are reported as live.
+template <typename T>
+bool js::gc::IsAboutToBeFinalizedInternal(T* thing) {
+  // Don't depend on the mark state of other cells during finalization.
+  MOZ_ASSERT(!CurrentThreadIsGCFinalizing());
+  MOZ_ASSERT(thing);
+  CheckIsMarkedThing(thing);
+
+  // This is not used during minor sweeping nor used to update moved GC things.
+  MOZ_ASSERT(!IsForwarded(thing));
+
+  if (!thing->isTenured()) {
+    return false;
+  }
+
+  // Permanent things are never finalized by non-owning runtimes.
+  TenuredCell* cell = &thing->asTenured();
+  Zone* zone = cell->zoneFromAnyThread();
+#ifdef DEBUG
+  JSRuntime* rt = TlsGCContext.get()->runtimeFromAnyThread();
+  if (IsOwnedByOtherRuntime(rt, thing)) {
+    MOZ_ASSERT(!zone->wasGCStarted());
+    MOZ_ASSERT(thing->isMarkedBlack());
+  }
+#endif
+
+  return zone->isGCSweeping() && !TenuredThingIsMarkedAny(thing);
+}
+
+// Overload for tagged values (e.g. JS::Value): dispatch to the pointer
+// overload for the contained GC thing, if any.
+template <typename T>
+bool js::gc::IsAboutToBeFinalizedInternal(const T& thing) {
+  bool dying = false;
+  ApplyGCThingTyped(
+      thing, [&dying](auto t) { dying = IsAboutToBeFinalizedInternal(t); });
+  return dying;
+}
+
+// Tracer that clears weak edges to dead cells (see onEdge below).
+SweepingTracer::SweepingTracer(JSRuntime* rt)
+    : GenericTracerImpl(rt, JS::TracerKind::Sweeping,
+                        JS::WeakMapTraceAction::TraceKeysAndValues) {}
+
+// Sweep one weak edge: null out *thingp if it points to a dead cell, i.e. one
+// that is unmarked in a zone that is being swept. Nursery cells and cells in
+// non-sweeping zones are left untouched.
+template <typename T>
+inline void SweepingTracer::onEdge(T** thingp, const char* name) {
+  T* thing = *thingp;
+  CheckIsMarkedThing(thing);
+
+  if (!thing->isTenured()) {
+    return;
+  }
+
+  // Permanent things are never finalized by non-owning runtimes.
+  TenuredCell* cell = &thing->asTenured();
+  Zone* zone = cell->zoneFromAnyThread();
+#ifdef DEBUG
+  if (IsOwnedByOtherRuntime(runtime(), thing)) {
+    MOZ_ASSERT(!zone->wasGCStarted());
+    MOZ_ASSERT(thing->isMarkedBlack());
+  }
+#endif
+
+  // It would be nice if we could assert that the zone of the tenured cell is in
+  // the Sweeping state, but that isn't always true for:
+  //  - atoms
+  //  - the jitcode map
+  //  - the mark queue
+  if (zone->isGCSweeping() && !cell->isMarkedAny()) {
+    *thingp = nullptr;
+  }
+}
+
+namespace js {
+namespace gc {
+
+// Public API: trace a weak JS::Heap<T> edge; returns whether the target is
+// still live (per TraceEdgeInternal).
+template <typename T>
+JS_PUBLIC_API bool TraceWeakEdge(JSTracer* trc, JS::Heap<T>* thingp) {
+  return TraceEdgeInternal(trc, gc::ConvertToBase(thingp->unsafeGet()),
+                           "JS::Heap edge");
+}
+
+// Public API: whether an unbarriered edge points at a thing that is about to
+// be finalized (and so needs sweeping).
+template <typename T>
+JS_PUBLIC_API bool EdgeNeedsSweepUnbarrieredSlow(T* thingp) {
+  return IsAboutToBeFinalizedInternal(*ConvertToBase(thingp));
+}
+
+// Instantiate a copy of the Tracing templates for each public GC type.
+#define INSTANTIATE_ALL_VALID_HEAP_TRACE_FUNCTIONS(type) \
+ template JS_PUBLIC_API bool TraceWeakEdge<type>(JSTracer * trc, \
+ JS::Heap<type>*); \
+ template JS_PUBLIC_API bool EdgeNeedsSweepUnbarrieredSlow<type>(type*);
+JS_FOR_EACH_PUBLIC_GC_POINTER_TYPE(INSTANTIATE_ALL_VALID_HEAP_TRACE_FUNCTIONS)
+JS_FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(
+ INSTANTIATE_ALL_VALID_HEAP_TRACE_FUNCTIONS)
+
+#define INSTANTIATE_INTERNAL_IS_MARKED_FUNCTION(type) \
+ template bool IsMarkedInternal(JSRuntime* rt, type thing);
+
+#define INSTANTIATE_INTERNAL_IATBF_FUNCTION(type) \
+ template bool IsAboutToBeFinalizedInternal(type thingp);
+
+#define INSTANTIATE_INTERNAL_MARKING_FUNCTIONS_FROM_TRACEKIND(_1, type, _2, \
+ _3) \
+ INSTANTIATE_INTERNAL_IS_MARKED_FUNCTION(type*) \
+ INSTANTIATE_INTERNAL_IATBF_FUNCTION(type*)
+
+JS_FOR_EACH_TRACEKIND(INSTANTIATE_INTERNAL_MARKING_FUNCTIONS_FROM_TRACEKIND)
+
+#define INSTANTIATE_IATBF_FUNCTION_FOR_TAGGED_POINTER(type) \
+ INSTANTIATE_INTERNAL_IATBF_FUNCTION(const type&)
+
+JS_FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(
+ INSTANTIATE_IATBF_FUNCTION_FOR_TAGGED_POINTER)
+
+#undef INSTANTIATE_INTERNAL_IS_MARKED_FUNCTION
+#undef INSTANTIATE_INTERNAL_IATBF_FUNCTION
+#undef INSTANTIATE_INTERNAL_MARKING_FUNCTIONS_FROM_TRACEKIND
+#undef INSTANTIATE_IATBF_FUNCTION_FOR_TAGGED_POINTER
+
+} /* namespace gc */
+} /* namespace js */
+
+/*** Cycle Collector Barrier Implementation *********************************/
+
+/*
+ * The GC and CC are run independently. Consequently, the following sequence of
+ * events can occur:
+ * 1. GC runs and marks an object gray.
+ * 2. The mutator runs (specifically, some C++ code with access to gray
+ * objects) and creates a pointer from a JS root or other black object to
+ * the gray object. If we re-ran a GC at this point, the object would now be
+ * black.
+ * 3. Now we run the CC. It may think it can collect the gray object, even
+ * though it's reachable from the JS heap.
+ *
+ * To prevent this badness, we unmark the gray bit of an object when it is
+ * accessed by callers outside XPConnect. This would cause the object to go
+ * black in step 2 above. This must be done on everything reachable from the
+ * object being returned. The following code takes care of the recursive
+ * re-coloring.
+ *
+ * There is an additional complication for certain kinds of edges that are not
+ * contained explicitly in the source object itself, such as from a weakmap key
+ * to its value. These "implicit edges" are represented in some other
+ * container object, such as the weakmap itself. In these
+ * cases, calling unmark gray on an object won't find all of its children.
+ *
+ * Handling these implicit edges has two parts:
+ * - A special pass enumerating all of the containers that know about the
+ * implicit edges to fix any black-gray edges that have been created. This
+ * is implemented in nsXPConnect::FixWeakMappingGrayBits.
+ * - To prevent any incorrectly gray objects from escaping to live JS outside
+ * of the containers, we must add unmark-graying read barriers to these
+ * containers.
+ */
+
+#ifdef DEBUG
+// Debug tracer that asserts none of the visited children are marked gray.
+struct AssertNonGrayTracer final : public JS::CallbackTracer {
+  // This is used by the UnmarkGray tracer only, and needs to report itself as
+  // the non-gray tracer to not trigger assertions. Do not use it in another
+  // context without making this more generic.
+  explicit AssertNonGrayTracer(JSRuntime* rt)
+      : JS::CallbackTracer(rt, JS::TracerKind::UnmarkGray) {}
+  void onChild(JS::GCCellPtr thing, const char* name) override {
+    MOZ_ASSERT(!thing.asCell()->isMarkedGray());
+  }
+};
+#endif
+
+// Tracer that recursively turns gray cells black, starting from a root passed
+// to unmark(). Records whether anything was unmarked and whether the
+// traversal stack allocation failed.
+class js::gc::UnmarkGrayTracer final : public JS::CallbackTracer {
+ public:
+  // We set weakMapAction to WeakMapTraceAction::Skip because the cycle
+  // collector will fix up any color mismatches involving weakmaps when it runs.
+  explicit UnmarkGrayTracer(GCMarker* marker)
+      : JS::CallbackTracer(marker->runtime(), JS::TracerKind::UnmarkGray,
+                          JS::WeakMapTraceAction::Skip),
+        unmarkedAny(false),
+        oom(false),
+        marker(marker),
+        stack(marker->unmarkGrayStack) {}
+
+  void unmark(JS::GCCellPtr cell);
+
+  // Whether we unmarked anything.
+  bool unmarkedAny;
+
+  // Whether we ran out of memory (failed to grow the traversal stack).
+  bool oom;
+
+ private:
+  // Marker to use if we need to unmark in zones that are currently being
+  // marked.
+  GCMarker* marker;
+
+  // Stack of cells to traverse.
+  Vector<JS::GCCellPtr, 0, SystemAllocPolicy>& stack;
+
+  void onChild(JS::GCCellPtr thing, const char* name) override;
+};
+
+// Visit one child during gray unmarking: mark it black (or trigger a marking
+// barrier if its zone is mid-mark) and push it for recursive traversal.
+void UnmarkGrayTracer::onChild(JS::GCCellPtr thing, const char* name) {
+  Cell* cell = thing.asCell();
+
+  // Cells in the nursery cannot be gray, and nor can certain kinds of tenured
+  // cells. These must necessarily point only to black edges.
+  if (!cell->isTenured() || !TraceKindCanBeMarkedGray(thing.kind())) {
+#ifdef DEBUG
+    MOZ_ASSERT(!cell->isMarkedGray());
+    AssertNonGrayTracer nongray(runtime());
+    JS::TraceChildren(&nongray, thing);
+#endif
+    return;
+  }
+
+  TenuredCell& tenured = cell->asTenured();
+  Zone* zone = tenured.zone();
+
+  // If the cell is in a zone whose mark bits are being cleared, then it will
+  // end up white.
+  if (zone->isGCPreparing()) {
+    return;
+  }
+
+  // If the cell is in a zone that we're currently marking, then it's possible
+  // that it is currently white but will end up gray. To handle this case,
+  // trigger the barrier for any cells in zones that are currently being
+  // marked. This will ensure they will eventually get marked black.
+  if (zone->isGCMarking()) {
+    if (!cell->isMarkedBlack()) {
+      TraceEdgeForBarrier(marker, &tenured, thing.kind());
+      unmarkedAny = true;
+    }
+    return;
+  }
+
+  // Already black (or unmarked): nothing to do.
+  if (!tenured.isMarkedGray()) {
+    return;
+  }
+
+  // TODO: It may be a small improvement to only use the atomic version during
+  // parallel marking.
+  tenured.markBlackAtomic();
+  unmarkedAny = true;
+
+  if (!stack.append(thing)) {
+    oom = true;
+  }
+}
+
+// Unmark gray starting from |cell|, iteratively draining the traversal stack.
+// On OOM the stack is cleared and the runtime's gray bits are declared
+// invalid, forcing a GC before the next cycle collection.
+void UnmarkGrayTracer::unmark(JS::GCCellPtr cell) {
+  MOZ_ASSERT(stack.empty());
+
+  onChild(cell, "unmarking root");
+
+  while (!stack.empty() && !oom) {
+    TraceChildren(this, stack.popCopy());
+  }
+
+  if (oom) {
+    // If we run out of memory, we take a drastic measure: require that we
+    // GC again before the next CC.
+    stack.clear();
+    runtime()->gc.setGrayBitsInvalid();
+    return;
+  }
+}
+
+// Unmark gray starting at |thing| without the API-level heap-state checks.
+// Returns whether anything was unmarked. Adds a profiler entry when a
+// JSContext is available.
+bool js::gc::UnmarkGrayGCThingUnchecked(GCMarker* marker, JS::GCCellPtr thing) {
+  MOZ_ASSERT(thing);
+  MOZ_ASSERT(thing.asCell()->isMarkedGray());
+
+  mozilla::Maybe<AutoGeckoProfilerEntry> profilingStackFrame;
+  if (JSContext* cx = TlsContext.get()) {
+    profilingStackFrame.emplace(cx, "UnmarkGrayGCThing",
+                                JS::ProfilingCategoryPair::GCCC_UnmarkGray);
+  }
+
+  UnmarkGrayTracer unmarker(marker);
+  unmarker.unmark(thing);
+  return unmarker.unmarkedAny;
+}
+
+// Public API entry point for recursive gray unmarking. Returns whether
+// anything was unmarked; returns false early if mark bits are being cleared
+// in preparation for GC.
+JS_PUBLIC_API bool JS::UnmarkGrayGCThingRecursively(JS::GCCellPtr thing) {
+  MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
+  MOZ_ASSERT(!JS::RuntimeHeapIsCycleCollecting());
+
+  JSRuntime* rt = thing.asCell()->runtimeFromMainThread();
+  if (thing.asCell()->zone()->isGCPreparing()) {
+    // Mark bits are being cleared in preparation for GC.
+    return false;
+  }
+
+  return UnmarkGrayGCThingUnchecked(&rt->gc.marker(), thing);
+}
+
+// Convenience overload taking a TenuredCell directly.
+void js::gc::UnmarkGrayGCThingRecursively(TenuredCell* cell) {
+  JS::UnmarkGrayGCThingRecursively(JS::GCCellPtr(cell, cell->getTraceKind()));
+}
+
+// Shape-specific wrapper; returns whether anything was unmarked.
+bool js::UnmarkGrayShapeRecursively(Shape* shape) {
+  return JS::UnmarkGrayGCThingRecursively(JS::GCCellPtr(shape));
+}
+
+#ifdef DEBUG
+// Non-inlined wrapper around Forwarded() (debug builds only).
+Cell* js::gc::UninlinedForwarded(const Cell* cell) { return Forwarded(cell); }
+#endif
+
+namespace js {
+namespace debug {
+
+// Classify |rawCell|'s mark state for debugging: NURSERY, GRAY, BLACK, or
+// UNMARKED. Gray is checked before black since isMarkedBlack() would also be
+// false for gray cells only if the bits are exclusive -- NOTE(review): relies
+// on the mark-bit encoding; see ColorBit.
+MarkInfo GetMarkInfo(Cell* rawCell) {
+  if (!rawCell->isTenured()) {
+    return MarkInfo::NURSERY;
+  }
+
+  TenuredCell* cell = &rawCell->asTenured();
+  if (cell->isMarkedGray()) {
+    return MarkInfo::GRAY;
+  }
+  if (cell->isMarkedBlack()) {
+    return MarkInfo::BLACK;
+  }
+  return MarkInfo::UNMARKED;
+}
+
+// Return the address of the mark-bitmap word holding |cell|'s black bit, or
+// nullptr for nursery cells.
+uintptr_t* GetMarkWordAddress(Cell* cell) {
+  if (!cell->isTenured()) {
+    return nullptr;
+  }
+
+  MarkBitmapWord* wordp;
+  uintptr_t mask;  // Unused here; required out-param of getMarkWordAndMask.
+  TenuredChunkBase* chunk = gc::detail::GetCellChunkBase(&cell->asTenured());
+  chunk->markBits.getMarkWordAndMask(&cell->asTenured(), ColorBit::BlackBit,
+                                     &wordp, &mask);
+  return reinterpret_cast<uintptr_t*>(wordp);
+}
+
+// Return the bit mask within the mark word for |cell|'s black (colorBit == 0)
+// or gray-or-black (colorBit == 1) bit; 0 for nursery cells.
+uintptr_t GetMarkMask(Cell* cell, uint32_t colorBit) {
+  MOZ_ASSERT(colorBit == 0 || colorBit == 1);
+
+  if (!cell->isTenured()) {
+    return 0;
+  }
+
+  ColorBit bit = colorBit == 0 ? ColorBit::BlackBit : ColorBit::GrayOrBlackBit;
+  MarkBitmapWord* wordp;  // Unused here; required out-param.
+  uintptr_t mask;
+  TenuredChunkBase* chunk = gc::detail::GetCellChunkBase(&cell->asTenured());
+  chunk->markBits.getMarkWordAndMask(&cell->asTenured(), bit, &wordp, &mask);
+  return mask;
+}
+
+} // namespace debug
+} // namespace js
diff --git a/js/src/gc/Marking.h b/js/src/gc/Marking.h
new file mode 100644
index 0000000000..7043eccc53
--- /dev/null
+++ b/js/src/gc/Marking.h
@@ -0,0 +1,150 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Marking and sweeping APIs for use by implementations of different GC cell
+ * kinds.
+ */
+
+#ifndef gc_Marking_h
+#define gc_Marking_h
+
+#include "gc/Barrier.h"
+#include "js/TypeDecls.h"
+
+class JSTracer;
+struct JSClass;
+
+namespace js {
+class GCMarker;
+class Shape;
+class WeakMapBase;
+
+namespace gc {
+
+struct Cell;
+
+/*** Liveness ***/
+
+// The IsMarkedInternal and IsAboutToBeFinalizedInternal function templates are
+// used to implement the IsMarked and IsAboutToBeFinalized set of functions.
+// These internal functions are instantiated for the base GC types and should
+// not be called directly.
+//
+// Note that there are two function templates declared for each, not one
+// template and a specialization. This is necessary so that pointer arguments
+// (e.g. JSObject**) and tagged value arguments (e.g. JS::Value*) are routed to
+// separate implementations.
+
+template <typename T>
+bool IsMarkedInternal(JSRuntime* rt, T* thing);
+
+template <typename T>
+bool IsAboutToBeFinalizedInternal(T* thing);
+template <typename T>
+bool IsAboutToBeFinalizedInternal(const T& thing);
+
+// Report whether a GC thing has been marked with any color. Things which are in
+// zones that are not currently being collected or are owned by another runtime
+// are always reported as being marked.
+template <typename T>
+inline bool IsMarked(JSRuntime* rt, const BarrieredBase<T>& thing) {
+ return IsMarkedInternal(rt, *ConvertToBase(thing.unbarrieredAddress()));
+}
+template <typename T>
+inline bool IsMarkedUnbarriered(JSRuntime* rt, T thing) {
+ return IsMarkedInternal(rt, *ConvertToBase(&thing));
+}
+
+// Report whether a GC thing is dead and will be finalized in the current sweep
+// group. This is mainly used in read barriers for incremental sweeping.
+//
+// This no longer updates pointers moved by the GC (tracing should be used for
+// this instead).
+template <typename T>
+inline bool IsAboutToBeFinalized(const BarrieredBase<T>& thing) {
+ return IsAboutToBeFinalizedInternal(
+ *ConvertToBase(thing.unbarrieredAddress()));
+}
+template <typename T>
+inline bool IsAboutToBeFinalizedUnbarriered(T thing) {
+ return IsAboutToBeFinalizedInternal(*ConvertToBase(&thing));
+}
+
+inline bool IsAboutToBeFinalizedDuringMinorSweep(Cell* cell);
+
+// Extract the markable Cell from a Value (nullptr if it holds no GC thing).
+inline Cell* ToMarkable(const Value& v) {
+  if (v.isGCThing()) {
+    return (Cell*)v.toGCThing();
+  }
+  return nullptr;
+}
+
+// Identity overload for plain cells.
+inline Cell* ToMarkable(Cell* cell) { return cell; }
+
+bool UnmarkGrayGCThingUnchecked(GCMarker* marker, JS::GCCellPtr thing);
+
+} /* namespace gc */
+
+// The return value indicates if anything was unmarked.
+bool UnmarkGrayShapeRecursively(Shape* shape);
+
+namespace gc {
+
+// Functions for checking and updating GC thing pointers that might have been
+// moved by compacting GC. Overloads are also provided that work with Values.
+//
+// IsForwarded - check whether a pointer refers to an GC thing that has been
+// moved.
+//
+// Forwarded - return a pointer to the new location of a GC thing given a
+// pointer to old location.
+//
+// MaybeForwarded - used before dereferencing a pointer that may refer to a
+// moved GC thing without updating it. For JSObjects this will
+// also update the object's shape pointer if it has been moved
+// to allow slots to be accessed.
+
+template <typename T>
+inline bool IsForwarded(const T* t);
+
+template <typename T>
+inline T* Forwarded(const T* t);
+
+inline Value Forwarded(const JS::Value& value);
+
+template <typename T>
+inline T MaybeForwarded(T t);
+
+// Helper functions for use in situations where the object's group might be
+// forwarded, for example while marking.
+
+inline const JSClass* MaybeForwardedObjectClass(const JSObject* obj);
+
+template <typename T>
+inline bool MaybeForwardedObjectIs(const JSObject* obj);
+
+template <typename T>
+inline T& MaybeForwardedObjectAs(JSObject* obj);
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+
+template <typename T>
+inline bool IsGCThingValidAfterMovingGC(T* t);
+
+template <typename T>
+inline void CheckGCThingAfterMovingGC(T* t);
+
+template <typename T>
+inline void CheckGCThingAfterMovingGC(const WeakHeapPtr<T*>& t);
+
+#endif // JSGC_HASH_TABLE_CHECKS
+
+} /* namespace gc */
+
+} /* namespace js */
+
+#endif /* gc_Marking_h */
diff --git a/js/src/gc/MaybeRooted.h b/js/src/gc/MaybeRooted.h
new file mode 100644
index 0000000000..fd615a1efb
--- /dev/null
+++ b/js/src/gc/MaybeRooted.h
@@ -0,0 +1,141 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Template types for use in generic code: to use Rooted/Handle/MutableHandle in
+ * cases where GC may occur, or to use mock versions of those types that perform
+ * no rooting or root list manipulation when GC cannot occur.
+ */
+
+#ifndef gc_MaybeRooted_h
+#define gc_MaybeRooted_h
+
+#include "mozilla/Attributes.h" // MOZ_IMPLICIT, MOZ_RAII
+
+#include <type_traits> // std::true_type
+
+#include "gc/Allocator.h" // js::AllowGC, js::CanGC, js::NoGC
+#include "js/ComparisonOperators.h" // JS::detail::DefineComparisonOps
+#include "js/RootingAPI.h" // js::{Rooted,MutableHandle}Base, JS::SafelyInitialized, DECLARE_POINTER_{CONSTREF,ASSIGN}_OPS, DECLARE_NONPOINTER_{,MUTABLE_}ACCESSOR_METHODS, JS::Rooted, JS::{,Mutable}Handle
+
+namespace js {
+
+/**
+ * Interface substitute for Rooted<T> which does not root the variable's
+ * memory.
+ */
+template <typename T>
+class MOZ_RAII FakeRooted : public RootedOperations<T, FakeRooted<T>> {
+ public:
+  using ElementType = T;
+
+  // The JSContext is accepted to mirror JS::Rooted's constructor signature
+  // but is not used: no root list is registered.
+  explicit FakeRooted(JSContext* cx)
+      : ptr(JS::SafelyInitialized<T>::create()) {}
+
+  FakeRooted(JSContext* cx, T initial) : ptr(initial) {}
+
+  DECLARE_POINTER_CONSTREF_OPS(T);
+  DECLARE_POINTER_ASSIGN_OPS(FakeRooted, T);
+  DECLARE_NONPOINTER_ACCESSOR_METHODS(ptr);
+  DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(ptr);
+
+ private:
+  T ptr;
+
+  void set(const T& value) { ptr = value; }
+
+  // Non-copyable, like JS::Rooted.
+  FakeRooted(const FakeRooted&) = delete;
+};
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <typename T>
+struct DefineComparisonOps<js::FakeRooted<T>> : std::true_type {
+ static const T& get(const js::FakeRooted<T>& v) { return v.get(); }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+/**
+ * Interface substitute for MutableHandle<T> which is not required to point to
+ * rooted memory.
+ */
+template <typename T>
+class FakeMutableHandle
+    : public js::MutableHandleOperations<T, FakeMutableHandle<T>> {
+ public:
+  using ElementType = T;
+
+  MOZ_IMPLICIT FakeMutableHandle(T* t) : ptr(t) {}
+
+  MOZ_IMPLICIT FakeMutableHandle(FakeRooted<T>* root) : ptr(root->address()) {}
+
+  void set(const T& v) { *ptr = v; }
+
+  DECLARE_POINTER_CONSTREF_OPS(T);
+  DECLARE_NONPOINTER_ACCESSOR_METHODS(*ptr);
+  DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(*ptr);
+
+ private:
+  // Default construction and assignment are disallowed, matching
+  // JS::MutableHandle.
+  FakeMutableHandle() : ptr(nullptr) {}
+  DELETE_ASSIGNMENT_OPS(FakeMutableHandle, T);
+
+  T* ptr;
+};
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+template <typename T>
+struct DefineComparisonOps<js::FakeMutableHandle<T>> : std::true_type {
+ static const T& get(const js::FakeMutableHandle<T>& v) { return v.get(); }
+};
+
+} // namespace detail
+
+} // namespace JS
+
+namespace js {
+
+/**
+ * Types for a variable that either should or shouldn't be rooted, depending on
+ * the template parameter allowGC. Used for implementing functions that can
+ * operate on either rooted or unrooted data.
+ */
+
+template <typename T, AllowGC allowGC>
+class MaybeRooted;
+
+// CanGC: use the real rooting types.
+template <typename T>
+class MaybeRooted<T, CanGC> {
+ public:
+  using HandleType = JS::Handle<T>;
+  using RootType = JS::Rooted<T>;
+  using MutableHandleType = JS::MutableHandle<T>;
+};
+
+// NoGC: use non-rooting stand-ins with the same interface.
+template <typename T>
+class MaybeRooted<T, NoGC> {
+ public:
+  using HandleType = const T&;
+  using RootType = FakeRooted<T>;
+  using MutableHandleType = FakeMutableHandle<T>;
+};
+
+} // namespace js
+
+#endif // gc_MaybeRooted_h
diff --git a/js/src/gc/Memory.cpp b/js/src/gc/Memory.cpp
new file mode 100644
index 0000000000..c5bdaa14bf
--- /dev/null
+++ b/js/src/gc/Memory.cpp
@@ -0,0 +1,1050 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Memory.h"
+
+#include "mozilla/Atomics.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/RandomNum.h"
+#include "mozilla/TaggedAnonymousMemory.h"
+
+#include "jit/JitOptions.h"
+#include "js/HeapAPI.h"
+#include "js/Utility.h"
+#include "util/Memory.h"
+
+#ifdef XP_WIN
+
+# include "util/WindowsWrapper.h"
+# include <psapi.h>
+
+#else
+
+# include <algorithm>
+# include <errno.h>
+# include <unistd.h>
+
+# if !defined(__wasi__)
+# include <sys/mman.h>
+# include <sys/resource.h>
+# include <sys/stat.h>
+# include <sys/types.h>
+# endif // !defined(__wasi__)
+
+#endif // !XP_WIN
+
+namespace js {
+namespace gc {
+
+/*
+ * System allocation functions generally require the allocation size
+ * to be an integer multiple of the page size of the running process.
+ */
+static size_t pageSize = 0;
+
+/* The OS allocation granularity may not match the page size. */
+static size_t allocGranularity = 0;
+
+/* The number of bits used by addresses on this platform. */
+static size_t numAddressBits = 0;
+
+/* An estimate of the number of bytes available for virtual memory. */
+static size_t virtualMemoryLimit = size_t(-1);
+
+/*
+ * System allocation functions may hand out regions of memory in increasing or
+ * decreasing order. This ordering is used as a hint during chunk alignment to
+ * reduce the number of system calls. On systems with 48-bit addresses, our
+ * workarounds to obtain 47-bit pointers cause addresses to be handed out in
+ * increasing order.
+ *
+ * We do not use the growth direction on Windows, as constraints on VirtualAlloc
+ * would make its application failure prone and complex. Tests indicate that
+ * VirtualAlloc always hands out regions of memory in increasing order.
+ */
+#if defined(XP_DARWIN)
+static mozilla::Atomic<int, mozilla::Relaxed> growthDirection(1);
+#elif defined(XP_UNIX)
+static mozilla::Atomic<int, mozilla::Relaxed> growthDirection(0);
+#endif
+
+/*
+ * Data from OOM crashes shows there may be up to 24 chunk-sized but unusable
+ * chunks available in low memory situations. These chunks may all need to be
+ * used up before we gain access to remaining *alignable* chunk-sized regions,
+ * so we use a generous limit of 32 unusable chunks to ensure we reach them.
+ */
+static const int MaxLastDitchAttempts = 32;
+
+#ifdef JS_64BIT
+/*
+ * On some 64-bit platforms we can use a random, scattershot allocator that
+ * tries addresses from the available range at random. If the address range
+ * is large enough this will have a high chance of success and additionally
+ * makes the memory layout of our process less predictable.
+ *
+ * However, not all 64-bit platforms have a very large address range. For
+ * example, AArch64 on Linux defaults to using 39-bit addresses to limit the
+ * number of translation tables used. On such configurations the scattershot
+ * approach to allocation creates a conflict with our desire to reserve large
+ * regions of memory for applications like WebAssembly: Small allocations may
+ * inadvertently block off all available 4-6GiB regions, and conversely
+ * reserving such regions may lower the success rate for smaller allocations to
+ * unacceptable levels.
+ *
+ * So we make a compromise: Instead of using the scattershot on all 64-bit
+ * platforms, we only use it on platforms that meet a minimum requirement for
+ * the available address range. In addition we split the address range,
+ * reserving the upper half for huge allocations and the lower half for smaller
+ * allocations. We use a limit of 43 bits so that at least 42 bits are available
+ * for huge allocations - this matches the 8TiB per process address space limit
+ * that we're already subject to on Windows.
+ */
+static const size_t MinAddressBitsForRandomAlloc = 43;
+
+/* The lower limit for huge allocations. This is fairly arbitrary. */
+static const size_t HugeAllocationSize = 1024 * 1024 * 1024;
+
+/* The minimum and maximum valid addresses that can be allocated into. */
+static size_t minValidAddress = 0;
+static size_t maxValidAddress = 0;
+
+/* The upper limit for smaller allocations and the lower limit for huge ones. */
+static size_t hugeSplit = 0;
+#endif
+
// Accessors for the process-wide values computed by InitMemorySubsystem().
size_t SystemPageSize() { return pageSize; }

size_t SystemAddressBits() { return numAddressBits; }

size_t VirtualMemoryLimit() { return virtualMemoryLimit; }

// True when the measured address range is wide enough (at least
// MinAddressBitsForRandomAlloc bits) to allocate at random addresses; see
// MapAlignedPagesRandom() below.
bool UsingScattershotAllocator() {
#ifdef JS_64BIT
  return numAddressBits >= MinAddressBitsForRandomAlloc;
#else
  return false;
#endif
}
+
// Whether newly mapped memory should be committed immediately. Windows
// distinguishes reserving address space from committing it; POSIX mappings
// are committed lazily on first touch, so there the flag is effectively moot.
enum class Commit : bool {
  No = false,
  Yes = true,
};

// Page protection constants, normalized to one enum across platforms.
#ifdef XP_WIN
enum class PageAccess : DWORD {
  None = PAGE_NOACCESS,
  Read = PAGE_READONLY,
  ReadWrite = PAGE_READWRITE,
  Execute = PAGE_EXECUTE,
  ReadExecute = PAGE_EXECUTE_READ,
  ReadWriteExecute = PAGE_EXECUTE_READWRITE,
};
#elif defined(__wasi__)
// WASI has no page protection; all values collapse to zero.
enum class PageAccess : int {
  None = 0,
  Read = 0,
  ReadWrite = 0,
  Execute = 0,
  ReadExecute = 0,
  ReadWriteExecute = 0,
};
#else
enum class PageAccess : int {
  None = PROT_NONE,
  Read = PROT_READ,
  ReadWrite = PROT_READ | PROT_WRITE,
  Execute = PROT_EXEC,
  ReadExecute = PROT_READ | PROT_EXEC,
  ReadWriteExecute = PROT_READ | PROT_WRITE | PROT_EXEC,
};
#endif
+
// Forward declarations for the aligned-allocation helpers defined below.
template <bool AlwaysGetNew = true>
static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                            size_t length, size_t alignment);

#ifndef __wasi__
static void* MapAlignedPagesSlow(size_t length, size_t alignment);
#endif // wasi
static void* MapAlignedPagesLastDitch(size_t length, size_t alignment);

#ifdef JS_64BIT
static void* MapAlignedPagesRandom(size_t length, size_t alignment);
#endif
+
// Test-only entry point exposing the last ditch allocator.
void* TestMapAlignedPagesLastDitch(size_t length, size_t alignment) {
  return MapAlignedPagesLastDitch(length, alignment);
}

// Decommit works in units of OS pages, so it is only enabled when the OS page
// size matches the compiled-in GC PageSize constant.
bool DecommitEnabled() { return SystemPageSize() == PageSize; }
+
/*
 * Number of bytes by which |region| sits above the closest address at or
 * below it that is aligned to |alignment|; zero when |region| itself is
 * aligned (or null).
 */
static inline size_t OffsetFromAligned(void* region, size_t alignment) {
  const uintptr_t address = uintptr_t(region);
  return address % alignment;
}
+
// Map |length| bytes at |desired| (or anywhere if |desired| is null) with the
// given commit and protection settings. Returns nullptr on failure. The
// system may return an address other than |desired|; callers that require the
// exact address must check (see MapMemoryAt).
template <Commit commit, PageAccess prot>
static inline void* MapInternal(void* desired, size_t length) {
  void* region = nullptr;
#ifdef XP_WIN
  DWORD flags =
      (commit == Commit::Yes ? MEM_RESERVE | MEM_COMMIT : MEM_RESERVE);
  region = VirtualAlloc(desired, length, flags, DWORD(prot));
#elif defined(__wasi__)
  // WASI has no mmap; emulate with a page-aligned heap allocation.
  if (int err = posix_memalign(&region, gc::SystemPageSize(), length)) {
    MOZ_RELEASE_ASSERT(err == ENOMEM);
    return nullptr;
  }
  if (region) {
    // Match mmap's zero-fill behavior.
    memset(region, 0, length);
  }
#else
  int flags = MAP_PRIVATE | MAP_ANON;
  // Tag the mapping so it is identifiable as "js-gc-heap" in memory reports.
  region = MozTaggedAnonymousMmap(desired, length, int(prot), flags, -1, 0,
                                  "js-gc-heap");
  if (region == MAP_FAILED) {
    return nullptr;
  }
#endif
  return region;
}
+
// Release a region previously obtained from MapInternal. |region| must be
// allocation-granularity aligned and |length| a multiple of the page size.
static inline void UnmapInternal(void* region, size_t length) {
  MOZ_ASSERT(region && OffsetFromAligned(region, allocGranularity) == 0);
  MOZ_ASSERT(length > 0 && length % pageSize == 0);

#ifdef XP_WIN
  // MEM_RELEASE requires a zero length and frees the whole allocation.
  MOZ_RELEASE_ASSERT(VirtualFree(region, 0, MEM_RELEASE) != 0);
#elif defined(__wasi__)
  free(region);
#else
  if (munmap(region, length)) {
    // Per POSIX, ENOMEM is the only failure tolerated here (it can occur
    // when a partial unmap would split an existing mapping).
    MOZ_RELEASE_ASSERT(errno == ENOMEM);
  }
#endif
}
+
/* Maps |length| bytes at an address of the system's choosing. */
template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
static inline void* MapMemory(size_t length) {
  MOZ_ASSERT(length > 0);

  return MapInternal<commit, prot>(nullptr, length);
}

/*
 * Attempts to map memory at the given address, but allows the system
 * to return a different address that may still be suitable.
 */
template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
static inline void* MapMemoryAtFuzzy(void* desired, size_t length) {
  MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
  MOZ_ASSERT(length > 0);

  // Note that some platforms treat the requested address as a hint, so the
  // returned address might not match the requested address.
  return MapInternal<commit, prot>(desired, length);
}

/*
 * Attempts to map memory at the given address, returning nullptr if
 * the system returns any address other than the requested one.
 */
template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
static inline void* MapMemoryAt(void* desired, size_t length) {
  MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
  MOZ_ASSERT(length > 0);

  void* region = MapInternal<commit, prot>(desired, length);
  if (!region) {
    return nullptr;
  }

  // On some platforms mmap treats the desired address as a hint, so
  // check that the address we got is the address we requested.
  if (region != desired) {
    UnmapInternal(region, length);
    return nullptr;
  }
  return region;
}
+
+#ifdef JS_64BIT
+
/* Returns a uniformly distributed random number in [minNum, maxNum]. */
static inline uint64_t GetNumberInRange(uint64_t minNum, uint64_t maxNum) {
  const uint64_t MaxRand = UINT64_C(0xffffffffffffffff);
  maxNum -= minNum;
  // Partition the 64-bit space into equal-sized bins and reject samples from
  // the incomplete final bin, so every value in [0, maxNum] is equally
  // likely (avoids modulo bias).
  uint64_t binSize = 1 + (MaxRand - maxNum) / (maxNum + 1);

  uint64_t rndNum;
  do {
    mozilla::Maybe<uint64_t> result;
    // RandomUint64() can transiently fail; retry until it yields a value.
    do {
      result = mozilla::RandomUint64();
    } while (!result);
    rndNum = result.value() / binSize;
  } while (rndNum > maxNum);

  return minNum + rndNum;
}
+
+# ifndef XP_WIN
static inline uint64_t FindAddressLimitInner(size_t highBit, size_t tries);

/*
 * The address range available to applications depends on both hardware and
 * kernel configuration. For example, AArch64 on Linux uses addresses with
 * 39 significant bits by default, but can be configured to use addresses with
 * 48 significant bits by enabling a 4th translation table. Unfortunately,
 * there appears to be no standard way to query the limit at runtime
 * (Windows exposes this via GetSystemInfo()).
 *
 * This function tries to find the address limit by performing a binary search
 * on the index of the most significant set bit in the addresses it attempts to
 * allocate. As the requested address is often treated as a hint by the
 * operating system, we use the actual returned addresses to narrow the range.
 * We return the number of bits of an address that may be set.
 */
static size_t FindAddressLimit() {
  // Use 32 bits as a lower bound in case we keep getting nullptr.
  uint64_t low = 31;
  uint64_t highestSeen = (UINT64_C(1) << 32) - allocGranularity - 1;

  // Exclude 48-bit and 47-bit addresses first.
  uint64_t high = 47;
  for (; high >= std::max(low, UINT64_C(46)); --high) {
    highestSeen = std::max(FindAddressLimitInner(high, 4), highestSeen);
    low = mozilla::FloorLog2(highestSeen);
  }
  // If those didn't work, perform a modified binary search. Each probe both
  // narrows the range from above (on failure) and raises |low| via the
  // highest address actually observed.
  while (high - 1 > low) {
    uint64_t middle = low + (high - low) / 2;
    highestSeen = std::max(FindAddressLimitInner(middle, 4), highestSeen);
    low = mozilla::FloorLog2(highestSeen);
    if (highestSeen < (UINT64_C(1) << middle)) {
      high = middle;
    }
  }
  // We can be sure of the lower bound, but check the upper bound again.
  do {
    high = low + 1;
    highestSeen = std::max(FindAddressLimitInner(high, 8), highestSeen);
    low = mozilla::FloorLog2(highestSeen);
  } while (low >= high);

  // `low` is the highest set bit, so `low + 1` is the number of bits.
  return low + 1;
}
+
// Attempts up to |tries| small mappings whose most significant set address
// bit is |highBit|, and returns the highest address observed across those
// attempts (0 if every attempt failed).
static inline uint64_t FindAddressLimitInner(size_t highBit, size_t tries) {
  const size_t length = allocGranularity; // Used as both length and alignment.

  uint64_t highestSeen = 0;
  // Probe addresses in [2^highBit, 2^(highBit+1) - length), rounded so each
  // candidate is allocation-granularity aligned.
  uint64_t startRaw = UINT64_C(1) << highBit;
  uint64_t endRaw = 2 * startRaw - length - 1;
  uint64_t start = (startRaw + length - 1) / length;
  uint64_t end = (endRaw - (length - 1)) / length;
  for (size_t i = 0; i < tries; ++i) {
    uint64_t desired = length * GetNumberInRange(start, end);
    void* address = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
    uint64_t actual = uint64_t(address);
    if (address) {
      UnmapInternal(address, length);
    }
    if (actual > highestSeen) {
      highestSeen = actual;
      // A result at or above 2^highBit proves the bit is usable; stop early.
      if (actual >= startRaw) {
        break;
      }
    }
  }
  return highestSeen;
}
+# endif // !defined(XP_WIN)
+
+#endif // defined(JS_64BIT)
+
// One-time initialization of the page size, allocation granularity, address
// range, and virtual memory limit globals above. Idempotent: subsequent calls
// are no-ops once |pageSize| is set.
void InitMemorySubsystem() {
  if (pageSize == 0) {
#ifdef XP_WIN
    SYSTEM_INFO sysinfo;
    GetSystemInfo(&sysinfo);
    pageSize = sysinfo.dwPageSize;
    allocGranularity = sysinfo.dwAllocationGranularity;
#else
    pageSize = size_t(sysconf(_SC_PAGESIZE));
    allocGranularity = pageSize;
#endif
#ifdef JS_64BIT
# ifdef XP_WIN
    minValidAddress = size_t(sysinfo.lpMinimumApplicationAddress);
    maxValidAddress = size_t(sysinfo.lpMaximumApplicationAddress);
    numAddressBits = mozilla::FloorLog2(maxValidAddress) + 1;
# else
    // No standard way to determine these, so fall back to FindAddressLimit().
    numAddressBits = FindAddressLimit();
    minValidAddress = allocGranularity;
    maxValidAddress = (UINT64_C(1) << numAddressBits) - 1 - allocGranularity;
# endif
    // Sanity check the address to ensure we don't use more than 47 bits.
    uint64_t maxJSAddress = UINT64_C(0x00007fffffffffff) - allocGranularity;
    if (maxValidAddress > maxJSAddress) {
      maxValidAddress = maxJSAddress;
      hugeSplit = UINT64_C(0x00003fffffffffff) - allocGranularity;
    } else {
      // Split the usable range in half: below for small allocations, above
      // for huge ones (see MapAlignedPagesRandom).
      hugeSplit = (UINT64_C(1) << (numAddressBits - 1)) - 1 - allocGranularity;
    }
#else // !defined(JS_64BIT)
    numAddressBits = 32;
#endif
#ifdef RLIMIT_AS
    // Record the address-space rlimit so JIT code reservations can stay
    // within it; only relevant when a JIT backend is compiled in.
    if (jit::HasJitBackend()) {
      rlimit as_limit;
      if (getrlimit(RLIMIT_AS, &as_limit) == 0 &&
          as_limit.rlim_max != RLIM_INFINITY) {
        virtualMemoryLimit = as_limit.rlim_max;
      }
    }
#endif
  }
}
+
+#ifdef JS_64BIT
/*
 * The JS engine requires pointers to fit in 47 bits: a region is invalid if
 * its last byte has any of the 17 high address bits set.
 */
static inline bool IsInvalidRegion(void* region, size_t length) {
  const uint64_t highBitsMask = UINT64_C(0xffff800000000000);
  uint64_t lastByte = uintptr_t(region) + length - 1;
  return (lastByte & highBitsMask) != 0;
}
+#endif
+
// Map |length| bytes aligned to |alignment|, trying progressively more
// expensive strategies: scattershot (64-bit only), a plain mapping that may
// be lucky, TryToAlignChunk, over-allocation (MapAlignedPagesSlow), and
// finally the last ditch allocator. Returns nullptr on OOM.
void* MapAlignedPages(size_t length, size_t alignment) {
  MOZ_RELEASE_ASSERT(length > 0 && alignment > 0);
  MOZ_RELEASE_ASSERT(length % pageSize == 0);
  // |alignment| and the allocation granularity must divide one another.
  MOZ_RELEASE_ASSERT(std::max(alignment, allocGranularity) %
                     std::min(alignment, allocGranularity) ==
                     0);

  // Smaller alignments aren't supported by the allocation functions.
  if (alignment < allocGranularity) {
    alignment = allocGranularity;
  }

#ifdef __wasi__
  void* region = nullptr;
  if (int err = posix_memalign(&region, alignment, length)) {
    MOZ_ASSERT(err == ENOMEM);
    return nullptr;
  }
  MOZ_ASSERT(region != nullptr);
  memset(region, 0, length);
  return region;
#else

# ifdef JS_64BIT
  // Use the scattershot allocator if the address range is large enough.
  if (UsingScattershotAllocator()) {
    void* region = MapAlignedPagesRandom(length, alignment);

    MOZ_RELEASE_ASSERT(!IsInvalidRegion(region, length));
    MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);

    return region;
  }
# endif

  // Try to allocate the region. If the returned address is aligned,
  // either we OOMed (region is nullptr) or we're done.
  void* region = MapMemory(length);
  if (OffsetFromAligned(region, alignment) == 0) {
    return region;
  }

  // Try to align the region. On success, TryToAlignChunk() returns
  // true and we can return the aligned region immediately.
  void* retainedRegion;
  if (TryToAlignChunk(&region, &retainedRegion, length, alignment)) {
    MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
    MOZ_ASSERT(!retainedRegion);
    return region;
  }

  // On failure, the unaligned region is retained unless we OOMed. We don't
  // use the retained region on this path (see the last ditch allocator).
  if (retainedRegion) {
    UnmapInternal(retainedRegion, length);
  }

  // If it fails to align the given region, TryToAlignChunk() returns the
  // next valid region that we might be able to align (unless we OOMed).
  if (region) {
    MOZ_ASSERT(OffsetFromAligned(region, alignment) != 0);
    UnmapInternal(region, length);
  }

  // Since we couldn't align the first region, fall back to allocating a
  // region large enough that we can definitely align it.
  region = MapAlignedPagesSlow(length, alignment);
  if (!region) {
    // If there wasn't enough contiguous address space left for that,
    // try to find an alignable region using the last ditch allocator.
    region = MapAlignedPagesLastDitch(length, alignment);
  }

  // At this point we should either have an aligned region or nullptr.
  MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
  return region;
#endif // !__wasi__
}
+
+#ifdef JS_64BIT
+
+/*
+ * This allocator takes advantage of the large address range on some 64-bit
+ * platforms to allocate in a scattershot manner, choosing addresses at random
+ * from the range. By controlling the range we can avoid returning addresses
+ * that have more than 47 significant bits (as required by SpiderMonkey).
+ * This approach also has some other advantages over the methods employed by
+ * the other allocation functions in this file:
+ * 1) Allocations are extremely likely to succeed on the first try.
+ * 2) The randomness makes our memory layout becomes harder to predict.
+ * 3) The low probability of reusing regions guards against use-after-free.
+ *
+ * The main downside is that detecting physical OOM situations becomes more
+ * difficult; to guard against this, we occasionally try a regular allocation.
+ * In addition, sprinkling small allocations throughout the full address range
+ * might get in the way of large address space reservations such as those
+ * employed by WebAssembly. To avoid this (or the opposite problem of such
+ * reservations reducing the chance of success for smaller allocations) we
+ * split the address range in half, with one half reserved for huge allocations
+ * and the other for regular (usually chunk sized) allocations.
+ */
+static void* MapAlignedPagesRandom(size_t length, size_t alignment) {
+ uint64_t minNum, maxNum;
+ if (length < HugeAllocationSize) {
+ // Use the lower half of the range.
+ minNum = (minValidAddress + alignment - 1) / alignment;
+ maxNum = (hugeSplit - (length - 1)) / alignment;
+ } else {
+ // Use the upper half of the range.
+ minNum = (hugeSplit + 1 + alignment - 1) / alignment;
+ maxNum = (maxValidAddress - (length - 1)) / alignment;
+ }
+
+ // Try to allocate in random aligned locations.
+ void* region = nullptr;
+ for (size_t i = 1; i <= 1024; ++i) {
+ if (i & 0xf) {
+ uint64_t desired = alignment * GetNumberInRange(minNum, maxNum);
+ region = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
+ if (!region) {
+ continue;
+ }
+ } else {
+ // Check for OOM.
+ region = MapMemory(length);
+ if (!region) {
+ return nullptr;
+ }
+ }
+ if (IsInvalidRegion(region, length)) {
+ UnmapInternal(region, length);
+ continue;
+ }
+ if (OffsetFromAligned(region, alignment) == 0) {
+ return region;
+ }
+ void* retainedRegion = nullptr;
+ if (TryToAlignChunk<false>(&region, &retainedRegion, length, alignment)) {
+ MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
+ MOZ_ASSERT(!retainedRegion);
+ return region;
+ }
+ MOZ_ASSERT(region && !retainedRegion);
+ UnmapInternal(region, length);
+ }
+
+ if (numAddressBits < 48) {
+ // Try the reliable fallback of overallocating.
+ // Note: This will not respect the address space split.
+ region = MapAlignedPagesSlow(length, alignment);
+ if (region) {
+ return region;
+ }
+ }
+ if (length < HugeAllocationSize) {
+ MOZ_CRASH("Couldn't allocate even after 1000 tries!");
+ }
+
+ return nullptr;
+}
+
+#endif // defined(JS_64BIT)
+
+#ifndef __wasi__
// Over-allocate |length + alignment - pageSize| bytes so an aligned sub-range
// of |length| bytes is guaranteed to exist inside, then trim (POSIX) or
// remap (Windows) down to that sub-range. Returns nullptr on OOM.
static void* MapAlignedPagesSlow(size_t length, size_t alignment) {
  void* alignedRegion = nullptr;
  do {
    size_t reserveLength = length + alignment - pageSize;
# ifdef XP_WIN
    // Don't commit the requested pages as we won't use the region directly.
    void* region = MapMemory<Commit::No>(reserveLength);
# else
    void* region = MapMemory(reserveLength);
# endif
    if (!region) {
      return nullptr;
    }
    alignedRegion =
        reinterpret_cast<void*>(AlignBytes(uintptr_t(region), alignment));
# ifdef XP_WIN
    // Windows requires that map and unmap calls be matched, so deallocate
    // and immediately reallocate at the desired (aligned) address.
    UnmapInternal(region, reserveLength);
    alignedRegion = MapMemoryAt(alignedRegion, length);
# else
    // munmap allows us to simply unmap the pages that don't interest us.
    if (alignedRegion != region) {
      UnmapInternal(region, uintptr_t(alignedRegion) - uintptr_t(region));
    }
    void* regionEnd =
        reinterpret_cast<void*>(uintptr_t(region) + reserveLength);
    void* alignedEnd =
        reinterpret_cast<void*>(uintptr_t(alignedRegion) + length);
    if (alignedEnd != regionEnd) {
      UnmapInternal(alignedEnd, uintptr_t(regionEnd) - uintptr_t(alignedEnd));
    }
# endif
    // On Windows we may have raced with another thread; if so, try again.
  } while (!alignedRegion);

  return alignedRegion;
}
+#endif // wasi
+
/*
 * In a low memory or high fragmentation situation, alignable chunks of the
 * desired length may still be available, even if there are no more contiguous
 * free chunks that meet the |length + alignment - pageSize| requirement of
 * MapAlignedPagesSlow. In this case, try harder to find an alignable chunk
 * by temporarily holding onto the unaligned parts of each chunk until the
 * allocator gives us a chunk that either is, or can be aligned.
 */
static void* MapAlignedPagesLastDitch(size_t length, size_t alignment) {
  // Unaligned regions retained across attempts; unmapped before returning.
  void* tempMaps[MaxLastDitchAttempts];
  int attempt = 0;
  void* region = MapMemory(length);
  // An aligned result here means either success or OOM (nullptr is aligned).
  if (OffsetFromAligned(region, alignment) == 0) {
    return region;
  }
  for (; attempt < MaxLastDitchAttempts; ++attempt) {
    if (TryToAlignChunk(&region, tempMaps + attempt, length, alignment)) {
      MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
      MOZ_ASSERT(!tempMaps[attempt]);
      break; // Success!
    }
    if (!region || !tempMaps[attempt]) {
      break; // We ran out of memory, so give up.
    }
  }
  // If the loop exhausted all attempts, discard the still-unaligned region.
  if (OffsetFromAligned(region, alignment)) {
    UnmapInternal(region, length);
    region = nullptr;
  }
  // Release every retained unaligned chunk.
  while (--attempt >= 0) {
    UnmapInternal(tempMaps[attempt], length);
  }
  return region;
}
+
+#ifdef XP_WIN
+
/*
 * On Windows, map and unmap calls must be matched, so we deallocate the
 * unaligned chunk, then reallocate the unaligned part to block off the
 * old address and force the allocator to give us a new one.
 */
template <bool>
static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                            size_t length, size_t alignment) {
  void* region = *aRegion;
  MOZ_ASSERT(region && OffsetFromAligned(region, alignment) != 0);

  size_t retainedLength = 0;
  void* retainedRegion = nullptr;
  do {
    size_t offset = OffsetFromAligned(region, alignment);
    if (offset == 0) {
      // If the address is aligned, either we hit OOM or we're done.
      break;
    }
    UnmapInternal(region, length);
    // Reserve (without committing) just the unaligned prefix so the next
    // MapMemory call cannot hand the same address back to us.
    retainedLength = alignment - offset;
    retainedRegion = MapMemoryAt<Commit::No>(region, retainedLength);
    region = MapMemory(length);

    // If retainedRegion is null here, we raced with another thread.
  } while (!retainedRegion);

  bool result = OffsetFromAligned(region, alignment) == 0;
  if (result && retainedRegion) {
    // Success: the blocking reservation is no longer needed.
    UnmapInternal(retainedRegion, retainedLength);
    retainedRegion = nullptr;
  }

  *aRegion = region;
  *aRetainedRegion = retainedRegion;
  return region && result;
}
+
+#else // !defined(XP_WIN)
+
/*
 * mmap calls don't have to be matched with calls to munmap, so we can unmap
 * just the pages we don't need. However, as we don't know a priori if addresses
 * are handed out in increasing or decreasing order, we have to try both
 * directions (depending on the environment, one will always fail).
 */
template <bool AlwaysGetNew>
static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                            size_t length, size_t alignment) {
  void* regionStart = *aRegion;
  MOZ_ASSERT(regionStart && OffsetFromAligned(regionStart, alignment) != 0);

  bool addressesGrowUpward = growthDirection > 0;
  // |growthDirection| is a saturating-ish confidence counter; within [-8, 8]
  // we still consider the direction uncertain and will try both.
  bool directionUncertain = -8 < growthDirection && growthDirection <= 8;
  size_t offsetLower = OffsetFromAligned(regionStart, alignment);
  size_t offsetUpper = alignment - offsetLower;
  for (size_t i = 0; i < 2; ++i) {
    if (addressesGrowUpward) {
      // Extend the region upward past its end, then trim the unaligned
      // prefix so the aligned address inside becomes the start.
      void* upperStart =
          reinterpret_cast<void*>(uintptr_t(regionStart) + offsetUpper);
      void* regionEnd =
          reinterpret_cast<void*>(uintptr_t(regionStart) + length);
      if (MapMemoryAt(regionEnd, offsetUpper)) {
        UnmapInternal(regionStart, offsetUpper);
        if (directionUncertain) {
          ++growthDirection;
        }
        regionStart = upperStart;
        break;
      }
    } else {
      // Extend the region downward before its start, then trim the excess
      // pages past the new end.
      auto* lowerStart =
          reinterpret_cast<void*>(uintptr_t(regionStart) - offsetLower);
      auto* lowerEnd = reinterpret_cast<void*>(uintptr_t(lowerStart) + length);
      if (MapMemoryAt(lowerStart, offsetLower)) {
        UnmapInternal(lowerEnd, offsetLower);
        if (directionUncertain) {
          --growthDirection;
        }
        regionStart = lowerStart;
        break;
      }
    }
    // If we're confident in the growth direction, don't try the other.
    if (!directionUncertain) {
      break;
    }
    addressesGrowUpward = !addressesGrowUpward;
  }

  void* retainedRegion = nullptr;
  bool result = OffsetFromAligned(regionStart, alignment) == 0;
  if (AlwaysGetNew && !result) {
    // If our current chunk cannot be aligned, just get a new one.
    retainedRegion = regionStart;
    regionStart = MapMemory(length);
    // Our new region might happen to already be aligned.
    result = OffsetFromAligned(regionStart, alignment) == 0;
    if (result) {
      UnmapInternal(retainedRegion, length);
      retainedRegion = nullptr;
    }
  }

  *aRegion = regionStart;
  *aRetainedRegion = retainedRegion;
  return regionStart && result;
}
+
+#endif
+
// Public counterpart to MapAlignedPages: unmap a previously mapped region.
void UnmapPages(void* region, size_t length) {
  MOZ_RELEASE_ASSERT(region &&
                     OffsetFromAligned(region, allocGranularity) == 0);
  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);

  // ASan does not automatically unpoison memory, so we have to do this here.
  MOZ_MAKE_MEM_UNDEFINED(region, length);

  UnmapInternal(region, length);
}
+
// Shared precondition checks for the MarkPages* functions below: the region
// must be non-null, page-aligned, and a whole number of pages long.
static void CheckDecommit(void* region, size_t length) {
  MOZ_RELEASE_ASSERT(region);
  MOZ_RELEASE_ASSERT(length > 0);

  // pageSize == ArenaSize doesn't necessarily hold, but this function is
  // used by the GC to decommit unused Arenas, so we don't want to assert
  // if pageSize > ArenaSize.
  MOZ_ASSERT(OffsetFromAligned(region, ArenaSize) == 0);
  MOZ_ASSERT(length % ArenaSize == 0);

  MOZ_RELEASE_ASSERT(OffsetFromAligned(region, pageSize) == 0);
  MOZ_RELEASE_ASSERT(length % pageSize == 0);
}
+
// Advise the OS that |region| is unused so it need not be written to the
// paging file; the memory stays mapped. Returns false if the advice failed.
bool MarkPagesUnusedSoft(void* region, size_t length) {
  MOZ_ASSERT(DecommitEnabled());
  CheckDecommit(region, length);

  MOZ_MAKE_MEM_NOACCESS(region, length);

#if defined(XP_WIN)
  return VirtualAlloc(region, length, MEM_RESET,
                      DWORD(PageAccess::ReadWrite)) == region;
#elif defined(__wasi__)
  // NOTE(review): returns false (0) here -- WASI has no decommit support;
  // confirm callers treat this as "nothing was done" rather than an error.
  return 0;
#else
  int status;
  do {
# if defined(XP_DARWIN)
    status = madvise(region, length, MADV_FREE_REUSABLE);
# elif defined(XP_SOLARIS)
    status = posix_madvise(region, length, POSIX_MADV_DONTNEED);
# else
    status = madvise(region, length, MADV_DONTNEED);
# endif
    // Retry on transient EAGAIN.
  } while (status == -1 && errno == EAGAIN);
  return status == 0;
#endif
}

// Decommit the pages outright on Windows; elsewhere fall back to the soft
// advisory path above. No-op (returning true) when decommit is disabled.
bool MarkPagesUnusedHard(void* region, size_t length) {
  CheckDecommit(region, length);

  MOZ_MAKE_MEM_NOACCESS(region, length);

  if (!DecommitEnabled()) {
    return true;
  }

#if defined(XP_WIN)
  return VirtualFree(region, length, MEM_DECOMMIT);
#else
  return MarkPagesUnusedSoft(region, length);
#endif
}
+
// Undo MarkPagesUnusedSoft: tell the OS the pages are in use again. Only
// Darwin needs an explicit syscall (to reverse MADV_FREE_REUSABLE).
void MarkPagesInUseSoft(void* region, size_t length) {
  MOZ_ASSERT(DecommitEnabled());
  CheckDecommit(region, length);

#if defined(XP_DARWIN)
  // Retry on transient EAGAIN.
  while (madvise(region, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) {
  }
#endif

  MOZ_MAKE_MEM_UNDEFINED(region, length);
}

// Undo MarkPagesUnusedHard by recommitting the pages (Windows only; on other
// platforms the pages were never decommitted). Returns false on OOM, and may
// also fail via the simulated-OOM hook for testing.
bool MarkPagesInUseHard(void* region, size_t length) {
  if (js::oom::ShouldFailWithOOM()) {
    return false;
  }

  CheckDecommit(region, length);

  MOZ_MAKE_MEM_UNDEFINED(region, length);

  if (!DecommitEnabled()) {
    return true;
  }

#if defined(XP_WIN)
  return VirtualAlloc(region, length, MEM_COMMIT,
                      DWORD(PageAccess::ReadWrite)) == region;
#else
  return true;
#endif
}
+
// Page fault count for this process: PageFaultCount on Windows, ru_majflt
// (major faults only) on POSIX. Returns 0 on query failure or on WASI.
size_t GetPageFaultCount() {
#ifdef XP_WIN
  PROCESS_MEMORY_COUNTERS pmc;
  if (GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)) == 0) {
    return 0;
  }
  return pmc.PageFaultCount;
#elif defined(__wasi__)
  return 0;
#else
  struct rusage usage;
  int err = getrusage(RUSAGE_SELF, &usage);
  if (err) {
    return 0;
  }
  return usage.ru_majflt;
#endif
}
+
// Map |length| bytes of file |fd| starting at |offset| copy-on-write, with
// the mapping aligned to |alignment|. Returns a pointer to the requested
// offset within the mapping (which may be past the mapping's aligned start),
// or nullptr on failure. Must be paired with DeallocateMappedContent.
void* AllocateMappedContent(int fd, size_t offset, size_t length,
                            size_t alignment) {
#ifdef __wasi__
  MOZ_CRASH("Not yet supported for WASI");
#else
  // |offset| must be alignment-aligned, and |alignment| and the allocation
  // granularity must divide one another.
  if (length == 0 || alignment == 0 || offset % alignment != 0 ||
      std::max(alignment, allocGranularity) %
              std::min(alignment, allocGranularity) !=
          0) {
    return nullptr;
  }

  // File mappings must start at an allocation-granularity boundary, so map
  // from the preceding boundary and remember the extra leading bytes.
  size_t alignedOffset = offset - (offset % allocGranularity);
  size_t alignedLength = length + (offset % allocGranularity);

  // We preallocate the mapping using MapAlignedPages, which expects
  // the length parameter to be an integer multiple of the page size.
  size_t mappedLength = alignedLength;
  if (alignedLength % pageSize != 0) {
    mappedLength += pageSize - alignedLength % pageSize;
  }

# ifdef XP_WIN
  HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));

  // This call will fail if the file does not exist.
  HANDLE hMap =
      CreateFileMappingW(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr);
  if (!hMap) {
    return nullptr;
  }

  DWORD offsetH = uint32_t(uint64_t(alignedOffset) >> 32);
  DWORD offsetL = uint32_t(alignedOffset);

  uint8_t* map = nullptr;
  for (;;) {
    // The value of a pointer is technically only defined while the region
    // it points to is allocated, so explicitly treat this one as a number.
    uintptr_t region = uintptr_t(MapAlignedPages(mappedLength, alignment));
    if (region == 0) {
      break;
    }
    // Release the placeholder, then immediately try to place the file view
    // at the same (suitably aligned) address.
    UnmapInternal(reinterpret_cast<void*>(region), mappedLength);
    // If the offset or length are out of bounds, this call will fail.
    map = static_cast<uint8_t*>(
        MapViewOfFileEx(hMap, FILE_MAP_COPY, offsetH, offsetL, alignedLength,
                        reinterpret_cast<void*>(region)));

    // Retry if another thread mapped the address we were trying to use.
    if (map || GetLastError() != ERROR_INVALID_ADDRESS) {
      break;
    }
  }

  // This just decreases the file mapping object's internal reference count;
  // it won't actually be destroyed until we unmap the associated view.
  CloseHandle(hMap);

  if (!map) {
    return nullptr;
  }
# else // !defined(XP_WIN)
  // Sanity check the offset and length, as mmap does not do this for us.
  struct stat st;
  if (fstat(fd, &st) || offset >= uint64_t(st.st_size) ||
      length > uint64_t(st.st_size) - offset) {
    return nullptr;
  }

  void* region = MapAlignedPages(mappedLength, alignment);
  if (!region) {
    return nullptr;
  }

  // Calling mmap with MAP_FIXED will replace the previous mapping, allowing
  // us to reuse the region we obtained without racing with other threads.
  uint8_t* map =
      static_cast<uint8_t*>(mmap(region, alignedLength, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_FIXED, fd, alignedOffset));
  if (map == MAP_FAILED) {
    UnmapInternal(region, mappedLength);
    return nullptr;
  }
# endif

# ifdef DEBUG
  // Zero out data before and after the desired mapping to catch errors early.
  if (offset != alignedOffset) {
    memset(map, 0, offset - alignedOffset);
  }
  if (alignedLength % pageSize) {
    memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
  }
# endif

  return map + (offset - alignedOffset);
#endif // __wasi__
}
+
// Release a mapping created by AllocateMappedContent. |region| is the pointer
// that function returned, which may be offset from the mapping's start.
void DeallocateMappedContent(void* region, size_t length) {
#ifdef __wasi__
  MOZ_CRASH("Not yet supported for WASI");
#else
  if (!region) {
    return;
  }

  // Due to bug 1502562, the following assertion does not currently hold.
  // MOZ_RELEASE_ASSERT(length > 0);

  // Calculate the address originally returned by the system call.
  // This is needed because AllocateMappedContent returns a pointer
  // that might be offset from the mapping, as the beginning of a
  // mapping must be aligned with the allocation granularity.
  uintptr_t map = uintptr_t(region) - (uintptr_t(region) % allocGranularity);
# ifdef XP_WIN
  MOZ_RELEASE_ASSERT(UnmapViewOfFile(reinterpret_cast<void*>(map)) != 0);
# else
  size_t alignedLength = length + (uintptr_t(region) % allocGranularity);
  if (munmap(reinterpret_cast<void*>(map), alignedLength)) {
    // ENOMEM is the only tolerated munmap failure (see UnmapInternal).
    MOZ_RELEASE_ASSERT(errno == ENOMEM);
  }
# endif
#endif // __wasi__
}
+
// Change the protection of a page-aligned region, crashing on failure.
// No-op on WASI, which has no page protection.
static inline void ProtectMemory(void* region, size_t length, PageAccess prot) {
  MOZ_RELEASE_ASSERT(region && OffsetFromAligned(region, pageSize) == 0);
  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
#ifdef XP_WIN
  DWORD oldProtect;
  MOZ_RELEASE_ASSERT(VirtualProtect(region, length, DWORD(prot), &oldProtect) !=
                     0);
#elif defined(__wasi__)
  /* nothing */
#else
  MOZ_RELEASE_ASSERT(mprotect(region, length, int(prot)) == 0);
#endif
}

// Make the region inaccessible (any access faults).
void ProtectPages(void* region, size_t length) {
  ProtectMemory(region, length, PageAccess::None);
}

// Make the region read-only (writes fault).
void MakePagesReadOnly(void* region, size_t length) {
  ProtectMemory(region, length, PageAccess::Read);
}

// Restore read/write access to the region.
void UnprotectPages(void* region, size_t length) {
  ProtectMemory(region, length, PageAccess::ReadWrite);
}
+
+} // namespace gc
+} // namespace js
diff --git a/js/src/gc/Memory.h b/js/src/gc/Memory.h
new file mode 100644
index 0000000000..e49a10f04b
--- /dev/null
+++ b/js/src/gc/Memory.h
@@ -0,0 +1,84 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Memory_h
+#define gc_Memory_h
+
+#include <stddef.h>
+
+namespace js {
+namespace gc {
+
+// Sanity check that our compiled configuration matches the currently
+// running instance and initialize any runtime data needed for allocation.
+void InitMemorySubsystem();
+
+// The page size as reported by the operating system.
+size_t SystemPageSize();
+
+// The number of bits that may be set in a valid address, as
+// reported by the operating system or measured at startup.
+size_t SystemAddressBits();
+
+// The number of bytes of virtual memory that may be allocated or mapped, as
+// reported by the operating system on certain platforms. If no limit was able
+// to be determined, then it will be size_t(-1).
+size_t VirtualMemoryLimit();
+
+// The scattershot allocator is used on platforms that have a large address
+// range. On these platforms we allocate at random addresses.
+bool UsingScattershotAllocator();
+
+// Allocate or deallocate pages from the system with the given alignment.
+// Pages will be read/write-able.
+void* MapAlignedPages(size_t length, size_t alignment);
+void UnmapPages(void* region, size_t length);
+
+// We can only decommit unused pages if the page size is less than or equal to
+// the hardcoded Arena size for the running process.
+bool DecommitEnabled();
+
+// Tell the OS that the given pages are not in use, so they should not be
+// written to a paging file. This may be a no-op on some platforms.
+bool MarkPagesUnusedSoft(void* region, size_t length);
+
+// Tell the OS that the given pages are not in use and it can decommit them
+// immediately. This may defer to MarkPagesUnusedSoft and must be paired with
+// MarkPagesInUse to use the pages again.
+bool MarkPagesUnusedHard(void* region, size_t length);
+
+// Undo |MarkPagesUnusedSoft|: tell the OS that the given pages are of interest
+// and should be paged in and out normally. This may be a no-op on some
+// platforms. May make pages read/write-able.
+void MarkPagesInUseSoft(void* region, size_t length);
+
+// Undo |MarkPagesUnusedHard|: tell the OS that the given pages are of interest
+// and should be paged in and out normally. This may be a no-op on some
+// platforms. Callers must check the result, false could mean that the pages
+// are not available. May make pages read/write.
+[[nodiscard]] bool MarkPagesInUseHard(void* region, size_t length);
+
+// Returns #(hard faults) + #(soft faults)
+size_t GetPageFaultCount();
+
+// Allocate memory mapped content.
+// The offset must be aligned according to alignment requirement.
+void* AllocateMappedContent(int fd, size_t offset, size_t length,
+ size_t alignment);
+
+// Deallocate memory mapped content.
+void DeallocateMappedContent(void* region, size_t length);
+
+void* TestMapAlignedPagesLastDitch(size_t size, size_t alignment);
+
+void ProtectPages(void* region, size_t length);
+void MakePagesReadOnly(void* region, size_t length);
+void UnprotectPages(void* region, size_t length);
+
+} // namespace gc
+} // namespace js
+
+#endif /* gc_Memory_h */
diff --git a/js/src/gc/Nursery-inl.h b/js/src/gc/Nursery-inl.h
new file mode 100644
index 0000000000..48f25a536e
--- /dev/null
+++ b/js/src/gc/Nursery-inl.h
@@ -0,0 +1,172 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=4 sw=2 et tw=80 ft=cpp:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Nursery_inl_h
+#define gc_Nursery_inl_h
+
+#include "gc/Nursery.h"
+
+#include "gc/RelocationOverlay.h"
+#include "js/TracingAPI.h"
+#include "vm/JSContext.h"
+#include "vm/Runtime.h"
+
+#include "vm/JSContext-inl.h"
+
// Return the runtime that owns this nursery, via its GCRuntime.
inline JSRuntime* js::Nursery::runtime() const { return gc->rt; }

// SharedMem overload: unwrap the raw pointer and test it for nursery
// membership.
template <typename T>
bool js::Nursery::isInside(const SharedMem<T>& p) const {
  return isInside(p.unwrap(/*safe - used for value in comparison above*/));
}

// If the nursery cell |*ref| has been moved during collection, update |*ref|
// to its new location and return true; otherwise return false. |*ref| must
// point into the nursery.
MOZ_ALWAYS_INLINE /* static */ bool js::Nursery::getForwardedPointer(
    js::gc::Cell** ref) {
  js::gc::Cell* cell = (*ref);
  MOZ_ASSERT(IsInsideNursery(cell));
  if (!cell->isForwarded()) {
    return false;
  }
  // A forwarded cell's storage has been overwritten by a RelocationOverlay
  // that records the cell's new address.
  const gc::RelocationOverlay* overlay = gc::RelocationOverlay::fromCell(cell);
  *ref = overlay->forwardingAddress();
  return true;
}
+
// Record a buffer move, but only while tenuring: other tracers do not move
// nursery data and need no forwarding information.
inline void js::Nursery::maybeSetForwardingPointer(JSTracer* trc, void* oldData,
                                                   void* newData, bool direct) {
  if (trc->isTenuringTracer()) {
    setForwardingPointerWhileTenuring(oldData, newData, direct);
  }
}

// Record a forwarding pointer for a moved buffer, but only if the old buffer
// actually lived in the nursery (heap-allocated buffers are not moved).
inline void js::Nursery::setForwardingPointerWhileTenuring(void* oldData,
                                                           void* newData,
                                                           bool direct) {
  if (isInside(oldData)) {
    setForwardingPointer(oldData, newData, direct);
  }
}

inline void js::Nursery::setSlotsForwardingPointer(HeapSlot* oldSlots,
                                                   HeapSlot* newSlots,
                                                   uint32_t nslots) {
  // Slot arrays always have enough space for a forwarding pointer, since the
  // number of slots is never zero.
  MOZ_ASSERT(nslots > 0);
  setDirectForwardingPointer(oldSlots, newSlots);
}

inline void js::Nursery::setElementsForwardingPointer(ObjectElements* oldHeader,
                                                      ObjectElements* newHeader,
                                                      uint32_t capacity) {
  // Only use a direct forwarding pointer if there is enough space for one.
  setForwardingPointer(oldHeader->elements(), newHeader->elements(),
                       capacity > 0);
}

// Dispatch to either the direct (in-place overwrite) or indirect (side-table)
// forwarding mechanism.
inline void js::Nursery::setForwardingPointer(void* oldData, void* newData,
                                              bool direct) {
  if (direct) {
    setDirectForwardingPointer(oldData, newData);
    return;
  }

  setIndirectForwardingPointer(oldData, newData);
}

// Store the new location directly in the old buffer's memory by constructing
// a BufferRelocationOverlay over it. The old buffer must be in the nursery
// and the new one outside it.
inline void js::Nursery::setDirectForwardingPointer(void* oldData,
                                                    void* newData) {
  MOZ_ASSERT(isInside(oldData));
  MOZ_ASSERT(!isInside(newData));

  new (oldData) BufferRelocationOverlay{newData};
}
+
+namespace js {
+
// The allocation methods below will not run the garbage collector. If the
// nursery cannot accommodate the allocation, the malloc heap will be used
// instead.
+
// Allocate a |count|-element buffer of T for an object that is not yet
// created, rounding the size up to Value alignment. Falls back to the zone's
// malloc heap (inside allocateBuffer) when the nursery cannot satisfy the
// request. Reports OOM and returns null on failure.
template <typename T>
static inline T* AllocateObjectBuffer(JSContext* cx, uint32_t count) {
  size_t nbytes = RoundUp(count * sizeof(T), sizeof(Value));
  auto* buffer =
      static_cast<T*>(cx->nursery().allocateBuffer(cx->zone(), nbytes));
  if (!buffer) {
    ReportOutOfMemory(cx);
  }
  return buffer;
}

// As above, but for an existing object |obj|; the nursery routes tenured
// owners straight to malloc.
template <typename T>
static inline T* AllocateObjectBuffer(JSContext* cx, JSObject* obj,
                                      uint32_t count) {
  MOZ_ASSERT(cx->isMainThreadContext());

  size_t nbytes = RoundUp(count * sizeof(T), sizeof(Value));
  auto* buffer =
      static_cast<T*>(cx->nursery().allocateBuffer(cx->zone(), obj, nbytes));
  if (!buffer) {
    ReportOutOfMemory(cx);
  }
  return buffer;
}

// Grow or shrink |obj|'s buffer from |oldCount| to |newCount| elements.
// If this returns null then the old buffer will be left alone.
template <typename T>
static inline T* ReallocateObjectBuffer(JSContext* cx, JSObject* obj,
                                        T* oldBuffer, uint32_t oldCount,
                                        uint32_t newCount) {
  MOZ_ASSERT(cx->isMainThreadContext());

  T* buffer = static_cast<T*>(cx->nursery().reallocateBuffer(
      obj->zone(), obj, oldBuffer, oldCount * sizeof(T), newCount * sizeof(T)));
  if (!buffer) {
    ReportOutOfMemory(cx);
  }

  return buffer;
}
+
// Allocate a digit buffer of |length| digits for BigInt |bi|, rounded up to
// Value alignment. Reports OOM and returns null on failure.
static inline JS::BigInt::Digit* AllocateBigIntDigits(JSContext* cx,
                                                      JS::BigInt* bi,
                                                      uint32_t length) {
  MOZ_ASSERT(cx->isMainThreadContext());

  size_t nbytes = RoundUp(length * sizeof(JS::BigInt::Digit), sizeof(Value));
  auto* digits =
      static_cast<JS::BigInt::Digit*>(cx->nursery().allocateBuffer(bi, nbytes));
  if (!digits) {
    ReportOutOfMemory(cx);
  }

  return digits;
}

// Resize |bi|'s digit buffer from |oldLength| to |newLength| digits. Both
// sizes are rounded up to Value alignment before delegating to the nursery.
// Reports OOM and returns null on failure (old digits are left alone).
static inline JS::BigInt::Digit* ReallocateBigIntDigits(
    JSContext* cx, JS::BigInt* bi, JS::BigInt::Digit* oldDigits,
    uint32_t oldLength, uint32_t newLength) {
  MOZ_ASSERT(cx->isMainThreadContext());

  size_t oldBytes =
      RoundUp(oldLength * sizeof(JS::BigInt::Digit), sizeof(Value));
  size_t newBytes =
      RoundUp(newLength * sizeof(JS::BigInt::Digit), sizeof(Value));

  auto* digits = static_cast<JS::BigInt::Digit*>(cx->nursery().reallocateBuffer(
      bi->zone(), bi, oldDigits, oldBytes, newBytes));
  if (!digits) {
    ReportOutOfMemory(cx);
  }

  return digits;
}
+
+} // namespace js
+
+#endif /* gc_Nursery_inl_h */
diff --git a/js/src/gc/Nursery.cpp b/js/src/gc/Nursery.cpp
new file mode 100644
index 0000000000..17831b7f97
--- /dev/null
+++ b/js/src/gc/Nursery.cpp
@@ -0,0 +1,2083 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sw=2 et tw=80:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Nursery-inl.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/Sprintf.h"
+#include "mozilla/TimeStamp.h"
+
+#include <algorithm>
+#include <cmath>
+#include <utility>
+
+#include "builtin/MapObject.h"
+#include "debugger/DebugAPI.h"
+#include "gc/GCInternals.h"
+#include "gc/GCLock.h"
+#include "gc/GCParallelTask.h"
+#include "gc/GCProbes.h"
+#include "gc/Memory.h"
+#include "gc/PublicIterators.h"
+#include "gc/Tenuring.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRealm.h"
+#include "js/Printer.h"
+#include "util/DifferentialTesting.h"
+#include "util/GetPidProvider.h" // getpid()
+#include "util/Poison.h"
+#include "vm/JSONPrinter.h"
+#include "vm/Realm.h"
+#include "vm/Time.h"
+
+#include "gc/Heap-inl.h"
+#include "gc/Marking-inl.h"
+#include "gc/StableCellHasher-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::DebugOnly;
+using mozilla::PodCopy;
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
+namespace js {
+
// A nursery chunk: a ChunkBase header followed by raw allocation space. It
// occupies exactly gc::ChunkSize bytes (checked below) so that chunk memory
// can be recycled between the nursery and the tenured heap.
struct NurseryChunk : public ChunkBase {
  char data[Nursery::NurseryChunkUsableSize];

  // Reinterpret a tenured chunk's memory as a nursery chunk (same address).
  static NurseryChunk* fromChunk(gc::TenuredChunk* chunk);

  explicit NurseryChunk(JSRuntime* runtime)
      : ChunkBase(runtime, &runtime->gc.storeBuffer()) {}

  void poisonAndInit(JSRuntime* rt, size_t size = ChunkSize);
  void poisonRange(size_t from, size_t size, uint8_t value,
                   MemCheckKind checkKind);
  void poisonAfterEvict(size_t extent = ChunkSize);

  // Mark pages from startOffset to the end of the chunk as unused. The start
  // offset must be after the first page, which contains the chunk header and is
  // not marked as unused.
  void markPagesUnusedHard(size_t startOffset);

  // Mark pages from the second page of the chunk to endOffset as in use,
  // following a call to markPagesUnusedHard.
  [[nodiscard]] bool markPagesInUseHard(size_t endOffset);

  // First usable address (start of |data|) and one-past-the-end address.
  uintptr_t start() const { return uintptr_t(&data); }
  uintptr_t end() const { return uintptr_t(this) + ChunkSize; }
};
static_assert(sizeof(js::NurseryChunk) == gc::ChunkSize,
              "Nursery chunk size must match gc::Chunk size.");
+
// Background task that returns nursery memory to the system: whole chunks are
// recycled into the GC chunk pool, and the unused tail of one partial chunk
// can have its pages decommitted.
class NurseryDecommitTask : public GCParallelTask {
 public:
  explicit NurseryDecommitTask(gc::GCRuntime* gc);

  // Pre-reserve vector capacity so queueChunk can append infallibly.
  bool reserveSpaceForBytes(size_t nbytes);

  bool isEmpty(const AutoLockHelperThreadState& lock) const;

  void queueChunk(NurseryChunk* chunk, const AutoLockHelperThreadState& lock);
  void queueRange(size_t newCapacity, NurseryChunk& chunk,
                  const AutoLockHelperThreadState& lock);

 private:
  using NurseryChunkVector = Vector<NurseryChunk*, 0, SystemAllocPolicy>;

  void run(AutoLockHelperThreadState& lock) override;

  NurseryChunkVector& chunksToDecommit() { return chunksToDecommit_.ref(); }
  const NurseryChunkVector& chunksToDecommit() const {
    return chunksToDecommit_.ref();
  }

  // Whole chunks queued for recycling.
  MainThreadOrGCTaskData<NurseryChunkVector> chunksToDecommit_;

  // At most one chunk whose pages beyond |partialCapacity| are decommitted.
  MainThreadOrGCTaskData<NurseryChunk*> partialChunk;
  MainThreadOrGCTaskData<size_t> partialCapacity;
};
+
+} // namespace js
+
// Poison the first |size| bytes of the chunk and (re)construct the header in
// place. |size| must cover at least the header.
inline void js::NurseryChunk::poisonAndInit(JSRuntime* rt, size_t size) {
  MOZ_ASSERT(size >= sizeof(ChunkBase));
  MOZ_ASSERT(size <= ChunkSize);
  poisonRange(0, size, JS_FRESH_NURSERY_PATTERN, MemCheckKind::MakeUndefined);
  new (this) NurseryChunk(rt);
}

// Fill [from, from + size) of the chunk with |value| and update memory
// sanitizer state per |checkKind|.
inline void js::NurseryChunk::poisonRange(size_t from, size_t size,
                                          uint8_t value,
                                          MemCheckKind checkKind) {
  MOZ_ASSERT(from + size <= ChunkSize);

  auto* start = reinterpret_cast<uint8_t*>(this) + from;

  // We can poison the same chunk more than once, so first make sure memory
  // sanitizers will let us poison it.
  MOZ_MAKE_MEM_UNDEFINED(start, size);
  Poison(start, value, size, checkKind);
}

// Poison the used part of the chunk (after the header) once its contents have
// been evicted, marking it no-access for sanitizers.
inline void js::NurseryChunk::poisonAfterEvict(size_t extent) {
  MOZ_ASSERT(extent <= ChunkSize);
  poisonRange(sizeof(ChunkBase), extent - sizeof(ChunkBase),
              JS_SWEPT_NURSERY_PATTERN, MemCheckKind::MakeNoAccess);
}

// Decommit pages from |startOffset| to the end of the chunk; the header page
// is never touched.
inline void js::NurseryChunk::markPagesUnusedHard(size_t startOffset) {
  MOZ_ASSERT(startOffset >= sizeof(ChunkBase));  // Don't touch the header.
  MOZ_ASSERT(startOffset >= SystemPageSize());
  MOZ_ASSERT(startOffset <= ChunkSize);
  uintptr_t start = uintptr_t(this) + startOffset;
  size_t length = ChunkSize - startOffset;
  MarkPagesUnusedHard(reinterpret_cast<void*>(start), length);
}

// Recommit pages from the second page up to |endOffset|; returns false if the
// pages could not be made available again.
inline bool js::NurseryChunk::markPagesInUseHard(size_t endOffset) {
  MOZ_ASSERT(endOffset >= sizeof(ChunkBase));
  MOZ_ASSERT(endOffset >= SystemPageSize());
  MOZ_ASSERT(endOffset <= ChunkSize);
  uintptr_t start = uintptr_t(this) + SystemPageSize();
  size_t length = endOffset - SystemPageSize();
  return MarkPagesInUseHard(reinterpret_cast<void*>(start), length);
}

// static
inline js::NurseryChunk* js::NurseryChunk::fromChunk(TenuredChunk* chunk) {
  return reinterpret_cast<NurseryChunk*>(chunk);
}
+
js::NurseryDecommitTask::NurseryDecommitTask(gc::GCRuntime* gc)
    : GCParallelTask(gc, gcstats::PhaseKind::NONE) {
  // This can occur outside GCs so doesn't have a stats phase.
}

// True when nothing is queued: no whole chunks and no partial range.
bool js::NurseryDecommitTask::isEmpty(
    const AutoLockHelperThreadState& lock) const {
  return chunksToDecommit().empty() && !partialChunk;
}

bool js::NurseryDecommitTask::reserveSpaceForBytes(size_t nbytes) {
  MOZ_ASSERT(isIdle());
  size_t nchunks = HowMany(nbytes, ChunkSize);
  return chunksToDecommit().reserve(nchunks);
}

void js::NurseryDecommitTask::queueChunk(
    NurseryChunk* chunk, const AutoLockHelperThreadState& lock) {
  MOZ_ASSERT(isIdle(lock));
  // Infallible: capacity was reserved up front by reserveSpaceForBytes.
  MOZ_ALWAYS_TRUE(chunksToDecommit().append(chunk));
}

// Queue the tail of |newChunk| beyond |newCapacity| for decommit. Only one
// partial chunk may be queued at a time.
void js::NurseryDecommitTask::queueRange(
    size_t newCapacity, NurseryChunk& newChunk,
    const AutoLockHelperThreadState& lock) {
  MOZ_ASSERT(isIdle(lock));
  MOZ_ASSERT(!partialChunk);
  MOZ_ASSERT(newCapacity < ChunkSize);
  MOZ_ASSERT(newCapacity % SystemPageSize() == 0);

  partialChunk = &newChunk;
  partialCapacity = newCapacity;
}

// Task body: recycle queued whole chunks into the GC chunk pool, then
// decommit the tail of the partial chunk if one was queued. The helper-thread
// lock is dropped around the system calls.
void js::NurseryDecommitTask::run(AutoLockHelperThreadState& lock) {
  while (!chunksToDecommit().empty()) {
    NurseryChunk* nurseryChunk = chunksToDecommit().popCopy();
    AutoUnlockHelperThreadState unlock(lock);
    nurseryChunk->~NurseryChunk();
    TenuredChunk* tenuredChunk = TenuredChunk::emplace(
        nurseryChunk, gc, /* allMemoryCommitted = */ false);
    // Note: this AutoLockGC deliberately shadows the helper-thread |lock|
    // parameter for the recycleChunk call.
    AutoLockGC lock(gc);
    gc->recycleChunk(tenuredChunk, lock);
  }

  if (partialChunk) {
    {
      AutoUnlockHelperThreadState unlock(lock);
      partialChunk->markPagesUnusedHard(partialCapacity);
    }
    partialChunk = nullptr;
    partialCapacity = 0;
  }
}
+
// Construct a disabled, zero-capacity nursery. Actual memory is allocated
// later by init()/initFirstChunk(). The MOZ_NURSERY_STRINGS and
// MOZ_NURSERY_BIGINTS environment variables ('0'/'1') can override whether
// strings and BigInts may be nursery-allocated.
js::Nursery::Nursery(GCRuntime* gc)
    : position_(0),
      currentEnd_(0),
      gc(gc),
      currentChunk_(0),
      currentStartChunk_(0),
      currentStartPosition_(0),
      capacity_(0),
      timeInChunkAlloc_(0),
      enableProfiling_(false),
      profileThreshold_(0),
      canAllocateStrings_(true),
      canAllocateBigInts_(true),
      reportDeduplications_(false),
      reportPretenuring_(false),
      reportPretenuringThreshold_(0),
      minorGCTriggerReason_(JS::GCReason::NO_REASON),
      hasRecentGrowthData(false),
      smoothedTargetSize(0.0) {
  const char* env = getenv("MOZ_NURSERY_STRINGS");
  if (env && *env) {
    canAllocateStrings_ = (*env == '1');
  }
  env = getenv("MOZ_NURSERY_BIGINTS");
  if (env && *env) {
    canAllocateBigInts_ = (*env == '1');
  }
}
+
// Write |message| to stderr and terminate the process with status 0. Used to
// emit help text requested via an environment variable.
static void PrintAndExit(const char* message) {
  fputs(message, stderr);
  exit(0);
}
+
+static const char* GetEnvVar(const char* name, const char* helpMessage) {
+ const char* value = getenv(name);
+ if (!value) {
+ return nullptr;
+ }
+
+ if (strcmp(value, "help") == 0) {
+ PrintAndExit(helpMessage);
+ }
+
+ return value;
+}
+
// Read environment variable |name| as a boolean: true iff it is set and
// parses (via atoi) to a non-zero value.
static bool GetBoolEnvVar(const char* name, const char* helpMessage) {
  const char* env = GetEnvVar(name, helpMessage);
  return env && bool(atoi(env));
}

// Read environment variable |name| as a decimal threshold. Sets *enabled and
// *threshold; if the value is present but not a valid number, print the help
// message and exit.
static void ReadReportPretenureEnv(const char* name, const char* helpMessage,
                                   bool* enabled, size_t* threshold) {
  *enabled = false;
  *threshold = 0;

  const char* env = GetEnvVar(name, helpMessage);
  if (!env) {
    return;
  }

  char* end;
  *threshold = strtol(env, &end, 10);
  // Reject empty input or trailing garbage after the number.
  if (end == env || *end) {
    PrintAndExit(helpMessage);
  }

  *enabled = true;
}
+
// One-time nursery initialization: read profiling/reporting environment
// variables, create the background decommit task, enable the store buffer and
// allocate the first chunk. Returns false on OOM.
bool js::Nursery::init(AutoLockGCBgAlloc& lock) {
  ReadProfileEnv("JS_GC_PROFILE_NURSERY",
                 "Report minor GCs taking at least N microseconds.\n",
                 &enableProfiling_, &profileWorkers_, &profileThreshold_);

  reportDeduplications_ = GetBoolEnvVar(
      "JS_GC_REPORT_STATS",
      "JS_GC_REPORT_STATS=1\n"
      "\tAfter a minor GC, report how many strings were deduplicated.\n");

  ReadReportPretenureEnv(
      "JS_GC_REPORT_PRETENURE",
      "JS_GC_REPORT_PRETENURE=N\n"
      "\tAfter a minor GC, report information about pretenuring, including\n"
      "\tallocation sites with at least N allocations.\n",
      &reportPretenuring_, &reportPretenuringThreshold_);

  decommitTask = MakeUnique<NurseryDecommitTask>(gc);
  if (!decommitTask) {
    return false;
  }

  if (!gc->storeBuffer().enable()) {
    return false;
  }

  return initFirstChunk(lock);
}
+
// Tear-down: release all nursery memory and disable the store buffer.
js::Nursery::~Nursery() { disable(); }

// (Re-)enable the nursery after it was disabled. No-op when already enabled.
// Silently leaves the nursery disabled if the first chunk cannot be
// allocated.
void js::Nursery::enable() {
  MOZ_ASSERT(isEmpty());
  MOZ_ASSERT(!gc->isVerifyPreBarriersEnabled());
  if (isEnabled()) {
    return;
  }

  {
    AutoLockGCBgAlloc lock(gc);
    if (!initFirstChunk(lock)) {
      // If we fail to allocate memory, the nursery will not be enabled.
      return;
    }
  }

#ifdef JS_GC_ZEAL
  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
    enterZealMode();
  }
#endif

  updateAllZoneAllocFlags();

  // This should always succeed after the first time it's called.
  MOZ_ALWAYS_TRUE(gc->storeBuffer().enable());
}
+
// Allocate the initial chunk and set up allocation state at minimum nursery
// capacity. On failure the nursery is left with zero capacity (disabled).
bool js::Nursery::initFirstChunk(AutoLockGCBgAlloc& lock) {
  MOZ_ASSERT(!isEnabled());

  capacity_ = tunables().gcMinNurseryBytes();

  if (!decommitTask->reserveSpaceForBytes(capacity_) ||
      !allocateNextChunk(0, lock)) {
    capacity_ = 0;
    return false;
  }

  setCurrentChunk(0);
  setStartPosition();
  poisonAndInitCurrentChunk();

  // Clear any information about previous collections.
  clearRecentGrowthData();

  return true;
}

// Disable the nursery: free all chunks, zero the allocation window (so JIT'd
// code sees no space) and disable the store buffer. The nursery must already
// be empty.
void js::Nursery::disable() {
  MOZ_ASSERT(isEmpty());
  if (!isEnabled()) {
    return;
  }

  // Free all chunks.
  decommitTask->join();
  freeChunksFrom(0);
  decommitTask->runFromMainThread();

  capacity_ = 0;

  // We must reset currentEnd_ so that there is no space for anything in the
  // nursery. JIT'd code uses this even if the nursery is disabled.
  currentEnd_ = 0;
  position_ = 0;
  gc->storeBuffer().disable();

  if (gc->wasInitialized()) {
    // This assumes there is an atoms zone.
    updateAllZoneAllocFlags();
  }
}
+
// Toggle nursery allocation of strings/BigInts. Each setter requires an empty
// nursery and propagates the change to every zone's alloc flags.

void js::Nursery::enableStrings() {
  MOZ_ASSERT(isEmpty());
  canAllocateStrings_ = true;
  updateAllZoneAllocFlags();
}

void js::Nursery::disableStrings() {
  MOZ_ASSERT(isEmpty());
  canAllocateStrings_ = false;
  updateAllZoneAllocFlags();
}

void js::Nursery::enableBigInts() {
  MOZ_ASSERT(isEmpty());
  canAllocateBigInts_ = true;
  updateAllZoneAllocFlags();
}

void js::Nursery::disableBigInts() {
  MOZ_ASSERT(isEmpty());
  canAllocateBigInts_ = false;
  updateAllZoneAllocFlags();
}

void js::Nursery::updateAllZoneAllocFlags() {
  // The alloc flags are not relevant for the atoms zone, and flushing
  // jit-related information can be problematic for the atoms zone.
  for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
    updateAllocFlagsForZone(zone);
  }
}
+
// Compute which kinds of cells may be nursery-allocated in |zone|, combining
// the nursery's global state with per-zone overrides.
void js::Nursery::getAllocFlagsForZone(JS::Zone* zone, bool* allocObjectsOut,
                                       bool* allocStringsOut,
                                       bool* allocBigIntsOut) {
  *allocObjectsOut = isEnabled();
  *allocStringsOut =
      isEnabled() && canAllocateStrings() && !zone->nurseryStringsDisabled;
  *allocBigIntsOut =
      isEnabled() && canAllocateBigInts() && !zone->nurseryBigIntsDisabled;
}

// Unconditionally set |zone|'s alloc flags (no JIT code invalidation).
void js::Nursery::setAllocFlagsForZone(JS::Zone* zone) {
  bool allocObjects;
  bool allocStrings;
  bool allocBigInts;

  getAllocFlagsForZone(zone, &allocObjects, &allocStrings, &allocBigInts);
  zone->setNurseryAllocFlags(allocObjects, allocStrings, allocBigInts);
}

// Update |zone|'s alloc flags and, when anything changed, discard JIT code
// that baked in the old flags.
void js::Nursery::updateAllocFlagsForZone(JS::Zone* zone) {
  bool allocObjects;
  bool allocStrings;
  bool allocBigInts;

  getAllocFlagsForZone(zone, &allocObjects, &allocStrings, &allocBigInts);

  if (allocObjects != zone->allocNurseryObjects() ||
      allocStrings != zone->allocNurseryStrings() ||
      allocBigInts != zone->allocNurseryBigInts()) {
    CancelOffThreadIonCompile(zone);
    zone->setNurseryAllocFlags(allocObjects, allocStrings, allocBigInts);
    discardCodeAndSetJitFlagsForZone(zone);
  }
}

// Throw away compiled code in |zone| and resynchronize per-realm JIT flags
// with the zone's current nursery-string setting.
void js::Nursery::discardCodeAndSetJitFlagsForZone(JS::Zone* zone) {
  zone->forceDiscardJitCode(runtime()->gcContext());

  for (RealmsInZoneIter r(zone); !r.done(); r.next()) {
    if (jit::JitRealm* jitRealm = r->jitRealm()) {
      jitRealm->discardStubs();
      jitRealm->setStringsCanBeInNursery(zone->allocNurseryStrings());
    }
  }
}
+
// True when no cells have been allocated since the last collection (or the
// nursery is disabled entirely).
bool js::Nursery::isEmpty() const {
  if (!isEnabled()) {
    return true;
  }

  if (!gc->hasZealMode(ZealMode::GenerationalGC)) {
    // Outside zeal mode, allocation always starts at the front of chunk 0.
    MOZ_ASSERT(currentStartChunk_ == 0);
    MOZ_ASSERT(currentStartPosition_ == chunk(0).start());
  }
  return position() == currentStartPosition_;
}
+
#ifdef JS_GC_ZEAL
// Grow the nursery to its maximum size for GC zeal testing. Recommits any
// decommitted tail of chunk 0 first.
void js::Nursery::enterZealMode() {
  if (!isEnabled()) {
    return;
  }

  MOZ_ASSERT(isEmpty());

  decommitTask->join();

  AutoEnterOOMUnsafeRegion oomUnsafe;

  if (isSubChunkMode()) {
    {
      if (!chunk(0).markPagesInUseHard(ChunkSize)) {
        oomUnsafe.crash("Out of memory trying to extend chunk for zeal mode");
      }
    }

    // It'd be simpler to poison the whole chunk, but we can't do that
    // because the nursery might be partially used.
    chunk(0).poisonRange(capacity_, ChunkSize - capacity_,
                         JS_FRESH_NURSERY_PATTERN, MemCheckKind::MakeUndefined);
  }

  capacity_ = RoundUp(tunables().gcMaxNurseryBytes(), ChunkSize);

  if (!decommitTask->reserveSpaceForBytes(capacity_)) {
    oomUnsafe.crash("Nursery::enterZealMode");
  }

  setCurrentEnd();
}

// Return to normal allocation state after zeal mode: restart at chunk 0.
// Note: capacity_ is not shrunk here.
void js::Nursery::leaveZealMode() {
  if (!isEnabled()) {
    return;
  }

  MOZ_ASSERT(isEmpty());

  setCurrentChunk(0);
  setStartPosition();
  poisonAndInitCurrentChunk();
}
#endif  // JS_GC_ZEAL
+
// Allocate a nursery cell of |size| bytes for trace kind |kind|, preceded by
// a NurseryCellHeader recording its allocation site. Returns a pointer to the
// cell itself (after the header), or null when the nursery is full.
void* js::Nursery::allocateCell(gc::AllocSite* site, size_t size,
                                JS::TraceKind kind) {
  // Ensure there's enough space to replace the contents with a
  // RelocationOverlay.
  MOZ_ASSERT(size >= sizeof(RelocationOverlay));
  MOZ_ASSERT(size % CellAlignBytes == 0);
  MOZ_ASSERT(size_t(kind) < NurseryTraceKinds);
  MOZ_ASSERT_IF(kind == JS::TraceKind::String, canAllocateStrings());
  MOZ_ASSERT_IF(kind == JS::TraceKind::BigInt, canAllocateBigInts());

  void* ptr = allocate(sizeof(NurseryCellHeader) + size);
  if (!ptr) {
    return nullptr;
  }

  new (ptr) NurseryCellHeader(site, kind);

  void* cell =
      reinterpret_cast<void*>(uintptr_t(ptr) + sizeof(NurseryCellHeader));

  // Update the allocation site. This code is also inlined in
  // MacroAssembler::updateAllocSite.
  uint32_t allocCount = site->incAllocCount();
  if (allocCount == 1) {
    // First allocation from this site: start tracking it for pretenuring.
    pretenuringNursery.insertIntoAllocatedList(site);
  } else {
    MOZ_ASSERT_IF(site->isNormal(), site->isInAllocatedList());
  }

  gcprobes::NurseryAlloc(cell, kind);
  return cell;
}
+
// Bump-allocate |size| bytes from the current chunk, advancing to the next
// chunk if necessary. Returns null when the nursery is out of space (the
// caller is then expected to trigger a minor GC or fall back elsewhere).
inline void* js::Nursery::allocate(size_t size) {
  MOZ_ASSERT(isEnabled());
  MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
  MOZ_ASSERT_IF(currentChunk_ == currentStartChunk_,
                position() >= currentStartPosition_);
  MOZ_ASSERT(position() % CellAlignBytes == 0);
  MOZ_ASSERT(size % CellAlignBytes == 0);

  if (MOZ_UNLIKELY(currentEnd() < position() + size)) {
    return moveToNextChunkAndAllocate(size);
  }

  void* thing = (void*)position();
  position_ = position() + size;

  DebugOnlyPoison(thing, JS_ALLOCATED_NURSERY_PATTERN, size,
                  MemCheckKind::MakeUndefined);

  return thing;
}

// Slow path for allocate(): advance to the next chunk (allocating a fresh one
// from the system if needed) and retry. Returns null when the nursery has
// reached its maximum chunk count or chunk allocation fails.
void* Nursery::moveToNextChunkAndAllocate(size_t size) {
  MOZ_ASSERT(currentEnd() < position() + size);

  unsigned chunkno = currentChunk_ + 1;
  MOZ_ASSERT(chunkno <= maxChunkCount());
  MOZ_ASSERT(chunkno <= allocatedChunkCount());
  if (chunkno == maxChunkCount()) {
    return nullptr;
  }
  if (chunkno == allocatedChunkCount()) {
    // Track time spent in system chunk allocation for profiling.
    TimeStamp start = TimeStamp::Now();
    {
      AutoLockGCBgAlloc lock(gc);
      if (!allocateNextChunk(chunkno, lock)) {
        return nullptr;
      }
    }
    timeInChunkAlloc_ += TimeStamp::Now() - start;
    MOZ_ASSERT(chunkno < allocatedChunkCount());
  }
  setCurrentChunk(chunkno);
  poisonAndInitCurrentChunk();

  // We know there's enough space to allocate now so we can call allocate()
  // recursively.
  MOZ_ASSERT(currentEnd() >= position() + size);
  return allocate(size);
}
// Allocate a raw buffer of |nbytes|: small requests come from the nursery,
// larger ones (or nursery-full) from |zone|'s malloc heap. Malloced buffers
// are registered so a minor GC can free them; returns null on OOM.
void* js::Nursery::allocateBuffer(Zone* zone, size_t nbytes) {
  MOZ_ASSERT(nbytes > 0);

  if (nbytes <= MaxNurseryBufferSize) {
    void* buffer = allocate(nbytes);
    if (buffer) {
      return buffer;
    }
  }

  void* buffer = zone->pod_malloc<uint8_t>(nbytes);
  if (buffer && !registerMallocedBuffer(buffer, nbytes)) {
    js_free(buffer);
    return nullptr;
  }
  return buffer;
}

// As above, but for a buffer owned by |obj|: tenured owners always get a
// plain malloc buffer (which the tenured owner frees itself).
void* js::Nursery::allocateBuffer(Zone* zone, JSObject* obj, size_t nbytes) {
  MOZ_ASSERT(obj);
  MOZ_ASSERT(nbytes > 0);

  if (!IsInsideNursery(obj)) {
    return zone->pod_malloc<uint8_t>(nbytes);
  }

  return allocateBuffer(zone, nbytes);
}

// Allocate a buffer co-located with |obj|: nursery buffer for nursery
// objects, malloc for tenured ones. The request must fit the nursery buffer
// size limit; the nursery buffer is not registered for separate freeing.
void* js::Nursery::allocateBufferSameLocation(JSObject* obj, size_t nbytes) {
  MOZ_ASSERT(obj);
  MOZ_ASSERT(nbytes > 0);
  MOZ_ASSERT(nbytes <= MaxNurseryBufferSize);

  if (!IsInsideNursery(obj)) {
    return obj->zone()->pod_malloc<uint8_t>(nbytes);
  }

  return allocate(nbytes);
}
+
// Zero-initialized variant of allocateBuffer: nursery allocations are
// memset to zero, malloc fallback uses calloc in the given malloc arena.
void* js::Nursery::allocateZeroedBuffer(
    Zone* zone, size_t nbytes, arena_id_t arena /*= js::MallocArena*/) {
  MOZ_ASSERT(nbytes > 0);

  if (nbytes <= MaxNurseryBufferSize) {
    void* buffer = allocate(nbytes);
    if (buffer) {
      memset(buffer, 0, nbytes);
      return buffer;
    }
  }

  void* buffer = zone->pod_arena_calloc<uint8_t>(arena, nbytes);
  if (buffer && !registerMallocedBuffer(buffer, nbytes)) {
    js_free(buffer);
    return nullptr;
  }
  return buffer;
}

// As above, for a buffer owned by |obj|: tenured owners get plain calloc.
void* js::Nursery::allocateZeroedBuffer(
    JSObject* obj, size_t nbytes, arena_id_t arena /*= js::MallocArena*/) {
  MOZ_ASSERT(obj);
  MOZ_ASSERT(nbytes > 0);

  if (!IsInsideNursery(obj)) {
    return obj->zone()->pod_arena_calloc<uint8_t>(arena, nbytes);
  }
  return allocateZeroedBuffer(obj->zone(), nbytes, arena);
}
+
// Resize a buffer owned by |cell|. Handles three cases: tenured owner (plain
// realloc), nursery owner with malloced buffer (realloc + re-key the
// registration), nursery owner with nursery buffer (shrinks are ignored;
// growth copies into a fresh buffer). Returns null on OOM, leaving the old
// buffer intact.
void* js::Nursery::reallocateBuffer(Zone* zone, Cell* cell, void* oldBuffer,
                                    size_t oldBytes, size_t newBytes) {
  if (!IsInsideNursery(cell)) {
    MOZ_ASSERT(!isInside(oldBuffer));
    return zone->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
  }

  if (!isInside(oldBuffer)) {
    MOZ_ASSERT(mallocedBufferBytes >= oldBytes);
    void* newBuffer =
        zone->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
    if (newBuffer) {
      if (oldBuffer != newBuffer) {
        // Keep the malloced-buffer registry pointing at the live allocation.
        MOZ_ALWAYS_TRUE(
            mallocedBuffers.rekeyAs(oldBuffer, newBuffer, newBuffer));
      }
      mallocedBufferBytes -= oldBytes;
      mallocedBufferBytes += newBytes;
    }
    return newBuffer;
  }

  // The nursery cannot make use of the returned slots data.
  if (newBytes < oldBytes) {
    return oldBuffer;
  }

  void* newBuffer = allocateBuffer(zone, newBytes);
  if (newBuffer) {
    PodCopy((uint8_t*)newBuffer, (uint8_t*)oldBuffer, oldBytes);
  }
  return newBuffer;
}

// Allocate a digit buffer for BigInt |bi|: tenured BigInts get malloc,
// nursery BigInts go through allocateBuffer.
void* js::Nursery::allocateBuffer(JS::BigInt* bi, size_t nbytes) {
  MOZ_ASSERT(bi);
  MOZ_ASSERT(nbytes > 0);

  if (!IsInsideNursery(bi)) {
    return bi->zone()->pod_malloc<uint8_t>(nbytes);
  }
  return allocateBuffer(bi->zone(), nbytes);
}

// Free a buffer obtained from allocateBuffer. Nursery-internal buffers are
// reclaimed wholesale at the next minor GC, so only malloced buffers are
// released here.
void js::Nursery::freeBuffer(void* buffer, size_t nbytes) {
  if (!isInside(buffer)) {
    removeMallocedBuffer(buffer, nbytes);
    js_free(buffer);
  }
}
+
#ifdef DEBUG
/* static */
// Debug check that |ptr| is (or is not) in the nursery, tolerating the
// boundary case described below.
inline bool Nursery::checkForwardingPointerLocation(void* ptr,
                                                    bool expectedInside) {
  if (isInside(ptr) == expectedInside) {
    return true;
  }

  // If a zero-capacity elements header lands right at the end of a chunk then
  // elements data will appear to be in the next chunk. If we have a pointer to
  // the very start of a chunk, check the previous chunk.
  if ((uintptr_t(ptr) & ChunkMask) == 0 &&
      isInside(reinterpret_cast<uint8_t*>(ptr) - 1) == expectedInside) {
    return true;
  }

  return false;
}
#endif

// Record a buffer move in the side table, used when the old buffer is too
// small to hold a BufferRelocationOverlay in place. Crashes on OOM since
// losing the mapping would corrupt the heap.
void Nursery::setIndirectForwardingPointer(void* oldData, void* newData) {
  MOZ_ASSERT(checkForwardingPointerLocation(oldData, true));
  MOZ_ASSERT(checkForwardingPointerLocation(newData, false));

  AutoEnterOOMUnsafeRegion oomUnsafe;
#ifdef DEBUG
  // Re-recording the same move is allowed, but it must agree with the
  // existing entry.
  if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(oldData)) {
    MOZ_ASSERT(p->value() == newData);
  }
#endif
  if (!forwardedBuffers.put(oldData, newData)) {
    oomUnsafe.crash("Nursery::setForwardingPointer");
  }
}

#ifdef DEBUG
// Debug probe: touching the word through a volatile pointer crashes if the
// address is not writeable; always returns true otherwise.
static bool IsWriteableAddress(void* ptr) {
  auto* vPtr = reinterpret_cast<volatile uint64_t*>(ptr);
  *vPtr = *vPtr;
  return true;
}
#endif
+
// Patch a slots/elements pointer that may still refer to a moved nursery
// buffer, replacing it with the buffer's post-move address.
void js::Nursery::forwardBufferPointer(uintptr_t* pSlotsElems) {
  // Read the current pointer value which may be one of:
  // - Non-nursery pointer
  // - Nursery-allocated buffer
  // - A BufferRelocationOverlay inside the nursery
  //
  // Note: The buffer has already been relocated. We are just patching stale
  // pointers now.
  auto* buffer = reinterpret_cast<void*>(*pSlotsElems);

  if (!isInside(buffer)) {
    return;
  }

  // The new location for this buffer is either stored inline with it or in
  // the forwardedBuffers table.
  if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(buffer)) {
    buffer = p->value();
    // It's not valid to assert IsWriteableAddress for indirect forwarding
    // pointers because the size of the allocation could be less than a word.
  } else {
    BufferRelocationOverlay* reloc =
        static_cast<BufferRelocationOverlay*>(buffer);
    buffer = *reloc;
    MOZ_ASSERT(IsWriteableAddress(buffer));
  }

  MOZ_ASSERT(!isInside(buffer));
  *pSlotsElems = reinterpret_cast<uintptr_t>(buffer);
}
+
// Compute the fraction of the previous minor GC's used bytes that were
// tenured. Sets *validForTenuring to indicate whether the rate is meaningful
// for tenuring decisions (nursery was at least 90% full).
inline double js::Nursery::calcPromotionRate(bool* validForTenuring) const {
  MOZ_ASSERT(validForTenuring);

  // Nothing was allocated: no meaningful rate, avoid dividing by zero.
  if (previousGC.nurseryUsedBytes == 0) {
    *validForTenuring = false;
    return 0.0;
  }

  double used = double(previousGC.nurseryUsedBytes);
  double capacity = double(previousGC.nurseryCapacity);
  double tenured = double(previousGC.tenuredBytes);

  // We should only use the promotion rate to make tenuring decisions if it's
  // likely to be valid. The criterion we use is that the nursery was at least
  // 90% full.
  *validForTenuring = used > capacity * 0.9;

  return tenured / used;
}
+
// Emit a JSON object describing the most recent minor GC (sizes, tenured
// counts, per-phase timings). Emits a status-only object when the nursery is
// disabled or no collection has happened yet.
void js::Nursery::renderProfileJSON(JSONPrinter& json) const {
  if (!isEnabled()) {
    json.beginObject();
    json.property("status", "nursery disabled");
    json.endObject();
    return;
  }

  if (previousGC.reason == JS::GCReason::NO_REASON) {
    // If the nursery was empty when the last minorGC was requested, then
    // no nursery collection will have been performed but JSON may still be
    // requested. (And as a public API, this function should not crash in
    // such a case.)
    json.beginObject();
    json.property("status", "nursery empty");
    json.endObject();
    return;
  }

  json.beginObject();

  json.property("status", "complete");

  json.property("reason", JS::ExplainGCReason(previousGC.reason));
  json.property("bytes_tenured", previousGC.tenuredBytes);
  json.property("cells_tenured", previousGC.tenuredCells);
  json.property("strings_tenured",
                stats().getStat(gcstats::STAT_STRINGS_TENURED));
  json.property("strings_deduplicated",
                stats().getStat(gcstats::STAT_STRINGS_DEDUPLICATED));
  json.property("bigints_tenured",
                stats().getStat(gcstats::STAT_BIGINTS_TENURED));
  json.property("bytes_used", previousGC.nurseryUsedBytes);
  json.property("cur_capacity", previousGC.nurseryCapacity);
  const size_t newCapacity = capacity();
  if (newCapacity != previousGC.nurseryCapacity) {
    json.property("new_capacity", newCapacity);
  }
  if (previousGC.nurseryCommitted != previousGC.nurseryCapacity) {
    json.property("lazy_capacity", previousGC.nurseryCommitted);
  }
  if (!timeInChunkAlloc_.IsZero()) {
    json.property("chunk_alloc_us", timeInChunkAlloc_, json.MICROSECONDS);
  }

  // These counters only contain consistent data if the profiler is enabled,
  // and then there's no guarantee.
  if (runtime()->geckoProfiler().enabled()) {
    json.property("cells_allocated_nursery",
                  pretenuringNursery.totalAllocCount());
    json.property("cells_allocated_tenured",
                  stats().allocsSinceMinorGCTenured());
  }

  json.beginObjectProperty("phase_times");

  // Build the phase-name table from the profile-time macro list; the trailing
  // "" entry keeps the array non-empty even if the list is empty.
#define EXTRACT_NAME(name, text) #name,
  static const char* const names[] = {
      FOR_EACH_NURSERY_PROFILE_TIME(EXTRACT_NAME)
#undef EXTRACT_NAME
      ""};

  size_t i = 0;
  for (auto time : profileDurations_) {
    json.property(names[i++], time, json.MICROSECONDS);
  }

  json.endObject();  // timings value

  json.endObject();
}
+
// The following macros define nursery GC profile metadata fields that are
// printed before the timing information defined by
// FOR_EACH_NURSERY_PROFILE_TIME. Each entry expands to:
//   _(column header, column width, printf format, value expression)
// The value expressions reference locals that the call sites are expected to
// define (pid, runtime, timestamp, reasonStr, etc.).

// Fields shared between per-collection lines and the totals line.
#define FOR_EACH_NURSERY_PROFILE_COMMON_METADATA(_) \
  _("PID", 7, "%7zu", pid)                          \
  _("Runtime", 14, "0x%12p", runtime)

// Fields specific to a single collection (slice).
#define FOR_EACH_NURSERY_PROFILE_SLICE_METADATA(_)      \
  _("Timestamp", 10, "%10.6f", timestamp.ToSeconds())   \
  _("Reason", 20, "%-20.20s", reasonStr)                \
  _("PRate", 6, "%5.1f%%", promotionRatePercent)        \
  _("OldKB", 6, "%6zu", oldSizeKB)                      \
  _("NewKB", 6, "%6zu", newSizeKB)                      \
  _("Dedup", 6, "%6zu", dedupCount)

// All metadata fields, in output order.
#define FOR_EACH_NURSERY_PROFILE_METADATA(_)  \
  FOR_EACH_NURSERY_PROFILE_COMMON_METADATA(_) \
  FOR_EACH_NURSERY_PROFILE_SLICE_METADATA(_)
+
// Print one line of profile output describing the collection that just
// finished (reason, promotion rate, size change, dedup count, phase timings).
// Silently does nothing if the sprinter hits OOM.
void js::Nursery::printCollectionProfile(JS::GCReason reason,
                                         double promotionRate) {
  stats().maybePrintProfileHeaders();

  Sprinter sprinter;
  if (!sprinter.init() || !sprinter.put(gcstats::MinorGCProfilePrefix)) {
    return;
  }

  // Locals referenced by name from FOR_EACH_NURSERY_PROFILE_METADATA below.
  size_t pid = getpid();
  JSRuntime* runtime = gc->rt;
  TimeDuration timestamp = collectionStartTime() - stats().creationTime();
  const char* reasonStr = ExplainGCReason(reason);
  double promotionRatePercent = promotionRate * 100;
  size_t oldSizeKB = previousGC.nurseryCapacity / 1024;
  size_t newSizeKB = capacity() / 1024;
  size_t dedupCount = stats().getStat(gcstats::STAT_STRINGS_DEDUPLICATED);

#define PRINT_FIELD_VALUE(_1, _2, format, value) \
  if (!sprinter.jsprintf(" " format, value)) {   \
    return;                                      \
  }
  FOR_EACH_NURSERY_PROFILE_METADATA(PRINT_FIELD_VALUE)
#undef PRINT_FIELD_VALUE

  printProfileDurations(profileDurations_, sprinter);

  fputs(sprinter.string(), stats().profileFile());
}
+
// Print the column-header line for minor GC profile output: the metadata
// field names followed by one column per profiled phase. Column widths match
// those used by printCollectionProfile.
void js::Nursery::printProfileHeader() {
  Sprinter sprinter;
  if (!sprinter.init() || !sprinter.put(gcstats::MinorGCProfilePrefix)) {
    return;
  }

#define PRINT_FIELD_NAME(name, width, _1, _2)  \
  if (!sprinter.jsprintf(" %-*s", width, name)) { \
    return;                                    \
  }
  FOR_EACH_NURSERY_PROFILE_METADATA(PRINT_FIELD_NAME)
#undef PRINT_FIELD_NAME

// Phase names are truncated to the fixed 6-character duration column width.
#define PRINT_PROFILE_NAME(_1, text)         \
  if (!sprinter.jsprintf(" %-6.6s", text)) { \
    return;                                  \
  }
  FOR_EACH_NURSERY_PROFILE_TIME(PRINT_PROFILE_NAME)
#undef PRINT_PROFILE_NAME

  if (!sprinter.put("\n")) {
    return;
  }

  fputs(sprinter.string(), stats().profileFile());
}
+
+// static
+bool js::Nursery::printProfileDurations(const ProfileDurations& times,
+ Sprinter& sprinter) {
+ for (auto time : times) {
+ int64_t micros = int64_t(time.ToMicroseconds());
+ if (!sprinter.jsprintf(" %6" PRIi64, micros)) {
+ return false;
+ }
+ }
+
+ return sprinter.put("\n");
+}
+
+static constexpr size_t NurserySliceMetadataFormatWidth() {
+ size_t fieldCount = 0;
+ size_t totalWidth = 0;
+
+#define UPDATE_COUNT_AND_WIDTH(_1, width, _2, _3) \
+ fieldCount++; \
+ totalWidth += width;
+ FOR_EACH_NURSERY_PROFILE_SLICE_METADATA(UPDATE_COUNT_AND_WIDTH)
+#undef UPDATE_COUNT_AND_WIDTH
+
+ // Add padding between fields.
+ totalWidth += fieldCount - 1;
+
+ return totalWidth;
+}
+
// Print cumulative minor GC timings at shutdown (when profiling is enabled):
// the common metadata, a collection count, and the summed phase durations.
void js::Nursery::printTotalProfileTimes() {
  if (!enableProfiling_) {
    return;
  }

  Sprinter sprinter;
  if (!sprinter.init() || !sprinter.put(gcstats::MinorGCProfilePrefix)) {
    return;
  }

  // Locals referenced by name from the metadata macro below.
  size_t pid = getpid();
  JSRuntime* runtime = gc->rt;

  char collections[32];
  DebugOnly<int> r = SprintfLiteral(
      collections, "TOTALS: %7" PRIu64 " collections:", gc->minorGCCount());
  MOZ_ASSERT(r > 0 && r < int(sizeof(collections)));

#define PRINT_FIELD_VALUE(_1, _2, format, value) \
  if (!sprinter.jsprintf(" " format, value)) {   \
    return;                                      \
  }
  FOR_EACH_NURSERY_PROFILE_COMMON_METADATA(PRINT_FIELD_VALUE)
#undef PRINT_FIELD_VALUE

  // Use whole width of per-slice metadata to print total slices so the profile
  // totals that follow line up.
  size_t width = NurserySliceMetadataFormatWidth();
  if (!sprinter.jsprintf(" %-*s", int(width), collections)) {
    return;
  }

  if (!printProfileDurations(totalDurations_, sprinter)) {
    return;
  }

  fputs(sprinter.string(), stats().profileFile());
}
+
+void js::Nursery::maybeClearProfileDurations() {
+ for (auto& duration : profileDurations_) {
+ duration = mozilla::TimeDuration();
+ }
+}
+
// Record the start time for the given profiling phase.
inline void js::Nursery::startProfile(ProfileKey key) {
  startTimes_[key] = TimeStamp::Now();
}
+
// Record the elapsed time for the given phase (since the matching
// startProfile call) and accumulate it into the lifetime totals.
inline void js::Nursery::endProfile(ProfileKey key) {
  profileDurations_[key] = TimeStamp::Now() - startTimes_[key];
  totalDurations_[key] += profileDurations_[key];
}
+
// The start time of the current (or most recent) collection, i.e. when the
// Total phase timer was started.
inline TimeStamp js::Nursery::collectionStartTime() const {
  return startTimes_[ProfileKey::Total];
}
+
// When the previous collection finished; null if none has completed yet.
inline TimeStamp js::Nursery::lastCollectionEndTime() const {
  return previousGC.endTime;
}
+
+bool js::Nursery::shouldCollect() const {
+ if (!isEnabled()) {
+ return false;
+ }
+
+ if (isEmpty() && capacity() == tunables().gcMinNurseryBytes()) {
+ return false;
+ }
+
+ if (minorGCRequested()) {
+ return true;
+ }
+
+ // Eagerly collect the nursery in idle time if it's nearly full.
+ if (isNearlyFull()) {
+ return true;
+ }
+
+ // If the nursery is not being collected often then it may be taking up more
+ // space than necessary.
+ return isUnderused();
+}
+
+inline bool js::Nursery::isNearlyFull() const {
+ bool belowBytesThreshold =
+ freeSpace() < tunables().nurseryFreeThresholdForIdleCollection();
+ bool belowFractionThreshold =
+ double(freeSpace()) / double(capacity()) <
+ tunables().nurseryFreeThresholdForIdleCollectionFraction();
+
+ // We want to use belowBytesThreshold when the nursery is sufficiently large,
+ // and belowFractionThreshold when it's small.
+ //
+ // When the nursery is small then belowBytesThreshold is a lower threshold
+ // (triggered earlier) than belowFractionThreshold. So if the fraction
+ // threshold is true, the bytes one will be true also. The opposite is true
+ // when the nursery is large.
+ //
+ // Therefore, by the time we cross the threshold we care about, we've already
+ // crossed the other one, and we can boolean AND to use either condition
+ // without encoding any "is the nursery big/small" test/threshold. The point
+ // at which they cross is when the nursery is: BytesThreshold /
+ // FractionThreshold large.
+ //
+ // With defaults that's:
+ //
+ // 1MB = 256KB / 0.25
+ //
+ return belowBytesThreshold && belowFractionThreshold;
+}
+
+inline bool js::Nursery::isUnderused() const {
+ if (js::SupportDifferentialTesting() || !previousGC.endTime) {
+ return false;
+ }
+
+ if (capacity() == tunables().gcMinNurseryBytes()) {
+ return false;
+ }
+
+ // If the nursery is above its minimum size, collect it every so often if we
+ // have idle time. This allows the nursery to shrink when it's not being
+ // used. There are other heuristics we could use for this, but this is the
+ // simplest.
+ TimeDuration timeSinceLastCollection = TimeStamp::Now() - previousGC.endTime;
+ return timeSinceLastCollection > tunables().nurseryTimeoutForIdleCollection();
+}
+
+// typeReason is the gcReason for specified type, for example,
+// FULL_CELL_PTR_OBJ_BUFFER is the gcReason for JSObject.
+static inline bool IsFullStoreBufferReason(JS::GCReason reason,
+ JS::GCReason typeReason) {
+ return reason == typeReason ||
+ reason == JS::GCReason::FULL_WHOLE_CELL_BUFFER ||
+ reason == JS::GCReason::FULL_GENERIC_BUFFER ||
+ reason == JS::GCReason::FULL_VALUE_BUFFER ||
+ reason == JS::GCReason::FULL_SLOT_BUFFER ||
+ reason == JS::GCReason::FULL_SHAPE_BUFFER;
+}
+
// Perform a minor (nursery) collection: record pre-GC statistics, move live
// cells to the tenured heap, apply pretenuring heuristics, resize the
// nursery, and emit telemetry/profiling output.
void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) {
  JSRuntime* rt = runtime();
  MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);

  if (!isEnabled() || isEmpty()) {
    // Our barriers are not always exact, and there may be entries in the
    // storebuffer even when the nursery is disabled or empty. It's not safe
    // to keep these entries as they may refer to tenured cells which may be
    // freed after this point.
    gc->storeBuffer().clear();

    MOZ_ASSERT(!pretenuringNursery.hasAllocatedSites());
  }

  if (!isEnabled()) {
    return;
  }

  AutoGCSession session(gc, JS::HeapState::MinorCollecting);

  stats().beginNurseryCollection(reason);
  gcprobes::MinorGCStart();

  maybeClearProfileDurations();
  startProfile(ProfileKey::Total);

  // Snapshot pre-collection state; previousGC.reason stays NO_REASON unless
  // an actual collection (doCollection) happens below.
  previousGC.reason = JS::GCReason::NO_REASON;
  previousGC.nurseryUsedBytes = usedSpace();
  previousGC.nurseryCapacity = capacity();
  previousGC.nurseryCommitted = committed();
  previousGC.nurseryUsedChunkCount = currentChunk_ + 1;
  previousGC.tenuredBytes = 0;
  previousGC.tenuredCells = 0;

  // If it isn't empty, it will call doCollection, and possibly after that
  // isEmpty() will become true, so use another variable to keep track of the
  // old empty state.
  bool wasEmpty = isEmpty();
  if (!wasEmpty) {
    CollectionResult result = doCollection(session, options, reason);
    // Don't include chunk headers when calculating nursery space, since this
    // space does not represent data that can be tenured
    MOZ_ASSERT(result.tenuredBytes <=
               (previousGC.nurseryUsedBytes -
                (sizeof(ChunkBase) * previousGC.nurseryUsedChunkCount)));

    previousGC.reason = reason;
    previousGC.tenuredBytes = result.tenuredBytes;
    previousGC.tenuredCells = result.tenuredCells;
    previousGC.nurseryUsedChunkCount = currentChunk_ + 1;
  }

  // Resize the nursery.
  maybeResizeNursery(options, reason);

  // Poison/initialise the first chunk.
  if (previousGC.nurseryUsedBytes) {
    // In most cases Nursery::clear() has not poisoned this chunk or marked it
    // as NoAccess; so we only need to poison the region used during the last
    // cycle. Also, if the heap was recently expanded we don't want to
    // re-poison the new memory. In both cases we only need to poison until
    // previousGC.nurseryUsedBytes.
    //
    // In cases where this is not true, like generational zeal mode or subchunk
    // mode, poisonAndInitCurrentChunk() will ignore its parameter. It will
    // also clamp the parameter.
    poisonAndInitCurrentChunk(previousGC.nurseryUsedBytes);
  }

  bool validPromotionRate;
  const double promotionRate = calcPromotionRate(&validPromotionRate);

  startProfile(ProfileKey::Pretenure);
  size_t sitesPretenured = 0;
  if (!wasEmpty) {
    sitesPretenured =
        doPretenuring(rt, reason, validPromotionRate, promotionRate);
  }
  endProfile(ProfileKey::Pretenure);

  // We ignore gcMaxBytes when allocating for minor collection. However, if we
  // overflowed, we disable the nursery. The next time we allocate, we'll fail
  // because bytes >= gcMaxBytes.
  if (gc->heapSize.bytes() >= tunables().gcMaxBytes()) {
    disable();
  }

  previousGC.endTime =
      TimeStamp::Now();  // Must happen after maybeResizeNursery.
  endProfile(ProfileKey::Total);
  gc->incMinorGcNumber();

  TimeDuration totalTime = profileDurations_[ProfileKey::Total];
  sendTelemetry(reason, totalTime, wasEmpty, promotionRate, sitesPretenured);

  stats().endNurseryCollection(reason);  // Calls GCNurseryCollectionCallback.
  gcprobes::MinorGCEnd();

  timeInChunkAlloc_ = mozilla::TimeDuration();

  // Roll per-zone string statistics into the runtime-wide totals and compute
  // the number of strings deduplicated during this collection.
  js::StringStats prevStats = gc->stringStats;
  js::StringStats& currStats = gc->stringStats;
  currStats = js::StringStats();
  for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
    currStats += zone->stringStats;
    zone->previousGCStringStats = zone->stringStats;
  }
  stats().setStat(
      gcstats::STAT_STRINGS_DEDUPLICATED,
      currStats.deduplicatedStrings - prevStats.deduplicatedStrings);
  if (ShouldPrintProfile(runtime(), enableProfiling_, profileWorkers_,
                         profileThreshold_, totalTime)) {
    printCollectionProfile(reason, promotionRate);
  }

  if (reportDeduplications_) {
    printDeduplicationData(prevStats, currStats);
  }
}
+
// Report metrics about the collection that just completed. Promotion-related
// metrics are only meaningful (and only sent) when the nursery was non-empty.
void js::Nursery::sendTelemetry(JS::GCReason reason, TimeDuration totalTime,
                                bool wasEmpty, double promotionRate,
                                size_t sitesPretenured) {
  JSRuntime* rt = runtime();
  rt->metrics().GC_MINOR_REASON(uint32_t(reason));

  // Long minor GCs are those that take more than 1ms.
  bool wasLongMinorGC = totalTime.ToMilliseconds() > 1.0;
  if (wasLongMinorGC) {
    rt->metrics().GC_MINOR_REASON_LONG(uint32_t(reason));
  }
  rt->metrics().GC_MINOR_US(totalTime);
  rt->metrics().GC_NURSERY_BYTES_2(committed());

  if (!wasEmpty) {
    rt->metrics().GC_PRETENURE_COUNT_2(sitesPretenured);
    rt->metrics().GC_NURSERY_PROMOTION_RATE(promotionRate * 100);
  }
}
+
+void js::Nursery::printDeduplicationData(js::StringStats& prev,
+ js::StringStats& curr) {
+ if (curr.deduplicatedStrings > prev.deduplicatedStrings) {
+ fprintf(stderr,
+ "pid %zu: deduplicated %" PRIi64 " strings, %" PRIu64
+ " chars, %" PRIu64 " malloc bytes\n",
+ size_t(getpid()),
+ curr.deduplicatedStrings - prev.deduplicatedStrings,
+ curr.deduplicatedChars - prev.deduplicatedChars,
+ curr.deduplicatedBytes - prev.deduplicatedBytes);
+ }
+}
+
void js::Nursery::freeTrailerBlocks(void) {
  // This routine frees those blocks denoted by the set
  //
  //   trailersAdded_ (all of it)
  //   - trailersRemoved_ (entries with index below trailersRemovedUsed_)
  //
  // For each block, places it back on the nursery's small-malloced-block pool
  // by calling mallocedBlockCache_.free.

  MOZ_ASSERT(trailersAdded_.length() == trailersRemoved_.length());
  MOZ_ASSERT(trailersRemovedUsed_ <= trailersRemoved_.length());

  // Sort the removed entries (by address) so they can be searched and merged
  // against below.
  std::sort(trailersRemoved_.begin(),
            trailersRemoved_.begin() + trailersRemovedUsed_,
            [](const void* block1, const void* block2) {
              return uintptr_t(block1) < uintptr_t(block2);
            });

  // Use one of two schemes to enumerate the set subtraction.
  if (trailersRemovedUsed_ < 1000) {
    // If the number of removed items is relatively small, it isn't worth the
    // cost of sorting `trailersAdded_`. Instead, walk through the vector in
    // whatever order it is and use binary search to establish whether each
    // item is present in trailersRemoved_[0 .. trailersRemovedUsed_ - 1].
    const size_t nAdded = trailersAdded_.length();
    for (size_t i = 0; i < nAdded; i++) {
      const PointerAndUint7 block = trailersAdded_[i];
      const void* blockPointer = block.pointer();
      if (!std::binary_search(trailersRemoved_.begin(),
                              trailersRemoved_.begin() + trailersRemovedUsed_,
                              blockPointer)) {
        mallocedBlockCache_.free(block);
      }
    }
  } else {
    // The general case, which is algorithmically safer for large inputs.
    // Sort the added entries, and then walk through both them and the removed
    // entries in lockstep.
    std::sort(trailersAdded_.begin(), trailersAdded_.end(),
              [](const PointerAndUint7& block1, const PointerAndUint7& block2) {
                return uintptr_t(block1.pointer()) <
                       uintptr_t(block2.pointer());
              });
    // Enumerate the set subtraction. This is somewhat simplified by the fact
    // that all elements of the removed set must also be present in the added
    // set. (the "inclusion property").
    const size_t nAdded = trailersAdded_.length();
    const size_t nRemoved = trailersRemovedUsed_;
    size_t iAdded;
    size_t iRemoved = 0;
    for (iAdded = 0; iAdded < nAdded; iAdded++) {
      if (iRemoved == nRemoved) {
        // We've run out of items to skip, so move on to the next loop.
        break;
      }
      const PointerAndUint7 blockAdded = trailersAdded_[iAdded];
      const void* blockRemoved = trailersRemoved_[iRemoved];
      if (blockAdded.pointer() < blockRemoved) {
        mallocedBlockCache_.free(blockAdded);
        continue;
      }
      // If this doesn't hold
      // (that is, if `blockAdded.pointer() > blockRemoved`),
      // then the abovementioned inclusion property doesn't hold.
      MOZ_RELEASE_ASSERT(blockAdded.pointer() == blockRemoved);
      iRemoved++;
    }
    MOZ_ASSERT(iRemoved == nRemoved);
    // We've used up the removed set, so now finish up the remainder of the
    // added set.
    for (/*keep going*/; iAdded < nAdded; iAdded++) {
      const PointerAndUint7 block = trailersAdded_[iAdded];
      mallocedBlockCache_.free(block);
    }
  }

  // And empty out both sets, but preserve the underlying storage.
  trailersAdded_.clear();
  trailersRemoved_.clear();
  trailersRemovedUsed_ = 0;
  trailerBytes_ = 0;

  // Discard blocks from the cache at 0.05% per megabyte of nursery capacity,
  // that is, 0.8% of blocks for a 16-megabyte nursery. This allows the cache
  // to gradually discard unneeded blocks in long running applications.
  mallocedBlockCache_.preen(0.05 * float(capacity() / (1024 * 1024)));
}
+
+size_t Nursery::sizeOfTrailerBlockSets(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return trailersAdded_.sizeOfExcludingThis(mallocSizeOf) +
+ trailersRemoved_.sizeOfExcludingThis(mallocSizeOf);
+}
+
// The core of a minor collection: trace roots, move live cells to the tenured
// heap (iterating to a fixed point), then sweep, clear and reset the nursery.
// Returns the number of bytes and cells tenured. The phase ordering here is
// significant; see the comments on the individual phases.
js::Nursery::CollectionResult js::Nursery::doCollection(AutoGCSession& session,
                                                        JS::GCOptions options,
                                                        JS::GCReason reason) {
  JSRuntime* rt = runtime();
  AutoSetThreadIsPerformingGC performingGC(rt->gcContext());
  AutoStopVerifyingBarriers av(rt, false);
  AutoDisableProxyCheck disableStrictProxyChecking;
  mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;

  // Move objects pointed to by roots from the nursery to the major heap.
  TenuringTracer mover(rt, this);

  // Trace everything considered as a root by a minor GC.
  traceRoots(session, mover);

  startProfile(ProfileKey::SweepCaches);
  gc->purgeRuntimeForMinorGC();
  endProfile(ProfileKey::SweepCaches);

  // Most of the work is done here. This loop iterates over objects that have
  // been moved to the major heap. If these objects have any outgoing pointers
  // to the nursery, then those nursery objects get moved as well, until no
  // objects are left to move. That is, we iterate to a fixed point.
  startProfile(ProfileKey::CollectToObjFP);
  mover.collectToObjectFixedPoint();
  endProfile(ProfileKey::CollectToObjFP);

  startProfile(ProfileKey::CollectToStrFP);
  mover.collectToStringFixedPoint();
  endProfile(ProfileKey::CollectToStrFP);

  // Sweep to update any pointers to nursery objects that have now been
  // tenured.
  startProfile(ProfileKey::Sweep);
  sweep();
  endProfile(ProfileKey::Sweep);

  // Update any slot or element pointers whose destination has been tenured.
  startProfile(ProfileKey::UpdateJitActivations);
  js::jit::UpdateJitActivationsForMinorGC(rt);
  forwardedBuffers.clearAndCompact();
  endProfile(ProfileKey::UpdateJitActivations);

  startProfile(ProfileKey::ObjectsTenuredCallback);
  gc->callObjectsTenuredCallback();
  endProfile(ProfileKey::ObjectsTenuredCallback);

  // Sweep. Queue malloced buffers belonging to dead nursery cells for
  // freeing (performed off-thread after the minor GC).
  startProfile(ProfileKey::FreeMallocedBuffers);
  gc->queueBuffersForFreeAfterMinorGC(mallocedBuffers);
  mallocedBufferBytes = 0;
  endProfile(ProfileKey::FreeMallocedBuffers);

  // Give trailer blocks associated with non-tenured Wasm{Struct,Array}Objects
  // back to our `mallocedBlockCache_`.
  startProfile(ProfileKey::FreeTrailerBlocks);
  freeTrailerBlocks();
  if (options == JS::GCOptions::Shrink || gc::IsOOMReason(reason)) {
    mallocedBlockCache_.clear();
  }
  endProfile(ProfileKey::FreeTrailerBlocks);

  startProfile(ProfileKey::ClearNursery);
  clear();
  endProfile(ProfileKey::ClearNursery);

  startProfile(ProfileKey::ClearStoreBuffer);
  gc->storeBuffer().clear();
  endProfile(ProfileKey::ClearStoreBuffer);

  // Purge the StringToAtomCache. This has to happen at the end because the
  // cache is used when tenuring strings.
  startProfile(ProfileKey::PurgeStringToAtomCache);
  runtime()->caches().stringToAtomCache.purge();
  endProfile(ProfileKey::PurgeStringToAtomCache);

  // Make sure hashtables have been updated after the collection.
  startProfile(ProfileKey::CheckHashTables);
#ifdef JS_GC_ZEAL
  if (gc->hasZealMode(ZealMode::CheckHashTablesOnMinorGC)) {
    runtime()->caches().checkEvalCacheAfterMinorGC();
    gc->checkHashTablesAfterMovingGC();
  }
#endif
  endProfile(ProfileKey::CheckHashTables);

  return {mover.getTenuredSize(), mover.getTenuredCells()};
}
+
// Trace all roots for a minor GC: the store buffer (in a specific order),
// the runtime's roots, and the debugger's cross-compartment edges.
void js::Nursery::traceRoots(AutoGCSession& session, TenuringTracer& mover) {
  {
    // Suppress the sampling profiler to prevent it observing moved functions.
    AutoSuppressProfilerSampling suppressProfiler(
        runtime()->mainContextFromOwnThread());

    // Trace the store buffer. This must happen first.
    StoreBuffer& sb = gc->storeBuffer();

    // Strings in the whole cell buffer must be traced first, in order to mark
    // tenured dependent strings' bases as non-deduplicatable. The rest of
    // nursery collection (whole non-string cells, edges, etc.) can happen
    // later.
    startProfile(ProfileKey::TraceWholeCells);
    sb.traceWholeCells(mover);
    endProfile(ProfileKey::TraceWholeCells);

    startProfile(ProfileKey::TraceValues);
    sb.traceValues(mover);
    endProfile(ProfileKey::TraceValues);

    startProfile(ProfileKey::TraceCells);
    sb.traceCells(mover);
    endProfile(ProfileKey::TraceCells);

    startProfile(ProfileKey::TraceSlots);
    sb.traceSlots(mover);
    endProfile(ProfileKey::TraceSlots);

    startProfile(ProfileKey::TraceGenericEntries);
    sb.traceGenericEntries(&mover);
    endProfile(ProfileKey::TraceGenericEntries);

    startProfile(ProfileKey::MarkRuntime);
    gc->traceRuntimeForMinorGC(&mover, session);
    endProfile(ProfileKey::MarkRuntime);
  }

  startProfile(ProfileKey::MarkDebugger);
  {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
    DebugAPI::traceAllForMovingGC(&mover);
  }
  endProfile(ProfileKey::MarkDebugger);
}
+
// Apply pretenuring heuristics after a collection: run the per-site
// pretenuring machinery, then decide per zone whether to disable nursery
// allocation of strings and/or bigints entirely. Returns the number of
// allocation sites that were pretenured.
size_t js::Nursery::doPretenuring(JSRuntime* rt, JS::GCReason reason,
                                  bool validPromotionRate,
                                  double promotionRate) {
  size_t sitesPretenured = pretenuringNursery.doPretenuring(
      gc, reason, validPromotionRate, promotionRate, reportPretenuring_,
      reportPretenuringThreshold_);

  bool highPromotionRate =
      validPromotionRate && promotionRate > tunables().pretenureThreshold();

  bool pretenureStr = false;
  bool pretenureBigInt = false;
  if (tunables().attemptPretenuring()) {
    // Should we check for pretenuring regardless of GCReason?
    // Use 3MB as the threshold so the pretenuring can be applied on Android.
    bool pretenureAll =
        highPromotionRate && previousGC.nurseryUsedBytes >= 3 * 1024 * 1024;

    pretenureStr =
        pretenureAll ||
        IsFullStoreBufferReason(reason, JS::GCReason::FULL_CELL_PTR_STR_BUFFER);
    pretenureBigInt =
        pretenureAll || IsFullStoreBufferReason(
                            reason, JS::GCReason::FULL_CELL_PTR_BIGINT_BUFFER);
  }

  uint32_t numStringsTenured = 0;
  uint32_t numBigIntsTenured = 0;
  for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
    // For some tests in JetStream2 and Kraken, the tenuredRate is high but the
    // number of allocated strings is low. So we calculate the tenuredRate only
    // if the number of string allocations is enough.
    uint32_t zoneNurseryStrings =
        zone->nurseryAllocCount(JS::TraceKind::String);
    bool allocThreshold = zoneNurseryStrings > 30000;
    uint64_t zoneTenuredStrings =
        zone->stringStats.ref().liveNurseryStrings -
        zone->previousGCStringStats.ref().liveNurseryStrings;
    double tenuredRate =
        allocThreshold ? double(zoneTenuredStrings) / double(zoneNurseryStrings)
                       : 0.0;
    bool disableNurseryStrings =
        pretenureStr && zone->allocNurseryStrings() &&
        tenuredRate > tunables().pretenureStringThreshold();
    bool disableNurseryBigInts = pretenureBigInt &&
                                 zone->allocNurseryBigInts() &&
                                 zone->tenuredBigInts >= 30 * 1000;
    if (disableNurseryStrings || disableNurseryBigInts) {
      if (disableNurseryStrings) {
        zone->nurseryStringsDisabled = true;
      }
      if (disableNurseryBigInts) {
        zone->nurseryBigIntsDisabled = true;
      }
      updateAllocFlagsForZone(zone);
    }
    numStringsTenured += zoneTenuredStrings;
    numBigIntsTenured += zone->tenuredBigInts;
    // Reset the per-zone counter for the next collection cycle.
    zone->tenuredBigInts = 0;
  }
  stats().setStat(gcstats::STAT_STRINGS_TENURED, numStringsTenured);
  stats().setStat(gcstats::STAT_BIGINTS_TENURED, numBigIntsTenured);

  return sitesPretenured;
}
+
+bool js::Nursery::registerMallocedBuffer(void* buffer, size_t nbytes) {
+ MOZ_ASSERT(buffer);
+ MOZ_ASSERT(nbytes > 0);
+ if (!mallocedBuffers.putNew(buffer)) {
+ return false;
+ }
+
+ mallocedBufferBytes += nbytes;
+ if (MOZ_UNLIKELY(mallocedBufferBytes > capacity() * 8)) {
+ requestMinorGC(JS::GCReason::NURSERY_MALLOC_BUFFERS);
+ }
+
+ return true;
+}
+
+size_t Nursery::sizeOfMallocedBuffers(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t total = 0;
+ for (BufferSet::Range r = mallocedBuffers.all(); !r.empty(); r.popFront()) {
+ total += mallocSizeOf(r.front());
+ }
+ total += mallocedBuffers.shallowSizeOfExcludingThis(mallocSizeOf);
+ return total;
+}
+
// Update or discard weak references to nursery things after tenuring:
// unique IDs, per-zone tables, map/set objects and runtime caches.
void js::Nursery::sweep() {
  // It's important that the context's GCUse is not Finalizing at this point,
  // otherwise we will miscount memory attached to nursery objects with
  // CellAllocPolicy.
  AutoSetThreadIsSweeping setThreadSweeping(runtime()->gcContext());

  MinorSweepingTracer trc(runtime());

  // Sweep unique IDs first before we sweep any tables that may be keyed based
  // on them. Dead objects lose their ID; forwarded (tenured) objects carry
  // their ID to the new location.
  for (Cell* cell : cellsWithUid_) {
    auto* obj = static_cast<JSObject*>(cell);
    if (!IsForwarded(obj)) {
      gc::RemoveUniqueId(obj);
    } else {
      JSObject* dst = Forwarded(obj);
      gc::TransferUniqueId(dst, obj);
    }
  }
  cellsWithUid_.clear();

  for (ZonesIter zone(runtime(), SkipAtoms); !zone.done(); zone.next()) {
    zone->sweepAfterMinorGC(&trc);
  }

  sweepMapAndSetObjects();

  runtime()->caches().sweepAfterMinorGC(&trc);
}
+
// Reset the nursery to an empty state after collection, poisoning the memory
// that was used so that touching a freed object will crash.
void js::Nursery::clear() {
  // Poison the nursery contents so touching a freed object will crash.
  unsigned firstClearChunk;
  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
    // Poison all the chunks used in this cycle. The new start chunk is
    // repositioned in Nursery::collect() but there's no point optimising that
    // in this case.
    firstClearChunk = currentStartChunk_;
  } else {
    // In normal mode we start at the second chunk, the first one will be used
    // in the next cycle and poisoned in Nursery::collect();
    MOZ_ASSERT(currentStartChunk_ == 0);
    firstClearChunk = 1;
  }
  for (unsigned i = firstClearChunk; i < currentChunk_; ++i) {
    chunk(i).poisonAfterEvict();
  }
  // Clear only the used part of the chunk because that's the part we touched,
  // but only if it's not going to be re-used immediately (>= firstClearChunk).
  if (currentChunk_ >= firstClearChunk) {
    chunk(currentChunk_)
        .poisonAfterEvict(position() - chunk(currentChunk_).start());
  }

  // Reset the start chunk & position if we're not in this zeal mode, or we're
  // in it and close to the end of the nursery.
  MOZ_ASSERT(maxChunkCount() > 0);
  if (!gc->hasZealMode(ZealMode::GenerationalGC) ||
      (gc->hasZealMode(ZealMode::GenerationalGC) &&
       currentChunk_ + 1 == maxChunkCount())) {
    setCurrentChunk(0);
  }

  // Set current start position for isEmpty checks.
  setStartPosition();
}
+
// Return the number of allocatable bytes from the current start position to
// the end of the given number of chunks, accounting for zeal modes that start
// mid-chunk and for sub-chunk capacities.
size_t js::Nursery::spaceToEnd(unsigned chunkCount) const {
  if (chunkCount == 0) {
    return 0;
  }

  unsigned lastChunk = chunkCount - 1;

  MOZ_ASSERT(lastChunk >= currentStartChunk_);
  MOZ_ASSERT(currentStartPosition_ - chunk(currentStartChunk_).start() <=
             NurseryChunkUsableSize);

  size_t bytes;

  if (chunkCount != 1) {
    // In the general case we have to add:
    // + the bytes used in the first
    //   chunk which may be less than the total size of a chunk since in some
    //   zeal modes we start the first chunk at some later position
    //   (currentStartPosition_).
    // + the size of all the other chunks.
    bytes = (chunk(currentStartChunk_).end() - currentStartPosition_) +
            ((lastChunk - currentStartChunk_) * ChunkSize);
  } else {
    // In sub-chunk mode, but it also works whenever chunkCount == 1, we need
    // to use currentEnd_ since it may not refer to a full chunk.
    bytes = currentEnd_ - currentStartPosition_;
  }

  MOZ_ASSERT(bytes <= maxChunkCount() * ChunkSize);

  return bytes;
}
+
// Make |chunkno| the chunk that allocations come from, resetting the bump
// pointer to its start and recomputing the allocation limit.
MOZ_ALWAYS_INLINE void js::Nursery::setCurrentChunk(unsigned chunkno) {
  MOZ_ASSERT(chunkno < allocatedChunkCount());

  currentChunk_ = chunkno;
  position_ = chunk(chunkno).start();
  setCurrentEnd();
}
+
+void js::Nursery::poisonAndInitCurrentChunk(size_t extent) {
+ if (gc->hasZealMode(ZealMode::GenerationalGC) || !isSubChunkMode()) {
+ chunk(currentChunk_).poisonAndInit(runtime());
+ } else {
+ extent = std::min(capacity_, extent);
+ chunk(currentChunk_).poisonAndInit(runtime(), extent);
+ }
+}
+
// Recompute the allocation limit for the current chunk. In sub-chunk mode
// the limit is capped at capacity_ rather than the full chunk size.
MOZ_ALWAYS_INLINE void js::Nursery::setCurrentEnd() {
  MOZ_ASSERT_IF(isSubChunkMode(),
                currentChunk_ == 0 && currentEnd_ <= chunk(0).end());
  currentEnd_ =
      uintptr_t(&chunk(currentChunk_)) + std::min(capacity_, ChunkSize);
}
+
// Allocate (or reuse from the GC's pool) the next nursery chunk. |chunkno|
// must be the index immediately following the currently-allocated chunks.
// Returns false on OOM, leaving the chunk list unchanged.
bool js::Nursery::allocateNextChunk(const unsigned chunkno,
                                    AutoLockGCBgAlloc& lock) {
  const unsigned priorCount = allocatedChunkCount();
  const unsigned newCount = priorCount + 1;

  MOZ_ASSERT((chunkno == currentChunk_ + 1) ||
             (chunkno == 0 && allocatedChunkCount() == 0));
  MOZ_ASSERT(chunkno == allocatedChunkCount());
  MOZ_ASSERT(chunkno < HowMany(capacity(), ChunkSize));

  if (!chunks_.resize(newCount)) {
    return false;
  }

  TenuredChunk* newChunk;
  newChunk = gc->getOrAllocChunk(lock);
  if (!newChunk) {
    // Roll back the resize so the vector stays consistent with the number of
    // chunks actually owned.
    chunks_.shrinkTo(priorCount);
    return false;
  }

  chunks_[chunkno] = NurseryChunk::fromChunk(newChunk);
  return true;
}
+
// Record the current chunk and position as the logical start of the nursery;
// isEmpty() compares against these.
MOZ_ALWAYS_INLINE void js::Nursery::setStartPosition() {
  currentStartChunk_ = currentChunk_;
  currentStartPosition_ = position();
}
+
// Grow or shrink the nursery after a collection, based on the target size
// computed from recent promotion rates and GC duty factor. May kick off the
// background decommit task for memory released by shrinking.
void js::Nursery::maybeResizeNursery(JS::GCOptions options,
                                     JS::GCReason reason) {
#ifdef JS_GC_ZEAL
  // This zeal mode disables nursery resizing.
  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
    return;
  }
#endif

  // Wait for any in-progress decommit before changing the chunk list.
  decommitTask->join();

  size_t newCapacity = mozilla::Clamp(targetSize(options, reason),
                                      tunables().gcMinNurseryBytes(),
                                      tunables().gcMaxNurseryBytes());

  MOZ_ASSERT(roundSize(newCapacity) == newCapacity);
  MOZ_ASSERT(newCapacity >= SystemPageSize());

  if (newCapacity > capacity()) {
    growAllocableSpace(newCapacity);
  } else if (newCapacity < capacity()) {
    shrinkAllocableSpace(newCapacity);
  }

  AutoLockHelperThreadState lock;
  if (!decommitTask->isEmpty(lock)) {
    decommitTask->startOrRunIfIdle(lock);
  }
}
+
+static inline bool ClampDouble(double* value, double min, double max) {
+ MOZ_ASSERT(!std::isnan(*value) && !std::isnan(min) && !std::isnan(max));
+ MOZ_ASSERT(max >= min);
+
+ if (*value <= min) {
+ *value = min;
+ return true;
+ }
+
+ if (*value >= max) {
+ *value = max;
+ return true;
+ }
+
+ return false;
+}
+
+size_t js::Nursery::targetSize(JS::GCOptions options, JS::GCReason reason) {
+ // Shrink the nursery as much as possible if purging was requested or in low
+ // memory situations.
+ if (options == JS::GCOptions::Shrink || gc::IsOOMReason(reason) ||
+ gc->systemHasLowMemory()) {
+ clearRecentGrowthData();
+ return 0;
+ }
+
+ // Don't resize the nursery during shutdown.
+ if (options == JS::GCOptions::Shutdown) {
+ clearRecentGrowthData();
+ return capacity();
+ }
+
+ TimeStamp now = TimeStamp::Now();
+
+ // If the nursery is completely unused then minimise it.
+ if (hasRecentGrowthData && previousGC.nurseryUsedBytes == 0 &&
+ now - lastCollectionEndTime() >
+ tunables().nurseryTimeoutForIdleCollection() &&
+ !js::SupportDifferentialTesting()) {
+ clearRecentGrowthData();
+ return 0;
+ }
+
+ // Calculate the fraction of the nursery promoted out of its entire
+ // capacity. This gives better results than using the promotion rate (based on
+ // the amount of nursery used) in cases where we collect before the nursery is
+ // full.
+ double fractionPromoted =
+ double(previousGC.tenuredBytes) / double(previousGC.nurseryCapacity);
+
+ // Calculate the duty factor, the fraction of time spent collecting the
+ // nursery.
+ double dutyFactor = 0.0;
+ TimeDuration collectorTime = now - collectionStartTime();
+ if (hasRecentGrowthData && !js::SupportDifferentialTesting()) {
+ TimeDuration totalTime = now - lastCollectionEndTime();
+ dutyFactor = collectorTime.ToSeconds() / totalTime.ToSeconds();
+ }
+
+ // Calculate a growth factor to try to achieve target promotion rate and duty
+ // factor goals.
+ static const double PromotionGoal = 0.02;
+ static const double DutyFactorGoal = 0.01;
+ double promotionGrowth = fractionPromoted / PromotionGoal;
+ double dutyGrowth = dutyFactor / DutyFactorGoal;
+ double growthFactor = std::max(promotionGrowth, dutyGrowth);
+
+ // Decrease the growth factor to try to keep collections shorter than a target
+ // maximum time. Don't do this during page load.
+ static const double MaxTimeGoalMs = 4.0;
+ if (!gc->isInPageLoad() && !js::SupportDifferentialTesting()) {
+ double timeGrowth = MaxTimeGoalMs / collectorTime.ToMilliseconds();
+ growthFactor = std::min(growthFactor, timeGrowth);
+ }
+
+ // Limit the range of the growth factor to prevent transient high promotion
+ // rates from affecting the nursery size too far into the future.
+ static const double GrowthRange = 2.0;
+ bool wasClamped = ClampDouble(&growthFactor, 1.0 / GrowthRange, GrowthRange);
+
+ // Calculate the target size based on data from this collection.
+ double target = double(capacity()) * growthFactor;
+
+ // Use exponential smoothing on the target size to take into account data from
+ // recent previous collections.
+ if (hasRecentGrowthData &&
+ now - lastCollectionEndTime() < TimeDuration::FromMilliseconds(200) &&
+ !js::SupportDifferentialTesting()) {
+ // Pay more attention to large changes.
+ double fraction = wasClamped ? 0.5 : 0.25;
+ smoothedTargetSize =
+ (1 - fraction) * smoothedTargetSize + fraction * target;
+ } else {
+ smoothedTargetSize = target;
+ }
+ hasRecentGrowthData = true;
+
+ // Leave size untouched if we are close to the target.
+ static const double GoalWidth = 1.5;
+ growthFactor = smoothedTargetSize / double(capacity());
+ if (growthFactor > (1.0 / GoalWidth) && growthFactor < GoalWidth) {
+ return capacity();
+ }
+
+ return roundSize(size_t(smoothedTargetSize));
+}
+
+void js::Nursery::clearRecentGrowthData() {
+ if (js::SupportDifferentialTesting()) {
+ return;
+ }
+
+ hasRecentGrowthData = false;
+ smoothedTargetSize = 0.0;
+}
+
+/* static */
+size_t js::Nursery::roundSize(size_t size) {
+ size_t step = size >= ChunkSize ? ChunkSize : SystemPageSize();
+ return Round(size, step);
+}
+
+void js::Nursery::growAllocableSpace(size_t newCapacity) {
+ MOZ_ASSERT_IF(!isSubChunkMode(), newCapacity > currentChunk_ * ChunkSize);
+ MOZ_ASSERT(newCapacity <= tunables().gcMaxNurseryBytes());
+ MOZ_ASSERT(newCapacity > capacity());
+
+ if (!decommitTask->reserveSpaceForBytes(newCapacity)) {
+ return;
+ }
+
+ if (isSubChunkMode()) {
+ MOZ_ASSERT(currentChunk_ == 0);
+
+ // The remainder of the chunk may have been decommitted.
+ if (!chunk(0).markPagesInUseHard(std::min(newCapacity, ChunkSize))) {
+ // The OS won't give us the memory we need, we can't grow.
+ return;
+ }
+
+ // The capacity has changed and since we were in sub-chunk mode we need to
+ // update the poison values / asan information for the now-valid region of
+ // this chunk.
+ size_t size = std::min(newCapacity, ChunkSize) - capacity();
+ chunk(0).poisonRange(capacity(), size, JS_FRESH_NURSERY_PATTERN,
+ MemCheckKind::MakeUndefined);
+ }
+
+ capacity_ = newCapacity;
+
+ setCurrentEnd();
+}
+
+void js::Nursery::freeChunksFrom(const unsigned firstFreeChunk) {
+ MOZ_ASSERT(firstFreeChunk < chunks_.length());
+
+ // The loop below may need to skip the first chunk, so we use this copy of
+ // the index, which we can modify.
+ unsigned firstChunkToDecommit = firstFreeChunk;
+
+ if ((firstChunkToDecommit == 0) && isSubChunkMode()) {
+ // Part of the first chunk may be hard-decommitted, un-decommit it so that
+ // the GC's normal chunk-handling doesn't segfault.
+ MOZ_ASSERT(currentChunk_ == 0);
+ if (!chunk(0).markPagesInUseHard(ChunkSize)) {
+ // Free the chunk if we can't allocate its pages.
+ UnmapPages(static_cast<void*>(&chunk(0)), ChunkSize);
+ firstChunkToDecommit = 1;
+ }
+ }
+
+ {
+ AutoLockHelperThreadState lock;
+ for (size_t i = firstChunkToDecommit; i < chunks_.length(); i++) {
+ decommitTask->queueChunk(chunks_[i], lock);
+ }
+ }
+
+ chunks_.shrinkTo(firstFreeChunk);
+}
+
+void js::Nursery::shrinkAllocableSpace(size_t newCapacity) {
+#ifdef JS_GC_ZEAL
+ if (gc->hasZealMode(ZealMode::GenerationalGC)) {
+ return;
+ }
+#endif
+
+ // Don't shrink the nursery to zero (use Nursery::disable() instead)
+ // This can't happen due to the rounding-down performed above because of the
+ // clamping in maybeResizeNursery().
+ MOZ_ASSERT(newCapacity != 0);
+ // Don't attempt to shrink it to the same size.
+ if (newCapacity == capacity_) {
+ return;
+ }
+ MOZ_ASSERT(newCapacity < capacity_);
+
+ unsigned newCount = HowMany(newCapacity, ChunkSize);
+ if (newCount < allocatedChunkCount()) {
+ freeChunksFrom(newCount);
+ }
+
+ size_t oldCapacity = capacity_;
+ capacity_ = newCapacity;
+
+ setCurrentEnd();
+
+ if (isSubChunkMode()) {
+ MOZ_ASSERT(currentChunk_ == 0);
+ size_t size = std::min(oldCapacity, ChunkSize) - newCapacity;
+ chunk(0).poisonRange(newCapacity, size, JS_SWEPT_NURSERY_PATTERN,
+ MemCheckKind::MakeNoAccess);
+
+ AutoLockHelperThreadState lock;
+ decommitTask->queueRange(capacity_, chunk(0), lock);
+ }
+}
+
+uintptr_t js::Nursery::currentEnd() const {
+ // These are separate asserts because it can be useful to see which one
+ // failed.
+ MOZ_ASSERT_IF(isSubChunkMode(), currentChunk_ == 0);
+ MOZ_ASSERT_IF(isSubChunkMode(), currentEnd_ <= chunk(currentChunk_).end());
+ MOZ_ASSERT_IF(!isSubChunkMode(), currentEnd_ == chunk(currentChunk_).end());
+ MOZ_ASSERT(currentEnd_ != chunk(currentChunk_).start());
+ return currentEnd_;
+}
+
+gcstats::Statistics& js::Nursery::stats() const { return gc->stats(); }
+
+MOZ_ALWAYS_INLINE const js::gc::GCSchedulingTunables& js::Nursery::tunables()
+ const {
+ return gc->tunables;
+}
+
+bool js::Nursery::isSubChunkMode() const {
+ return capacity() <= NurseryChunkUsableSize;
+}
+
+void js::Nursery::sweepMapAndSetObjects() {
+ auto gcx = runtime()->gcContext();
+
+ for (auto mapobj : mapsWithNurseryMemory_) {
+ MapObject::sweepAfterMinorGC(gcx, mapobj);
+ }
+ mapsWithNurseryMemory_.clearAndFree();
+
+ for (auto setobj : setsWithNurseryMemory_) {
+ SetObject::sweepAfterMinorGC(gcx, setobj);
+ }
+ setsWithNurseryMemory_.clearAndFree();
+}
+
+void js::Nursery::joinDecommitTask() { decommitTask->join(); }
+
+JS_PUBLIC_API void JS::EnableNurseryStrings(JSContext* cx) {
+ AutoEmptyNursery empty(cx);
+ ReleaseAllJITCode(cx->gcContext());
+ cx->runtime()->gc.nursery().enableStrings();
+}
+
+JS_PUBLIC_API void JS::DisableNurseryStrings(JSContext* cx) {
+ AutoEmptyNursery empty(cx);
+ ReleaseAllJITCode(cx->gcContext());
+ cx->runtime()->gc.nursery().disableStrings();
+}
+
+JS_PUBLIC_API void JS::EnableNurseryBigInts(JSContext* cx) {
+ AutoEmptyNursery empty(cx);
+ ReleaseAllJITCode(cx->gcContext());
+ cx->runtime()->gc.nursery().enableBigInts();
+}
+
+JS_PUBLIC_API void JS::DisableNurseryBigInts(JSContext* cx) {
+ AutoEmptyNursery empty(cx);
+ ReleaseAllJITCode(cx->gcContext());
+ cx->runtime()->gc.nursery().disableBigInts();
+}
diff --git a/js/src/gc/Nursery.h b/js/src/gc/Nursery.h
new file mode 100644
index 0000000000..3dde209d77
--- /dev/null
+++ b/js/src/gc/Nursery.h
@@ -0,0 +1,645 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sw=2 et tw=80:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Nursery_h
+#define gc_Nursery_h
+
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/TimeStamp.h"
+
+#include "gc/Heap.h"
+#include "gc/MallocedBlockCache.h"
+#include "gc/Pretenuring.h"
+#include "js/AllocPolicy.h"
+#include "js/Class.h"
+#include "js/GCAPI.h"
+#include "js/TypeDecls.h"
+#include "js/UniquePtr.h"
+#include "js/Vector.h"
+
+#define FOR_EACH_NURSERY_PROFILE_TIME(_) \
+ /* Key Header text */ \
+ _(Total, "total") \
+ _(TraceValues, "mkVals") \
+ _(TraceCells, "mkClls") \
+ _(TraceSlots, "mkSlts") \
+ _(TraceWholeCells, "mcWCll") \
+ _(TraceGenericEntries, "mkGnrc") \
+ _(CheckHashTables, "ckTbls") \
+ _(MarkRuntime, "mkRntm") \
+ _(MarkDebugger, "mkDbgr") \
+ _(SweepCaches, "swpCch") \
+ _(CollectToObjFP, "colObj") \
+ _(CollectToStrFP, "colStr") \
+ _(ObjectsTenuredCallback, "tenCB") \
+ _(Sweep, "sweep") \
+ _(UpdateJitActivations, "updtIn") \
+ _(FreeMallocedBuffers, "frSlts") \
+ _(FreeTrailerBlocks, "frTrBs") \
+ _(ClearStoreBuffer, "clrSB") \
+ _(ClearNursery, "clear") \
+ _(PurgeStringToAtomCache, "pStoA") \
+ _(Pretenure, "pretnr")
+
+template <typename T>
+class SharedMem;
+
+namespace js {
+
+struct StringStats;
+class AutoLockGCBgAlloc;
+class ObjectElements;
+struct NurseryChunk;
+class HeapSlot;
+class JSONPrinter;
+class MapObject;
+class SetObject;
+class JS_PUBLIC_API Sprinter;
+
+namespace gc {
+class AutoGCSession;
+struct Cell;
+class GCSchedulingTunables;
+class TenuringTracer;
+} // namespace gc
+
+class Nursery {
+ public:
+ explicit Nursery(gc::GCRuntime* gc);
+ ~Nursery();
+
+ [[nodiscard]] bool init(AutoLockGCBgAlloc& lock);
+
+ // Number of allocated (ready to use) chunks.
+ unsigned allocatedChunkCount() const { return chunks_.length(); }
+
+ // Total number of chunks and the capacity of the nursery. Chunks will be
+ // lazily allocated and added to the chunks array up to this limit, after
+ // that the nursery must be collected, this limit may be raised during
+ // collection.
+ unsigned maxChunkCount() const {
+ MOZ_ASSERT(capacity());
+ return HowMany(capacity(), gc::ChunkSize);
+ }
+
+ void enable();
+ void disable();
+ bool isEnabled() const { return capacity() != 0; }
+
+ void enableStrings();
+ void disableStrings();
+ bool canAllocateStrings() const { return canAllocateStrings_; }
+
+ void enableBigInts();
+ void disableBigInts();
+ bool canAllocateBigInts() const { return canAllocateBigInts_; }
+
+ // Return true if no allocations have been made since the last collection.
+ bool isEmpty() const;
+
+ // Check whether an arbitrary pointer is within the nursery. This is
+ // slower than IsInsideNursery(Cell*), but works on all types of pointers.
+ MOZ_ALWAYS_INLINE bool isInside(gc::Cell* cellp) const = delete;
+ MOZ_ALWAYS_INLINE bool isInside(const void* p) const {
+ for (auto chunk : chunks_) {
+ if (uintptr_t(p) - uintptr_t(chunk) < gc::ChunkSize) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ template <typename T>
+ inline bool isInside(const SharedMem<T>& p) const;
+
+ // Allocate and return a pointer to a new GC thing. Returns nullptr if the
+ // Nursery is full.
+ void* allocateCell(gc::AllocSite* site, size_t size, JS::TraceKind kind);
+
+ static size_t nurseryCellHeaderSize() {
+ return sizeof(gc::NurseryCellHeader);
+ }
+
+ // Allocate a buffer for a given zone, using the nursery if possible.
+ void* allocateBuffer(JS::Zone* zone, size_t nbytes);
+
+ // Allocate a buffer for a given object, using the nursery if possible and
+ // obj is in the nursery.
+ void* allocateBuffer(JS::Zone* zone, JSObject* obj, size_t nbytes);
+
+ // Allocate a buffer for a given object, always using the nursery if obj is
+ // in the nursery. The requested size must be less than or equal to
+ // MaxNurseryBufferSize.
+ void* allocateBufferSameLocation(JSObject* obj, size_t nbytes);
+
+ // Allocate a zero-initialized buffer for a given zone, using the nursery if
+ // possible. If the buffer isn't allocated in the nursery, the given arena is
+ // used.
+ void* allocateZeroedBuffer(JS::Zone* zone, size_t nbytes,
+ arena_id_t arena = js::MallocArena);
+
+ // Allocate a zero-initialized buffer for a given object, using the nursery if
+ // possible and obj is in the nursery. If the buffer isn't allocated in the
+ // nursery, the given arena is used.
+ void* allocateZeroedBuffer(JSObject* obj, size_t nbytes,
+ arena_id_t arena = js::MallocArena);
+
+ // Resize an existing buffer.
+ void* reallocateBuffer(JS::Zone* zone, gc::Cell* cell, void* oldBuffer,
+ size_t oldBytes, size_t newBytes);
+
+ // Allocate a digits buffer for a given BigInt, using the nursery if possible
+ // and |bi| is in the nursery.
+ void* allocateBuffer(JS::BigInt* bi, size_t nbytes);
+
+ // Free an object buffer.
+ void freeBuffer(void* buffer, size_t nbytes);
+
+ // The maximum number of bytes allowed to reside in nursery buffers.
+ static const size_t MaxNurseryBufferSize = 1024;
+
+ // Do a minor collection.
+ void collect(JS::GCOptions options, JS::GCReason reason);
+
+ // If the thing at |*ref| in the Nursery has been forwarded, set |*ref| to
+ // the new location and return true. Otherwise return false and leave
+ // |*ref| unset.
+ [[nodiscard]] MOZ_ALWAYS_INLINE static bool getForwardedPointer(
+ js::gc::Cell** ref);
+
+ // Forward a slots/elements pointer stored in an Ion frame.
+ void forwardBufferPointer(uintptr_t* pSlotsElems);
+
+ inline void maybeSetForwardingPointer(JSTracer* trc, void* oldData,
+ void* newData, bool direct);
+ inline void setForwardingPointerWhileTenuring(void* oldData, void* newData,
+ bool direct);
+
+ // Register a malloced buffer that is held by a nursery object, which
+ // should be freed at the end of a minor GC. Buffers are unregistered when
+ // their owning objects are tenured.
+ [[nodiscard]] bool registerMallocedBuffer(void* buffer, size_t nbytes);
+
+ // Mark a malloced buffer as no longer needing to be freed.
+ void removeMallocedBuffer(void* buffer, size_t nbytes) {
+ MOZ_ASSERT(mallocedBuffers.has(buffer));
+ MOZ_ASSERT(nbytes > 0);
+ MOZ_ASSERT(mallocedBufferBytes >= nbytes);
+ mallocedBuffers.remove(buffer);
+ mallocedBufferBytes -= nbytes;
+ }
+
+ // Mark a malloced buffer as no longer needing to be freed during minor
+ // GC. There's no need to account for the size here since all remaining
+ // buffers will soon be freed.
+ void removeMallocedBufferDuringMinorGC(void* buffer) {
+ MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());
+ MOZ_ASSERT(mallocedBuffers.has(buffer));
+ mallocedBuffers.remove(buffer);
+ }
+
+ [[nodiscard]] bool addedUniqueIdToCell(gc::Cell* cell) {
+ MOZ_ASSERT(IsInsideNursery(cell));
+ MOZ_ASSERT(isEnabled());
+ return cellsWithUid_.append(cell);
+ }
+
+ size_t sizeOfMallocedBuffers(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ // Wasm "trailer" (C++-heap-allocated) blocks.
+ //
+ // All involved blocks are allocated/deallocated via this nursery's
+ // `mallocedBlockCache_`. Hence we must store both the block address and
+ // its freelist ID, wrapped up in a PointerAndUint7.
+ //
+ // Trailer blocks registered here are added to `trailersAdded_`. Those that
+ // are later deregistered as a result of `obj_moved` calls that indicate
+ // tenuring, should be added to `trailersRemoved_`.
+ //
+ // Unfortunately ::unregisterTrailer cannot be allowed to OOM. To get
+ // around this we rely on the observation that all deregistered blocks
+ // should previously have been registered, so the deregistered set can never
+ // be larger than the registered set. Hence ::registerTrailer effectively
+ // preallocates space in `trailersRemoved_` so as to ensure that, in the
+ // worst case, all registered blocks can be handed to ::unregisterTrailer
+ // without needing to resize `trailersRemoved_` in ::unregisterTrailer.
+ //
+ // The downside is that most of the space in `trailersRemoved_` is wasted in
+ // the case where there are few blocks deregistered. This is unfortunate
+ // but it's hard to see how to avoid it.
+ //
+ // At the end of a minor collection, all blocks in the set `trailersAdded_ -
+ // trailersRemoved_[0 .. trailersRemovedUsed_ - 1]` are handed back to the
+ // `mallocedBlockCache_`.
+ [[nodiscard]] bool registerTrailer(PointerAndUint7 blockAndListID,
+ size_t nBytes) {
+ MOZ_ASSERT(trailersAdded_.length() == trailersRemoved_.length());
+ MOZ_ASSERT(nBytes > 0);
+ if (MOZ_UNLIKELY(!trailersAdded_.append(blockAndListID))) {
+ return false;
+ }
+ if (MOZ_UNLIKELY(!trailersRemoved_.append(nullptr))) {
+ trailersAdded_.popBack();
+ return false;
+ }
+
+ // This is a clone of the logic in ::registerMallocedBuffer. It may be
+ // that some other heuristic is better, once we know more about the
+ // typical behaviour of wasm-GC applications.
+ trailerBytes_ += nBytes;
+ if (MOZ_UNLIKELY(trailerBytes_ > capacity() * 8)) {
+ requestMinorGC(JS::GCReason::NURSERY_TRAILERS);
+ }
+ return true;
+ }
+
+ void unregisterTrailer(void* block) {
+ MOZ_ASSERT(trailersRemovedUsed_ < trailersRemoved_.length());
+ trailersRemoved_[trailersRemovedUsed_] = block;
+ trailersRemovedUsed_++;
+ }
+
+ size_t sizeOfTrailerBlockSets(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ // The number of bytes from the start position to the end of the nursery.
+ // Pass maxChunkCount(), allocatedChunkCount() or chunkCountLimit()
+ // to calculate the nursery size, current lazy-allocated size or nursery
+ // limit respectively.
+ size_t spaceToEnd(unsigned chunkCount) const;
+
+ size_t capacity() const { return capacity_; }
+ size_t committed() const { return spaceToEnd(allocatedChunkCount()); }
+
+ // Used and free space both include chunk headers for that part of the
+ // nursery.
+ //
+ // usedSpace() + freeSpace() == capacity()
+ //
+ MOZ_ALWAYS_INLINE size_t usedSpace() const {
+ return capacity() - freeSpace();
+ }
+ MOZ_ALWAYS_INLINE size_t freeSpace() const {
+ MOZ_ASSERT(isEnabled());
+ MOZ_ASSERT(currentEnd_ - position_ <= NurseryChunkUsableSize);
+ MOZ_ASSERT(currentChunk_ < maxChunkCount());
+ return (currentEnd_ - position_) +
+ (maxChunkCount() - currentChunk_ - 1) * gc::ChunkSize;
+ }
+
+#ifdef JS_GC_ZEAL
+ void enterZealMode();
+ void leaveZealMode();
+#endif
+
+ // Write profile time JSON on JSONPrinter.
+ void renderProfileJSON(JSONPrinter& json) const;
+
+ // Print header line for profile times.
+ void printProfileHeader();
+
+ // Print total profile times on shutdown.
+ void printTotalProfileTimes();
+
+ void* addressOfPosition() const { return (void**)&position_; }
+ static constexpr int32_t offsetOfCurrentEndFromPosition() {
+ return offsetof(Nursery, currentEnd_) - offsetof(Nursery, position_);
+ }
+
+ void* addressOfNurseryAllocatedSites() {
+ return pretenuringNursery.addressOfAllocatedSites();
+ }
+
+ void requestMinorGC(JS::GCReason reason) const;
+
+ bool minorGCRequested() const {
+ return minorGCTriggerReason_ != JS::GCReason::NO_REASON;
+ }
+ JS::GCReason minorGCTriggerReason() const { return minorGCTriggerReason_; }
+ void clearMinorGCRequest() {
+ minorGCTriggerReason_ = JS::GCReason::NO_REASON;
+ }
+
+ bool shouldCollect() const;
+ bool isNearlyFull() const;
+ bool isUnderused() const;
+
+ bool enableProfiling() const { return enableProfiling_; }
+
+ bool addMapWithNurseryMemory(MapObject* obj) {
+ MOZ_ASSERT_IF(!mapsWithNurseryMemory_.empty(),
+ mapsWithNurseryMemory_.back() != obj);
+ return mapsWithNurseryMemory_.append(obj);
+ }
+ bool addSetWithNurseryMemory(SetObject* obj) {
+ MOZ_ASSERT_IF(!setsWithNurseryMemory_.empty(),
+ setsWithNurseryMemory_.back() != obj);
+ return setsWithNurseryMemory_.append(obj);
+ }
+
+ // The amount of space in the mapped nursery available to allocations.
+ static const size_t NurseryChunkUsableSize =
+ gc::ChunkSize - sizeof(gc::ChunkBase);
+
+ void joinDecommitTask();
+
+ mozilla::TimeStamp collectionStartTime() {
+ return startTimes_[ProfileKey::Total];
+ }
+
+ bool canCreateAllocSite() { return pretenuringNursery.canCreateAllocSite(); }
+ void noteAllocSiteCreated() { pretenuringNursery.noteAllocSiteCreated(); }
+ bool reportPretenuring() const { return reportPretenuring_; }
+ void maybeStopPretenuring(gc::GCRuntime* gc) {
+ pretenuringNursery.maybeStopPretenuring(gc);
+ }
+
+ void setAllocFlagsForZone(JS::Zone* zone);
+
+ // Round a size in bytes to the nearest valid nursery size.
+ static size_t roundSize(size_t size);
+
+ // The malloc'd block cache.
+ gc::MallocedBlockCache& mallocedBlockCache() { return mallocedBlockCache_; }
+ size_t sizeOfMallocedBlockCache(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocedBlockCache_.sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ private:
+ // Fields used during allocation fast path are grouped first:
+
+ // Pointer to the first unallocated byte in the nursery.
+ uintptr_t position_;
+
+ // Pointer to the last byte of space in the current chunk.
+ uintptr_t currentEnd_;
+
+ // Other fields not necessarily used during allocation follow:
+
+ gc::GCRuntime* const gc;
+
+ // Vector of allocated chunks to allocate from.
+ Vector<NurseryChunk*, 0, SystemAllocPolicy> chunks_;
+
+ // The index of the chunk that is currently being allocated from.
+ uint32_t currentChunk_;
+
+ // These fields refer to the beginning of the nursery. They're normally 0
+ // and chunk(0).start() respectively. Except when a generational GC zeal
+ // mode is active, then they may be arbitrary (see Nursery::clear()).
+ uint32_t currentStartChunk_;
+ uintptr_t currentStartPosition_;
+
+ // The current nursery capacity measured in bytes. It may grow up to this
+ // value without a collection, allocating chunks on demand. This limit may be
+ // changed by maybeResizeNursery() each collection. It includes chunk headers.
+ size_t capacity_;
+
+ gc::PretenuringNursery pretenuringNursery;
+
+ mozilla::TimeDuration timeInChunkAlloc_;
+
+ // Report minor collections taking at least this long, if enabled.
+ bool enableProfiling_;
+ bool profileWorkers_;
+ mozilla::TimeDuration profileThreshold_;
+
+ // Whether we will nursery-allocate strings.
+ bool canAllocateStrings_;
+
+ // Whether we will nursery-allocate BigInts.
+ bool canAllocateBigInts_;
+
+ // Report how many strings were deduplicated.
+ bool reportDeduplications_;
+
+ // Whether to report information on pretenuring, and if so the allocation
+ // threshold at which to report details of each allocation site.
+ bool reportPretenuring_;
+ size_t reportPretenuringThreshold_;
+
+ // Whether and why a collection of this nursery has been requested. This is
+ // mutable as it is set by the store buffer, which otherwise cannot modify
+ // anything in the nursery.
+ mutable JS::GCReason minorGCTriggerReason_;
+
+ // Profiling data.
+
+ enum class ProfileKey {
+#define DEFINE_TIME_KEY(name, text) name,
+ FOR_EACH_NURSERY_PROFILE_TIME(DEFINE_TIME_KEY)
+#undef DEFINE_TIME_KEY
+ KeyCount
+ };
+
+ using ProfileTimes =
+ mozilla::EnumeratedArray<ProfileKey, ProfileKey::KeyCount,
+ mozilla::TimeStamp>;
+ using ProfileDurations =
+ mozilla::EnumeratedArray<ProfileKey, ProfileKey::KeyCount,
+ mozilla::TimeDuration>;
+
+ ProfileTimes startTimes_;
+ ProfileDurations profileDurations_;
+ ProfileDurations totalDurations_;
+
+ // Data about the previous collection.
+ struct PreviousGC {
+ JS::GCReason reason = JS::GCReason::NO_REASON;
+ size_t nurseryCapacity = 0;
+ size_t nurseryCommitted = 0;
+ size_t nurseryUsedBytes = 0;
+ size_t nurseryUsedChunkCount = 0;
+ size_t tenuredBytes = 0;
+ size_t tenuredCells = 0;
+ mozilla::TimeStamp endTime;
+ };
+ PreviousGC previousGC;
+
+ bool hasRecentGrowthData;
+ double smoothedTargetSize;
+
+ // Calculate the promotion rate of the most recent minor GC.
+ // The valid_for_tenuring parameter is used to return whether this
+ // promotion rate is accurate enough (the nursery was full enough) to be
+ // used for tenuring and other decisions.
+ //
+ // Must only be called if the previousGC data is initialised.
+ double calcPromotionRate(bool* validForTenuring) const;
+
+ // The set of externally malloced buffers potentially kept live by objects
+ // stored in the nursery. Any external buffers that do not belong to a
+ // tenured thing at the end of a minor GC must be freed.
+ using BufferRelocationOverlay = void*;
+ using BufferSet = HashSet<void*, PointerHasher<void*>, SystemAllocPolicy>;
+ BufferSet mallocedBuffers;
+ size_t mallocedBufferBytes = 0;
+
+ // Wasm "trailer" (C++-heap-allocated) blocks. See comments above on
+ // ::registerTrailer and ::unregisterTrailer.
+ Vector<PointerAndUint7, 0, SystemAllocPolicy> trailersAdded_;
+ Vector<void*, 0, SystemAllocPolicy> trailersRemoved_;
+ size_t trailersRemovedUsed_ = 0;
+ size_t trailerBytes_ = 0;
+
+ void freeTrailerBlocks();
+
+ // During a collection most hoisted slot and element buffers indicate their
+ // new location with a forwarding pointer at the base. This does not work
+ // for buffers whose length is less than pointer width, or when different
+ // buffers might overlap each other. For these, an entry in the following
+ // table is used.
+ typedef HashMap<void*, void*, PointerHasher<void*>, SystemAllocPolicy>
+ ForwardedBufferMap;
+ ForwardedBufferMap forwardedBuffers;
+
+ // When we assign a unique id to cell in the nursery, that almost always
+ // means that the cell will be in a hash table, and thus, held live,
+ // automatically moving the uid from the nursery to its new home in
+ // tenured. It is possible, if rare, for an object that acquired a uid to
+ // be dead before the next collection, in which case we need to know to
+ // remove it when we sweep.
+ //
+ // Note: we store the pointers as Cell* here, resulting in an ugly cast in
+ // sweep. This is because this structure is used to help implement
+ // stable object hashing and we have to break the cycle somehow.
+ using CellsWithUniqueIdVector = Vector<gc::Cell*, 8, SystemAllocPolicy>;
+ CellsWithUniqueIdVector cellsWithUid_;
+
+ // Lists of map and set objects allocated in the nursery or with iterators
+ // allocated there. Such objects need to be swept after minor GC.
+ Vector<MapObject*, 0, SystemAllocPolicy> mapsWithNurseryMemory_;
+ Vector<SetObject*, 0, SystemAllocPolicy> setsWithNurseryMemory_;
+
+ UniquePtr<NurseryDecommitTask> decommitTask;
+
+ // A cache of small C++-heap allocated blocks associated with this Nursery.
+ // This is provided so as to allow cheap allocation/deallocation of
+ // out-of-line storage areas as used by WasmStructObject and
+ // WasmArrayObject, although the mechanism is general and not specific to
+ // these object types. Regarding lifetimes, because the cache holds only
+ // blocks that are not currently in use, it can be flushed at any point with
+ // no correctness impact, only a performance impact.
+ gc::MallocedBlockCache mallocedBlockCache_;
+
+ NurseryChunk& chunk(unsigned index) const { return *chunks_[index]; }
+
+ // Set the current chunk. This updates the currentChunk_, position_ and
+ // currentEnd_ values as appropriate. It'll also poison the chunk, either a
+ // portion of the chunk if it is already the current chunk, or the whole chunk
+ // if fullPoison is true or it is not the current chunk.
+ void setCurrentChunk(unsigned chunkno);
+
+ bool initFirstChunk(AutoLockGCBgAlloc& lock);
+
+ // extent is advisory, it will be ignored in sub-chunk and generational zeal
+ // modes. It will be clamped to Min(NurseryChunkUsableSize, capacity_).
+ void poisonAndInitCurrentChunk(size_t extent = gc::ChunkSize);
+
+ void setCurrentEnd();
+ void setStartPosition();
+
+ // Allocate the next chunk, or the first chunk for initialization.
+ // Callers will probably want to call setCurrentChunk(0) next.
+ [[nodiscard]] bool allocateNextChunk(unsigned chunkno,
+ AutoLockGCBgAlloc& lock);
+
+ MOZ_ALWAYS_INLINE uintptr_t currentEnd() const;
+
+ uintptr_t position() const { return position_; }
+
+ MOZ_ALWAYS_INLINE bool isSubChunkMode() const;
+
+ JSRuntime* runtime() const;
+ gcstats::Statistics& stats() const;
+
+ const js::gc::GCSchedulingTunables& tunables() const;
+
+ void getAllocFlagsForZone(JS::Zone* zone, bool* allocObjectsOut,
+ bool* allocStringsOut, bool* allocBigIntsOut);
+ void updateAllZoneAllocFlags();
+ void updateAllocFlagsForZone(JS::Zone* zone);
+ void discardCodeAndSetJitFlagsForZone(JS::Zone* zone);
+
+ // Common internal allocator function.
+ void* allocate(size_t size);
+
+ void* moveToNextChunkAndAllocate(size_t size);
+
+ struct CollectionResult {
+ size_t tenuredBytes;
+ size_t tenuredCells;
+ };
+ CollectionResult doCollection(gc::AutoGCSession& session,
+ JS::GCOptions options, JS::GCReason reason);
+ void traceRoots(gc::AutoGCSession& session, gc::TenuringTracer& mover);
+
+ size_t doPretenuring(JSRuntime* rt, JS::GCReason reason,
+ bool validPromotionRate, double promotionRate);
+
+ // Handle relocation of slots/elements pointers stored in Ion frames.
+ inline void setForwardingPointer(void* oldData, void* newData, bool direct);
+
+ inline void setDirectForwardingPointer(void* oldData, void* newData);
+ void setIndirectForwardingPointer(void* oldData, void* newData);
+
+ inline void setSlotsForwardingPointer(HeapSlot* oldSlots, HeapSlot* newSlots,
+ uint32_t nslots);
+ inline void setElementsForwardingPointer(ObjectElements* oldHeader,
+ ObjectElements* newHeader,
+ uint32_t capacity);
+
+#ifdef DEBUG
+ bool checkForwardingPointerLocation(void* ptr, bool expectedInside);
+#endif
+
+ // Updates pointers to nursery objects that have been tenured and discards
+ // pointers to objects that have been freed.
+ void sweep();
+
+ // Reset the current chunk and position after a minor collection. Also poison
+ // the nursery on debug & nightly builds.
+ void clear();
+
+ void sweepMapAndSetObjects();
+
+ // Change the allocable space provided by the nursery.
+ void maybeResizeNursery(JS::GCOptions options, JS::GCReason reason);
+ size_t targetSize(JS::GCOptions options, JS::GCReason reason);
+ void clearRecentGrowthData();
+ void growAllocableSpace(size_t newCapacity);
+ void shrinkAllocableSpace(size_t newCapacity);
+ void minimizeAllocableSpace();
+
+ // Free the chunks starting at firstFreeChunk until the end of the chunks
+ // vector. Shrinks the vector but does not update maxChunkCount().
+ void freeChunksFrom(unsigned firstFreeChunk);
+
+ void sendTelemetry(JS::GCReason reason, mozilla::TimeDuration totalTime,
+ bool wasEmpty, double promotionRate,
+ size_t sitesPretenured);
+
+ void printCollectionProfile(JS::GCReason reason, double promotionRate);
+ void printDeduplicationData(js::StringStats& prev, js::StringStats& curr);
+
+ // Profile recording and printing.
+ void maybeClearProfileDurations();
+ void startProfile(ProfileKey key);
+ void endProfile(ProfileKey key);
+ static bool printProfileDurations(const ProfileDurations& times,
+ Sprinter& sprinter);
+
+ mozilla::TimeStamp collectionStartTime() const;
+ mozilla::TimeStamp lastCollectionEndTime() const;
+
+ friend class gc::GCRuntime;
+ friend class gc::TenuringTracer;
+ friend struct NurseryChunk;
+};
+
+} // namespace js
+
+#endif // gc_Nursery_h
diff --git a/js/src/gc/NurseryAwareHashMap.h b/js/src/gc/NurseryAwareHashMap.h
new file mode 100644
index 0000000000..35c2bebcea
--- /dev/null
+++ b/js/src/gc/NurseryAwareHashMap.h
@@ -0,0 +1,209 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_NurseryAwareHashMap_h
+#define gc_NurseryAwareHashMap_h
+
+#include "gc/Barrier.h"
+#include "gc/Tracer.h"
+#include "js/GCHashTable.h"
+#include "js/GCPolicyAPI.h"
+#include "js/HashTable.h"
+
+namespace js {
+
+namespace detail {
+
+// This class only handles the incremental case and does not deal with nursery
+// pointers. The only users should be for NurseryAwareHashMap; it is defined
+// externally because we need a GCPolicy for its use in the contained map.
+template <typename T>
+class UnsafeBareWeakHeapPtr : public ReadBarriered<T> {
+ public:
+  // Default-construct to a safely-initialized (null-like) value.
+  UnsafeBareWeakHeapPtr()
+      : ReadBarriered<T>(JS::SafelyInitialized<T>::create()) {}
+  MOZ_IMPLICIT UnsafeBareWeakHeapPtr(const T& v) : ReadBarriered<T>(v) {}
+  explicit UnsafeBareWeakHeapPtr(const UnsafeBareWeakHeapPtr& v)
+      : ReadBarriered<T>(v) {}
+  UnsafeBareWeakHeapPtr(UnsafeBareWeakHeapPtr&& v)
+      : ReadBarriered<T>(std::move(v)) {}
+
+  // Assignment stores the value directly, with no pre- or post-write barrier
+  // (hence "unsafe" in the name).
+  UnsafeBareWeakHeapPtr& operator=(const UnsafeBareWeakHeapPtr& v) {
+    this->value = v.value;
+    return *this;
+  }
+
+  UnsafeBareWeakHeapPtr& operator=(const T& v) {
+    this->value = v;
+    return *this;
+  }
+
+  // Read the value. For markable values this triggers the read barrier so an
+  // in-progress incremental GC sees the weakly-held target.
+  const T get() const {
+    if (!InternalBarrierMethods<T>::isMarkable(this->value)) {
+      return JS::SafelyInitialized<T>::create();
+    }
+    this->read();
+    return this->value;
+  }
+
+  explicit operator bool() const { return bool(this->value); }
+
+  // Read the value without triggering the read barrier.
+  const T unbarrieredGet() const { return this->value; }
+  T* unsafeGet() { return &this->value; }
+  T const* unsafeGet() const { return &this->value; }
+};
+} // namespace detail
+
+enum : bool { DuplicatesNotPossible, DuplicatesPossible };
+
+// The "nursery aware" hash map is a special case of GCHashMap that is able to
+// treat nursery allocated members weakly during a minor GC: e.g. it allows for
+// nursery allocated objects to be collected during nursery GC where a normal
+// hash table treats such edges strongly.
+//
+// Doing this requires some strong constraints on what can be stored in this
+// table and how it can be accessed. At the moment, this table assumes that all
+// values contain a strong reference to the key. This limits its usefulness to
+// the CrossCompartmentMap at the moment, but might serve as a useful base for
+// other tables in future.
+template <typename Key, typename Value, typename AllocPolicy = TempAllocPolicy,
+          bool AllowDuplicates = DuplicatesNotPossible>
+class NurseryAwareHashMap {
+  using MapKey = UnsafeBarePtr<Key>;
+  using MapValue = detail::UnsafeBareWeakHeapPtr<Value>;
+  using HashPolicy = DefaultHasher<MapKey>;
+  using MapType = GCRekeyableHashMap<MapKey, MapValue, HashPolicy, AllocPolicy>;
+  MapType map;
+
+  // Keep a list of all keys for which key->isTenured() is false. This lets us
+  // avoid a full traversal of the map on each minor GC, keeping the minor GC
+  // times proportional to the nursery heap size.
+  Vector<Key, 0, AllocPolicy> nurseryEntries;
+
+ public:
+  using Lookup = typename MapType::Lookup;
+  using Ptr = typename MapType::Ptr;
+  using Range = typename MapType::Range;
+  using Entry = typename MapType::Entry;
+
+  explicit NurseryAwareHashMap(AllocPolicy a = AllocPolicy())
+      : map(a), nurseryEntries(std::move(a)) {}
+  explicit NurseryAwareHashMap(size_t length) : map(length) {}
+  NurseryAwareHashMap(AllocPolicy a, size_t length)
+      : map(a, length), nurseryEntries(std::move(a)) {}
+
+  bool empty() const { return map.empty(); }
+  Ptr lookup(const Lookup& l) const { return map.lookup(l); }
+  void remove(Ptr p) { map.remove(p); }
+  Range all() const { return map.all(); }
+  struct Enum : public MapType::Enum {
+    explicit Enum(NurseryAwareHashMap& namap) : MapType::Enum(namap.map) {}
+  };
+  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+    return map.shallowSizeOfExcludingThis(mallocSizeOf) +
+           nurseryEntries.sizeOfExcludingThis(mallocSizeOf);
+  }
+  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+    return map.shallowSizeOfIncludingThis(mallocSizeOf) +
+           nurseryEntries.sizeOfIncludingThis(mallocSizeOf);
+  }
+
+  // Add or update an entry. If either the key or the value is nursery
+  // allocated, the key is also appended to nurseryEntries so that
+  // sweepAfterMinorGC can revisit just this entry. Note the append happens
+  // before the map insertion: if the insertion below fails, the stale
+  // nurseryEntries entry is harmless because sweepAfterMinorGC skips keys
+  // that are not present in the map.
+  [[nodiscard]] bool put(const Key& key, const Value& value) {
+    if ((!key->isTenured() || !value->isTenured()) &&
+        !nurseryEntries.append(key)) {
+      return false;
+    }
+
+    auto p = map.lookupForAdd(key);
+    if (p) {
+      p->value() = value;
+      return true;
+    }
+
+    return map.add(p, key, value);
+  }
+
+  // After a minor GC: drop entries whose value died, and rekey entries whose
+  // key was moved out of the nursery. Only entries recorded in nurseryEntries
+  // are visited.
+  void sweepAfterMinorGC(JSTracer* trc) {
+    for (auto& key : nurseryEntries) {
+      auto p = map.lookup(key);
+      if (!p) {
+        continue;
+      }
+
+      // Drop the entry if the value is not marked.
+      if (!JS::GCPolicy<MapValue>::traceWeak(trc, &p->value())) {
+        map.remove(p);
+        continue;
+      }
+
+      // Update and relocate the key, if the value is still needed.
+      //
+      // Non-string Values will contain a strong reference to Key, as per its
+      // use in the CrossCompartmentWrapperMap, so the key will never be dying
+      // here. Strings do *not* have any sort of pointer from wrapper to
+      // wrappee, as they are just copies. The wrapper map entry is merely used
+      // as a cache to avoid re-copying the string, and currently that entire
+      // cache is flushed on major GC.
+      MapKey copy(key);
+      if (!JS::GCPolicy<MapKey>::traceWeak(trc, &copy)) {
+        map.remove(p);
+        continue;
+      }
+
+      if (AllowDuplicates) {
+        // Drop duplicated keys.
+        //
+        // A key can be forwarded to another place. In this case, rekey the
+        // item. If two or more different keys are forwarded to the same new
+        // key, simply drop the later ones.
+        if (key == copy) {
+          // No rekey needed.
+        } else if (map.has(copy)) {
+          // Key was forwarded to the same place that another key was already
+          // forwarded to.
+          map.remove(p);
+        } else {
+          map.rekeyAs(key, copy, copy);
+        }
+      } else {
+        MOZ_ASSERT(key == copy || !map.has(copy));
+        map.rekeyIfMoved(key, copy);
+      }
+    }
+    nurseryEntries.clear();
+  }
+
+  // Forward weak tracing to the underlying map (covers all entries, not just
+  // the nursery ones).
+  void traceWeak(JSTracer* trc) { map.traceWeak(trc); }
+
+  void clear() {
+    map.clear();
+    nurseryEntries.clear();
+  }
+
+  bool hasNurseryEntries() const { return !nurseryEntries.empty(); }
+};
+
+} // namespace js
+
+namespace JS {
+
+// GCPolicy for NurseryAwareHashMap's value type: trace strongly during
+// marking, but allow the entry to be dropped via traceWeak when the target
+// dies.
+template <typename T>
+struct GCPolicy<js::detail::UnsafeBareWeakHeapPtr<T>> {
+  static void trace(JSTracer* trc, js::detail::UnsafeBareWeakHeapPtr<T>* thingp,
+                    const char* name) {
+    js::TraceEdge(trc, thingp, name);
+  }
+  static bool traceWeak(JSTracer* trc,
+                        js::detail::UnsafeBareWeakHeapPtr<T>* thingp) {
+    return js::TraceWeakEdge(trc, thingp, "UnsafeBareWeakHeapPtr");
+  }
+};
+
+} // namespace JS
+
+namespace mozilla {} // namespace mozilla
+
+#endif // gc_NurseryAwareHashMap_h
diff --git a/js/src/gc/ObjectKind-inl.h b/js/src/gc/ObjectKind-inl.h
new file mode 100644
index 0000000000..a03b183447
--- /dev/null
+++ b/js/src/gc/ObjectKind-inl.h
@@ -0,0 +1,162 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal helper functions for getting the AllocKind used to allocate a
+ * JSObject and related information.
+ */
+
+#ifndef gc_ObjectKind_inl_h
+#define gc_ObjectKind_inl_h
+
+#include "util/Memory.h"
+#include "vm/NativeObject.h"
+
+namespace js {
+namespace gc {
+
+/* Capacity for slotsToThingKind */
+const size_t SLOTS_TO_THING_KIND_LIMIT = 17;
+
+extern const AllocKind slotsToThingKind[];
+
+/* Get the best kind to use when making an object with the given slot count. */
+static inline AllocKind GetGCObjectKind(size_t numSlots) {
+  // Slot counts at or beyond the lookup-table limit all use the largest
+  // object kind.
+  if (numSlots >= SLOTS_TO_THING_KIND_LIMIT) {
+    return AllocKind::OBJECT16;
+  }
+  return slotsToThingKind[numSlots];
+}
+
+// Get the alloc kind for a native object of the given class, based on its
+// reserved slot count. Proxies and functions have dedicated paths.
+static inline AllocKind GetGCObjectKind(const JSClass* clasp) {
+  MOZ_ASSERT(!clasp->isProxyObject(),
+             "Proxies should use GetProxyGCObjectKind");
+  MOZ_ASSERT(!clasp->isJSFunction());
+
+  uint32_t nslots = JSCLASS_RESERVED_SLOTS(clasp);
+  return GetGCObjectKind(nslots);
+}
+
+// Return whether a dense array with |numElements| elements can store them in
+// its fixed slots, accounting for the ObjectElements header that precedes the
+// elements.
+static constexpr bool CanUseFixedElementsForArray(size_t numElements) {
+  if (numElements > NativeObject::MAX_DENSE_ELEMENTS_COUNT) {
+    return false;
+  }
+  size_t numSlots = numElements + ObjectElements::VALUES_PER_HEADER;
+  return numSlots < SLOTS_TO_THING_KIND_LIMIT;
+}
+
+/* As for GetGCObjectKind, but for dense array allocation. */
+/* As for GetGCObjectKind, but for dense array allocation. */
+static inline AllocKind GetGCArrayKind(size_t numElements) {
+  /*
+   * Dense arrays can use their fixed slots to hold their elements array
+   * (less two Values worth of ObjectElements header), but if more than the
+   * maximum number of fixed slots is needed then the fixed slots will be
+   * unused.
+   */
+  static_assert(ObjectElements::VALUES_PER_HEADER == 2);
+  if (!CanUseFixedElementsForArray(numElements)) {
+    // Elements will be heap allocated, so use the smallest non-empty kind.
+    return AllocKind::OBJECT2;
+  }
+  return slotsToThingKind[numElements + ObjectElements::VALUES_PER_HEADER];
+}
+
+// Get the alloc kind for an object with exactly |numFixedSlots| fixed slots.
+// Precondition: the count must be within the lookup table's range.
+static inline AllocKind GetGCObjectFixedSlotsKind(size_t numFixedSlots) {
+  MOZ_ASSERT(numFixedSlots < SLOTS_TO_THING_KIND_LIMIT);
+  return slotsToThingKind[numFixedSlots];
+}
+
+// Get the best kind to use when allocating an object that needs a specific
+// number of bytes.
+// Get the best kind to use when allocating an object that needs a specific
+// number of bytes.
+static inline AllocKind GetGCObjectKindForBytes(size_t nbytes) {
+  MOZ_ASSERT(nbytes <= JSObject::MAX_BYTE_SIZE);
+
+  if (nbytes <= sizeof(NativeObject)) {
+    return AllocKind::OBJECT0;
+  }
+  nbytes -= sizeof(NativeObject);
+
+  // Round the data size up to a whole number of Values (slots).
+  size_t dataSlots = AlignBytes(nbytes, sizeof(Value)) / sizeof(Value);
+  MOZ_ASSERT(nbytes <= dataSlots * sizeof(Value));
+  return GetGCObjectKind(dataSlots);
+}
+
+/* Get the number of fixed slots and initial capacity associated with a kind. */
+/* Get the number of fixed slots and initial capacity associated with a kind. */
+static constexpr inline size_t GetGCKindSlots(AllocKind thingKind) {
+  // Using a switch in hopes that thingKind will usually be a compile-time
+  // constant.
+  switch (thingKind) {
+    case AllocKind::OBJECT0:
+    case AllocKind::OBJECT0_BACKGROUND:
+      return 0;
+    case AllocKind::OBJECT2:
+    case AllocKind::OBJECT2_BACKGROUND:
+      return 2;
+    // Plain functions share the 4-slot object size; extended functions get 6.
+    case AllocKind::FUNCTION:
+    case AllocKind::OBJECT4:
+    case AllocKind::OBJECT4_BACKGROUND:
+      return 4;
+    case AllocKind::FUNCTION_EXTENDED:
+      return 6;
+    case AllocKind::OBJECT8:
+    case AllocKind::OBJECT8_BACKGROUND:
+      return 8;
+    case AllocKind::OBJECT12:
+    case AllocKind::OBJECT12_BACKGROUND:
+      return 12;
+    case AllocKind::OBJECT16:
+    case AllocKind::OBJECT16_BACKGROUND:
+      return 16;
+    default:
+      MOZ_CRASH("Bad object alloc kind");
+  }
+}
+
+// Total cell size for an object kind: the slot-less object header plus the
+// kind's fixed slots.
+static inline size_t GetGCKindBytes(AllocKind thingKind) {
+  return sizeof(JSObject_Slots0) + GetGCKindSlots(thingKind) * sizeof(Value);
+}
+
+// A class may use a background-finalized alloc kind if it has no finalizer,
+// or its finalizer is flagged as safe to run off the main thread.
+static inline bool CanUseBackgroundAllocKind(const JSClass* clasp) {
+  return !clasp->hasFinalize() || (clasp->flags & JSCLASS_BACKGROUND_FINALIZE);
+}
+
+static inline bool CanChangeToBackgroundAllocKind(AllocKind kind,
+                                                  const JSClass* clasp) {
+  // If a foreground alloc kind is specified but the class has no finalizer or a
+  // finalizer that is safe to call on a different thread, we can change the
+  // alloc kind to one which is finalized on a background thread.
+  //
+  // For example, AllocKind::OBJECT0 calls the finalizer on the main thread, and
+  // AllocKind::OBJECT0_BACKGROUND calls the finalizer on a helper thread.
+
+  MOZ_ASSERT(IsObjectAllocKind(kind));
+
+  if (IsBackgroundFinalized(kind)) {
+    return false;  // This kind is already a background finalized kind.
+  }
+
+  return CanUseBackgroundAllocKind(clasp);
+}
+
+// Convert a foreground-finalized object kind to the equivalently-sized
+// background-finalized kind.
+static inline AllocKind ForegroundToBackgroundAllocKind(AllocKind fgKind) {
+  MOZ_ASSERT(IsObjectAllocKind(fgKind));
+  MOZ_ASSERT(IsForegroundFinalized(fgKind));
+
+  // For objects, each background alloc kind is defined just after the
+  // corresponding foreground alloc kind so we can convert between them by
+  // incrementing or decrementing as appropriate.
+  AllocKind bgKind = AllocKind(size_t(fgKind) + 1);
+
+  MOZ_ASSERT(IsObjectAllocKind(bgKind));
+  MOZ_ASSERT(IsBackgroundFinalized(bgKind));
+  MOZ_ASSERT(GetGCKindSlots(bgKind) == GetGCKindSlots(fgKind));
+
+  return bgKind;
+}
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_ObjectKind_inl_h
diff --git a/js/src/gc/ParallelMarking.cpp b/js/src/gc/ParallelMarking.cpp
new file mode 100644
index 0000000000..67c29c02d7
--- /dev/null
+++ b/js/src/gc/ParallelMarking.cpp
@@ -0,0 +1,359 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/ParallelMarking.h"
+
+#include "gc/GCLock.h"
+#include "gc/ParallelWork.h"
+#include "vm/GeckoProfiler.h"
+#include "vm/HelperThreadState.h"
+#include "vm/Runtime.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::Maybe;
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
+// RAII helper: on destruction, add the elapsed time since construction to
+// |result|. Used to accumulate mark/wait times across multiple scopes.
+class AutoAddTimeDuration {
+  TimeStamp start;
+  TimeDuration& result;
+
+ public:
+  explicit AutoAddTimeDuration(TimeDuration& result)
+      : start(TimeStamp::Now()), result(result) {}
+  ~AutoAddTimeDuration() { result += TimeSince(start); }
+};
+
+ParallelMarker::ParallelMarker(GCRuntime* gc) : gc(gc) {}
+
+// One worker per GCMarker owned by the runtime.
+size_t ParallelMarker::workerCount() const { return gc->markers.length(); }
+
+// Mark black then gray in parallel. Returns false (without marking gray or
+// delayed children) if either color's marking did not finish within the
+// budget.
+bool ParallelMarker::mark(SliceBudget& sliceBudget) {
+#ifdef DEBUG
+  {
+    AutoLockHelperThreadState lock;
+    MOZ_ASSERT(workerCount() <= HelperThreadState().maxGCParallelThreads(lock));
+
+    // TODO: Even if the thread limits checked above are correct, there may not
+    // be enough threads available to start our mark tasks immediately due to
+    // other runtimes in the same process running GC.
+  }
+#endif
+
+  if (markOneColor(MarkColor::Black, sliceBudget) == NotFinished) {
+    return false;
+  }
+  MOZ_ASSERT(!hasWork(MarkColor::Black));
+
+  if (markOneColor(MarkColor::Gray, sliceBudget) == NotFinished) {
+    return false;
+  }
+  MOZ_ASSERT(!hasWork(MarkColor::Gray));
+
+  // Handle any delayed marking, which is not performed in parallel.
+  if (gc->hasDelayedMarking()) {
+    gc->markAllDelayedChildren(ReportMarkTime);
+  }
+
+  return true;
+}
+
+bool ParallelMarker::markOneColor(MarkColor color, SliceBudget& sliceBudget) {
+  // Run a marking slice and return whether the stack is now empty.
+
+  if (!hasWork(color)) {
+    return true;
+  }
+
+  gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::PARALLEL_MARK);
+
+  MOZ_ASSERT(workerCount() <= MaxParallelWorkers);
+  mozilla::Maybe<ParallelMarkTask> tasks[MaxParallelWorkers];
+
+  // Create one task per marker; each task snapshots the slice budget.
+  for (size_t i = 0; i < workerCount(); i++) {
+    GCMarker* marker = gc->markers[i].get();
+    tasks[i].emplace(this, marker, color, sliceBudget);
+
+    // Attempt to populate empty mark stacks.
+    //
+    // TODO: When tuning for more than two markers we may need to adopt a more
+    // sophisticated approach.
+    if (!marker->hasEntriesForCurrentColor() && gc->marker().canDonateWork()) {
+      GCMarker::moveWork(marker, &gc->marker());
+    }
+  }
+
+  // Count the tasks that start out with work; tasks without work will block
+  // in requestWork until donated to.
+  {
+    AutoLockGC lock(gc);
+
+    activeTasks = 0;
+    for (size_t i = 0; i < workerCount(); i++) {
+      ParallelMarkTask& task = *tasks[i];
+      if (task.hasWork()) {
+        incActiveTasks(&task, lock);
+      }
+    }
+  }
+
+  // Start all tasks and wait for them all to finish.
+  {
+    AutoLockHelperThreadState lock;
+
+    // There should always be enough parallel tasks to run our marking work.
+    MOZ_RELEASE_ASSERT(HelperThreadState().getGCParallelThreadCount(lock) >=
+                       workerCount());
+
+    for (size_t i = 0; i < workerCount(); i++) {
+      gc->startTask(*tasks[i], lock);
+    }
+
+    for (size_t i = 0; i < workerCount(); i++) {
+      gc->joinTask(*tasks[i], lock);
+    }
+  }
+
+#ifdef DEBUG
+  {
+    AutoLockGC lock(gc);
+    MOZ_ASSERT(waitingTasks.ref().isEmpty());
+    MOZ_ASSERT(waitingTaskCount == 0);
+    MOZ_ASSERT(activeTasks == 0);
+  }
+#endif
+
+  return !hasWork(color);
+}
+
+// Return whether any marker's stack has entries of the given color.
+bool ParallelMarker::hasWork(MarkColor color) const {
+  for (const auto& marker : gc->markers) {
+    if (marker->hasEntries(color)) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// Construct a task bound to one GCMarker. |color| is an AutoSetMarkColor
+// member, so the marker's current color is set for the task's lifetime, and
+// the marker is put into parallel marking mode until destruction.
+ParallelMarkTask::ParallelMarkTask(ParallelMarker* pm, GCMarker* marker,
+                                   MarkColor color, const SliceBudget& budget)
+    : GCParallelTask(pm->gc, gcstats::PhaseKind::PARALLEL_MARK, GCUse::Marking),
+      pm(pm),
+      marker(marker),
+      color(*marker, color),
+      budget(budget) {
+  marker->enterParallelMarkingMode(pm);
+}
+
+ParallelMarkTask::~ParallelMarkTask() {
+  // The task must not be destroyed while still parked waiting for work.
+  MOZ_ASSERT(!isWaiting.refNoCheck());
+  marker->leaveParallelMarkingMode();
+}
+
+// Whether this task's marker has stack entries for the color being marked.
+bool ParallelMarkTask::hasWork() const {
+  return marker->hasEntriesForCurrentColor();
+}
+
+// Report total, marking, and waiting times for this task to the GC stats.
+void ParallelMarkTask::recordDuration() {
+  gc->stats().recordParallelPhase(gcstats::PhaseKind::PARALLEL_MARK,
+                                  duration());
+  gc->stats().recordParallelPhase(gcstats::PhaseKind::PARALLEL_MARK_MARK,
+                                  markTime.ref());
+  gc->stats().recordParallelPhase(gcstats::PhaseKind::PARALLEL_MARK_WAIT,
+                                  waitTime.ref());
+}
+
+// Task entry point. Swaps the helper-thread lock for the GC lock, which
+// protects the parallel marker's waiting list and active-task count.
+void ParallelMarkTask::run(AutoLockHelperThreadState& lock) {
+  AutoUnlockHelperThreadState unlock(lock);
+
+  AutoLockGC gcLock(pm->gc);
+
+  markOrRequestWork(gcLock);
+
+  MOZ_ASSERT(!isWaiting);
+}
+
+// Main task loop: mark while we have work, otherwise block waiting for a
+// donation. Exits when marking went over budget (tryMarking returns false)
+// or when no more work will arrive (requestWork returns false).
+void ParallelMarkTask::markOrRequestWork(AutoLockGC& lock) {
+  for (;;) {
+    if (hasWork()) {
+      if (!tryMarking(lock)) {
+        return;
+      }
+    } else {
+      if (!requestWork(lock)) {
+        return;
+      }
+    }
+  }
+}
+
+// Mark with the GC lock released. Returns whether marking ran to completion
+// (false means the budget was exceeded).
+bool ParallelMarkTask::tryMarking(AutoLockGC& lock) {
+  MOZ_ASSERT(hasWork());
+  MOZ_ASSERT(marker->isParallelMarking());
+
+  // Mark until budget exceeded or we run out of work.
+  bool finished;
+  {
+    AutoUnlockGC unlock(lock);
+
+    AutoAddTimeDuration time(markTime.ref());
+    finished = marker->markCurrentColorInParallel(budget);
+  }
+
+  MOZ_ASSERT_IF(finished, !hasWork());
+  pm->decActiveTasks(this, lock);
+
+  return finished;
+}
+
+// Block until another task donates work. Returns false when the task should
+// stop: either all tasks are out of work or the budget has been exceeded.
+bool ParallelMarkTask::requestWork(AutoLockGC& lock) {
+  MOZ_ASSERT(!hasWork());
+
+  if (!pm->hasActiveTasks(lock)) {
+    return false;  // All other tasks are empty. We're finished.
+  }
+
+  budget.stepAndForceCheck();
+  if (budget.isOverBudget()) {
+    return false;  // Over budget or interrupted.
+  }
+
+  // Add ourselves to the waiting list and wait for another task to give us
+  // work. The task with work calls ParallelMarker::donateWorkFrom.
+  waitUntilResumed(lock);
+
+  return true;
+}
+
+// Park this task on the waiting list until resume()/resumeOnFinish() clears
+// |isWaiting|. Called and returns with the GC lock held; the condition
+// variable wait releases it while blocked.
+void ParallelMarkTask::waitUntilResumed(AutoLockGC& lock) {
+  GeckoProfilerRuntime& profiler = gc->rt->geckoProfiler();
+  if (profiler.enabled()) {
+    profiler.markEvent("Parallel marking wait start", "");
+  }
+
+  pm->addTaskToWaitingList(this, lock);
+
+  // Set isWaiting flag and wait for another thread to clear it and resume us.
+  MOZ_ASSERT(!isWaiting);
+  isWaiting = true;
+
+  AutoAddTimeDuration time(waitTime.ref());
+
+  do {
+    MOZ_ASSERT(pm->hasActiveTasks(lock));
+    resumed.wait(lock.guard());
+  } while (isWaiting);
+
+  MOZ_ASSERT(!pm->isTaskInWaitingList(this, lock));
+
+  if (profiler.enabled()) {
+    profiler.markEvent("Parallel marking wait end", "");
+  }
+}
+
+// Wake a waiting task after donating work to it. Called from another thread
+// (see ParallelMarker::donateWorkFrom); takes the GC lock itself.
+void ParallelMarkTask::resume() {
+  {
+    AutoLockGC lock(gc);
+    MOZ_ASSERT(isWaiting);
+
+    isWaiting = false;
+
+    // Increment the active task count before donateWorkFrom() returns so this
+    // can't reach zero before the waiting task runs again.
+    if (hasWork()) {
+      pm->incActiveTasks(this, lock);
+    }
+  }
+
+  resumed.notify_all();
+}
+
+// Wake a waiting task without giving it work, when the last active task has
+// run out of work. Caller already holds the GC lock.
+void ParallelMarkTask::resumeOnFinish(const AutoLockGC& lock) {
+  MOZ_ASSERT(isWaiting);
+  MOZ_ASSERT(!hasWork());
+
+  isWaiting = false;
+  resumed.notify_all();
+}
+
+// Append a work-less task to the waiting list. At least one task must still
+// be active, and not all tasks may wait at once (someone must have work).
+void ParallelMarker::addTaskToWaitingList(ParallelMarkTask* task,
+                                          const AutoLockGC& lock) {
+  MOZ_ASSERT(!task->hasWork());
+  MOZ_ASSERT(hasActiveTasks(lock));
+  MOZ_ASSERT(!isTaskInWaitingList(task, lock));
+  MOZ_ASSERT(waitingTaskCount < workerCount() - 1);
+
+  waitingTasks.ref().pushBack(task);
+  waitingTaskCount++;
+}
+
+#ifdef DEBUG
+// Debug-only membership check used in assertions.
+bool ParallelMarker::isTaskInWaitingList(const ParallelMarkTask* task,
+                                         const AutoLockGC& lock) const {
+  // The const cast is because ElementProbablyInList is not const.
+  return const_cast<ParallelMarkTaskList&>(waitingTasks.ref())
+      .ElementProbablyInList(const_cast<ParallelMarkTask*>(task));
+}
+#endif
+
+// Record that |task| has work to do. Protected by the GC lock.
+void ParallelMarker::incActiveTasks(ParallelMarkTask* task,
+                                    const AutoLockGC& lock) {
+  MOZ_ASSERT(task->hasWork());
+  MOZ_ASSERT(activeTasks < workerCount());
+
+  activeTasks++;
+}
+
+// Record that |task| has run out of work (or stopped). When the count hits
+// zero, no donations can arrive, so wake every parked task to let it exit.
+void ParallelMarker::decActiveTasks(ParallelMarkTask* task,
+                                    const AutoLockGC& lock) {
+  MOZ_ASSERT(activeTasks != 0);
+
+  activeTasks--;
+
+  if (activeTasks == 0) {
+    while (!waitingTasks.ref().isEmpty()) {
+      ParallelMarkTask* task = waitingTasks.ref().popFront();
+      MOZ_ASSERT(waitingTaskCount != 0);
+      waitingTaskCount--;
+      task->resumeOnFinish(lock);
+    }
+  }
+}
+
+// Called by a marker that has surplus work: move some of it to the first
+// waiting task and resume that task. Uses a non-blocking lock attempt so the
+// donor never stalls on the GC lock.
+void ParallelMarker::donateWorkFrom(GCMarker* src) {
+  if (!gc->tryLockGC()) {
+    return;
+  }
+
+  // Check there are tasks waiting for work while holding the lock.
+  if (waitingTaskCount == 0) {
+    gc->unlockGC();
+    return;
+  }
+
+  // Take the first waiting task off the list.
+  ParallelMarkTask* waitingTask = waitingTasks.ref().popFront();
+  waitingTaskCount--;
+
+  // |waitingTask| is not running so it's safe to move work to it.
+  MOZ_ASSERT(waitingTask->isWaiting);
+
+  gc->unlockGC();
+
+  // Move some work from this thread's mark stack to the waiting task.
+  MOZ_ASSERT(!waitingTask->hasWork());
+  GCMarker::moveWork(waitingTask->marker, src);
+
+  gc->stats().count(gcstats::COUNT_PARALLEL_MARK_INTERRUPTIONS);
+
+  GeckoProfilerRuntime& profiler = gc->rt->geckoProfiler();
+  if (profiler.enabled()) {
+    profiler.markEvent("Parallel marking donated work", "");
+  }
+
+  // Resume waiting task.
+  waitingTask->resume();
+}
diff --git a/js/src/gc/ParallelMarking.h b/js/src/gc/ParallelMarking.h
new file mode 100644
index 0000000000..46d8f381c6
--- /dev/null
+++ b/js/src/gc/ParallelMarking.h
@@ -0,0 +1,123 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_ParallelMarking_h
+#define gc_ParallelMarking_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/DoublyLinkedList.h"
+#include "mozilla/TimeStamp.h"
+
+#include "gc/GCMarker.h"
+#include "gc/GCParallelTask.h"
+#include "js/HeapAPI.h"
+#include "js/SliceBudget.h"
+#include "threading/ConditionVariable.h"
+#include "threading/ProtectedData.h"
+
+namespace js {
+
+class AutoLockGC;
+class AutoLockHelperThreadState;
+class AutoUnlockGC;
+
+namespace gc {
+
+class ParallelMarkTask;
+
+// Per-runtime parallel marking state.
+//
+// This class is used on the main thread and coordinates parallel marking using
+// several helper threads running ParallelMarkTasks.
+//
+// This uses a work-requesting approach. Threads mark until they run out of
+// work and then add themselves to a list of waiting tasks and block. Running
+// tasks with enough work may donate work to a waiting task and resume it.
+class MOZ_STACK_CLASS ParallelMarker {
+ public:
+  explicit ParallelMarker(GCRuntime* gc);
+
+  // Mark in parallel; returns false if the slice budget was exceeded.
+  bool mark(SliceBudget& sliceBudget);
+
+  using AtomicCount = mozilla::Atomic<uint32_t, mozilla::Relaxed>;
+  AtomicCount& waitingTaskCountRef() { return waitingTaskCount; }
+  bool hasWaitingTasks() { return waitingTaskCount != 0; }
+  // Called by a marker with surplus work; may resume a waiting task.
+  void donateWorkFrom(GCMarker* src);
+
+ private:
+  bool markOneColor(MarkColor color, SliceBudget& sliceBudget);
+
+  bool hasWork(MarkColor color) const;
+
+  void addTask(ParallelMarkTask* task, const AutoLockGC& lock);
+
+  void addTaskToWaitingList(ParallelMarkTask* task, const AutoLockGC& lock);
+#ifdef DEBUG
+  bool isTaskInWaitingList(const ParallelMarkTask* task,
+                           const AutoLockGC& lock) const;
+#endif
+
+  bool hasActiveTasks(const AutoLockGC& lock) const { return activeTasks; }
+  void incActiveTasks(ParallelMarkTask* task, const AutoLockGC& lock);
+  void decActiveTasks(ParallelMarkTask* task, const AutoLockGC& lock);
+
+  size_t workerCount() const;
+
+  friend class ParallelMarkTask;
+
+  GCRuntime* const gc;
+
+  // Tasks that have run out of work and are blocked, plus a lock-free count
+  // of them readable without the GC lock (see hasWaitingTasks).
+  using ParallelMarkTaskList = mozilla::DoublyLinkedList<ParallelMarkTask>;
+  GCLockData<ParallelMarkTaskList> waitingTasks;
+  AtomicCount waitingTaskCount;
+
+  // Number of tasks that currently have marking work; protected by GC lock.
+  GCLockData<size_t> activeTasks;
+};
+
+// A helper thread task that performs parallel marking.
+// A helper thread task that performs parallel marking. Cache-line aligned to
+// keep per-task mutable state on separate lines.
+class alignas(TypicalCacheLineSize) ParallelMarkTask
+    : public GCParallelTask,
+      public mozilla::DoublyLinkedListElement<ParallelMarkTask> {
+ public:
+  friend class ParallelMarker;
+
+  ParallelMarkTask(ParallelMarker* pm, GCMarker* marker, MarkColor color,
+                   const SliceBudget& budget);
+  ~ParallelMarkTask();
+
+  void run(AutoLockHelperThreadState& lock) override;
+
+  void recordDuration() override;
+
+ private:
+  void markOrRequestWork(AutoLockGC& lock);
+  bool tryMarking(AutoLockGC& lock);
+  bool requestWork(AutoLockGC& lock);
+
+  void waitUntilResumed(AutoLockGC& lock);
+  void resume();
+  void resumeOnFinish(const AutoLockGC& lock);
+
+  bool hasWork() const;
+
+  // The following fields are only accessed by the marker thread:
+  // NOTE(review): |resumed| is also notified from resume(), which runs on the
+  // donating thread (see ParallelMarker::donateWorkFrom) -- confirm the
+  // comment above is intended to cover only the wait() side.
+  ParallelMarker* const pm;
+  GCMarker* const marker;
+  AutoSetMarkColor color;
+  SliceBudget budget;
+  ConditionVariable resumed;
+
+  GCLockData<bool> isWaiting;
+
+  // Length of time this task spent marking and blocked waiting for work.
+  MainThreadOrGCTaskData<mozilla::TimeDuration> markTime;
+  MainThreadOrGCTaskData<mozilla::TimeDuration> waitTime;
+};
+
+} // namespace gc
+} // namespace js
+
+#endif /* gc_ParallelMarking_h */
diff --git a/js/src/gc/ParallelWork.h b/js/src/gc/ParallelWork.h
new file mode 100644
index 0000000000..db97c75009
--- /dev/null
+++ b/js/src/gc/ParallelWork.h
@@ -0,0 +1,136 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_ParallelWork_h
+#define gc_ParallelWork_h
+
+#include "mozilla/Maybe.h"
+
+#include <algorithm>
+
+#include "gc/GCParallelTask.h"
+#include "gc/GCRuntime.h"
+#include "js/SliceBudget.h"
+#include "vm/HelperThreads.h"
+
+namespace js {
+
+namespace gcstats {
+enum class PhaseKind : uint8_t;
+}
+
+namespace gc {
+
+template <typename WorkItem>
+using ParallelWorkFunc = size_t (*)(GCRuntime*, const WorkItem&);
+
+// A GCParallelTask task that executes WorkItems from a WorkItemIterator.
+//
+// The WorkItemIterator class must supply done(), next() and get() methods. The
+// get() method must return WorkItems objects.
+template <typename WorkItem, typename WorkItemIterator>
+class ParallelWorker : public GCParallelTask {
+ public:
+  using WorkFunc = ParallelWorkFunc<WorkItem>;
+
+  ParallelWorker(GCRuntime* gc, gcstats::PhaseKind phaseKind, GCUse use,
+                 WorkFunc func, WorkItemIterator& work,
+                 const SliceBudget& budget, AutoLockHelperThreadState& lock)
+      : GCParallelTask(gc, phaseKind, use),
+        func_(func),
+        work_(work),
+        budget_(budget),
+        item_(work.get()) {
+    // Consume a work item on creation so that we can stop creating workers if
+    // the number of workers exceeds the number of work items.
+    work.next();
+  }
+
+  // Process work items until the budget runs out or the iterator is
+  // exhausted. The shared iterator is only touched with the helper thread
+  // lock held; the inner |lock| deliberately shadows the (released)
+  // parameter.
+  void run(AutoLockHelperThreadState& lock) {
+    AutoUnlockHelperThreadState unlock(lock);
+
+    for (;;) {
+      size_t steps = func_(gc, item_);
+      // Charge at least one step so the budget always makes progress.
+      budget_.step(std::max(steps, size_t(1)));
+      if (budget_.isOverBudget()) {
+        break;
+      }
+
+      AutoLockHelperThreadState lock;
+      if (work().done()) {
+        break;
+      }
+
+      item_ = work().get();
+      work().next();
+    }
+  }
+
+ private:
+  WorkItemIterator& work() { return work_.ref(); }
+
+  // A function to execute work items on the helper thread.
+  WorkFunc func_;
+
+  // An iterator which produces work items to execute. Shared between workers,
+  // so only accessed with the helper thread lock held.
+  HelperThreadLockData<WorkItemIterator&> work_;
+
+  // The budget that determines how long to run for.
+  SliceBudget budget_;
+
+  // The next work item to process.
+  WorkItem item_;
+};
+
+static constexpr size_t MaxParallelWorkers = 8;
+
+// An RAII class that starts a number of ParallelWorkers and waits for them to
+// finish.
+template <typename WorkItem, typename WorkItemIterator>
+class MOZ_RAII AutoRunParallelWork {
+ public:
+  using Worker = ParallelWorker<WorkItem, WorkItemIterator>;
+  using WorkFunc = ParallelWorkFunc<WorkItem>;
+
+  // Start up to parallelWorkerCount() workers, but never more workers than
+  // there are work items (each worker consumes one item on construction).
+  AutoRunParallelWork(GCRuntime* gc, WorkFunc func,
+                      gcstats::PhaseKind phaseKind, GCUse use,
+                      WorkItemIterator& work, const SliceBudget& budget,
+                      AutoLockHelperThreadState& lock)
+      : gc(gc), phaseKind(phaseKind), lock(lock), tasksStarted(0) {
+    size_t workerCount = gc->parallelWorkerCount();
+    MOZ_ASSERT(workerCount <= MaxParallelWorkers);
+    MOZ_ASSERT_IF(workerCount == 0, work.done());
+
+    for (size_t i = 0; i < workerCount && !work.done(); i++) {
+      tasks[i].emplace(gc, phaseKind, use, func, work, budget, lock);
+      gc->startTask(*tasks[i], lock);
+      tasksStarted++;
+    }
+  }
+
+  // Join all started workers; the caller must still hold the helper thread
+  // lock it passed to the constructor.
+  ~AutoRunParallelWork() {
+    gHelperThreadLock.assertOwnedByCurrentThread();
+
+    for (size_t i = 0; i < tasksStarted; i++) {
+      gc->joinTask(*tasks[i], lock);
+    }
+    for (size_t i = tasksStarted; i < MaxParallelWorkers; i++) {
+      MOZ_ASSERT(tasks[i].isNothing());
+    }
+  }
+
+ private:
+  GCRuntime* gc;
+  gcstats::PhaseKind phaseKind;
+  AutoLockHelperThreadState& lock;
+  size_t tasksStarted;
+  mozilla::Maybe<Worker> tasks[MaxParallelWorkers];
+};
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_ParallelWork_h */
diff --git a/js/src/gc/Policy.h b/js/src/gc/Policy.h
new file mode 100644
index 0000000000..290e4ed177
--- /dev/null
+++ b/js/src/gc/Policy.h
@@ -0,0 +1,101 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JS Garbage Collector. */
+
+#ifndef gc_Policy_h
+#define gc_Policy_h
+
+#include <type_traits>
+
+#include "gc/Barrier.h"
+#include "gc/Tracer.h"
+#include "js/GCPolicyAPI.h"
+
+namespace js {
+
+// Define the GCPolicy for all internal pointers.
+template <typename T>
+struct InternalGCPointerPolicy : public JS::GCPointerPolicy<T> {
+  using Type = std::remove_pointer_t<T>;
+
+  // Compile-time check that T points at one of the known GC thing types.
+#define IS_BASE_OF_OR(_1, BaseType, _2, _3) std::is_base_of_v<BaseType, Type> ||
+  static_assert(
+      JS_FOR_EACH_TRACEKIND(IS_BASE_OF_OR) false,
+      "InternalGCPointerPolicy must only be used for GC thing pointers");
+#undef IS_BASE_OF_OR
+
+  static void trace(JSTracer* trc, T* vp, const char* name) {
+    // It's not safe to trace unbarriered pointers except as part of root
+    // marking. If you get an assertion here you probably need to add a barrier,
+    // e.g. HeapPtr<T>.
+    TraceNullableRoot(trc, vp, name);
+  }
+};
+
+} // namespace js
+
+namespace JS {
+
+// Internally, all pointer types are treated as pointers to GC things by
+// default. (InternalGCPointerPolicy statically rejects non-GC-thing types.)
+template <typename T>
+struct GCPolicy<T*> : public js::InternalGCPointerPolicy<T*> {};
+template <typename T>
+struct GCPolicy<T* const> : public js::InternalGCPointerPolicy<T* const> {};
+
+// Post-barriered heap pointers: traced strongly, and may also be swept
+// weakly by containers that call traceWeak.
+template <typename T>
+struct GCPolicy<js::HeapPtr<T>> {
+  static void trace(JSTracer* trc, js::HeapPtr<T>* thingp, const char* name) {
+    js::TraceNullableEdge(trc, thingp, name);
+  }
+  static bool traceWeak(JSTracer* trc, js::HeapPtr<T>* thingp) {
+    return js::TraceWeakEdge(trc, thingp, "HeapPtr");
+  }
+};
+
+// Pre-barriered pointers: only strong tracing is provided here (no traceWeak
+// overload is defined for this wrapper).
+template <typename T>
+struct GCPolicy<js::PreBarriered<T>> {
+  static void trace(JSTracer* trc, js::PreBarriered<T>* thingp,
+                    const char* name) {
+    js::TraceNullableEdge(trc, thingp, name);
+  }
+};
+
+// Weak heap pointers: traced as edges during marking; traceWeak returns
+// false when the target has died so the containing entry can be dropped.
+template <typename T>
+struct GCPolicy<js::WeakHeapPtr<T>> {
+  static void trace(JSTracer* trc, js::WeakHeapPtr<T>* thingp,
+                    const char* name) {
+    js::TraceEdge(trc, thingp, name);
+  }
+  static bool traceWeak(JSTracer* trc, js::WeakHeapPtr<T>* thingp) {
+    return js::TraceWeakEdge(trc, thingp, "traceWeak");
+  }
+};
+
+// Bare unbarriered pointers: weak sweeping only, and null pointers are
+// treated as still-live (nothing to trace).
+template <typename T>
+struct GCPolicy<js::UnsafeBarePtr<T>> {
+  static bool traceWeak(JSTracer* trc, js::UnsafeBarePtr<T>* vp) {
+    if (*vp) {
+      return js::TraceManuallyBarrieredWeakEdge(trc, vp->unbarrieredAddress(),
+                                                "UnsafeBarePtr");
+    }
+    return true;
+  }
+};
+
+template <>
+struct GCPolicy<JS::GCCellPtr> {
+  static void trace(JSTracer* trc, JS::GCCellPtr* thingp, const char* name) {
+    // It's not safe to trace unbarriered pointers except as part of root
+    // marking.
+    js::TraceGCCellPtrRoot(trc, thingp, name);
+  }
+};
+
+} // namespace JS
+
+#endif // gc_Policy_h
diff --git a/js/src/gc/Pretenuring.cpp b/js/src/gc/Pretenuring.cpp
new file mode 100644
index 0000000000..5703663e47
--- /dev/null
+++ b/js/src/gc/Pretenuring.cpp
@@ -0,0 +1,459 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sw=2 et tw=80:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Pretenuring.h"
+
+#include "mozilla/Sprintf.h"
+
+#include "gc/GCInternals.h"
+#include "gc/PublicIterators.h"
+#include "jit/Invalidation.h"
+
+#include "gc/PrivateIterators-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+// The number of nursery allocations at which to pay attention to an allocation
+// site. This must be large enough to ensure we have enough information to infer
+// the lifetime and also large enough to avoid pretenuring low volume allocation
+// sites.
+static constexpr size_t AllocSiteAttentionThreshold = 500;
+
+// The maximum number of alloc sites to create between each minor
+// collection. Stop tracking allocation after this limit is reached. This
+// prevents unbounded time traversing the list during minor GC.
+static constexpr size_t MaxAllocSitesPerMinorGC = 500;
+
+// The maximum number of times to invalidate JIT code for a site. After this we
+// leave the site's state as Unknown and don't pretenure allocations.
+// Note we use 4 bits to store the invalidation count.
+static constexpr size_t MaxInvalidationCount = 5;
+
+// The minimum number of allocated cells needed to determine the survival rate
+// of cells in newly created arenas.
+static constexpr size_t MinCellsRequiredForSurvivalRate = 100;
+
+// The young survival rate below which a major collection is determined to have
+// a low young survival rate.
+static constexpr double LowYoungSurvivalThreshold = 0.05;
+
+// The number of consecutive major collections with a low young survival rate
+// that must occur before recovery is attempted.
+static constexpr size_t LowYoungSurvivalCountBeforeRecovery = 2;
+
+// The proportion of the nursery that must be tenured above which a minor
+// collection may be determined to have a high nursery survival rate.
+static constexpr double HighNurserySurvivalPromotionThreshold = 0.6;
+
+// The number of nursery allocations made by optimized JIT code that must be
+// tenured above which a minor collection may be determined to have a high
+// nursery survival rate.
+static constexpr size_t HighNurserySurvivalOptimizedAllocThreshold = 10000;
+
+// The number of consecutive minor collections with a high nursery survival rate
+// that must occur before recovery is attempted.
+static constexpr size_t HighNurserySurvivalCountBeforeRecovery = 2;
+
+// Sentinel terminating the linked list of nursery-allocated sites. A non-null
+// sentinel lets isInAllocatedList() distinguish "not in list" (null next
+// pointer) from "last element in list" (sentinel next pointer).
+AllocSite* const AllocSite::EndSentinel = reinterpret_cast<AllocSite*>(1);
+// Sentinel script pointer that tags wasm allocation sites. The value is
+// chosen just above STATE_MASK so it cannot collide with the state bits
+// packed into the low bits of scriptAndState.
+JSScript* const AllocSite::WasmScript =
+    reinterpret_cast<JSScript*>(AllocSite::STATE_MASK + 1);
+
+// Process-wide switch for site-based pretenuring, toggled via the public API
+// below.
+static bool SiteBasedPretenuringEnabled = true;
+
+JS_PUBLIC_API void JS::SetSiteBasedPretenuringEnabled(bool enable) {
+  SiteBasedPretenuringEnabled = enable;
+}
+
+// Whether a new AllocSite may be created now: pretenuring must be enabled and
+// the per-minor-GC cap on newly created sites must not have been reached.
+bool PretenuringNursery::canCreateAllocSite() {
+  MOZ_ASSERT(allocSitesCreated <= MaxAllocSitesPerMinorGC);
+  return SiteBasedPretenuringEnabled &&
+         allocSitesCreated < MaxAllocSitesPerMinorGC;
+}
+
+// Run the pretenuring analysis after a minor GC.
+//
+// Consumes the list of sites that allocated in the nursery since the last
+// collection, updates each site's state based on its promotion rate, and
+// resets per-site and per-zone allocation counts for the next cycle.
+//
+// Returns the number of sites that transitioned to the LongLived (pretenured)
+// state during this call.
+size_t PretenuringNursery::doPretenuring(GCRuntime* gc, JS::GCReason reason,
+                                         bool validPromotionRate,
+                                         double promotionRate, bool reportInfo,
+                                         size_t reportThreshold) {
+  size_t sitesActive = 0;
+  size_t sitesPretenured = 0;
+  size_t sitesInvalidated = 0;
+  size_t zonesWithHighNurserySurvival = 0;
+
+  // Zero allocation counts.
+  totalAllocCount_ = 0;
+  for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
+    for (auto& count : zone->pretenuring.nurseryAllocCounts) {
+      count = 0;
+    }
+  }
+
+  // Check whether previously optimized code has changed its behaviour and
+  // needs to be recompiled so that it can pretenure its allocations.
+  if (validPromotionRate) {
+    for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
+      bool highNurserySurvivalRate =
+          promotionRate > HighNurserySurvivalPromotionThreshold &&
+          zone->optimizedAllocSite()->nurseryTenuredCount >=
+              HighNurserySurvivalOptimizedAllocThreshold;
+      zone->pretenuring.noteHighNurserySurvivalRate(highNurserySurvivalRate);
+      if (highNurserySurvivalRate) {
+        zonesWithHighNurserySurvival++;
+      }
+    }
+  }
+
+  if (reportInfo) {
+    AllocSite::printInfoHeader(reason, promotionRate);
+  }
+
+  // Detach the allocated-sites list up front and walk it, unlinking each site
+  // as we go so the list is empty for the next nursery cycle.
+  AllocSite* site = allocatedSites;
+  allocatedSites = AllocSite::EndSentinel;
+  while (site != AllocSite::EndSentinel) {
+    AllocSite* next = site->nextNurseryAllocated;
+    site->nextNurseryAllocated = nullptr;
+
+    MOZ_ASSERT_IF(site->isNormal(),
+                  site->nurseryAllocCount >= site->nurseryTenuredCount);
+
+    if (site->isNormal()) {
+      processSite(gc, site, sitesActive, sitesPretenured, sitesInvalidated,
+                  reportInfo, reportThreshold);
+    }
+
+    site = next;
+  }
+
+  // Catch-all sites don't end up on the list if they are only used from
+  // optimized JIT code, so process them here.
+  for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
+    for (auto& site : zone->pretenuring.unknownAllocSites) {
+      processCatchAllSite(&site, reportInfo, reportThreshold);
+    }
+    processCatchAllSite(zone->optimizedAllocSite(), reportInfo,
+                        reportThreshold);
+  }
+
+  if (reportInfo) {
+    AllocSite::printInfoFooter(allocSitesCreated, sitesActive, sitesPretenured,
+                               sitesInvalidated);
+    if (zonesWithHighNurserySurvival) {
+      fprintf(stderr, "  %zu zones with high nursery survival rate\n",
+              zonesWithHighNurserySurvival);
+    }
+  }
+
+  allocSitesCreated = 0;
+
+  return sitesPretenured;
+}
+
+// Process a single normal (script-associated) allocation site after a minor
+// GC: fold its counts into the per-zone totals, update its state if it saw
+// enough allocations, and invalidate any optimized script code when the site
+// first becomes pretenured. Out-params accumulate statistics for reporting.
+void PretenuringNursery::processSite(GCRuntime* gc, AllocSite* site,
+                                     size_t& sitesActive,
+                                     size_t& sitesPretenured,
+                                     size_t& sitesInvalidated, bool reportInfo,
+                                     size_t reportThreshold) {
+  sitesActive++;
+
+  updateAllocCounts(site);
+
+  bool hasPromotionRate = false;
+  double promotionRate = 0.0;
+  bool wasInvalidated = false;
+  // Only sites with enough allocations this cycle have a meaningful promotion
+  // rate; low-volume sites are left in their current state.
+  if (site->nurseryAllocCount > AllocSiteAttentionThreshold) {
+    promotionRate =
+        double(site->nurseryTenuredCount) / double(site->nurseryAllocCount);
+    hasPromotionRate = true;
+
+    AllocSite::State prevState = site->state();
+    site->updateStateOnMinorGC(promotionRate);
+    AllocSite::State newState = site->state();
+
+    if (prevState == AllocSite::State::Unknown &&
+        newState == AllocSite::State::LongLived) {
+      sitesPretenured++;
+
+      // We can optimize JIT code before we realise that a site should be
+      // pretenured. Make sure we invalidate any existing optimized code.
+      if (site->hasScript()) {
+        wasInvalidated = site->invalidateScript(gc);
+        if (wasInvalidated) {
+          sitesInvalidated++;
+        }
+      }
+    }
+  }
+
+  if (reportInfo && site->allocCount() >= reportThreshold) {
+    site->printInfo(hasPromotionRate, promotionRate, wasInvalidated);
+  }
+
+  // Counts are per-minor-GC; clear them for the next cycle.
+  site->resetNurseryAllocations();
+}
+
+// Process a catch-all (unknown or optimized) site: these never change state,
+// so we only fold their counts into the zone totals, optionally report, and
+// reset. Sites with no nursery activity this cycle are skipped entirely.
+void PretenuringNursery::processCatchAllSite(AllocSite* site, bool reportInfo,
+                                             size_t reportThreshold) {
+  if (!site->hasNurseryAllocations()) {
+    return;
+  }
+
+  updateAllocCounts(site);
+
+  if (reportInfo && site->allocCount() >= reportThreshold) {
+    site->printInfo(false, 0.0, false);
+  }
+
+  site->resetNurseryAllocations();
+}
+
+// Accumulate a site's nursery allocation count into the nursery-wide total
+// and into its zone's per-trace-kind total.
+void PretenuringNursery::updateAllocCounts(AllocSite* site) {
+  JS::TraceKind kind = site->traceKind();
+  totalAllocCount_ += site->nurseryAllocCount;
+  PretenuringZone& zone = site->zone()->pretenuring;
+  zone.nurseryAllocCount(kind) += site->nurseryAllocCount;
+}
+
+// Invalidate the Ion-compiled code for this site's script so it can be
+// recompiled with the new pretenuring decision. Returns true if invalidation
+// actually happened.
+//
+// Each invalidation is counted; once the limit is reached the state is pinned
+// to Unknown and no further invalidations occur for this site.
+bool AllocSite::invalidateScript(GCRuntime* gc) {
+  // Cancel any in-progress off-thread compile first so it doesn't land stale
+  // code after we invalidate.
+  CancelOffThreadIonCompile(script());
+
+  if (!script()->hasIonScript()) {
+    return false;
+  }
+
+  if (invalidationLimitReached()) {
+    MOZ_ASSERT(state() == State::Unknown);
+    return false;
+  }
+
+  invalidationCount++;
+  if (invalidationLimitReached()) {
+    setState(State::Unknown);
+  }
+
+  JSContext* cx = gc->rt->mainContextFromOwnThread();
+  jit::Invalidate(cx, script(),
+                  /* resetUses = */ false,
+                  /* cancelOffThread = */ true);
+  return true;
+}
+
+// Whether this site has used up its invalidation budget (MaxInvalidationCount
+// invalidations). Past this point the site stays in the Unknown state.
+bool AllocSite::invalidationLimitReached() const {
+  MOZ_ASSERT(invalidationCount <= MaxInvalidationCount);
+  return invalidationCount == MaxInvalidationCount;
+}
+
+// After a major GC, record per zone whether the survival rate of young
+// tenured cells was low. Consecutive low-survival collections eventually
+// trigger recovery (see PretenuringZone::shouldResetPretenuredAllocSites).
+void PretenuringNursery::maybeStopPretenuring(GCRuntime* gc) {
+  for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+    double rate;
+    // The rate is only valid if enough cells were allocated; otherwise the
+    // zone's streak counter is left untouched.
+    if (zone->pretenuring.calculateYoungTenuredSurvivalRate(&rate)) {
+      bool lowYoungSurvivalRate = rate < LowYoungSurvivalThreshold;
+      zone->pretenuring.noteLowYoungTenuredSurvivalRate(lowYoungSurvivalRate);
+    }
+  }
+}
+
+// Classify this site: Normal (has a script or is a wasm site), Optimized (the
+// zone's catch-all for optimized JIT code), or Unknown (the zone's catch-all
+// for its trace kind). Catch-all sites are identified by address.
+AllocSite::Kind AllocSite::kind() const {
+  if (isNormal()) {
+    return Kind::Normal;
+  }
+
+  if (this == zone()->optimizedAllocSite()) {
+    return Kind::Optimized;
+  }
+
+  MOZ_ASSERT(this == zone()->unknownAllocSite(traceKind()));
+  return Kind::Unknown;
+}
+
+void AllocSite::updateStateOnMinorGC(double promotionRate) {
+  // The state changes based on whether the promotion rate is deemed high
+  // (greater than 90%):
+  //
+  //                      high                       high
+  //               ------------------>        ------------------>
+  //   ShortLived                    Unknown                    LongLived
+  //               <------------------        <------------------
+  //                      !high                      !high
+  //
+  // The nursery is used to allocate if the site's state is Unknown or
+  // ShortLived. There are no direct transition between ShortLived and LongLived
+  // to avoid pretenuring sites that we've recently observed being short-lived.
+
+  // Once the invalidation budget is exhausted the state is pinned to Unknown.
+  if (invalidationLimitReached()) {
+    MOZ_ASSERT(state() == State::Unknown);
+    return;
+  }
+
+  bool highPromotionRate = promotionRate >= 0.9;
+
+  switch (state()) {
+    case State::Unknown:
+      if (highPromotionRate) {
+        setState(State::LongLived);
+      } else {
+        setState(State::ShortLived);
+      }
+      break;
+
+    case State::ShortLived: {
+      if (highPromotionRate) {
+        setState(State::Unknown);
+      }
+      break;
+    }
+
+    case State::LongLived: {
+      if (!highPromotionRate) {
+        setState(State::Unknown);
+      }
+      break;
+    }
+  }
+}
+
+// Reset the state to Unknown unless the invalidation limit has been reached.
+// Returns whether the state was reset. Note that a reset consumes one unit of
+// the invalidation budget even though no script is invalidated here.
+bool AllocSite::maybeResetState() {
+  if (invalidationLimitReached()) {
+    MOZ_ASSERT(state() == State::Unknown);
+    return false;
+  }
+
+  invalidationCount++;
+  setState(State::Unknown);
+  return true;
+}
+
+// Trace the script edge held in the packed scriptAndState word. The pointer
+// is unpacked into a local for tracing and written back (preserving the state
+// bits) only if the tracer moved the script.
+void AllocSite::trace(JSTracer* trc) {
+  if (JSScript* s = script()) {
+    TraceManuallyBarrieredEdge(trc, &s, "AllocSite script");
+    if (s != script()) {
+      setScript(s);
+    }
+  }
+}
+
+// Compute the survival rate of cells allocated in newly created arenas since
+// the last major GC. Returns false (leaving *rateOut untouched) when too few
+// cells were allocated for the rate to be meaningful.
+bool PretenuringZone::calculateYoungTenuredSurvivalRate(double* rateOut) {
+  MOZ_ASSERT(allocCountInNewlyCreatedArenas >=
+             survivorCountInNewlyCreatedArenas);
+  if (allocCountInNewlyCreatedArenas < MinCellsRequiredForSurvivalRate) {
+    return false;
+  }
+
+  *rateOut = double(survivorCountInNewlyCreatedArenas) /
+             double(allocCountInNewlyCreatedArenas);
+  return true;
+}
+
+// Track a streak of consecutive major GCs with low young tenured survival;
+// any GC without a low rate resets the streak to zero.
+void PretenuringZone::noteLowYoungTenuredSurvivalRate(
+    bool lowYoungSurvivalRate) {
+  if (lowYoungSurvivalRate) {
+    lowYoungTenuredSurvivalCount++;
+  } else {
+    lowYoungTenuredSurvivalCount = 0;
+  }
+}
+
+// Track a streak of consecutive minor GCs with high nursery survival for
+// optimized-code allocations; any GC without a high rate resets the streak.
+void PretenuringZone::noteHighNurserySurvivalRate(
+    bool highNurserySurvivalRate) {
+  if (highNurserySurvivalRate) {
+    highNurserySurvivalCount++;
+  } else {
+    highNurserySurvivalCount = 0;
+  }
+}
+
+// Whether nursery alloc sites should be reset because the high-nursery-
+// survival streak reached the recovery threshold. Consumes the streak (resets
+// the counter) when it answers true.
+bool PretenuringZone::shouldResetNurseryAllocSites() {
+  bool shouldReset =
+      highNurserySurvivalCount >= HighNurserySurvivalCountBeforeRecovery;
+  if (shouldReset) {
+    highNurserySurvivalCount = 0;
+  }
+  return shouldReset;
+}
+
+// Whether pretenured alloc sites should be reset because the low-young-
+// survival streak reached the recovery threshold. Consumes the streak (resets
+// the counter) when it answers true.
+bool PretenuringZone::shouldResetPretenuredAllocSites() {
+  bool shouldReset =
+      lowYoungTenuredSurvivalCount >= LowYoungSurvivalCountBeforeRecovery;
+  if (shouldReset) {
+    lowYoungTenuredSurvivalCount = 0;
+  }
+  return shouldReset;
+}
+
+/* static */
+// Print the header line for the per-minor-GC pretenuring report (to stderr).
+void AllocSite::printInfoHeader(JS::GCReason reason, double promotionRate) {
+  fprintf(stderr,
+          "Pretenuring info after %s minor GC with %4.1f%% promotion rate:\n",
+          ExplainGCReason(reason), promotionRate * 100.0);
+}
+
+/* static */
+// Print the summary line closing the per-minor-GC pretenuring report.
+void AllocSite::printInfoFooter(size_t sitesCreated, size_t sitesActive,
+                                size_t sitesPretenured,
+                                size_t sitesInvalidated) {
+  fprintf(stderr,
+          "  %zu alloc sites created, %zu active, %zu pretenured, %zu "
+          "invalidated\n",
+          sitesCreated, sitesActive, sitesPretenured, sitesInvalidated);
+}
+
+// Print one report line for this site: addresses, script/kind, counts,
+// promotion rate, state and invalidation flag, as columns on stderr.
+void AllocSite::printInfo(bool hasPromotionRate, double promotionRate,
+                          bool wasInvalidated) const {
+  // Zone.
+  fprintf(stderr, "  %p %p", this, zone());
+
+  // Script, or which kind of catch-all site this is.
+  if (!hasScript()) {
+    fprintf(stderr, " %16s",
+            kind() == Kind::Optimized
+                ? "optimized"
+                : (kind() == Kind::Normal ? "normal" : "unknown"));
+  } else {
+    fprintf(stderr, " %16p", script());
+  }
+
+  // Nursery allocation count, missing for optimized sites.
+  char buffer[16] = {'\0'};
+  if (kind() != Kind::Optimized) {
+    SprintfLiteral(buffer, "%8" PRIu32, nurseryAllocCount);
+  }
+  fprintf(stderr, " %8s", buffer);
+
+  // Nursery tenure count.
+  fprintf(stderr, " %8" PRIu32, nurseryTenuredCount);
+
+  // Promotion rate, if there were enough allocations.
+  buffer[0] = '\0';
+  if (hasPromotionRate) {
+    // Clamp to 100% for display; the raw rate is unaffected.
+    SprintfLiteral(buffer, "%5.1f%%", std::min(1.0, promotionRate) * 100);
+  }
+  fprintf(stderr, " %6s", buffer);
+
+  // Current state for sites associated with a script.
+  const char* state = isNormal() ? stateName() : "";
+  fprintf(stderr, " %10s", state);
+
+  // Whether the associated script was invalidated.
+  if (wasInvalidated) {
+    fprintf(stderr, " invalidated");
+  }
+
+  fprintf(stderr, "\n");
+}
+
+// Human-readable name for the site's current state, used in reports.
+const char* AllocSite::stateName() const {
+  switch (state()) {
+    case State::ShortLived:
+      return "ShortLived";
+    case State::Unknown:
+      return "Unknown";
+    case State::LongLived:
+      return "LongLived";
+  }
+
+  MOZ_CRASH("Unknown state");
+}
diff --git a/js/src/gc/Pretenuring.h b/js/src/gc/Pretenuring.h
new file mode 100644
index 0000000000..ed156cbbd0
--- /dev/null
+++ b/js/src/gc/Pretenuring.h
@@ -0,0 +1,348 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Pretenuring.
+ *
+ * Some kinds of GC cells can be allocated in either the nursery or the tenured
+ * heap. The pretenuring system decides where to allocate such cells based on
+ * their expected lifetime with the aim of minimising total collection time.
+ *
+ * Lifetime is predicted based on data gathered about the cells' allocation
+ * site. This data is gathered in the middle JIT tiers, after code has stopped
+ * executing in the interpreter and before we generate fully optimized code.
+ */
+
+#ifndef gc_Pretenuring_h
+#define gc_Pretenuring_h
+
+#include <algorithm>
+
+#include "gc/AllocKind.h"
+#include "js/TypeDecls.h"
+
+class JS_PUBLIC_API JSTracer;
+
+namespace JS {
+enum class GCReason;
+} // namespace JS
+
+namespace js {
+namespace gc {
+
+class GCRuntime;
+class PretenuringNursery;
+
+// Number of trace kinds supported by the nursery. These are arranged at the
+// start of JS::TraceKind.
+static constexpr size_t NurseryTraceKinds = 3;
+
+// Selector for the two kinds of per-zone catch-all allocation site.
+enum class CatchAllAllocSite { Unknown, Optimized };
+
+// Information about an allocation site.
+//
+// Nursery cells contain a pointer to one of these in their cell header (stored
+// before the cell). The site can relate to either a specific JS bytecode
+// instruction, a specific WebAssembly type, or can be a catch-all instance for
+// unknown sites or JS JIT optimized code.
+class AllocSite {
+ public:
+  // Lifetime prediction for allocations at this site; see
+  // updateStateOnMinorGC in Pretenuring.cpp for the transition diagram.
+  enum class State : uint32_t { ShortLived = 0, Unknown = 1, LongLived = 2 };
+
+  // The JIT depends on being able to tell the states apart by checking a single
+  // bit.
+  static constexpr int32_t LONG_LIVED_BIT = int32_t(State::LongLived);
+  static_assert((LONG_LIVED_BIT & int32_t(State::Unknown)) == 0);
+  static_assert((AllocSite::LONG_LIVED_BIT & int32_t(State::ShortLived)) == 0);
+
+ private:
+  JS::Zone* zone_ = nullptr;
+
+  // Word storing JSScript pointer and site state.
+  //
+  // The script pointer is the script that owns this allocation site, a special
+  // sentinel script for wasm sites, or null for unknown sites. This is used
+  // when we need to invalidate the script.
+  uintptr_t scriptAndState = uintptr_t(State::Unknown);
+  // The low two bits of scriptAndState hold the State.
+  static constexpr uintptr_t STATE_MASK = BitMask(2);
+
+  // Next pointer forming a linked list of sites at which nursery allocation
+  // happened since the last nursery collection.
+  AllocSite* nextNurseryAllocated = nullptr;
+
+  // Number of nursery allocations at this site since last nursery collection.
+  uint32_t nurseryAllocCount = 0;
+
+  // Number of nursery allocations that survived. Used during collection.
+  uint32_t nurseryTenuredCount : 24;
+
+  // Number of times the script has been invalidated.
+  uint32_t invalidationCount : 4;
+
+  // The trace kind of the allocation. Only kinds up to NurseryTraceKinds are
+  // allowed.
+  uint32_t traceKind_ : 4;
+
+  // Non-null sentinel terminating the nextNurseryAllocated list; distinguishes
+  // "last in list" from "not in any list" (null).
+  static AllocSite* const EndSentinel;
+
+  // Sentinel script for wasm sites.
+  static JSScript* const WasmScript;
+
+  friend class PretenuringZone;
+  friend class PretenuringNursery;
+
+  // The script pointer with the state bits masked off.
+  uintptr_t rawScript() const { return scriptAndState & ~STATE_MASK; }
+
+ public:
+  AllocSite() : nurseryTenuredCount(0), invalidationCount(0), traceKind_(0) {}
+
+  // Create a dummy site to use for unknown allocations.
+  explicit AllocSite(JS::Zone* zone, JS::TraceKind kind)
+      : zone_(zone),
+        nurseryTenuredCount(0),
+        invalidationCount(0),
+        traceKind_(uint32_t(kind)) {
+    MOZ_ASSERT(traceKind_ < NurseryTraceKinds);
+  }
+
+  // Create a site for an opcode in the given script.
+  AllocSite(JS::Zone* zone, JSScript* script, JS::TraceKind kind)
+      : AllocSite(zone, kind) {
+    MOZ_ASSERT(script != WasmScript);
+    setScript(script);
+  }
+
+  // Late-initialize a default-constructed instance as an unknown catch-all
+  // site for the given zone and trace kind.
+  void initUnknownSite(JS::Zone* zone, JS::TraceKind kind) {
+    MOZ_ASSERT(!zone_ && scriptAndState == uintptr_t(State::Unknown));
+    zone_ = zone;
+    nurseryTenuredCount = 0;
+    invalidationCount = 0;
+    traceKind_ = uint32_t(kind);
+    MOZ_ASSERT(traceKind_ < NurseryTraceKinds);
+  }
+
+  // Initialize a site to be a wasm site.
+  void initWasm(JS::Zone* zone) {
+    MOZ_ASSERT(!zone_ && scriptAndState == uintptr_t(State::Unknown));
+    zone_ = zone;
+    setScript(WasmScript);
+    nurseryTenuredCount = 0;
+    invalidationCount = 0;
+    traceKind_ = uint32_t(JS::TraceKind::Object);
+  }
+
+  JS::Zone* zone() const { return zone_; }
+
+  JS::TraceKind traceKind() const { return JS::TraceKind(traceKind_); }
+
+  State state() const { return State(scriptAndState & STATE_MASK); }
+
+  // Whether this site has a script associated with it. This is not true if
+  // this site is for a wasm site.
+  bool hasScript() const { return rawScript() != uintptr_t(WasmScript); }
+  JSScript* script() const {
+    MOZ_ASSERT(hasScript());
+    return reinterpret_cast<JSScript*>(rawScript());
+  }
+
+  // Whether this site is not an unknown or optimized site.
+  bool isNormal() const { return rawScript() != 0; }
+
+  enum class Kind : uint32_t { Normal, Unknown, Optimized };
+  Kind kind() const;
+
+  bool isInAllocatedList() const { return nextNurseryAllocated; }
+
+  // Whether allocations at this site should be allocated in the nursery or the
+  // tenured heap.
+  Heap initialHeap() const {
+    return state() == State::LongLived ? Heap::Tenured : Heap::Default;
+  }
+
+  bool hasNurseryAllocations() const {
+    return nurseryAllocCount != 0 || nurseryTenuredCount != 0;
+  }
+  void resetNurseryAllocations() {
+    nurseryAllocCount = 0;
+    nurseryTenuredCount = 0;
+  }
+
+  uint32_t incAllocCount() { return ++nurseryAllocCount; }
+  // Address of the allocation counter, for direct updates from JIT code.
+  uint32_t* nurseryAllocCountAddress() { return &nurseryAllocCount; }
+
+  void incTenuredCount() {
+    // The nursery is not large enough for this to overflow.
+    nurseryTenuredCount++;
+    MOZ_ASSERT(nurseryTenuredCount != 0);
+  }
+
+  // Best-effort activity figure: tenured counts can exceed recorded
+  // allocations for sites whose allocations are not counted (optimized code).
+  size_t allocCount() const {
+    return std::max(nurseryAllocCount, nurseryTenuredCount);
+  }
+
+  void updateStateOnMinorGC(double promotionRate);
+
+  // Reset the state to 'Unknown' unless we have reached the invalidation limit
+  // for this site. Return whether the state was reset.
+  bool maybeResetState();
+
+  bool invalidationLimitReached() const;
+  bool invalidateScript(GCRuntime* gc);
+
+  void trace(JSTracer* trc);
+
+  static void printInfoHeader(JS::GCReason reason, double promotionRate);
+  static void printInfoFooter(size_t sitesCreated, size_t sitesActive,
+                              size_t sitesPretenured, size_t sitesInvalidated);
+  void printInfo(bool hasPromotionRate, double promotionRate,
+                 bool wasInvalidated) const;
+
+  // Field offsets for JIT-generated code.
+  static constexpr size_t offsetOfScriptAndState() {
+    return offsetof(AllocSite, scriptAndState);
+  }
+  static constexpr size_t offsetOfNurseryAllocCount() {
+    return offsetof(AllocSite, nurseryAllocCount);
+  }
+  static constexpr size_t offsetOfNextNurseryAllocated() {
+    return offsetof(AllocSite, nextNurseryAllocated);
+  }
+
+ private:
+  // Replace the script pointer, preserving the current state bits.
+  void setScript(JSScript* newScript) {
+    MOZ_ASSERT((uintptr_t(newScript) & STATE_MASK) == 0);
+    scriptAndState = uintptr_t(newScript) | uintptr_t(state());
+  }
+
+  // Replace the state bits, preserving the script pointer.
+  void setState(State newState) {
+    MOZ_ASSERT((uintptr_t(newState) & ~STATE_MASK) == 0);
+    scriptAndState = rawScript() | uintptr_t(newState);
+  }
+
+  const char* stateName() const;
+};
+
+// Pretenuring information stored per zone.
+class PretenuringZone {
+ public:
+  // Catch-all allocation site instance used when the actual site is unknown, or
+  // when optimized JIT code allocates a GC thing that's not handled by the
+  // pretenuring system.
+  AllocSite unknownAllocSites[NurseryTraceKinds];
+
+  // Catch-all allocation instance used by optimized JIT code when allocating GC
+  // things that are handled by the pretenuring system. Allocation counts are
+  // not recorded by optimized JIT code.
+  AllocSite optimizedAllocSite;
+
+  // Count of tenured cell allocations made between each major collection and
+  // how many survived.
+  uint32_t allocCountInNewlyCreatedArenas = 0;
+  uint32_t survivorCountInNewlyCreatedArenas = 0;
+
+  // Count of successive collections that had a low young tenured survival
+  // rate. Used to discard optimized code if we get the pretenuring decision
+  // wrong.
+  uint32_t lowYoungTenuredSurvivalCount = 0;
+
+  // Count of successive nursery collections that had a high survival rate for
+  // objects allocated by optimized code. Used to discard optimized code if we
+  // get the pretenuring decision wrong.
+  uint32_t highNurserySurvivalCount = 0;
+
+  // Total allocation count by trace kind (ignoring optimized
+  // allocations). Calculated during nursery collection.
+  uint32_t nurseryAllocCounts[NurseryTraceKinds] = {0};
+
+  explicit PretenuringZone(JS::Zone* zone)
+      : optimizedAllocSite(zone, JS::TraceKind::Object) {
+    for (uint32_t i = 0; i < NurseryTraceKinds; i++) {
+      unknownAllocSites[i].initUnknownSite(zone, JS::TraceKind(i));
+    }
+  }
+
+  AllocSite& unknownAllocSite(JS::TraceKind kind) {
+    size_t i = size_t(kind);
+    MOZ_ASSERT(i < NurseryTraceKinds);
+    return unknownAllocSites[i];
+  }
+
+  void clearCellCountsInNewlyCreatedArenas() {
+    allocCountInNewlyCreatedArenas = 0;
+    survivorCountInNewlyCreatedArenas = 0;
+  }
+  void updateCellCountsInNewlyCreatedArenas(uint32_t allocCount,
+                                            uint32_t survivorCount) {
+    allocCountInNewlyCreatedArenas += allocCount;
+    survivorCountInNewlyCreatedArenas += survivorCount;
+  }
+
+  // Returns false if too few cells were allocated to produce a valid rate.
+  bool calculateYoungTenuredSurvivalRate(double* rateOut);
+
+  void noteLowYoungTenuredSurvivalRate(bool lowYoungSurvivalRate);
+  void noteHighNurserySurvivalRate(bool highNurserySurvivalRate);
+
+  // Recovery: if code behaviour changes we may need to reset allocation site
+  // state and invalidate JIT code.
+  bool shouldResetNurseryAllocSites();
+  bool shouldResetPretenuredAllocSites();
+
+  uint32_t& nurseryAllocCount(JS::TraceKind kind) {
+    size_t i = size_t(kind);
+    MOZ_ASSERT(i < NurseryTraceKinds);
+    return nurseryAllocCounts[i];
+  }
+  uint32_t nurseryAllocCount(JS::TraceKind kind) const {
+    return const_cast<PretenuringZone*>(this)->nurseryAllocCount(kind);
+  }
+};
+
+// Pretenuring information stored as part of the GC nursery.
+class PretenuringNursery {
+  // Head of the intrusive list of sites that allocated in the nursery since
+  // the last minor GC, terminated by AllocSite::EndSentinel.
+  gc::AllocSite* allocatedSites;
+
+  // Sites created since the last minor GC; capped at MaxAllocSitesPerMinorGC.
+  size_t allocSitesCreated = 0;
+
+  // Total nursery allocations seen during the last doPretenuring pass.
+  uint32_t totalAllocCount_ = 0;
+
+ public:
+  PretenuringNursery() : allocatedSites(AllocSite::EndSentinel) {}
+
+  bool hasAllocatedSites() const {
+    return allocatedSites != AllocSite::EndSentinel;
+  }
+
+  bool canCreateAllocSite();
+  void noteAllocSiteCreated() { allocSitesCreated++; }
+
+  // Push a site onto the allocated-sites list (it must not already be there).
+  void insertIntoAllocatedList(AllocSite* site) {
+    MOZ_ASSERT(!site->isInAllocatedList());
+    site->nextNurseryAllocated = allocatedSites;
+    allocatedSites = site;
+  }
+
+  size_t doPretenuring(GCRuntime* gc, JS::GCReason reason,
+                       bool validPromotionRate, double promotionRate,
+                       bool reportInfo, size_t reportThreshold);
+
+  void maybeStopPretenuring(GCRuntime* gc);
+
+  uint32_t totalAllocCount() const { return totalAllocCount_; }
+
+  // Address of the list head, for direct access from JIT-generated code.
+  void* addressOfAllocatedSites() { return &allocatedSites; }
+
+ private:
+  void processSite(GCRuntime* gc, AllocSite* site, size_t& sitesActive,
+                   size_t& sitesPretenured, size_t& sitesInvalidated,
+                   bool reportInfo, size_t reportThreshold);
+  void processCatchAllSite(AllocSite* site, bool reportInfo,
+                           size_t reportThreshold);
+  void updateAllocCounts(AllocSite* site);
+};
+
+} // namespace gc
+} // namespace js
+
+#endif /* gc_Pretenuring_h */
diff --git a/js/src/gc/PrivateIterators-inl.h b/js/src/gc/PrivateIterators-inl.h
new file mode 100644
index 0000000000..359e6b22b7
--- /dev/null
+++ b/js/src/gc/PrivateIterators-inl.h
@@ -0,0 +1,164 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal iterators for various data structures.
+ */
+
+#ifndef gc_PrivateIterators_inl_h
+#define gc_PrivateIterators_inl_h
+
+#include "gc/PublicIterators.h"
+
+#include "gc/GC-inl.h"
+
+namespace js {
+namespace gc {
+
+// ArenaCellIter variant that asserts it is only used while the current thread
+// is performing GC.
+class ArenaCellIterUnderGC : public ArenaCellIter {
+ public:
+  explicit ArenaCellIterUnderGC(Arena* arena) : ArenaCellIter(arena) {
+    MOZ_ASSERT(CurrentThreadIsPerformingGC());
+  }
+};
+
+// ArenaCellIter variant that asserts it is only used during finalization.
+class ArenaCellIterUnderFinalize : public ArenaCellIter {
+ public:
+  explicit ArenaCellIterUnderFinalize(Arena* arena) : ArenaCellIter(arena) {
+    MOZ_ASSERT(CurrentThreadIsGCFinalizing());
+  }
+};
+
+// Iterates tenured cells of a given AllocKind in a zone, exposing them as
+// JSObject*. Despite the name, the filtering to gray objects is done by
+// callers; this iterator visits all cells of the kind — TODO confirm.
+class GrayObjectIter : public ZoneAllCellIter<js::gc::TenuredCell> {
+ public:
+  explicit GrayObjectIter(JS::Zone* zone, AllocKind kind)
+      : ZoneAllCellIter<js::gc::TenuredCell>() {
+    initForTenuredIteration(zone, kind);
+  }
+
+  JSObject* get() const {
+    return ZoneAllCellIter<js::gc::TenuredCell>::get<JSObject>();
+  }
+  operator JSObject*() const { return get(); }
+  JSObject* operator->() const { return get(); }
+};
+
+// Iterates only the zones participating in the current collection (those for
+// which wasGCStarted() is true). Must be used while the heap is not idle.
+class GCZonesIter {
+  AllZonesIter zone;
+
+ public:
+  explicit GCZonesIter(GCRuntime* gc) : zone(gc) {
+    MOZ_ASSERT(gc->heapState() != JS::HeapState::Idle);
+    // Skip ahead to the first collecting zone, if any.
+    if (!done() && !zone->wasGCStarted()) {
+      next();
+    }
+  }
+  explicit GCZonesIter(JSRuntime* rt) : GCZonesIter(&rt->gc) {}
+
+  bool done() const { return zone.done(); }
+
+  void next() {
+    MOZ_ASSERT(!done());
+    // Advance past zones that are not being collected.
+    do {
+      zone.next();
+    } while (!zone.done() && !zone->wasGCStarted());
+  }
+
+  JS::Zone* get() const {
+    MOZ_ASSERT(!done());
+    return zone;
+  }
+
+  operator JS::Zone*() const { return get(); }
+  JS::Zone* operator->() const { return get(); }
+};
+
+// Compartment/realm iterators restricted to collecting zones.
+using GCCompartmentsIter =
+    CompartmentsOrRealmsIterT<GCZonesIter, CompartmentsInZoneIter>;
+using GCRealmsIter = CompartmentsOrRealmsIterT<GCZonesIter, RealmsInZoneIter>;
+
+/* Iterates over all zones in the current sweep group. */
+class SweepGroupZonesIter {
+  JS::Zone* current;
+
+ public:
+  explicit SweepGroupZonesIter(GCRuntime* gc)
+      : current(gc->getCurrentSweepGroup()) {
+    MOZ_ASSERT(CurrentThreadIsPerformingGC());
+  }
+  explicit SweepGroupZonesIter(JSRuntime* rt) : SweepGroupZonesIter(&rt->gc) {}
+
+  bool done() const { return !current; }
+
+  void next() {
+    MOZ_ASSERT(!done());
+    // Zones in a sweep group form an intrusive linked list.
+    current = current->nextNodeInGroup();
+  }
+
+  JS::Zone* get() const {
+    MOZ_ASSERT(!done());
+    return current;
+  }
+
+  operator JS::Zone*() const { return get(); }
+  JS::Zone* operator->() const { return get(); }
+};
+
+// Compartment/realm iterators restricted to the current sweep group.
+using SweepGroupCompartmentsIter =
+    CompartmentsOrRealmsIterT<SweepGroupZonesIter, CompartmentsInZoneIter>;
+using SweepGroupRealmsIter =
+    CompartmentsOrRealmsIterT<SweepGroupZonesIter, RealmsInZoneIter>;
+
+// Iterate the free cells in an arena. See also ArenaCellIter which iterates
+// the allocated cells.
+class ArenaFreeCellIter {
+  Arena* arena;
+  size_t thingSize;
+  // Current free span and the byte offset of the current cell within the
+  // arena. An offset of zero marks the end of iteration (presumably valid
+  // cells never start at offset zero because the arena header occupies the
+  // start of the arena — TODO confirm).
+  FreeSpan span;
+  uint_fast16_t thing;
+
+ public:
+  explicit ArenaFreeCellIter(Arena* arena)
+      : arena(arena),
+        thingSize(arena->getThingSize()),
+        span(*arena->getFirstFreeSpan()),
+        thing(span.first) {
+    MOZ_ASSERT(arena);
+    MOZ_ASSERT(thing < ArenaSize);
+  }
+
+  bool done() const {
+    MOZ_ASSERT(thing < ArenaSize);
+    return !thing;
+  }
+
+  TenuredCell* get() const {
+    MOZ_ASSERT(!done());
+    return reinterpret_cast<TenuredCell*>(uintptr_t(arena) + thing);
+  }
+
+  void next() {
+    MOZ_ASSERT(!done());
+    MOZ_ASSERT(thing >= span.first && thing <= span.last);
+
+    // Either step to the next free span or advance within the current one.
+    if (thing == span.last) {
+      span = *span.nextSpan(arena);
+      thing = span.first;
+    } else {
+      thing += thingSize;
+    }
+
+    MOZ_ASSERT(thing < ArenaSize);
+  }
+
+  operator TenuredCell*() const { return get(); }
+  TenuredCell* operator->() const { return get(); }
+};
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_PrivateIterators_inl_h
diff --git a/js/src/gc/PublicIterators.cpp b/js/src/gc/PublicIterators.cpp
new file mode 100644
index 0000000000..582a21ddf3
--- /dev/null
+++ b/js/src/gc/PublicIterators.cpp
@@ -0,0 +1,272 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/GCInternals.h"
+#include "gc/GCLock.h"
+#include "vm/Realm.h"
+#include "vm/Runtime.h"
+
+#include "gc/PrivateIterators-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+// Shared worker for the heap-iteration entry points below: invoke the realm
+// callback for every realm in |zone|, then for every alloc kind walk each
+// arena (arena callback) and each allocated cell within it (cell callback).
+// No read barriers are triggered; callers must already be in a no-GC section.
+static void IterateRealmsArenasCellsUnbarriered(
+ JSContext* cx, Zone* zone, void* data,
+ JS::IterateRealmCallback realmCallback, IterateArenaCallback arenaCallback,
+ IterateCellCallback cellCallback, const JS::AutoRequireNoGC& nogc) {
+ {
+ // Root the realm across the callback in case it allocates.
+ Rooted<Realm*> realm(cx);
+ for (RealmsInZoneIter r(zone); !r.done(); r.next()) {
+ realm = r;
+ (*realmCallback)(cx, data, realm, nogc);
+ }
+ }
+
+ for (auto thingKind : AllAllocKinds()) {
+ JS::TraceKind traceKind = MapAllocToTraceKind(thingKind);
+ size_t thingSize = Arena::thingSize(thingKind);
+
+ for (ArenaIter aiter(zone, thingKind); !aiter.done(); aiter.next()) {
+ Arena* arena = aiter.get();
+ (*arenaCallback)(cx->runtime(), data, arena, traceKind, thingSize, nogc);
+ for (ArenaCellIter cell(arena); !cell.done(); cell.next()) {
+ (*cellCallback)(cx->runtime(), data, JS::GCCellPtr(cell, traceKind),
+ thingSize, nogc);
+ }
+ }
+ }
+}
+
+// Visit every zone in the runtime (including the shared atoms zone, if any),
+// invoking the zone/realm/arena/cell callbacks for each. "Unbarriered" means
+// no read barriers fire; AutoPrepareForTracing establishes the required
+// non-collecting state first.
+void js::IterateHeapUnbarriered(JSContext* cx, void* data,
+ IterateZoneCallback zoneCallback,
+ JS::IterateRealmCallback realmCallback,
+ IterateArenaCallback arenaCallback,
+ IterateCellCallback cellCallback) {
+ AutoPrepareForTracing prep(cx);
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ auto iterateZone = [&](Zone* zone) -> void {
+ (*zoneCallback)(cx->runtime(), data, zone, nogc);
+ IterateRealmsArenasCellsUnbarriered(cx, zone, data, realmCallback,
+ arenaCallback, cellCallback, nogc);
+ };
+
+ // Include the shared atoms zone if present.
+ if (Zone* zone = cx->runtime()->gc.maybeSharedAtomsZone()) {
+ iterateZone(zone);
+ }
+
+ for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
+ iterateZone(zone);
+ }
+}
+
+// Single-zone variant of IterateHeapUnbarriered: run the zone callback for
+// |zone| only, then its realms, arenas and cells.
+void js::IterateHeapUnbarrieredForZone(JSContext* cx, Zone* zone, void* data,
+ IterateZoneCallback zoneCallback,
+ JS::IterateRealmCallback realmCallback,
+ IterateArenaCallback arenaCallback,
+ IterateCellCallback cellCallback) {
+ AutoPrepareForTracing prep(cx);
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ (*zoneCallback)(cx->runtime(), data, zone, nogc);
+ IterateRealmsArenasCellsUnbarriered(cx, zone, data, realmCallback,
+ arenaCallback, cellCallback, nogc);
+}
+
+// Invoke |chunkCallback| for every non-empty GC chunk. The GC lock is held
+// for the duration since the chunk lists are protected by it.
+void js::IterateChunks(JSContext* cx, void* data,
+ IterateChunkCallback chunkCallback) {
+ AutoPrepareForTracing prep(cx);
+ AutoLockGC lock(cx->runtime());
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ for (auto chunk = cx->runtime()->gc.allNonEmptyChunks(lock); !chunk.done();
+ chunk.next()) {
+ chunkCallback(cx->runtime(), data, chunk, nogc);
+ }
+}
+
+// Recursively invoke |lazyScriptCallback| for every lazy inner function script
+// reachable from |enclosingScript|'s gcthings list, skipping asm.js functions,
+// already-compiled (non-lazy) functions, and "ghost" functions hidden from the
+// debugger.
+static void TraverseInnerLazyScriptsForLazyScript(
+ JSContext* cx, void* data, BaseScript* enclosingScript,
+ IterateScriptCallback lazyScriptCallback, const JS::AutoRequireNoGC& nogc) {
+ for (JS::GCCellPtr gcThing : enclosingScript->gcthings()) {
+ if (!gcThing.is<JSObject>()) {
+ continue;
+ }
+ JSObject* obj = &gcThing.as<JSObject>();
+
+ MOZ_ASSERT(obj->is<JSFunction>(),
+ "All objects in lazy scripts should be functions");
+ JSFunction* fun = &obj->as<JSFunction>();
+
+ if (!fun->hasBaseScript()) {
+ // Ignore asm.js.
+ continue;
+ }
+ MOZ_ASSERT(fun->baseScript());
+ // NOTE(review): this null-check is unreachable in debug builds given the
+ // assert above; presumably a release-build safeguard — confirm intent.
+ if (!fun->baseScript()) {
+ // If the function doesn't have script, ignore it.
+ continue;
+ }
+
+ if (fun->hasBytecode()) {
+ // Ignore non lazy function.
+ continue;
+ }
+
+ // If the function is "ghost", we shouldn't expose it to the debugger.
+ //
+ // See GHOST_FUNCTION in FunctionFlags.h for more details.
+ if (fun->isGhost()) {
+ continue;
+ }
+
+ BaseScript* script = fun->baseScript();
+ MOZ_ASSERT_IF(script->hasEnclosingScript(),
+ script->enclosingScript() == enclosingScript);
+
+ lazyScriptCallback(cx->runtime(), data, script, nogc);
+
+ // Descend into this lazy script's own inner functions.
+ TraverseInnerLazyScriptsForLazyScript(cx, data, script, lazyScriptCallback,
+ nogc);
+ }
+}
+
+// Run |callback| for |script| unless it looks like a failed-compile fragment,
+// then (for lazy scripts) also visit its inner lazy scripts.
+static inline void DoScriptCallback(JSContext* cx, void* data,
+ BaseScript* script,
+ IterateScriptCallback callback,
+ const JS::AutoRequireNoGC& nogc) {
+ // Exclude any scripts that may be the result of a failed compile. Check that
+ // script either has bytecode or is ready to delazify.
+ //
+ // This excludes lazy scripts that do not have an enclosing scope because we
+ // cannot distinguish a failed compile fragment from a lazy script with a lazy
+ // parent.
+ if (!script->hasBytecode() && !script->isReadyForDelazification()) {
+ return;
+ }
+
+ // Invoke callback.
+ callback(cx->runtime(), data, script, nogc);
+
+ // The check above excluded lazy scripts with lazy parents, so explicitly
+ // visit inner scripts now if we are lazy with a successfully compiled parent.
+ if (!script->hasBytecode()) {
+ TraverseInnerLazyScriptsForLazyScript(cx, data, script, callback, nogc);
+ }
+}
+
+// Invoke |scriptCallback| for every BaseScript in |realm|, or in every
+// non-atoms zone when |realm| is null. The nursery is evicted first so cell
+// iteration over tenured BaseScripts is complete.
+void js::IterateScripts(JSContext* cx, Realm* realm, void* data,
+ IterateScriptCallback scriptCallback) {
+ MOZ_ASSERT(!cx->suppressGC);
+ AutoEmptyNurseryAndPrepareForTracing prep(cx);
+ JS::AutoSuppressGCAnalysis nogc;
+
+ if (realm) {
+ // Scripts of other realms share the zone, so filter per-script.
+ Zone* zone = realm->zone();
+ for (auto iter = zone->cellIter<BaseScript>(prep); !iter.done();
+ iter.next()) {
+ if (iter->realm() != realm) {
+ continue;
+ }
+ DoScriptCallback(cx, data, iter.get(), scriptCallback, nogc);
+ }
+ } else {
+ for (ZonesIter zone(cx->runtime(), SkipAtoms); !zone.done(); zone.next()) {
+ for (auto iter = zone->cellIter<BaseScript>(prep); !iter.done();
+ iter.next()) {
+ DoScriptCallback(cx, data, iter.get(), scriptCallback, nogc);
+ }
+ }
+ }
+}
+
+// Invoke |cellCallback| for every gray-marked object in |zone|, across all
+// object alloc kinds. Must not be called while the runtime heap is busy.
+void js::IterateGrayObjects(Zone* zone, IterateGCThingCallback cellCallback,
+ void* data) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
+
+ JSContext* cx = TlsContext.get();
+ AutoPrepareForTracing prep(cx);
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ for (auto kind : ObjectAllocKinds()) {
+ for (GrayObjectIter obj(zone, kind); !obj.done(); obj.next()) {
+ // Re-check the mark state; the iterator may yield candidates only.
+ if (obj->asTenured().isMarkedGray()) {
+ cellCallback(data, JS::GCCellPtr(obj.get()), nogc);
+ }
+ }
+ }
+}
+
+// Public API: invoke |compartmentCallback| for each compartment in the
+// runtime, stopping early if the callback returns CompartmentIterResult::Stop.
+JS_PUBLIC_API void JS_IterateCompartments(
+ JSContext* cx, void* data,
+ JSIterateCompartmentCallback compartmentCallback) {
+ AutoTraceSession session(cx->runtime());
+
+ for (CompartmentsIter c(cx->runtime()); !c.done(); c.next()) {
+ if ((*compartmentCallback)(cx, data, c) ==
+ JS::CompartmentIterResult::Stop) {
+ break;
+ }
+ }
+}
+
+// Public API: as above, but restricted to the compartments of a single zone.
+JS_PUBLIC_API void JS_IterateCompartmentsInZone(
+ JSContext* cx, JS::Zone* zone, void* data,
+ JSIterateCompartmentCallback compartmentCallback) {
+ AutoTraceSession session(cx->runtime());
+
+ for (CompartmentsInZoneIter c(zone); !c.done(); c.next()) {
+ if ((*compartmentCallback)(cx, data, c) ==
+ JS::CompartmentIterResult::Stop) {
+ break;
+ }
+ }
+}
+
+// Public API: invoke |realmCallback| for every realm in the runtime. The realm
+// is rooted across the callback.
+JS_PUBLIC_API void JS::IterateRealms(JSContext* cx, void* data,
+ JS::IterateRealmCallback realmCallback) {
+ AutoTraceSession session(cx->runtime());
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ Rooted<Realm*> realm(cx);
+ for (RealmsIter r(cx->runtime()); !r.done(); r.next()) {
+ realm = r;
+ (*realmCallback)(cx, data, realm, nogc);
+ }
+}
+
+// Public API: as above, but only realms whose principals match |principals|.
+JS_PUBLIC_API void JS::IterateRealmsWithPrincipals(
+ JSContext* cx, JSPrincipals* principals, void* data,
+ JS::IterateRealmCallback realmCallback) {
+ MOZ_ASSERT(principals);
+
+ AutoTraceSession session(cx->runtime());
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ Rooted<Realm*> realm(cx);
+ for (RealmsIter r(cx->runtime()); !r.done(); r.next()) {
+ if (r->principals() != principals) {
+ continue;
+ }
+ realm = r;
+ (*realmCallback)(cx, data, realm, nogc);
+ }
+}
+
+// Public API: as above, but only the realms of a single compartment.
+JS_PUBLIC_API void JS::IterateRealmsInCompartment(
+ JSContext* cx, JS::Compartment* compartment, void* data,
+ JS::IterateRealmCallback realmCallback) {
+ AutoTraceSession session(cx->runtime());
+ JS::AutoSuppressGCAnalysis nogc(cx);
+
+ Rooted<Realm*> realm(cx);
+ for (RealmsInCompartmentIter r(compartment); !r.done(); r.next()) {
+ realm = r;
+ (*realmCallback)(cx, data, realm, nogc);
+ }
+}
diff --git a/js/src/gc/PublicIterators.h b/js/src/gc/PublicIterators.h
new file mode 100644
index 0000000000..d1072cfe98
--- /dev/null
+++ b/js/src/gc/PublicIterators.h
@@ -0,0 +1,158 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Iterators for various data structures.
+ */
+
+#ifndef gc_PublicIterators_h
+#define gc_PublicIterators_h
+
+#include "jstypes.h"
+#include "gc/GCRuntime.h"
+#include "gc/IteratorUtils.h"
+#include "gc/Zone.h"
+#include "vm/Compartment.h"
+#include "vm/Runtime.h"
+
+namespace JS {
+class JS_PUBLIC_API Realm;
+}
+
+namespace js {
+
+// Selects whether zone iteration includes the atoms zone.
+enum ZoneSelector { WithAtoms, SkipAtoms };
+
+// Iterate over all zones in the runtime. May or may not include the atoms zone.
+class ZonesIter {
+ gc::AutoEnterIteration iterMarker;
+ JS::Zone** it;
+ JS::Zone** const end;
+
+ public:
+ ZonesIter(gc::GCRuntime* gc, ZoneSelector selector)
+ : iterMarker(gc), it(gc->zones().begin()), end(gc->zones().end()) {
+ if (selector == SkipAtoms) {
+ // SkipAtoms relies on the atoms zone being first in the zones list.
+ MOZ_ASSERT(get()->isAtomsZone());
+ next();
+ }
+ }
+ ZonesIter(JSRuntime* rt, ZoneSelector selector)
+ : ZonesIter(&rt->gc, selector) {}
+
+ bool done() const { return it == end; }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ it++;
+ }
+
+ JS::Zone* get() const {
+ MOZ_ASSERT(!done());
+ return *it;
+ }
+
+ operator JS::Zone*() const { return get(); }
+ JS::Zone* operator->() const { return get(); }
+};
+
+// Iterate over all zones in the runtime apart from the atoms zone.
+class NonAtomZonesIter : public ZonesIter {
+ public:
+ explicit NonAtomZonesIter(gc::GCRuntime* gc) : ZonesIter(gc, SkipAtoms) {}
+ explicit NonAtomZonesIter(JSRuntime* rt) : NonAtomZonesIter(&rt->gc) {}
+};
+
+// Iterate over every zone in the runtime, including the atoms zone (this uses
+// WithAtoms). NOTE(review): an earlier comment here mentioned excluding zones
+// in use by parse threads, which does not match the WithAtoms selector —
+// presumed stale; confirm.
+class AllZonesIter : public ZonesIter {
+ public:
+ explicit AllZonesIter(gc::GCRuntime* gc) : ZonesIter(gc, WithAtoms) {}
+ explicit AllZonesIter(JSRuntime* rt) : AllZonesIter(&rt->gc) {}
+};
+
+// Iterate over the compartments of a single zone.
+struct CompartmentsInZoneIter {
+ explicit CompartmentsInZoneIter(JS::Zone* zone) : zone(zone) {
+ it = zone->compartments().begin();
+ }
+
+ // Done when the cursor falls outside the compartments vector. The
+ // lower-bound check tolerates the vector moving under us — NOTE(review):
+ // presumably defensive against mutation during iteration; confirm.
+ bool done() const {
+ MOZ_ASSERT(it);
+ return it < zone->compartments().begin() ||
+ it >= zone->compartments().end();
+ }
+ void next() {
+ MOZ_ASSERT(!done());
+ it++;
+ }
+
+ JS::Compartment* get() const {
+ MOZ_ASSERT(it);
+ return *it;
+ }
+
+ operator JS::Compartment*() const { return get(); }
+ JS::Compartment* operator->() const { return get(); }
+
+ private:
+ JS::Zone* zone;
+ JS::Compartment** it;
+};
+
+// Iterate over the realms of a single compartment. Compartments always hold
+// at least one realm (asserted in the constructor).
+class RealmsInCompartmentIter {
+ JS::Compartment* comp;
+ JS::Realm** it;
+
+ public:
+ explicit RealmsInCompartmentIter(JS::Compartment* comp) : comp(comp) {
+ it = comp->realms().begin();
+ MOZ_ASSERT(!done(), "Compartments must have at least one realm");
+ }
+
+ // Done when the cursor falls outside the realms vector (same defensive
+ // bounds check as CompartmentsInZoneIter).
+ bool done() const {
+ MOZ_ASSERT(it);
+ return it < comp->realms().begin() || it >= comp->realms().end();
+ }
+ void next() {
+ MOZ_ASSERT(!done());
+ it++;
+ }
+
+ JS::Realm* get() const {
+ MOZ_ASSERT(!done());
+ return *it;
+ }
+
+ operator JS::Realm*() const { return get(); }
+ JS::Realm* operator->() const { return get(); }
+};
+
+// Realms of a zone = for each compartment in the zone, each of its realms.
+using RealmsInZoneIter =
+ NestedIterator<CompartmentsInZoneIter, RealmsInCompartmentIter>;
+
+// This iterator iterates over all the compartments or realms in a given set of
+// zones. The set of zones is determined by iterating ZoneIterT. The set of
+// compartments or realms is determined by InnerIterT.
+template <class ZonesIterT, class InnerIterT>
+class CompartmentsOrRealmsIterT
+ : public NestedIterator<ZonesIterT, InnerIterT> {
+ // Keeps the GC aware that iteration is in progress for its lifetime.
+ gc::AutoEnterIteration iterMarker;
+
+ public:
+ explicit CompartmentsOrRealmsIterT(gc::GCRuntime* gc)
+ : NestedIterator<ZonesIterT, InnerIterT>(gc), iterMarker(gc) {}
+ explicit CompartmentsOrRealmsIterT(JSRuntime* rt)
+ : CompartmentsOrRealmsIterT(&rt->gc) {}
+};
+
+// Runtime-wide iterators over all non-atoms-zone compartments/realms.
+using CompartmentsIter =
+ CompartmentsOrRealmsIterT<NonAtomZonesIter, CompartmentsInZoneIter>;
+using RealmsIter =
+ CompartmentsOrRealmsIterT<NonAtomZonesIter, RealmsInZoneIter>;
+
+} // namespace js
+
+#endif // gc_PublicIterators_h
diff --git a/js/src/gc/RelocationOverlay.h b/js/src/gc/RelocationOverlay.h
new file mode 100644
index 0000000000..047e763d44
--- /dev/null
+++ b/js/src/gc/RelocationOverlay.h
@@ -0,0 +1,66 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * GC-internal definition of relocation overlay used while moving cells.
+ */
+
+#ifndef gc_RelocationOverlay_h
+#define gc_RelocationOverlay_h
+
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "gc/Cell.h"
+
+namespace js {
+namespace gc {
+
+/*
+ * This structure overlays a Cell that has been moved and provides a way to find
+ * its new location. It's used during generational and compacting GC.
+ */
+class RelocationOverlay : public Cell {
+ public:
+ /* The location the cell has been moved to, stored in the cell header. */
+ Cell* forwardingAddress() const {
+ MOZ_ASSERT(isForwarded());
+ return reinterpret_cast<Cell*>(header_.getForwardingAddress());
+ }
+
+ protected:
+ /* A list entry to track all relocated things. */
+ RelocationOverlay* next_;
+
+ explicit RelocationOverlay(Cell* dst);
+
+ public:
+ // Reinterpret an already-forwarded cell as its overlay (const and non-const
+ // flavors).
+ static const RelocationOverlay* fromCell(const Cell* cell) {
+ return static_cast<const RelocationOverlay*>(cell);
+ }
+
+ static RelocationOverlay* fromCell(Cell* cell) {
+ return static_cast<RelocationOverlay*>(cell);
+ }
+
+ // Mark |src| as moved to |dst| and return the overlay written over |src|.
+ static RelocationOverlay* forwardCell(Cell* src, Cell* dst);
+
+ // Link this overlay into the list of relocated things; only valid once the
+ // cell has been forwarded.
+ void setNext(RelocationOverlay* next) {
+ MOZ_ASSERT(isForwarded());
+ next_ = next;
+ }
+
+ RelocationOverlay* next() const {
+ MOZ_ASSERT(isForwarded());
+ return next_;
+ }
+};
+
+} // namespace gc
+} // namespace js
+
+#endif /* gc_RelocationOverlay_h */
diff --git a/js/src/gc/RootMarking.cpp b/js/src/gc/RootMarking.cpp
new file mode 100644
index 0000000000..c397711933
--- /dev/null
+++ b/js/src/gc/RootMarking.cpp
@@ -0,0 +1,467 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef MOZ_VALGRIND
+# include <valgrind/memcheck.h>
+#endif
+
+#include "jstypes.h"
+
+#include "debugger/DebugAPI.h"
+#include "gc/ClearEdgesTracer.h"
+#include "gc/GCInternals.h"
+#include "gc/PublicIterators.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "js/ValueArray.h"
+#include "vm/BigIntType.h"
+#include "vm/Compartment.h"
+#include "vm/HelperThreadState.h"
+#include "vm/JSContext.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::LinkedList;
+
+using JS::AutoGCRooter;
+
+using RootRange = RootedValueMap::Range;
+using RootEntry = RootedValueMap::Entry;
+using RootEnum = RootedValueMap::Enum;
+
+// Trace a single typed stack root; null roots are tolerated
+// (TraceNullableRoot).
+template <typename Base, typename T>
+inline void TypedRootedGCThingBase<Base, T>::trace(JSTracer* trc,
+ const char* name) {
+ auto* self = this->template derived<T>();
+ TraceNullableRoot(trc, self->address(), name);
+}
+
+// Walk a linked list of Rooted<T> stack roots and trace each one.
+template <typename T>
+static inline void TraceExactStackRootList(JSTracer* trc,
+ StackRootedBase* listHead,
+ const char* name) {
+ // Check size of Rooted<T> does not increase.
+ static_assert(sizeof(Rooted<T>) == sizeof(T) + 2 * sizeof(uintptr_t));
+
+ for (StackRootedBase* root = listHead; root; root = root->previous()) {
+ static_cast<Rooted<T>*>(root)->trace(trc, name);
+ }
+}
+
+// As above, but for Traceable roots, which dispatch through a virtual trace.
+static inline void TraceExactStackRootTraceableList(JSTracer* trc,
+ StackRootedBase* listHead,
+ const char* name) {
+ for (StackRootedBase* root = listHead; root; root = root->previous()) {
+ static_cast<StackRootedTraceableBase*>(root)->trace(trc, name);
+ }
+}
+
+// Trace every per-kind list of exact stack roots: one list per trace kind,
+// plus ids, values, and virtual-dispatch Traceables.
+static inline void TraceStackRoots(JSTracer* trc,
+ JS::RootedListHeads& stackRoots) {
+#define TRACE_ROOTS(name, type, _, _1) \
+ TraceExactStackRootList<type*>(trc, stackRoots[JS::RootKind::name], \
+ "exact-" #name);
+ JS_FOR_EACH_TRACEKIND(TRACE_ROOTS)
+#undef TRACE_ROOTS
+ TraceExactStackRootList<jsid>(trc, stackRoots[JS::RootKind::Id], "exact-id");
+ TraceExactStackRootList<Value>(trc, stackRoots[JS::RootKind::Value],
+ "exact-value");
+
+ // RootedTraceable uses virtual dispatch.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ TraceExactStackRootTraceableList(trc, stackRoots[JS::RootKind::Traceable],
+ "Traceable");
+}
+
+// Trace this context's own stack-root lists.
+void JS::RootingContext::traceStackRoots(JSTracer* trc) {
+ TraceStackRoots(trc, stackRoots_);
+}
+
+static void TraceExactStackRoots(JSContext* cx, JSTracer* trc) {
+ cx->traceStackRoots(trc);
+}
+
+// Trace a linked list of PersistentRooted<T> heap roots.
+template <typename T>
+static inline void TracePersistentRootedList(
+ JSTracer* trc, LinkedList<PersistentRootedBase>& list, const char* name) {
+ for (PersistentRootedBase* root : list) {
+ static_cast<PersistentRooted<T>*>(root)->trace(trc, name);
+ }
+}
+
+// As above, but for Traceable persistent roots (virtual dispatch).
+static inline void TracePersistentRootedTraceableList(
+ JSTracer* trc, LinkedList<PersistentRootedBase>& list, const char* name) {
+ for (PersistentRootedBase* root : list) {
+ static_cast<PersistentRootedTraceableBase*>(root)->trace(trc, name);
+ }
+}
+
+// Trace every per-kind list of persistent (heap) roots, mirroring
+// TraceStackRoots above.
+void JSRuntime::tracePersistentRoots(JSTracer* trc) {
+#define TRACE_ROOTS(name, type, _, _1) \
+ TracePersistentRootedList<type*>(trc, heapRoots.ref()[JS::RootKind::name], \
+ "persistent-" #name);
+ JS_FOR_EACH_TRACEKIND(TRACE_ROOTS)
+#undef TRACE_ROOTS
+ TracePersistentRootedList<jsid>(trc, heapRoots.ref()[JS::RootKind::Id],
+ "persistent-id");
+ TracePersistentRootedList<Value>(trc, heapRoots.ref()[JS::RootKind::Value],
+ "persistent-value");
+
+ // RootedTraceable uses virtual dispatch.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ TracePersistentRootedTraceableList(
+ trc, heapRoots.ref()[JS::RootKind::Traceable], "persistent-traceable");
+}
+
+static void TracePersistentRooted(JSRuntime* rt, JSTracer* trc) {
+ rt->tracePersistentRoots(trc);
+}
+
+// Reset every PersistentRooted<T> in |list|; reset() unlinks the root, so the
+// loop drains the list from the front.
+template <typename T>
+static void FinishPersistentRootedChain(
+ LinkedList<PersistentRootedBase>& list) {
+ while (!list.isEmpty()) {
+ static_cast<PersistentRooted<T>*>(list.getFirst())->reset();
+ }
+}
+
+// Drop all persistent roots at runtime shutdown, one list per root kind.
+void JSRuntime::finishPersistentRoots() {
+#define FINISH_ROOT_LIST(name, type, _, _1) \
+ FinishPersistentRootedChain<type*>(heapRoots.ref()[JS::RootKind::name]);
+ JS_FOR_EACH_TRACEKIND(FINISH_ROOT_LIST)
+#undef FINISH_ROOT_LIST
+ FinishPersistentRootedChain<jsid>(heapRoots.ref()[JS::RootKind::Id]);
+ FinishPersistentRootedChain<Value>(heapRoots.ref()[JS::RootKind::Value]);
+
+ // Note that we do not finalize the Traceable list as we do not know how to
+ // safely clear members. We instead assert that none escape the RootLists.
+ // See the comment on RootLists::~RootLists for details.
+}
+
+// Trace a contiguous array of Values rooted by JS::RootedValueArray.
+JS_PUBLIC_API void js::TraceValueArray(JSTracer* trc, size_t length,
+ Value* elements) {
+ TraceRootRange(trc, length, elements, "JS::RootedValueArray");
+}
+
+// Dispatch an AutoGCRooter to its concrete subclass's trace method based on
+// its kind tag.
+void AutoGCRooter::trace(JSTracer* trc) {
+ switch (kind_) {
+ case Kind::Wrapper:
+ static_cast<AutoWrapperRooter*>(this)->trace(trc);
+ break;
+
+ case Kind::WrapperVector:
+ static_cast<AutoWrapperVector*>(this)->trace(trc);
+ break;
+
+ case Kind::Custom:
+ static_cast<JS::CustomAutoRooter*>(this)->trace(trc);
+ break;
+
+ default:
+ MOZ_CRASH("Bad AutoGCRooter::Kind");
+ break;
+ }
+}
+
+void AutoWrapperRooter::trace(JSTracer* trc) {
+ /*
+ * We need to use TraceManuallyBarrieredEdge here because we trace wrapper
+ * roots in every slice. This is because of some rule-breaking in
+ * RemapAllWrappersForObject; see comment there.
+ */
+ TraceManuallyBarrieredEdge(trc, &value.get(), "js::AutoWrapperRooter.value");
+}
+
+void AutoWrapperVector::trace(JSTracer* trc) {
+ /*
+ * We need to use TraceManuallyBarrieredEdge here because we trace wrapper
+ * roots in every slice. This is because of some rule-breaking in
+ * RemapAllWrappersForObject; see comment there.
+ */
+ for (WrapperValue& value : *this) {
+ TraceManuallyBarrieredEdge(trc, &value.get(),
+ "js::AutoWrapperVector.vector");
+ }
+}
+
+// Trace every registered AutoGCRooter list, one list per rooter kind.
+void JS::RootingContext::traceAllGCRooters(JSTracer* trc) {
+ for (AutoGCRooter* list : autoGCRooters_) {
+ traceGCRooterList(trc, list);
+ }
+}
+
+// Trace only the wrapper-related rooter lists (used when wrapper roots must be
+// retraced every slice).
+void JS::RootingContext::traceWrapperGCRooters(JSTracer* trc) {
+ traceGCRooterList(trc, autoGCRooters_[AutoGCRooter::Kind::Wrapper]);
+ traceGCRooterList(trc, autoGCRooters_[AutoGCRooter::Kind::WrapperVector]);
+}
+
+/* static */
+// Walk one intrusive rooter list via the |down| links and trace each entry.
+inline void JS::RootingContext::traceGCRooterList(JSTracer* trc,
+ AutoGCRooter* head) {
+ for (AutoGCRooter* rooter = head; rooter; rooter = rooter->down) {
+ rooter->trace(trc);
+ }
+}
+
+// Trace the value/getter/setter edges of a rooted PropertyDescriptor.
+void PropertyDescriptor::trace(JSTracer* trc) {
+ TraceRoot(trc, &value_, "Descriptor::value");
+ if (getter_) {
+ TraceRoot(trc, &getter_, "Descriptor::getter");
+ }
+ if (setter_) {
+ TraceRoot(trc, &setter_, "Descriptor::setter");
+ }
+}
+
+// Trace all runtime roots for a major collection: atoms (when marking),
+// incoming cross-compartment edges from uncollected compartments, then the
+// common root set.
+void js::gc::GCRuntime::traceRuntimeForMajorGC(JSTracer* trc,
+ AutoGCSession& session) {
+ MOZ_ASSERT(!TlsContext.get()->suppressGC);
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
+
+ // We only need to trace atoms when we're marking; atoms are never moved by
+ // compacting GC.
+ if (atomsZone()->isGCMarking()) {
+ traceRuntimeAtoms(trc);
+ }
+
+ {
+ // Trace incoming cross compartment edges from uncollected compartments,
+ // skipping gray edges which are traced later.
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_CCWS);
+ Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
+ trc, Compartment::NonGrayEdges);
+ }
+
+ traceRuntimeCommon(trc, MarkRuntime);
+}
+
+// Trace all runtime roots for a minor (nursery) collection.
+void js::gc::GCRuntime::traceRuntimeForMinorGC(JSTracer* trc,
+ AutoGCSession& session) {
+ MOZ_ASSERT(!TlsContext.get()->suppressGC);
+
+ // Note that we *must* trace the runtime during the SHUTDOWN_GC's minor GC
+ // despite having called FinishRoots already. This is because FinishRoots
+ // does not clear the crossCompartmentWrapper map. It cannot do this
+ // because Proxy's trace for CrossCompartmentWrappers asserts presence in
+ // the map. And we can reach its trace function despite having finished the
+ // roots via the edges stored by the pre-barrier verifier when we finish
+ // the verifier for the last time.
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
+
+ traceRuntimeCommon(trc, TraceRuntime);
+}
+
+// Trace the whole runtime with a non-marking tracer, evicting the nursery
+// first so all cells are tenured.
+void js::TraceRuntime(JSTracer* trc) {
+ MOZ_ASSERT(!trc->isMarkingTracer());
+
+ JSRuntime* rt = trc->runtime();
+ AutoEmptyNurseryAndPrepareForTracing prep(rt->mainContextFromOwnThread());
+ gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::TRACE_HEAP);
+ rt->gc.traceRuntime(trc, prep);
+}
+
+// As TraceRuntime, but without evicting the nursery first.
+void js::TraceRuntimeWithoutEviction(JSTracer* trc) {
+ MOZ_ASSERT(!trc->isMarkingTracer());
+
+ JSRuntime* rt = trc->runtime();
+ AutoTraceSession session(rt);
+ gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::TRACE_HEAP);
+ rt->gc.traceRuntime(trc, session);
+}
+
+// Common body for the two entry points above: atoms plus the shared root set.
+void js::gc::GCRuntime::traceRuntime(JSTracer* trc, AutoTraceSession& session) {
+ MOZ_ASSERT(!rt->isBeingDestroyed());
+
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
+
+ traceRuntimeAtoms(trc);
+ traceRuntimeCommon(trc, TraceRuntime);
+}
+
+// Trace roots living in the atoms zone (the atoms table and the JIT's
+// atom-zone roots).
+void js::gc::GCRuntime::traceRuntimeAtoms(JSTracer* trc) {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_RUNTIME_DATA);
+ TraceAtoms(trc);
+ jit::JitRuntime::TraceAtomZoneRoots(trc);
+}
+
+// Trace the root set shared by heap tracing, major GC and minor GC: stack
+// roots, persistent roots, per-realm roots, zone roots (major only), helper
+// threads, debugger frames, and the embedding's registered roots.
+// |traceOrMark| distinguishes plain tracing from GC marking (which defers
+// gray roots to a later phase).
+void js::gc::GCRuntime::traceRuntimeCommon(JSTracer* trc,
+ TraceOrMarkRuntime traceOrMark) {
+ {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_STACK);
+
+ JSContext* cx = rt->mainContextFromOwnThread();
+
+ // Trace active interpreter and JIT stack roots.
+ TraceInterpreterActivations(cx, trc);
+ jit::TraceJitActivations(cx, trc);
+
+ // Trace legacy C stack roots.
+ cx->traceAllGCRooters(trc);
+
+ // Trace C stack roots.
+ TraceExactStackRoots(cx, trc);
+
+ // Trace roots registered in the rootsHash table.
+ for (RootRange r = rootsHash.ref().all(); !r.empty(); r.popFront()) {
+ const RootEntry& entry = r.front();
+ TraceRoot(trc, entry.key(), entry.value());
+ }
+ }
+
+ // Trace runtime global roots.
+ TracePersistentRooted(rt, trc);
+
+#ifdef JS_HAS_INTL_API
+ // Trace the shared Intl data.
+ rt->traceSharedIntlData(trc);
+#endif
+
+ // Trace the JSContext.
+ rt->mainContextFromOwnThread()->trace(trc);
+
+ // Trace all realm roots, but not the realm itself; it is traced via the
+ // parent pointer if traceRoots actually traces anything.
+ for (RealmsIter r(rt); !r.done(); r.next()) {
+ r->traceRoots(trc, traceOrMark);
+ }
+
+ if (!JS::RuntimeHeapIsMinorCollecting()) {
+ // Trace the self-hosting stencil. The contents of this are always tenured.
+ rt->traceSelfHostingStencil(trc);
+
+ for (ZonesIter zone(this, ZoneSelector::SkipAtoms); !zone.done();
+ zone.next()) {
+ zone->traceRootsInMajorGC(trc);
+ }
+
+ // Trace interpreter entry code generated with --emit-interpreter-entry
+ if (rt->hasJitRuntime() && rt->jitRuntime()->hasInterpreterEntryMap()) {
+ rt->jitRuntime()->getInterpreterEntryMap()->traceTrampolineCode(trc);
+ }
+ }
+
+ // Trace helper thread roots.
+ HelperThreadState().trace(trc);
+
+ // Trace Debugger.Frames that have live hooks, since dropping them would be
+ // observable. In effect, they are rooted by the stack frames.
+ DebugAPI::traceFramesWithLiveHooks(trc);
+
+ // Trace the embedding's black and gray roots.
+ if (!JS::RuntimeHeapIsMinorCollecting()) {
+ gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_EMBEDDING);
+
+ /*
+ * The embedding can register additional roots here.
+ *
+ * We don't need to trace these in a minor GC because all pointers into
+ * the nursery should be in the store buffer, and we want to avoid the
+ * time taken to trace all these roots.
+ */
+ traceEmbeddingBlackRoots(trc);
+
+ /* During GC, we don't trace gray roots at this stage. */
+ if (traceOrMark == TraceRuntime) {
+ traceEmbeddingGrayRoots(trc);
+ }
+ }
+
+ traceKeptObjects(trc);
+}
+
+// Invoke every embedder-registered black-root tracing callback.
+void GCRuntime::traceEmbeddingBlackRoots(JSTracer* trc) {
+ // The analysis doesn't like the function pointer below.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ for (size_t i = 0; i < blackRootTracers.ref().length(); i++) {
+ const Callback<JSTraceDataOp>& e = blackRootTracers.ref()[i];
+ (*e.op)(trc, e.data);
+ }
+}
+
+// Trace embedder gray roots to completion (unlimited budget).
+void GCRuntime::traceEmbeddingGrayRoots(JSTracer* trc) {
+ SliceBudget budget = SliceBudget::unlimited();
+ MOZ_ALWAYS_TRUE(traceEmbeddingGrayRoots(trc, budget) == Finished);
+}
+
+// Budgeted variant: the embedder callback may return early; report whether it
+// finished within |budget|.
+IncrementalProgress GCRuntime::traceEmbeddingGrayRoots(JSTracer* trc,
+ SliceBudget& budget) {
+ // The analysis doesn't like the function pointer below.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ const auto& callback = grayRootTracer.ref();
+ if (!callback.op) {
+ return Finished;
+ }
+
+ return callback.op(trc, budget, callback.data) ? Finished : NotFinished;
+}
+
+#ifdef DEBUG
+// Debug-only tracer that crashes on any traced edge; used after shutdown to
+// prove the runtime root set is empty.
+class AssertNoRootsTracer final : public JS::CallbackTracer {
+ void onChild(JS::GCCellPtr thing, const char* name) override {
+ MOZ_CRASH("There should not be any roots during runtime shutdown");
+ }
+
+ public:
+ // This skips tracking WeakMap entries because they are not roots.
+ explicit AssertNoRootsTracer(JSRuntime* rt)
+ : JS::CallbackTracer(rt, JS::TracerKind::Callback,
+ JS::WeakMapTraceAction::Skip) {}
+};
+#endif // DEBUG
+
+// Tear down all runtime roots at shutdown: atoms, the roots hash, persistent
+// roots, self-hosting, per-zone roots, and finally the embedding's root
+// tracers (cleared via ClearEdgesTracer before the callbacks are removed).
+void js::gc::GCRuntime::finishRoots() {
+ AutoNoteSingleThreadedRegion anstr;
+
+ rt->finishAtoms();
+ restoreSharedAtomsZone();
+
+ rootsHash.ref().clear();
+
+ rt->finishPersistentRoots();
+
+ rt->finishSelfHosting();
+
+ for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
+ zone->finishRoots();
+ }
+
+#ifdef JS_GC_ZEAL
+ clearSelectedForMarking();
+#endif
+
+ // Clear out the interpreter entry map before the final gc.
+ ClearInterpreterEntryMap(rt);
+
+ // Clear any remaining roots from the embedding (as otherwise they will be
+ // left dangling after we shut down) and remove the callbacks.
+ ClearEdgesTracer trc(rt);
+ traceEmbeddingBlackRoots(&trc);
+ traceEmbeddingGrayRoots(&trc);
+ clearBlackAndGrayRootTracers();
+}
+
+// Debug-only check that a full major-GC root trace finds nothing (uses
+// AssertNoRootsTracer, which crashes on any edge).
+void js::gc::GCRuntime::checkNoRuntimeRoots(AutoGCSession& session) {
+#ifdef DEBUG
+ AssertNoRootsTracer trc(rt);
+ traceRuntimeForMajorGC(&trc, session);
+#endif // DEBUG
+}
+
+// Register a persistent root with the runtime, keyed by root kind.
+JS_PUBLIC_API void JS::AddPersistentRoot(JS::RootingContext* cx, RootKind kind,
+ PersistentRootedBase* root) {
+ JSRuntime* rt = static_cast<JSContext*>(cx)->runtime();
+ rt->heapRoots.ref()[kind].insertBack(root);
+}
+
+JS_PUBLIC_API void JS::AddPersistentRoot(JSRuntime* rt, RootKind kind,
+ PersistentRootedBase* root) {
+ rt->heapRoots.ref()[kind].insertBack(root);
+}
diff --git a/js/src/gc/Scheduling.cpp b/js/src/gc/Scheduling.cpp
new file mode 100644
index 0000000000..ec03c85f8d
--- /dev/null
+++ b/js/src/gc/Scheduling.cpp
@@ -0,0 +1,873 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Scheduling.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/TimeStamp.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "gc/Memory.h"
+#include "gc/Nursery.h"
+#include "gc/RelocationOverlay.h"
+#include "gc/ZoneAllocator.h"
+#include "util/DifferentialTesting.h"
+#include "vm/MutexIDs.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::CheckedInt;
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
+/*
+ * We may start to collect a zone before its trigger threshold is reached if
+ * GCRuntime::maybeGC() is called for that zone or we start collecting other
+ * zones. These eager threshold factors are not configurable.
+ */
+static constexpr double HighFrequencyEagerAllocTriggerFactor = 0.85;
+static constexpr double LowFrequencyEagerAllocTriggerFactor = 0.9;
+
+/*
+ * Don't allow heap growth factors to be set so low that eager collections could
+ * reduce the trigger threshold.
+ */
+static constexpr double MinHeapGrowthFactor =
+ 1.0f / std::min(HighFrequencyEagerAllocTriggerFactor,
+ LowFrequencyEagerAllocTriggerFactor);
+
+// Limit various parameters to reasonable levels to catch errors.
+static constexpr double MaxHeapGrowthFactor = 100;
+static constexpr size_t MaxNurseryBytesParam = 128 * 1024 * 1024;
+
+namespace {
+
+// Helper classes to marshal GC parameter values to/from uint32_t.
+
+template <typename T>
+struct ConvertGeneric {
+ static uint32_t toUint32(T value) {
+ static_assert(std::is_arithmetic_v<T>);
+ if constexpr (std::is_signed_v<T>) {
+ MOZ_ASSERT(value >= 0);
+ }
+ if constexpr (!std::is_same_v<T, bool> &&
+ std::numeric_limits<T>::max() >
+ std::numeric_limits<uint32_t>::max()) {
+ MOZ_ASSERT(value <= UINT32_MAX);
+ }
+ return uint32_t(value);
+ }
+ static Maybe<T> fromUint32(uint32_t param) {
+ // Currently we use explicit conversion and don't range check.
+ return Some(T(param));
+ }
+};
+
+using ConvertBool = ConvertGeneric<bool>;
+using ConvertSize = ConvertGeneric<size_t>;
+using ConvertDouble = ConvertGeneric<double>;
+
+struct ConvertTimes100 {
+ static uint32_t toUint32(double value) { return uint32_t(value * 100.0); }
+ static Maybe<double> fromUint32(uint32_t param) {
+ return Some(double(param) / 100.0);
+ }
+};
+
+struct ConvertNurseryBytes : ConvertSize {
+ static Maybe<size_t> fromUint32(uint32_t param) {
+ return Some(Nursery::roundSize(param));
+ }
+};
+
+struct ConvertKB {
+ static uint32_t toUint32(size_t value) { return value / 1024; }
+ static Maybe<size_t> fromUint32(uint32_t param) {
+ // Parameters which represent heap sizes in bytes are restricted to values
+ // which can be represented on 32 bit platforms.
+ CheckedInt<uint32_t> size = CheckedInt<uint32_t>(param) * 1024;
+ return size.isValid() ? Some(size_t(size.value())) : Nothing();
+ }
+};
+
+struct ConvertMB {
+ static uint32_t toUint32(size_t value) { return value / (1024 * 1024); }
+ static Maybe<size_t> fromUint32(uint32_t param) {
+ // Parameters which represent heap sizes in bytes are restricted to values
+ // which can be represented on 32 bit platforms.
+ CheckedInt<uint32_t> size = CheckedInt<uint32_t>(param) * 1024 * 1024;
+ return size.isValid() ? Some(size_t(size.value())) : Nothing();
+ }
+};
+
+struct ConvertMillis {
+ static uint32_t toUint32(TimeDuration value) {
+ return uint32_t(value.ToMilliseconds());
+ }
+ static Maybe<TimeDuration> fromUint32(uint32_t param) {
+ return Some(TimeDuration::FromMilliseconds(param));
+ }
+};
+
+struct ConvertSeconds {
+ static uint32_t toUint32(TimeDuration value) {
+ return uint32_t(value.ToSeconds());
+ }
+ static Maybe<TimeDuration> fromUint32(uint32_t param) {
+ return Some(TimeDuration::FromSeconds(param));
+ }
+};
+
+} // anonymous namespace
+
+// Helper functions to check GC parameter values
+
+template <typename T>
+static bool NoCheck(T value) {
+ return true;
+}
+
+template <typename T>
+static bool CheckNonZero(T value) {
+ return value != 0;
+}
+
+static bool CheckNurserySize(size_t bytes) {
+ return bytes >= SystemPageSize() && bytes <= MaxNurseryBytesParam;
+}
+
+static bool CheckHeapGrowth(double growth) {
+ return growth >= MinHeapGrowthFactor && growth <= MaxHeapGrowthFactor;
+}
+
+static bool CheckIncrementalLimit(double factor) {
+ return factor >= 1.0 && factor <= MaxHeapGrowthFactor;
+}
+
+static bool CheckNonZeroUnitRange(double value) {
+ return value > 0.0 && value <= 100.0;
+}
+
+GCSchedulingTunables::GCSchedulingTunables() {
+#define INIT_TUNABLE_FIELD(key, type, name, convert, check, default) \
+ name##_ = default; \
+ MOZ_ASSERT(check(name##_));
+ FOR_EACH_GC_TUNABLE(INIT_TUNABLE_FIELD)
+#undef INIT_TUNABLE_FIELD
+
+ checkInvariants();
+}
+
+uint32_t GCSchedulingTunables::getParameter(JSGCParamKey key) {
+ switch (key) {
+#define GET_TUNABLE_FIELD(key, type, name, convert, check, default) \
+ case key: \
+ return convert::toUint32(name##_);
+ FOR_EACH_GC_TUNABLE(GET_TUNABLE_FIELD)
+#undef GET_TUNABLE_FIELD
+
+ default:
+ MOZ_CRASH("Unknown parameter key");
+ }
+}
+
+bool GCSchedulingTunables::setParameter(JSGCParamKey key, uint32_t value) {
+ auto guard = mozilla::MakeScopeExit([this] { checkInvariants(); });
+
+ switch (key) {
+#define SET_TUNABLE_FIELD(key, type, name, convert, check, default) \
+ case key: { \
+ Maybe<type> converted = convert::fromUint32(value); \
+ if (!converted || !check(converted.value())) { \
+ return false; \
+ } \
+ name##_ = converted.value(); \
+ break; \
+ }
+ FOR_EACH_GC_TUNABLE(SET_TUNABLE_FIELD)
+#undef SET_TUNABLE_FIELD
+
+ default:
+ MOZ_CRASH("Unknown GC parameter.");
+ }
+
+ maintainInvariantsAfterUpdate(key);
+ return true;
+}
+
+void GCSchedulingTunables::resetParameter(JSGCParamKey key) {
+ auto guard = mozilla::MakeScopeExit([this] { checkInvariants(); });
+
+ switch (key) {
+#define RESET_TUNABLE_FIELD(key, type, name, convert, check, default) \
+ case key: \
+ name##_ = default; \
+ MOZ_ASSERT(check(name##_)); \
+ break;
+ FOR_EACH_GC_TUNABLE(RESET_TUNABLE_FIELD)
+#undef RESET_TUNABLE_FIELD
+
+ default:
+ MOZ_CRASH("Unknown GC parameter.");
+ }
+
+ maintainInvariantsAfterUpdate(key);
+}
+
+void GCSchedulingTunables::maintainInvariantsAfterUpdate(JSGCParamKey updated) {
+ switch (updated) {
+ case JSGC_MIN_NURSERY_BYTES:
+ if (gcMaxNurseryBytes_ < gcMinNurseryBytes_) {
+ gcMaxNurseryBytes_ = gcMinNurseryBytes_;
+ }
+ break;
+ case JSGC_MAX_NURSERY_BYTES:
+ if (gcMinNurseryBytes_ > gcMaxNurseryBytes_) {
+ gcMinNurseryBytes_ = gcMaxNurseryBytes_;
+ }
+ break;
+ case JSGC_SMALL_HEAP_SIZE_MAX:
+ if (smallHeapSizeMaxBytes_ >= largeHeapSizeMinBytes_) {
+ largeHeapSizeMinBytes_ = smallHeapSizeMaxBytes_ + 1;
+ }
+ break;
+ case JSGC_LARGE_HEAP_SIZE_MIN:
+ if (largeHeapSizeMinBytes_ <= smallHeapSizeMaxBytes_) {
+ smallHeapSizeMaxBytes_ = largeHeapSizeMinBytes_ - 1;
+ }
+ break;
+ case JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH:
+ if (highFrequencySmallHeapGrowth_ < highFrequencyLargeHeapGrowth_) {
+ highFrequencyLargeHeapGrowth_ = highFrequencySmallHeapGrowth_;
+ }
+ break;
+ case JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH:
+ if (highFrequencyLargeHeapGrowth_ > highFrequencySmallHeapGrowth_) {
+ highFrequencySmallHeapGrowth_ = highFrequencyLargeHeapGrowth_;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+// Debug-assert the cross-parameter invariants that setParameter /
+// resetParameter (via maintainInvariantsAfterUpdate) are required to preserve.
+void GCSchedulingTunables::checkInvariants() {
+  MOZ_ASSERT(gcMinNurseryBytes_ == Nursery::roundSize(gcMinNurseryBytes_));
+  MOZ_ASSERT(gcMaxNurseryBytes_ == Nursery::roundSize(gcMaxNurseryBytes_));
+  MOZ_ASSERT(gcMinNurseryBytes_ <= gcMaxNurseryBytes_);
+  MOZ_ASSERT(gcMinNurseryBytes_ >= SystemPageSize());
+  MOZ_ASSERT(gcMaxNurseryBytes_ <= MaxNurseryBytesParam);
+
+  MOZ_ASSERT(largeHeapSizeMinBytes_ > smallHeapSizeMaxBytes_);
+
+  MOZ_ASSERT(lowFrequencyHeapGrowth_ >= MinHeapGrowthFactor);
+  MOZ_ASSERT(lowFrequencyHeapGrowth_ <= MaxHeapGrowthFactor);
+
+  MOZ_ASSERT(highFrequencySmallHeapGrowth_ >= MinHeapGrowthFactor);
+  MOZ_ASSERT(highFrequencySmallHeapGrowth_ <= MaxHeapGrowthFactor);
+  MOZ_ASSERT(highFrequencyLargeHeapGrowth_ <= highFrequencySmallHeapGrowth_);
+  MOZ_ASSERT(highFrequencyLargeHeapGrowth_ >= MinHeapGrowthFactor);
+  // NOTE(review): the next line repeats the small-heap upper bound asserted
+  // above; it was presumably meant to check highFrequencyLargeHeapGrowth_.
+  // Harmless either way: the large-heap bound is implied by the two asserts
+  // preceding it.
+  MOZ_ASSERT(highFrequencySmallHeapGrowth_ <= MaxHeapGrowthFactor);
+}
+
+// We are in "high frequency" GC mode when the previous collection finished
+// less than highFrequencyThreshold before |currentTime|. Skipped entirely
+// under differential testing so wall-clock timing cannot influence observable
+// scheduling decisions.
+void GCSchedulingState::updateHighFrequencyMode(
+    const mozilla::TimeStamp& lastGCTime, const mozilla::TimeStamp& currentTime,
+    const GCSchedulingTunables& tunables) {
+  if (js::SupportDifferentialTesting()) {
+    return;
+  }
+
+  inHighFrequencyGCMode_ =
+      !lastGCTime.IsNull() &&
+      lastGCTime + tunables.highFrequencyThreshold() > currentTime;
+}
+
+// Force high-frequency mode for GC reasons that signal allocation pressure,
+// regardless of how recently the last collection ran.
+void GCSchedulingState::updateHighFrequencyModeForReason(JS::GCReason reason) {
+  // These reasons indicate that the embedding isn't triggering GC slices often
+  // enough and allocation rate is high.
+  if (reason == JS::GCReason::ALLOC_TRIGGER ||
+      reason == JS::GCReason::TOO_MUCH_MALLOC) {
+    inHighFrequencyGCMode_ = true;
+  }
+}
+
+static constexpr size_t BytesPerMB = 1024 * 1024;
+// Smoothing factors for the EMAs below; 0.5 weights new and historical data
+// equally.
+static constexpr double CollectionRateSmoothingFactor = 0.5;
+static constexpr double AllocationRateSmoothingFactor = 0.5;
+
+// Standard exponential moving average: a higher smoothingFactor weights the
+// newest sample more heavily. smoothingFactor must lie in (0, 1].
+static double ExponentialMovingAverage(double prevAverage, double newData,
+                                       double smoothingFactor) {
+  MOZ_ASSERT(smoothingFactor > 0.0 && smoothingFactor <= 1.0);
+  return smoothingFactor * newData + (1.0 - smoothingFactor) * prevAverage;
+}
+
+void js::ZoneAllocator::updateCollectionRate(
+ mozilla::TimeDuration mainThreadGCTime, size_t initialBytesForAllZones) {
+ MOZ_ASSERT(initialBytesForAllZones != 0);
+ MOZ_ASSERT(gcHeapSize.initialBytes() <= initialBytesForAllZones);
+
+ double zoneFraction =
+ double(gcHeapSize.initialBytes()) / double(initialBytesForAllZones);
+ double zoneDuration = mainThreadGCTime.ToSeconds() * zoneFraction +
+ perZoneGCTime.ref().ToSeconds();
+ double collectionRate =
+ double(gcHeapSize.initialBytes()) / (zoneDuration * BytesPerMB);
+
+ if (!smoothedCollectionRate.ref()) {
+ smoothedCollectionRate = Some(collectionRate);
+ } else {
+ double prevRate = smoothedCollectionRate.ref().value();
+ smoothedCollectionRate = Some(ExponentialMovingAverage(
+ prevRate, collectionRate, CollectionRateSmoothingFactor));
+ }
+}
+
+void js::ZoneAllocator::updateAllocationRate(TimeDuration mutatorTime) {
+ // To get the total size allocated since the last collection we have to
+ // take account of how much memory got freed in the meantime.
+ size_t freedBytes = gcHeapSize.freedBytes();
+
+ size_t sizeIncludingFreedBytes = gcHeapSize.bytes() + freedBytes;
+
+ MOZ_ASSERT(prevGCHeapSize <= sizeIncludingFreedBytes);
+ size_t allocatedBytes = sizeIncludingFreedBytes - prevGCHeapSize;
+
+ double allocationRate =
+ double(allocatedBytes) / (mutatorTime.ToSeconds() * BytesPerMB);
+
+ if (!smoothedAllocationRate.ref()) {
+ smoothedAllocationRate = Some(allocationRate);
+ } else {
+ double prevRate = smoothedAllocationRate.ref().value();
+ smoothedAllocationRate = Some(ExponentialMovingAverage(
+ prevRate, allocationRate, AllocationRateSmoothingFactor));
+ }
+
+ gcHeapSize.clearFreedBytes();
+ prevGCHeapSize = gcHeapSize.bytes();
+}
+
+// GC thresholds may exceed the range of size_t on 32-bit platforms, so these
+// are calculated using 64-bit integers and clamped.
+static inline size_t ToClampedSize(uint64_t bytes) {
+ return std::min(bytes, uint64_t(SIZE_MAX));
+}
+
+void HeapThreshold::setIncrementalLimitFromStartBytes(
+    size_t retainedBytes, const GCSchedulingTunables& tunables) {
+  // Calculate the incremental limit for a heap based on its size and start
+  // threshold.
+  //
+  // This effectively classifies the heap size into small, medium or large, and
+  // uses the small heap incremental limit parameter, the large heap
+  // incremental limit parameter or an interpolation between them.
+  //
+  // The incremental limit is always set greater than the start threshold by at
+  // least the maximum nursery size to reduce the chance that tenuring a full
+  // nursery will send us straight into non-incremental collection.
+
+  MOZ_ASSERT(tunables.smallHeapIncrementalLimit() >=
+             tunables.largeHeapIncrementalLimit());
+
+  double factor = LinearInterpolate(
+      retainedBytes, tunables.smallHeapSizeMaxBytes(),
+      tunables.smallHeapIncrementalLimit(), tunables.largeHeapSizeMinBytes(),
+      tunables.largeHeapIncrementalLimit());
+
+  uint64_t bytes =
+      std::max(uint64_t(double(startBytes_) * factor),
+               uint64_t(startBytes_) + tunables.gcMaxNurseryBytes());
+  incrementalLimitBytes_ = ToClampedSize(bytes);
+  MOZ_ASSERT(incrementalLimitBytes_ >= startBytes_);
+
+  // Maintain the invariant that the slice threshold is always less than the
+  // incremental limit when adjusting GC parameters.
+  if (hasSliceThreshold() && sliceBytes() > incrementalLimitBytes()) {
+    sliceBytes_ = incrementalLimitBytes();
+  }
+}
+
+double HeapThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
+ double eagerTriggerFactor = highFrequencyGC
+ ? HighFrequencyEagerAllocTriggerFactor
+ : LowFrequencyEagerAllocTriggerFactor;
+ return eagerTriggerFactor * startBytes();
+}
+
+void HeapThreshold::setSliceThreshold(ZoneAllocator* zone,
+                                      const HeapSize& heapSize,
+                                      const GCSchedulingTunables& tunables,
+                                      bool waitingOnBGTask) {
+  // Set the allocation threshold at which to trigger a GC slice in an
+  // ongoing incremental collection. This is used to ensure progress in
+  // allocation heavy code that may not return to the main event loop.
+  //
+  // The threshold is based on the JSGC_ZONE_ALLOC_DELAY_KB parameter, but this
+  // is reduced to increase the slice frequency as we approach the incremental
+  // limit, in the hope that we never reach it. If the collector is waiting for
+  // a background task to complete, don't trigger any slices until we reach the
+  // urgent threshold.
+
+  size_t bytesRemaining = incrementalBytesRemaining(heapSize);
+  bool isUrgent = bytesRemaining < tunables.urgentThresholdBytes();
+
+  size_t delayBeforeNextSlice = tunables.zoneAllocDelayBytes();
+  if (isUrgent) {
+    // Scale the delay down linearly with the fraction of urgent headroom left.
+    double fractionRemaining =
+        double(bytesRemaining) / double(tunables.urgentThresholdBytes());
+    delayBeforeNextSlice =
+        size_t(double(delayBeforeNextSlice) * fractionRemaining);
+    MOZ_ASSERT(delayBeforeNextSlice <= tunables.zoneAllocDelayBytes());
+  } else if (waitingOnBGTask) {
+    // Defer the next slice until the heap reaches the urgent threshold.
+    delayBeforeNextSlice = bytesRemaining - tunables.urgentThresholdBytes();
+  }
+
+  sliceBytes_ = ToClampedSize(
+      std::min(uint64_t(heapSize.bytes()) + uint64_t(delayBeforeNextSlice),
+               uint64_t(incrementalLimitBytes_)));
+}
+
+size_t HeapThreshold::incrementalBytesRemaining(
+ const HeapSize& heapSize) const {
+ if (heapSize.bytes() >= incrementalLimitBytes_) {
+ return 0;
+ }
+
+ return incrementalLimitBytes_ - heapSize.bytes();
+}
+
+/* static */
+double HeapThreshold::computeZoneHeapGrowthFactorForHeapSize(
+ size_t lastBytes, const GCSchedulingTunables& tunables,
+ const GCSchedulingState& state) {
+ // For small zones, our collection heuristics do not matter much: favor
+ // something simple in this case.
+ if (lastBytes < 1 * 1024 * 1024) {
+ return tunables.lowFrequencyHeapGrowth();
+ }
+
+ // The heap growth factor depends on the heap size after a GC and the GC
+ // frequency. If GC's are not triggering in rapid succession, use a lower
+ // threshold so that we will collect garbage sooner.
+ if (!state.inHighFrequencyGCMode()) {
+ return tunables.lowFrequencyHeapGrowth();
+ }
+
+ // For high frequency GCs we let the heap grow depending on whether we
+ // classify the heap as small, medium or large. There are parameters for small
+ // and large heap sizes and linear interpolation is used between them for
+ // medium sized heaps.
+
+ MOZ_ASSERT(tunables.smallHeapSizeMaxBytes() <=
+ tunables.largeHeapSizeMinBytes());
+ MOZ_ASSERT(tunables.highFrequencyLargeHeapGrowth() <=
+ tunables.highFrequencySmallHeapGrowth());
+
+ return LinearInterpolate(lastBytes, tunables.smallHeapSizeMaxBytes(),
+ tunables.highFrequencySmallHeapGrowth(),
+ tunables.largeHeapSizeMinBytes(),
+ tunables.highFrequencyLargeHeapGrowth());
+}
+
+/* static */
+size_t GCHeapThreshold::computeZoneTriggerBytes(
+ double growthFactor, size_t lastBytes,
+ const GCSchedulingTunables& tunables) {
+ size_t base = std::max(lastBytes, tunables.gcZoneAllocThresholdBase());
+ double trigger = double(base) * growthFactor;
+ double triggerMax =
+ double(tunables.gcMaxBytes()) / tunables.largeHeapIncrementalLimit();
+ return ToClampedSize(std::min(triggerMax, trigger));
+}
+
+// Parameters for balanced heap limits computation.
+
+// The W0 parameter. How much memory can be traversed in the minimum collection
+// time.
+static constexpr double BalancedHeapBaseMB = 5.0;
+
+// The minimum heap limit. Do not constrain the heap to any less than this size.
+static constexpr double MinBalancedHeapLimitMB = 10.0;
+
+// The minimum amount of additional space to allow beyond the retained size.
+static constexpr double MinBalancedHeadroomMB = 3.0;
+
+// The maximum factor by which to expand the heap beyond the retained size.
+static constexpr double MaxHeapGrowth = 3.0;
+
+// The default allocation rate in MB/s allocated by the mutator to use before we
+// have an estimate. Used to set the heap limit for zones that have not yet been
+// collected.
+static constexpr double DefaultAllocationRate = 0.0;
+
+// The s0 parameter. The default collection rate in MB/s to use before we have
+// an estimate. Used to set the heap limit for zones that have not yet been
+// collected.
+static constexpr double DefaultCollectionRate = 200.0;
+
+double GCHeapThreshold::computeBalancedHeapLimit(
+ size_t lastBytes, double allocationRate, double collectionRate,
+ const GCSchedulingTunables& tunables) {
+ MOZ_ASSERT(tunables.balancedHeapLimitsEnabled());
+
+ // Optimal heap limits as described in https://arxiv.org/abs/2204.10455
+
+ double W = double(lastBytes) / BytesPerMB; // Retained size / MB.
+ double W0 = BalancedHeapBaseMB;
+ double d = tunables.heapGrowthFactor(); // Rearranged constant 'c'.
+ double g = allocationRate;
+ double s = collectionRate;
+ double f = d * sqrt((W + W0) * (g / s));
+ double M = W + std::min(f, MaxHeapGrowth * W);
+ M = std::max({MinBalancedHeapLimitMB, W + MinBalancedHeadroomMB, M});
+
+ return M * double(BytesPerMB);
+}
+
+void GCHeapThreshold::updateStartThreshold(
+ size_t lastBytes, mozilla::Maybe<double> allocationRate,
+ mozilla::Maybe<double> collectionRate, const GCSchedulingTunables& tunables,
+ const GCSchedulingState& state, bool isAtomsZone) {
+ if (!tunables.balancedHeapLimitsEnabled()) {
+ double growthFactor =
+ computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);
+
+ startBytes_ = computeZoneTriggerBytes(growthFactor, lastBytes, tunables);
+ } else {
+ double threshold = computeBalancedHeapLimit(
+ lastBytes, allocationRate.valueOr(DefaultAllocationRate),
+ collectionRate.valueOr(DefaultCollectionRate), tunables);
+
+ double triggerMax =
+ double(tunables.gcMaxBytes()) / tunables.largeHeapIncrementalLimit();
+
+ startBytes_ = ToClampedSize(uint64_t(std::min(triggerMax, threshold)));
+ }
+
+ setIncrementalLimitFromStartBytes(lastBytes, tunables);
+}
+
+/* static */
+size_t MallocHeapThreshold::computeZoneTriggerBytes(double growthFactor,
+ size_t lastBytes,
+ size_t baseBytes) {
+ return ToClampedSize(double(std::max(lastBytes, baseBytes)) * growthFactor);
+}
+
+void MallocHeapThreshold::updateStartThreshold(
+ size_t lastBytes, const GCSchedulingTunables& tunables,
+ const GCSchedulingState& state) {
+ double growthFactor =
+ computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);
+
+ startBytes_ = computeZoneTriggerBytes(growthFactor, lastBytes,
+ tunables.mallocThresholdBase());
+
+ setIncrementalLimitFromStartBytes(lastBytes, tunables);
+}
+
+#ifdef DEBUG
+
+static const char* MemoryUseName(MemoryUse use) {
+ switch (use) {
+# define DEFINE_CASE(Name) \
+ case MemoryUse::Name: \
+ return #Name;
+ JS_FOR_EACH_MEMORY_USE(DEFINE_CASE)
+# undef DEFINE_CASE
+ }
+
+ MOZ_CRASH("Unknown memory use");
+}
+
+MemoryTracker::MemoryTracker() : mutex(mutexid::MemoryTracker) {}
+
+void MemoryTracker::checkEmptyOnDestroy() {
+ bool ok = true;
+
+ if (!gcMap.empty()) {
+ ok = false;
+ fprintf(stderr, "Missing calls to JS::RemoveAssociatedMemory:\n");
+ for (auto r = gcMap.all(); !r.empty(); r.popFront()) {
+ fprintf(stderr, " %p 0x%zx %s\n", r.front().key().ptr(),
+ r.front().value(), MemoryUseName(r.front().key().use()));
+ }
+ }
+
+ if (!nonGCMap.empty()) {
+ ok = false;
+ fprintf(stderr, "Missing calls to Zone::decNonGCMemory:\n");
+ for (auto r = nonGCMap.all(); !r.empty(); r.popFront()) {
+ fprintf(stderr, " %p 0x%zx\n", r.front().key().ptr(), r.front().value());
+ }
+ }
+
+ MOZ_ASSERT(ok);
+}
+
+/* static */
+inline bool MemoryTracker::isGCMemoryUse(MemoryUse use) {
+ // Most memory uses are for memory associated with GC things but some are for
+ // memory associated with non-GC thing pointers.
+ return !isNonGCMemoryUse(use);
+}
+
+/* static */
+inline bool MemoryTracker::isNonGCMemoryUse(MemoryUse use) {
+ return use == MemoryUse::TrackedAllocPolicy;
+}
+
+/* static */
+inline bool MemoryTracker::allowMultipleAssociations(MemoryUse use) {
+ // For most uses only one association is possible for each GC thing. Allow a
+ // one-to-many relationship only where necessary.
+ return isNonGCMemoryUse(use) || use == MemoryUse::RegExpSharedBytecode ||
+ use == MemoryUse::BreakpointSite || use == MemoryUse::Breakpoint ||
+ use == MemoryUse::ForOfPICStub || use == MemoryUse::ICUObject;
+}
+
+void MemoryTracker::trackGCMemory(Cell* cell, size_t nbytes, MemoryUse use) {
+ MOZ_ASSERT(cell->isTenured());
+ MOZ_ASSERT(isGCMemoryUse(use));
+
+ LockGuard<Mutex> lock(mutex);
+
+ Key<Cell> key{cell, use};
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ auto ptr = gcMap.lookupForAdd(key);
+ if (ptr) {
+ if (!allowMultipleAssociations(use)) {
+ MOZ_CRASH_UNSAFE_PRINTF("Association already present: %p 0x%zx %s", cell,
+ nbytes, MemoryUseName(use));
+ }
+ ptr->value() += nbytes;
+ return;
+ }
+
+ if (!gcMap.add(ptr, key, nbytes)) {
+ oomUnsafe.crash("MemoryTracker::trackGCMemory");
+ }
+}
+
+void MemoryTracker::untrackGCMemory(Cell* cell, size_t nbytes, MemoryUse use) {
+ MOZ_ASSERT(cell->isTenured());
+
+ LockGuard<Mutex> lock(mutex);
+
+ Key<Cell> key{cell, use};
+ auto ptr = gcMap.lookup(key);
+ if (!ptr) {
+ MOZ_CRASH_UNSAFE_PRINTF("Association not found: %p 0x%zx %s", cell, nbytes,
+ MemoryUseName(use));
+ }
+
+ if (!allowMultipleAssociations(use) && ptr->value() != nbytes) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "Association for %p %s has different size: "
+ "expected 0x%zx but got 0x%zx",
+ cell, MemoryUseName(use), ptr->value(), nbytes);
+ }
+
+ if (nbytes > ptr->value()) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "Association for %p %s size is too large: "
+ "expected at most 0x%zx but got 0x%zx",
+ cell, MemoryUseName(use), ptr->value(), nbytes);
+ }
+
+ ptr->value() -= nbytes;
+
+ if (ptr->value() == 0) {
+ gcMap.remove(ptr);
+ }
+}
+
+void MemoryTracker::swapGCMemory(Cell* a, Cell* b, MemoryUse use) {
+ Key<Cell> ka{a, use};
+ Key<Cell> kb{b, use};
+
+ LockGuard<Mutex> lock(mutex);
+
+ size_t sa = getAndRemoveEntry(ka, lock);
+ size_t sb = getAndRemoveEntry(kb, lock);
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ if ((sa && b->isTenured() && !gcMap.put(kb, sa)) ||
+ (sb && a->isTenured() && !gcMap.put(ka, sb))) {
+ oomUnsafe.crash("MemoryTracker::swapGCMemory");
+ }
+}
+
+size_t MemoryTracker::getAndRemoveEntry(const Key<Cell>& key,
+ LockGuard<Mutex>& lock) {
+ auto ptr = gcMap.lookup(key);
+ if (!ptr) {
+ return 0;
+ }
+
+ size_t size = ptr->value();
+ gcMap.remove(ptr);
+ return size;
+}
+
+// Register a non-GC-thing pointer with the tracker, starting with zero bytes
+// associated. Crashes (debug tracker) if |mem| is already registered for
+// |use|, or on OOM while growing the map.
+void MemoryTracker::registerNonGCMemory(void* mem, MemoryUse use) {
+  LockGuard<Mutex> lock(mutex);
+
+  Key<void> key{mem, use};
+  auto ptr = nonGCMap.lookupForAdd(key);
+  if (ptr) {
+    // Message spelling fixed ("assocaition") to match the sibling
+    // "%s association %p not found" diagnostics in this file.
+    MOZ_CRASH_UNSAFE_PRINTF("%s association %p already registered",
+                            MemoryUseName(use), mem);
+  }
+
+  AutoEnterOOMUnsafeRegion oomUnsafe;
+  if (!nonGCMap.add(ptr, key, 0)) {
+    oomUnsafe.crash("MemoryTracker::registerNonGCMemory");
+  }
+}
+
+void MemoryTracker::unregisterNonGCMemory(void* mem, MemoryUse use) {
+ LockGuard<Mutex> lock(mutex);
+
+ Key<void> key{mem, use};
+ auto ptr = nonGCMap.lookup(key);
+ if (!ptr) {
+ MOZ_CRASH_UNSAFE_PRINTF("%s association %p not found", MemoryUseName(use),
+ mem);
+ }
+
+ if (ptr->value() != 0) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "%s association %p still has 0x%zx bytes associated",
+ MemoryUseName(use), mem, ptr->value());
+ }
+
+ nonGCMap.remove(ptr);
+}
+
+void MemoryTracker::moveNonGCMemory(void* dst, void* src, MemoryUse use) {
+ LockGuard<Mutex> lock(mutex);
+
+ Key<void> srcKey{src, use};
+ auto srcPtr = nonGCMap.lookup(srcKey);
+ if (!srcPtr) {
+ MOZ_CRASH_UNSAFE_PRINTF("%s association %p not found", MemoryUseName(use),
+ src);
+ }
+
+ size_t nbytes = srcPtr->value();
+ nonGCMap.remove(srcPtr);
+
+ Key<void> dstKey{dst, use};
+ auto dstPtr = nonGCMap.lookupForAdd(dstKey);
+ if (dstPtr) {
+ MOZ_CRASH_UNSAFE_PRINTF("%s %p already registered", MemoryUseName(use),
+ dst);
+ }
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!nonGCMap.add(dstPtr, dstKey, nbytes)) {
+ oomUnsafe.crash("MemoryTracker::moveNonGCMemory");
+ }
+}
+
+void MemoryTracker::incNonGCMemory(void* mem, size_t nbytes, MemoryUse use) {
+ MOZ_ASSERT(isNonGCMemoryUse(use));
+
+ LockGuard<Mutex> lock(mutex);
+
+ Key<void> key{mem, use};
+ auto ptr = nonGCMap.lookup(key);
+ if (!ptr) {
+ MOZ_CRASH_UNSAFE_PRINTF("%s allocation %p not found", MemoryUseName(use),
+ mem);
+ }
+
+ ptr->value() += nbytes;
+}
+
+void MemoryTracker::decNonGCMemory(void* mem, size_t nbytes, MemoryUse use) {
+ MOZ_ASSERT(isNonGCMemoryUse(use));
+
+ LockGuard<Mutex> lock(mutex);
+
+ Key<void> key{mem, use};
+ auto ptr = nonGCMap.lookup(key);
+ if (!ptr) {
+ MOZ_CRASH_UNSAFE_PRINTF("%s allocation %p not found", MemoryUseName(use),
+ mem);
+ }
+
+ size_t& value = ptr->value();
+ if (nbytes > value) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "%s allocation %p is too large: "
+ "expected at most 0x%zx but got 0x%zx bytes",
+ MemoryUseName(use), mem, value, nbytes);
+ }
+
+ value -= nbytes;
+}
+
+void MemoryTracker::fixupAfterMovingGC() {
+ // Update the table after we move GC things. We don't use StableCellHasher
+ // because that would create a difference between debug and release builds.
+ for (GCMap::Enum e(gcMap); !e.empty(); e.popFront()) {
+ const auto& key = e.front().key();
+ Cell* cell = key.ptr();
+ if (cell->isForwarded()) {
+ cell = gc::RelocationOverlay::fromCell(cell)->forwardingAddress();
+ e.rekeyFront(Key<Cell>{cell, key.use()});
+ }
+ }
+}
+
+template <typename Ptr>
+inline MemoryTracker::Key<Ptr>::Key(Ptr* ptr, MemoryUse use)
+ : ptr_(uint64_t(ptr)), use_(uint64_t(use)) {
+# ifdef JS_64BIT
+ static_assert(sizeof(Key) == 8,
+ "MemoryTracker::Key should be packed into 8 bytes");
+# endif
+ MOZ_ASSERT(this->ptr() == ptr);
+ MOZ_ASSERT(this->use() == use);
+}
+
+template <typename Ptr>
+inline Ptr* MemoryTracker::Key<Ptr>::ptr() const {
+ return reinterpret_cast<Ptr*>(ptr_);
+}
+template <typename Ptr>
+inline MemoryUse MemoryTracker::Key<Ptr>::use() const {
+ return static_cast<MemoryUse>(use_);
+}
+
+template <typename Ptr>
+inline HashNumber MemoryTracker::Hasher<Ptr>::hash(const Lookup& l) {
+ return mozilla::HashGeneric(DefaultHasher<Ptr*>::hash(l.ptr()),
+ DefaultHasher<unsigned>::hash(unsigned(l.use())));
+}
+
+template <typename Ptr>
+inline bool MemoryTracker::Hasher<Ptr>::match(const KeyT& k, const Lookup& l) {
+ return k.ptr() == l.ptr() && k.use() == l.use();
+}
+
+template <typename Ptr>
+inline void MemoryTracker::Hasher<Ptr>::rekey(KeyT& k, const KeyT& newKey) {
+ k = newKey;
+}
+
+#endif // DEBUG
diff --git a/js/src/gc/Scheduling.h b/js/src/gc/Scheduling.h
new file mode 100644
index 0000000000..94b06bc786
--- /dev/null
+++ b/js/src/gc/Scheduling.h
@@ -0,0 +1,917 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * [SMDOC] GC Scheduling
+ *
+ * GC Scheduling Overview
+ * ======================
+ *
+ * See also GC scheduling from Firefox's perspective here:
+ * https://searchfox.org/mozilla-central/source/dom/base/CCGCScheduler.cpp
+ *
+ * Scheduling GC's in SpiderMonkey/Firefox is tremendously complicated because
+ * of the large number of subtle, cross-cutting, and widely dispersed factors
+ * that must be taken into account. A summary of some of the more important
+ * factors follows.
+ *
+ * Cost factors:
+ *
+ * * GC too soon and we'll revisit an object graph almost identical to the
+ * one we just visited; since we are unlikely to find new garbage, the
+ * traversal will be largely overhead. We rely heavily on external factors
+ * to signal us that we are likely to find lots of garbage: e.g. "a tab
+ * just got closed".
+ *
+ * * GC too late and we'll run out of memory to allocate (e.g. Out-Of-Memory,
+ * hereafter simply abbreviated to OOM). If this happens inside
+ * SpiderMonkey we may be able to recover, but most embedder allocations
+ * will simply crash on OOM, even if the GC has plenty of free memory it
+ * could surrender.
+ *
+ * * Memory fragmentation: if we fill the process with GC allocations, a
+ * request for a large block of contiguous memory may fail because no
+ * contiguous block is free, despite having enough memory available to
+ * service the request.
+ *
+ * * Management overhead: if our GC heap becomes large, we create extra
+ * overhead when managing the GC's structures, even if the allocations are
+ * mostly unused.
+ *
+ * Heap Management Factors:
+ *
+ * * GC memory: The GC has its own allocator that it uses to make fixed size
+ * allocations for GC managed things. In cases where the GC thing requires
+ * larger or variable sized memory to implement itself, it is responsible
+ * for using the system heap.
+ *
+ * * C Heap Memory: Rather than allowing for large or variable allocations,
+ * the SpiderMonkey GC allows GC things to hold pointers to C heap memory.
+ * It is the responsibility of the thing to free this memory with a custom
+ * finalizer (with the sole exception of NativeObject, which knows about
+ * slots and elements for performance reasons). C heap memory has different
+ * performance and overhead tradeoffs than GC internal memory, which need
+ * to be considered with scheduling a GC.
+ *
+ * Application Factors:
+ *
+ * * Most applications allocate heavily at startup, then enter a processing
+ * stage where memory utilization remains roughly fixed with a slower
+ * allocation rate. This is not always the case, however, so while we may
+ * optimize for this pattern, we must be able to handle arbitrary
+ * allocation patterns.
+ *
+ * Other factors:
+ *
+ * * Other memory: This is memory allocated outside the purview of the GC.
+ * Data mapped by the system for code libraries, data allocated by those
+ * libraries, data in the JSRuntime that is used to manage the engine,
+ * memory used by the embedding that is not attached to a GC thing, memory
+ * used by unrelated processes running on the hardware that use space we
+ * could otherwise use for allocation, etc. While we don't have to manage
+ * it, we do have to take it into account when scheduling since it affects
+ * when we will OOM.
+ *
+ * * Physical Reality: All real machines have limits on the number of bits
+ * that they are physically able to store. While modern operating systems
+ * can generally make additional space available with swapping, at some
+ * point there are simply no more bits to allocate. There is also the
+ * factor of address space limitations, particularly on 32bit machines.
+ *
+ * * Platform Factors: Each OS makes use of wildly different memory
+ * management techniques. These differences result in different performance
+ * tradeoffs, different fragmentation patterns, and different hard limits
+ * on the amount of physical and/or virtual memory that we can use before
+ * OOMing.
+ *
+ *
+ * Reasons for scheduling GC
+ * -------------------------
+ *
+ * While code generally takes the above factors into account in only an ad-hoc
+ * fashion, the API forces the user to pick a "reason" for the GC. We have a
+ * bunch of JS::GCReason reasons in GCAPI.h. These fall into a few categories
+ * that generally coincide with one or more of the above factors.
+ *
+ * Embedding reasons:
+ *
+ * 1) Do a GC now because the embedding knows something useful about the
+ * zone's memory retention state. These are GCReasons like LOAD_END,
+ * PAGE_HIDE, SET_NEW_DOCUMENT, DOM_UTILS. Mostly, Gecko uses these to
+ * indicate that a significant fraction of the scheduled zone's memory is
+ * probably reclaimable.
+ *
+ * 2) Do some known amount of GC work now because the embedding knows now is
+ * a good time to do a long, unblockable operation of a known duration.
+ * These are INTER_SLICE_GC and REFRESH_FRAME.
+ *
+ * Correctness reasons:
+ *
+ * 3) Do a GC now because correctness depends on some GC property. For
+ * example, CC_FORCED is where the embedding requires the mark bits to be
+ * set correctly. Also, EVICT_NURSERY where we need to work on the tenured
+ * heap.
+ *
+ * 4) Do a GC because we are shutting down: e.g. SHUTDOWN_CC or DESTROY_*.
+ *
+ * 5) Do a GC because a compartment was accessed between GC slices when we
+ * would have otherwise discarded it. We have to do a second GC to clean
+ * it up: e.g. COMPARTMENT_REVIVED.
+ *
+ * Emergency Reasons:
+ *
+ * 6) Do an all-zones, non-incremental GC now because the embedding knows it
+ * cannot wait: e.g. MEM_PRESSURE.
+ *
+ * 7) OOM when fetching a new Chunk results in a LAST_DITCH GC.
+ *
+ * Heap Size Limitation Reasons:
+ *
+ * 8) Do an incremental, zonal GC with reason MAYBEGC when we discover that
+ * the gc's allocated size is approaching the current trigger. This is
+ * called MAYBEGC because we make this check in the MaybeGC function.
+ * MaybeGC gets called at the top of the main event loop. Normally, it is
+ * expected that this callback will keep the heap size limited. It is
+ * relatively inexpensive, because it is invoked with no JS running and
+ * thus few stack roots to scan. For this reason, the GC's "trigger" bytes
+ * is less than the GC's "max" bytes as used by the trigger below.
+ *
+ * 9) Do an incremental, zonal GC with reason MAYBEGC when we go to allocate
+ * a new GC thing and find that the GC heap size has grown beyond the
+ * configured maximum (JSGC_MAX_BYTES). We trigger this GC by returning
+ * nullptr and then calling maybeGC at the top level of the allocator.
+ * This is then guaranteed to fail the "size greater than trigger" check
+ * above, since trigger is always less than max. After performing the GC,
+ * the allocator unconditionally returns nullptr to force an OOM exception
+ * to be raised by the script.
+ *
+ * Note that this differs from a LAST_DITCH GC where we actually run out
+ * of memory (i.e., a call to a system allocator fails) when trying to
+ * allocate. Unlike above, LAST_DITCH GC only happens when we are really
+ * out of memory, not just when we cross an arbitrary trigger; despite
+ * this, it may still return an allocation at the end and allow the script
+ * to continue, if the LAST_DITCH GC was able to free up enough memory.
+ *
+ * 10) Do a GC under reason ALLOC_TRIGGER when we are over the GC heap trigger
+ * limit, but in the allocator rather than in a random call to maybeGC.
+ * This occurs if we allocate too much before returning to the event loop
+ * and calling maybeGC; this is extremely common in benchmarks and
+ * long-running Worker computations. Note that this uses a wildly
+ * different mechanism from the above in that it sets the interrupt flag
+ * and does the GC at the next loop head, before the next alloc, or
+ * maybeGC. The reason for this is that this check is made after the
+ * allocation and we cannot GC with an uninitialized thing in the heap.
+ *
+ * 11) Do an incremental, zonal GC with reason TOO_MUCH_MALLOC when the total
+ * amount of malloced memory is greater than the malloc trigger limit for the
+ * zone.
+ *
+ *
+ * Size Limitation Triggers Explanation
+ * ------------------------------------
+ *
+ * The GC internally is entirely unaware of the context of the execution of
+ * the mutator. It sees only:
+ *
+ * A) Allocated size: this is the amount of memory currently requested by the
+ * mutator. This quantity is monotonically increasing: i.e. the allocation
+ * rate is always >= 0. It is also easy for the system to track.
+ *
+ * B) Retained size: this is the amount of memory that the mutator can
+ * currently reach. Said another way, it is the size of the heap
+ * immediately after a GC (modulo background sweeping). This size is very
+ * costly to know exactly and also extremely hard to estimate with any
+ * fidelity.
+ *
+ * For reference, a common allocated vs. retained graph might look like:
+ *
+ * | ** **
+ * | ** ** * **
+ * | ** * ** * **
+ * | * ** * ** * **
+ * | ** ** * ** * **
+ * s| * * ** ** + + **
+ * i| * * * + + + + +
+ * z| * * * + + + + +
+ * e| * **+
+ * | * +
+ * | * +
+ * | * +
+ * | * +
+ * | * +
+ * |*+
+ * +--------------------------------------------------
+ * time
+ * *** = allocated
+ * +++ = retained
+ *
+ * Note that this is a bit of a simplification
+ * because in reality we track malloc and GC heap
+ * sizes separately and have a different level of
+ * granularity and accuracy on each heap.
+ *
+ * This presents some obvious implications for Mark-and-Sweep collectors.
+ * Namely:
+ * -> t[marking] ~= size[retained]
+ * -> t[sweeping] ~= size[allocated] - size[retained]
+ *
+ * In a non-incremental collector, maintaining low latency and high
+ * responsiveness requires that total GC times be as low as possible. Thus,
+ * in order to stay responsive when we did not have a fully incremental
+ * collector, our GC triggers were focused on minimizing collection time.
+ * Furthermore, since size[retained] is not under control of the GC, all the
+ * GC could do to control collection times was reduce sweep times by
+ * minimizing size[allocated], per the equation above.
+ *
+ * The result of the above is GC triggers that focus on size[allocated] to
+ * the exclusion of other important factors and default heuristics that are
+ * not optimal for a fully incremental collector. On the other hand, this is
+ * not all bad: minimizing size[allocated] also minimizes the chance of OOM
+ * and sweeping remains one of the hardest areas to further incrementalize.
+ *
+ * EAGER_ALLOC_TRIGGER
+ * -------------------
+ * Occurs when we return to the event loop and find our heap is getting
+ * largish, but before t[marking] OR t[sweeping] is too large for a
+ * responsive non-incremental GC. This is intended to be the common case
+ * in normal web applications: e.g. we just finished an event handler and
+ * the few objects we allocated when computing the new whatzitz have
+ * pushed us slightly over the limit. After this GC we rescale the new
+ * EAGER_ALLOC_TRIGGER trigger to 150% of size[retained] so that our
+ * non-incremental GC times will always be proportional to this size
+ * rather than being dominated by sweeping.
+ *
+ * As a concession to mutators that allocate heavily during their startup
+ * phase, we have a highFrequencyGCMode that ups the growth rate to 300%
+ * of the current size[retained] so that we'll do fewer longer GCs at the
+ * end of the mutator startup rather than more, smaller GCs.
+ *
+ * Assumptions:
+ * -> Responsiveness is proportional to t[marking] + t[sweeping].
+ * -> size[retained] is proportional only to GC allocations.
+ *
+ * ALLOC_TRIGGER (non-incremental)
+ * -------------------------------
+ * If we do not return to the event loop before getting all the way to our
+ * gc trigger bytes then MAYBEGC will never fire. To avoid OOMing, we
+ * succeed the current allocation and set the script interrupt so that we
+ * will (hopefully) do a GC before we overflow our max and have to raise
+ * an OOM exception for the script.
+ *
+ * Assumptions:
+ * -> Common web scripts will return to the event loop before using
+ * 10% of the current triggerBytes worth of GC memory.
+ *
+ * ALLOC_TRIGGER (incremental)
+ * ---------------------------
+ * In practice the above trigger is rough: if a website is just on the
+ * cusp, sometimes it will trigger a non-incremental GC moments before
+ * returning to the event loop, where it could have done an incremental
+ * GC. Thus, we recently added an incremental version of the above with a
+ * substantially lower threshold, so that we have a soft limit here. If
+ * IGC can collect faster than the allocator generates garbage, even if
+ * the allocator does not return to the event loop frequently, we should
+ * not have to fall back to a non-incremental GC.
+ *
+ * INCREMENTAL_TOO_SLOW
+ * --------------------
+ * Do a full, non-incremental GC if we overflow ALLOC_TRIGGER during an
+ * incremental GC. When in the middle of an incremental GC, we suppress
+ * our other triggers, so we need a way to backstop the IGC if the
+ * mutator allocates faster than the IGC can clean things up.
+ *
+ * TOO_MUCH_MALLOC
+ * ---------------
+ * Performs a GC before size[allocated] - size[retained] gets too large
+ * for non-incremental sweeping to be fast in the case that we have
+ * significantly more malloc allocation than GC allocation. This is meant
+ * to complement MAYBEGC triggers. We track this by counting malloced
+ * bytes; the counter gets reset at every GC since we do not always have a
+ * size at the time we call free. Because of this, the malloc heuristic
+ * is, unfortunately, not usefully able to augment our other GC heap
+ * triggers and is limited to this singular heuristic.
+ *
+ * Assumptions:
+ * -> EITHER size[allocated_by_malloc] ~= size[allocated_by_GC]
+ * OR time[sweeping] ~= size[allocated_by_malloc]
+ * -> size[retained] @ t0 ~= size[retained] @ t1
+ * i.e. That the mutator is in steady-state operation.
+ *
+ * LAST_DITCH_GC
+ * -------------
+ * Does a GC because we are out of memory.
+ *
+ * Assumptions:
+ * -> size[retained] < size[available_memory]
+ */
+
+#ifndef gc_Scheduling_h
+#define gc_Scheduling_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/TimeStamp.h"
+
+#include "gc/GCEnum.h"
+#include "js/AllocPolicy.h"
+#include "js/GCAPI.h"
+#include "js/HashTable.h"
+#include "js/HeapAPI.h"
+#include "threading/ProtectedData.h"
+
+// Macro to define scheduling tunables for GC parameters. Expands its argument
+// repeatedly with the following arguments:
+// - key: the JSGCParamKey value for this parameter
+// - type: the storage type
+// - name: the name of GCSchedulingTunables getter method
+// - convert: a helper class defined in Scheduling.cpp that provides
+// conversion methods
+// - check: a helper function defined in Scheduling.cpp to check the value is
+// valid
+// - default: the initial value and that assigned by resetParameter
+#define FOR_EACH_GC_TUNABLE(_) \
+ /* \
+ * JSGC_MAX_BYTES \
+ * \
+ * Maximum nominal heap before last ditch GC. \
+ */ \
+ _(JSGC_MAX_BYTES, size_t, gcMaxBytes, ConvertSize, NoCheck, 0xffffffff) \
+ \
+ /* \
+ * JSGC_MIN_NURSERY_BYTES \
+ * JSGC_MAX_NURSERY_BYTES \
+ * \
+ * Minimum and maximum nursery size for each runtime. \
+ */ \
+ _(JSGC_MIN_NURSERY_BYTES, size_t, gcMinNurseryBytes, ConvertNurseryBytes, \
+ CheckNurserySize, 256 * 1024) \
+ _(JSGC_MAX_NURSERY_BYTES, size_t, gcMaxNurseryBytes, ConvertNurseryBytes, \
+ CheckNurserySize, JS::DefaultNurseryMaxBytes) \
+ \
+ /* \
+ * JSGC_ALLOCATION_THRESHOLD \
+ * \
+ * \
+ * The base value used to compute zone->threshold.bytes(). When \
+ * gcHeapSize.bytes() exceeds threshold.bytes() for a zone, the zone may be \
+ * scheduled for a GC, depending on the exact circumstances. \
+ */ \
+ _(JSGC_ALLOCATION_THRESHOLD, size_t, gcZoneAllocThresholdBase, ConvertMB, \
+ NoCheck, 27 * 1024 * 1024) \
+ \
+ /* \
+ * JSGC_SMALL_HEAP_SIZE_MAX \
+ * JSGC_LARGE_HEAP_SIZE_MIN \
+ * \
+ * Used to classify heap sizes into one of small, medium or large. This \
+ * affects the calculation of the incremental GC trigger and the heap growth \
+ * factor in high frequency GC mode. \
+ */ \
+ _(JSGC_SMALL_HEAP_SIZE_MAX, size_t, smallHeapSizeMaxBytes, ConvertMB, \
+ NoCheck, 100 * 1024 * 1024) \
+ _(JSGC_LARGE_HEAP_SIZE_MIN, size_t, largeHeapSizeMinBytes, ConvertMB, \
+ CheckNonZero, 500 * 1024 * 1024) \
+ \
+ /* \
+ * JSGC_SMALL_HEAP_INCREMENTAL_LIMIT \
+ * JSGC_LARGE_HEAP_INCREMENTAL_LIMIT \
+ * \
+ * Multiple of threshold.bytes() which triggers a non-incremental GC. \
+ * \
+ * The small heap limit must be greater than 1.3 to maintain performance on \
+ * splay-latency. \
+ */ \
+ _(JSGC_SMALL_HEAP_INCREMENTAL_LIMIT, double, smallHeapIncrementalLimit, \
+ ConvertTimes100, CheckIncrementalLimit, 1.50) \
+ _(JSGC_LARGE_HEAP_INCREMENTAL_LIMIT, double, largeHeapIncrementalLimit, \
+ ConvertTimes100, CheckIncrementalLimit, 1.10) \
+ \
+ /* \
+ * JSGC_HIGH_FREQUENCY_TIME_LIMIT \
+ * \
+ * We enter high-frequency mode if we GC twice within this many \
+ * milliseconds. \
+ */ \
+ _(JSGC_HIGH_FREQUENCY_TIME_LIMIT, mozilla::TimeDuration, \
+ highFrequencyThreshold, ConvertMillis, NoCheck, \
+ mozilla::TimeDuration::FromSeconds(1)) \
+ \
+ /* \
+ * JSGC_LOW_FREQUENCY_HEAP_GROWTH \
+ * \
+ * When not in |highFrequencyGC| mode, this is the global (stored per-zone) \
+ * "HeapGrowthFactor". \
+ */ \
+ _(JSGC_LOW_FREQUENCY_HEAP_GROWTH, double, lowFrequencyHeapGrowth, \
+ ConvertTimes100, CheckHeapGrowth, 1.5) \
+ \
+ /* \
+ * JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH \
+ * JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH \
+ * \
+ * When in the |highFrequencyGC| mode, these parameterize the per-zone \
+ * "HeapGrowthFactor" computation. \
+ */ \
+ _(JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH, double, \
+ highFrequencySmallHeapGrowth, ConvertTimes100, CheckHeapGrowth, 3.0) \
+ _(JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH, double, \
+ highFrequencyLargeHeapGrowth, ConvertTimes100, CheckHeapGrowth, 1.5) \
+ \
+ /* \
+ * JSGC_MALLOC_THRESHOLD_BASE \
+ * \
+ * The base value used to compute the GC trigger for malloc allocated \
+ * memory. \
+ */ \
+ _(JSGC_MALLOC_THRESHOLD_BASE, size_t, mallocThresholdBase, ConvertMB, \
+ NoCheck, 38 * 1024 * 1024) \
+ \
+ /* \
+ * Number of bytes to allocate between incremental slices in GCs triggered \
+ * by the zone allocation threshold. \
+ */ \
+ _(JSGC_ZONE_ALLOC_DELAY_KB, size_t, zoneAllocDelayBytes, ConvertKB, \
+ CheckNonZero, 1024 * 1024) \
+ \
+ /* \
+ * JSGC_URGENT_THRESHOLD_MB \
+ * \
+ * The point before reaching the non-incremental limit at which to start \
+ * increasing the slice budget and frequency of allocation triggered slices. \
+ */ \
+ _(JSGC_URGENT_THRESHOLD_MB, size_t, urgentThresholdBytes, ConvertMB, \
+ NoCheck, 16 * 1024 * 1024) \
+ \
+ /* \
+ * JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION \
+ * JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_FRACTION \
+ * JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS \
+ * \
+ * Attempt to run a minor GC in the idle time if the free space falls below \
+ * this threshold or if it hasn't been collected for too long. The absolute \
+ * threshold is used when the nursery is large and the percentage when it is \
+ * small. See Nursery::shouldCollect(). \
+ */ \
+ _(JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION, size_t, \
+ nurseryFreeThresholdForIdleCollection, ConvertSize, NoCheck, \
+ ChunkSize / 4) \
+ _(JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT, double, \
+ nurseryFreeThresholdForIdleCollectionFraction, ConvertTimes100, \
+ CheckNonZeroUnitRange, 0.25) \
+ _(JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS, mozilla::TimeDuration, \
+ nurseryTimeoutForIdleCollection, ConvertMillis, NoCheck, \
+ mozilla::TimeDuration::FromSeconds(5)) \
+ \
+ /* \
+ * JSGC_BALANCED_HEAP_LIMITS_ENABLED \
+ * JSGC_HEAP_GROWTH_FACTOR \
+ */ \
+ _(JSGC_BALANCED_HEAP_LIMITS_ENABLED, bool, balancedHeapLimitsEnabled, \
+ ConvertBool, NoCheck, false) \
+ _(JSGC_HEAP_GROWTH_FACTOR, double, heapGrowthFactor, ConvertDouble, NoCheck, \
+ 50.0) \
+ \
+ /* \
+ * JSGC_PRETENURE_THRESHOLD \
+ * \
+ * Fraction of objects tenured to trigger pretenuring (between 0 and 1). If \
+ * this fraction is met, the GC proceeds to calculate which objects will be \
+ * tenured. If this is 1.0f (actually if it is not < 1.0f) then pretenuring \
+ * is disabled. \
+ */ \
+ _(JSGC_PRETENURE_THRESHOLD, double, pretenureThreshold, ConvertTimes100, \
+ CheckNonZeroUnitRange, 0.6) \
+ \
+ /* \
+ * JSGC_PRETENURE_STRING_THRESHOLD \
+ * \
+ * If the percentage of the tenured strings exceeds this threshold, string \
+ * will be allocated in tenured heap instead. (Default is allocated in \
+ * nursery.) \
+ */ \
+ _(JSGC_PRETENURE_STRING_THRESHOLD, double, pretenureStringThreshold, \
+ ConvertTimes100, CheckNonZeroUnitRange, 0.55) \
+ \
+ /* \
+ * JSGC_STOP_PRETENURE_STRING_THRESHOLD \
+ * \
+ * If the finalization rate of the tenured strings exceeds this threshold, \
+ * string will be allocated in nursery. \
+ */ \
+ _(JSGC_STOP_PRETENURE_STRING_THRESHOLD, double, \
+ stopPretenureStringThreshold, ConvertTimes100, CheckNonZeroUnitRange, 0.9) \
+ \
+ /* \
+ * JSGC_MIN_LAST_DITCH_GC_PERIOD \
+ * \
+ * Last ditch GC is skipped if allocation failure occurs less than this many \
+ * seconds from the previous one. \
+ */ \
+ _(JSGC_MIN_LAST_DITCH_GC_PERIOD, mozilla::TimeDuration, \
+ minLastDitchGCPeriod, ConvertSeconds, NoCheck, \
+ TimeDuration::FromSeconds(60)) \
+ \
+ /* \
+ * JSGC_PARALLEL_MARKING_THRESHOLD_KB \
+ */ \
+ _(JSGC_PARALLEL_MARKING_THRESHOLD_KB, size_t, parallelMarkingThresholdBytes, \
+ ConvertKB, NoCheck, 10 * 1024 * 1024)
+
+namespace js {
+
+class ZoneAllocator;
+
+namespace gc {
+
+struct Cell;
+
+/*
+ * Default settings for tuning the GC. Some of these can be set at runtime.
+ * This list is not complete; some tuning parameters are not listed here.
+ *
+ * If you change the values here, please also consider changing them in
+ * modules/libpref/init/all.js where they are duplicated for the Firefox
+ * preferences.
+ */
+namespace TuningDefaults {
+
+/* JSGC_MIN_EMPTY_CHUNK_COUNT */
+static const uint32_t MinEmptyChunkCount = 1;
+
+/* JSGC_MAX_EMPTY_CHUNK_COUNT */
+static const uint32_t MaxEmptyChunkCount = 30;
+
+/* JSGC_SLICE_TIME_BUDGET_MS */
+static const int64_t DefaultTimeBudgetMS = 0; // Unlimited by default.
+
+/* JSGC_INCREMENTAL_GC_ENABLED */
+static const bool IncrementalGCEnabled = false;
+
+/* JSGC_PER_ZONE_GC_ENABLED */
+static const bool PerZoneGCEnabled = false;
+
+/* JSGC_COMPACTING_ENABLED */
+static const bool CompactingEnabled = true;
+
+/* JSGC_PARALLEL_MARKING_ENABLED */
+static const bool ParallelMarkingEnabled = false;
+
+/* JSGC_INCREMENTAL_WEAKMAP_ENABLED */
+static const bool IncrementalWeakMapMarkingEnabled = true;
+
+/* JSGC_HELPER_THREAD_RATIO */
+static const double HelperThreadRatio = 0.5;
+
+/* JSGC_MAX_HELPER_THREADS */
+static const size_t MaxHelperThreads = 8;
+
+} // namespace TuningDefaults
+
+/*
+ * Encapsulates all of the GC tunables. These are effectively constant and
+ * should only be modified by setParameter.
+ */
+class GCSchedulingTunables {
+#define DEFINE_TUNABLE_FIELD(key, type, name, convert, check, default) \
+ MainThreadOrGCTaskData<type> name##_;
+ FOR_EACH_GC_TUNABLE(DEFINE_TUNABLE_FIELD)
+#undef DEFINE_TUNABLE_FIELD
+
+ public:
+ GCSchedulingTunables();
+
+#define DEFINE_TUNABLE_ACCESSOR(key, type, name, convert, check, default) \
+ type name() const { return name##_; }
+ FOR_EACH_GC_TUNABLE(DEFINE_TUNABLE_ACCESSOR)
+#undef DEFINE_TUNABLE_ACCESSOR
+
+ bool attemptPretenuring() const { return pretenureThreshold_ < 1.0; }
+
+ uint32_t getParameter(JSGCParamKey key);
+ [[nodiscard]] bool setParameter(JSGCParamKey key, uint32_t value);
+ void resetParameter(JSGCParamKey key);
+
+ private:
+ void maintainInvariantsAfterUpdate(JSGCParamKey updated);
+ void checkInvariants();
+};
+
+class GCSchedulingState {
+ /*
+ * Influences how we schedule and run GC's in several subtle ways. The most
+ * important factor is in how it controls the "HeapGrowthFactor". The
+ * growth factor is a measure of how large (as a percentage of the last GC)
+ * the heap is allowed to grow before we try to schedule another GC.
+ */
+ mozilla::Atomic<bool, mozilla::ReleaseAcquire> inHighFrequencyGCMode_;
+
+ public:
+ GCSchedulingState() : inHighFrequencyGCMode_(false) {}
+
+ bool inHighFrequencyGCMode() const { return inHighFrequencyGCMode_; }
+
+ void updateHighFrequencyMode(const mozilla::TimeStamp& lastGCTime,
+ const mozilla::TimeStamp& currentTime,
+ const GCSchedulingTunables& tunables);
+ void updateHighFrequencyModeForReason(JS::GCReason reason);
+};
+
+struct TriggerResult {
+ bool shouldTrigger;
+ size_t usedBytes;
+ size_t thresholdBytes;
+};
+
+using AtomicByteCount = mozilla::Atomic<size_t, mozilla::ReleaseAcquire>;
+
+/*
+ * Tracks the size of allocated data. This is used for both GC and malloc data.
+ * It automatically maintains the memory usage relationship between parent and
+ * child instances, i.e. between those in a GCRuntime and its Zones.
+ */
+class HeapSize {
+ /*
+ * The number of bytes in use. For GC heaps this is approximate to the nearest
+ * ArenaSize. It is atomic because it is updated by both the active and GC
+ * helper threads.
+ */
+ AtomicByteCount bytes_;
+
+ /*
+ * The number of bytes in use at the start of the last collection.
+ */
+ MainThreadData<size_t> initialBytes_;
+
+ /*
+ * The number of bytes retained after the last collection. This is updated
+ * dynamically during incremental GC. It does not include allocations that
+ * happen during a GC.
+ */
+ AtomicByteCount retainedBytes_;
+
+ public:
+ explicit HeapSize() {
+ MOZ_ASSERT(bytes_ == 0);
+ MOZ_ASSERT(retainedBytes_ == 0);
+ }
+
+ size_t bytes() const { return bytes_; }
+ size_t initialBytes() const { return initialBytes_; }
+ size_t retainedBytes() const { return retainedBytes_; }
+
+ void updateOnGCStart() { retainedBytes_ = initialBytes_ = bytes(); }
+
+ void addGCArena() { addBytes(ArenaSize); }
+ void removeGCArena() {
+ MOZ_ASSERT(retainedBytes_ >= ArenaSize);
+ removeBytes(ArenaSize, true /* only sweeping removes arenas */);
+ MOZ_ASSERT(retainedBytes_ <= bytes_);
+ }
+
+ void addBytes(size_t nbytes) {
+ mozilla::DebugOnly<size_t> initialBytes(bytes_);
+ MOZ_ASSERT(initialBytes + nbytes > initialBytes);
+ bytes_ += nbytes;
+ }
+ void removeBytes(size_t nbytes, bool updateRetainedSize) {
+ if (updateRetainedSize) {
+ MOZ_ASSERT(retainedBytes_ >= nbytes);
+ retainedBytes_ -= nbytes;
+ }
+ MOZ_ASSERT(bytes_ >= nbytes);
+ bytes_ -= nbytes;
+ }
+};
+
+/*
+ * Like HeapSize, but also updates a 'parent' HeapSize. Used for per-zone heap
+ * size data that also updates a runtime-wide parent.
+ */
+class HeapSizeChild : public HeapSize {
+ public:
+ void addGCArena(HeapSize& parent) {
+ HeapSize::addGCArena();
+ parent.addGCArena();
+ }
+
+ void removeGCArena(HeapSize& parent) {
+ HeapSize::removeGCArena();
+ parent.removeGCArena();
+ }
+
+ void addBytes(size_t nbytes, HeapSize& parent) {
+ HeapSize::addBytes(nbytes);
+ parent.addBytes(nbytes);
+ }
+
+ void removeBytes(size_t nbytes, bool updateRetainedSize, HeapSize& parent) {
+ HeapSize::removeBytes(nbytes, updateRetainedSize);
+ parent.removeBytes(nbytes, updateRetainedSize);
+ }
+};
+
+class PerZoneGCHeapSize : public HeapSizeChild {
+ public:
+ size_t freedBytes() const { return freedBytes_; }
+ void clearFreedBytes() { freedBytes_ = 0; }
+
+ void removeGCArena(HeapSize& parent) {
+ HeapSizeChild::removeGCArena(parent);
+ freedBytes_ += ArenaSize;
+ }
+
+ void removeBytes(size_t nbytes, bool updateRetainedSize, HeapSize& parent) {
+ HeapSizeChild::removeBytes(nbytes, updateRetainedSize, parent);
+ freedBytes_ += nbytes;
+ }
+
+ private:
+ AtomicByteCount freedBytes_;
+};
+
+// Heap size thresholds used to trigger GC. This is an abstract base class for
+// GC heap and malloc thresholds defined below.
+class HeapThreshold {
+ protected:
+ HeapThreshold()
+ : startBytes_(SIZE_MAX),
+ incrementalLimitBytes_(SIZE_MAX),
+ sliceBytes_(SIZE_MAX) {}
+
+ // The threshold at which to start a new incremental collection.
+ //
+ // This can be read off main thread during collection, for example by sweep
+ // tasks that resize tables.
+ MainThreadOrGCTaskData<size_t> startBytes_;
+
+ // The threshold at which to start a new non-incremental collection or
+ // finish an ongoing collection non-incrementally.
+ MainThreadData<size_t> incrementalLimitBytes_;
+
+ // The threshold at which to trigger a slice during an ongoing incremental
+ // collection.
+ MainThreadData<size_t> sliceBytes_;
+
+ public:
+ size_t startBytes() const { return startBytes_; }
+ size_t sliceBytes() const { return sliceBytes_; }
+ size_t incrementalLimitBytes() const { return incrementalLimitBytes_; }
+ double eagerAllocTrigger(bool highFrequencyGC) const;
+ size_t incrementalBytesRemaining(const HeapSize& heapSize) const;
+
+ void setSliceThreshold(ZoneAllocator* zone, const HeapSize& heapSize,
+ const GCSchedulingTunables& tunables,
+ bool waitingOnBGTask);
+ void clearSliceThreshold() { sliceBytes_ = SIZE_MAX; }
+ bool hasSliceThreshold() const { return sliceBytes_ != SIZE_MAX; }
+
+ protected:
+ static double computeZoneHeapGrowthFactorForHeapSize(
+ size_t lastBytes, const GCSchedulingTunables& tunables,
+ const GCSchedulingState& state);
+
+ void setIncrementalLimitFromStartBytes(size_t retainedBytes,
+ const GCSchedulingTunables& tunables);
+};
+
+// A heap threshold that is based on a multiple of the retained size after the
+// last collection adjusted based on collection frequency and retained
+// size. This is used to determine when to do a zone GC based on GC heap size.
+class GCHeapThreshold : public HeapThreshold {
+ public:
+ void updateStartThreshold(size_t lastBytes,
+ mozilla::Maybe<double> allocationRate,
+ mozilla::Maybe<double> collectionRate,
+ const GCSchedulingTunables& tunables,
+ const GCSchedulingState& state, bool isAtomsZone);
+
+ private:
+ // This is our original algorithm for calculating heap limits.
+ static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
+ const GCSchedulingTunables& tunables);
+
+ // This is the algorithm described in the optimal heap limits paper.
+ static double computeBalancedHeapLimit(size_t lastBytes,
+ double allocationRate,
+ double collectionRate,
+ const GCSchedulingTunables& tunables);
+};
+
+// A heap threshold that is calculated as a constant multiple of the retained
+// size after the last collection. This is used to determine when to do a zone
+// GC based on malloc data.
+class MallocHeapThreshold : public HeapThreshold {
+ public:
+ void updateStartThreshold(size_t lastBytes,
+ const GCSchedulingTunables& tunables,
+ const GCSchedulingState& state);
+
+ private:
+ static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
+ size_t baseBytes);
+};
+
+// A fixed threshold that's used to determine when we need to do a zone GC based
+// on allocated JIT code.
+class JitHeapThreshold : public HeapThreshold {
+ public:
+ explicit JitHeapThreshold(size_t bytes) { startBytes_ = bytes; }
+};
+
+#ifdef DEBUG
+
+// Counts memory associated with GC things in a zone.
+//
+// This records details of the cell (or non-cell pointer) the memory allocation
+// is associated with to check the correctness of the information provided. This
+// is not present in opt builds.
+class MemoryTracker {
+ public:
+ MemoryTracker();
+ void fixupAfterMovingGC();
+ void checkEmptyOnDestroy();
+
+ // Track memory by associated GC thing pointer.
+ void trackGCMemory(Cell* cell, size_t nbytes, MemoryUse use);
+ void untrackGCMemory(Cell* cell, size_t nbytes, MemoryUse use);
+ void swapGCMemory(Cell* a, Cell* b, MemoryUse use);
+
+ // Track memory by associated non-GC thing pointer.
+ void registerNonGCMemory(void* ptr, MemoryUse use);
+ void unregisterNonGCMemory(void* ptr, MemoryUse use);
+ void moveNonGCMemory(void* dst, void* src, MemoryUse use);
+ void incNonGCMemory(void* ptr, size_t nbytes, MemoryUse use);
+ void decNonGCMemory(void* ptr, size_t nbytes, MemoryUse use);
+
+ private:
+ template <typename Ptr>
+ struct Key {
+ Key(Ptr* ptr, MemoryUse use);
+ Ptr* ptr() const;
+ MemoryUse use() const;
+
+ private:
+# ifdef JS_64BIT
+ // Pack this into a single word on 64 bit platforms.
+ uintptr_t ptr_ : 56;
+ uintptr_t use_ : 8;
+# else
+ uintptr_t ptr_ : 32;
+ uintptr_t use_ : 8;
+# endif
+ };
+
+ template <typename Ptr>
+ struct Hasher {
+ using KeyT = Key<Ptr>;
+ using Lookup = KeyT;
+ static HashNumber hash(const Lookup& l);
+ static bool match(const KeyT& key, const Lookup& l);
+ static void rekey(KeyT& k, const KeyT& newKey);
+ };
+
+ template <typename Ptr>
+ using Map = HashMap<Key<Ptr>, size_t, Hasher<Ptr>, SystemAllocPolicy>;
+ using GCMap = Map<Cell>;
+ using NonGCMap = Map<void>;
+
+ static bool isGCMemoryUse(MemoryUse use);
+ static bool isNonGCMemoryUse(MemoryUse use);
+ static bool allowMultipleAssociations(MemoryUse use);
+
+ size_t getAndRemoveEntry(const Key<Cell>& key, LockGuard<Mutex>& lock);
+
+ Mutex mutex MOZ_UNANNOTATED;
+
+ // Map containing the allocated size associated with (cell, use) pairs.
+ GCMap gcMap;
+
+ // Map containing the allocated size associated with (pointer, use) pairs.
+ NonGCMap nonGCMap;
+};
+
+#endif // DEBUG
+
+static inline double LinearInterpolate(double x, double x0, double y0,
+ double x1, double y1) {
+ MOZ_ASSERT(x0 < x1);
+
+ if (x < x0) {
+ return y0;
+ }
+
+ if (x < x1) {
+ return y0 + (y1 - y0) * ((x - x0) / (x1 - x0));
+ }
+
+ return y1;
+}
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_Scheduling_h
diff --git a/js/src/gc/StableCellHasher-inl.h b/js/src/gc/StableCellHasher-inl.h
new file mode 100644
index 0000000000..f76748afef
--- /dev/null
+++ b/js/src/gc/StableCellHasher-inl.h
@@ -0,0 +1,245 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_StableCellHasher_inl_h
+#define gc_StableCellHasher_inl_h
+
+#include "gc/StableCellHasher.h"
+
+#include "mozilla/HashFunctions.h"
+
+#include "gc/Cell.h"
+#include "gc/Marking.h"
+#include "gc/Zone.h"
+#include "vm/JSObject.h"
+#include "vm/NativeObject.h"
+#include "vm/Runtime.h"
+
+namespace js {
+namespace gc {
+
+extern uint64_t NextCellUniqueId(JSRuntime* rt);
+
+inline bool MaybeGetUniqueId(Cell* cell, uint64_t* uidp) {
+ MOZ_ASSERT(uidp);
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cell->runtimeFromAnyThread()) ||
+ CurrentThreadIsPerformingGC());
+
+ if (cell->is<JSObject>()) {
+ JSObject* obj = cell->as<JSObject>();
+ if (obj->is<NativeObject>()) {
+ auto* nobj = &obj->as<NativeObject>();
+ if (!nobj->hasUniqueId()) {
+ return false;
+ }
+
+ *uidp = nobj->uniqueId();
+ return true;
+ }
+ }
+
+ // Get an existing uid, if one has been set.
+ auto p = cell->zone()->uniqueIds().readonlyThreadsafeLookup(cell);
+ if (!p) {
+ return false;
+ }
+
+ *uidp = p->value();
+
+ return true;
+}
+
+extern bool CreateUniqueIdForNativeObject(NativeObject* obj, uint64_t* uidp);
+extern bool CreateUniqueIdForNonNativeObject(Cell* cell, UniqueIdMap::AddPtr,
+ uint64_t* uidp);
+
+inline bool GetOrCreateUniqueId(Cell* cell, uint64_t* uidp) {
+ MOZ_ASSERT(uidp);
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cell->runtimeFromAnyThread()) ||
+ CurrentThreadIsPerformingGC());
+
+ if (cell->is<JSObject>()) {
+ JSObject* obj = cell->as<JSObject>();
+ if (obj->is<NativeObject>()) {
+ auto* nobj = &obj->as<NativeObject>();
+ if (nobj->hasUniqueId()) {
+ *uidp = nobj->uniqueId();
+ return true;
+ }
+
+ return CreateUniqueIdForNativeObject(nobj, uidp);
+ }
+ }
+
+ // Get an existing uid, if one has been set.
+ auto p = cell->zone()->uniqueIds().lookupForAdd(cell);
+ if (p) {
+ *uidp = p->value();
+ return true;
+ }
+
+ return CreateUniqueIdForNonNativeObject(cell, p, uidp);
+}
+
+inline bool SetOrUpdateUniqueId(JSContext* cx, Cell* cell, uint64_t uid) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cell->runtimeFromAnyThread()));
+
+ if (cell->is<JSObject>()) {
+ JSObject* obj = cell->as<JSObject>();
+ if (obj->is<NativeObject>()) {
+ auto* nobj = &obj->as<NativeObject>();
+ return nobj->setOrUpdateUniqueId(cx, uid);
+ }
+ }
+
+ // If the cell was in the nursery, hopefully unlikely, then we need to
+ // tell the nursery about it so that it can sweep the uid if the thing
+ // does not get tenured.
+ JSRuntime* runtime = cell->runtimeFromMainThread();
+ if (IsInsideNursery(cell) &&
+ !runtime->gc.nursery().addedUniqueIdToCell(cell)) {
+ return false;
+ }
+
+ return cell->zone()->uniqueIds().put(cell, uid);
+}
+
+inline uint64_t GetUniqueIdInfallible(Cell* cell) {
+ uint64_t uid;
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!GetOrCreateUniqueId(cell, &uid)) {
+ oomUnsafe.crash("failed to allocate uid");
+ }
+ return uid;
+}
+
+inline bool HasUniqueId(Cell* cell) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cell->runtimeFromAnyThread()) ||
+ CurrentThreadIsPerformingGC());
+
+ if (cell->is<JSObject>()) {
+ JSObject* obj = cell->as<JSObject>();
+ if (obj->is<NativeObject>()) {
+ return obj->as<NativeObject>().hasUniqueId();
+ }
+ }
+
+ return cell->zone()->uniqueIds().has(cell);
+}
+
+inline void TransferUniqueId(Cell* tgt, Cell* src) {
+ MOZ_ASSERT(src != tgt);
+ MOZ_ASSERT(!IsInsideNursery(tgt));
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(tgt->runtimeFromAnyThread()));
+ MOZ_ASSERT(src->zone() == tgt->zone());
+
+ Zone* zone = tgt->zone();
+ MOZ_ASSERT(!zone->uniqueIds().has(tgt));
+ zone->uniqueIds().rekeyIfMoved(src, tgt);
+}
+
+inline void RemoveUniqueId(Cell* cell) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cell->runtimeFromAnyThread()));
+ // The cell may no longer be in the hash table if it was swapped with a
+ // NativeObject.
+ cell->zone()->uniqueIds().remove(cell);
+}
+
+} // namespace gc
+
+static inline js::HashNumber UniqueIdToHash(uint64_t uid) {
+ // This uses the default hasher which returns the lower 32 bits of 64 bit
+ // integers as the hash code. This is OK because the result will be scrambled
+ // later by ScrambleHashCode().
+ return DefaultHasher<uint64_t>::hash(uid);
+}
+
+template <typename T>
+/* static */ bool StableCellHasher<T>::maybeGetHash(const Lookup& l,
+ HashNumber* hashOut) {
+ if (!l) {
+ *hashOut = 0;
+ return true;
+ }
+
+ uint64_t uid;
+ if (!gc::MaybeGetUniqueId(l, &uid)) {
+ return false;
+ }
+
+ *hashOut = UniqueIdToHash(uid);
+ return true;
+}
+
+template <typename T>
+/* static */ bool StableCellHasher<T>::ensureHash(const Lookup& l,
+ HashNumber* hashOut) {
+ if (!l) {
+ *hashOut = 0;
+ return true;
+ }
+
+ uint64_t uid;
+ if (!gc::GetOrCreateUniqueId(l, &uid)) {
+ return false;
+ }
+
+ *hashOut = UniqueIdToHash(uid);
+ return true;
+}
+
+template <typename T>
+/* static */ HashNumber StableCellHasher<T>::hash(const Lookup& l) {
+ if (!l) {
+ return 0;
+ }
+
+ // We have to access the zone from-any-thread here: a worker thread may be
+ // cloning a self-hosted object from the main runtime's self-hosting zone
+ // into another runtime. The zone's uid lock will protect against multiple
+ // workers doing this simultaneously.
+ MOZ_ASSERT(CurrentThreadCanAccessZone(l->zoneFromAnyThread()) ||
+ CurrentThreadIsPerformingGC());
+
+ return UniqueIdToHash(gc::GetUniqueIdInfallible(l));
+}
+
+template <typename T>
+/* static */ bool StableCellHasher<T>::match(const Key& k, const Lookup& l) {
+ if (k == l) {
+ return true;
+ }
+
+ if (!k || !l) {
+ return false;
+ }
+
+ MOZ_ASSERT(CurrentThreadCanAccessZone(l->zoneFromAnyThread()) ||
+ CurrentThreadIsPerformingGC());
+
+#ifdef DEBUG
+ // Incremental table sweeping means that existing table entries may no
+ // longer have unique IDs. We fail the match in that case and the entry is
+ // removed from the table later on.
+ if (!gc::HasUniqueId(k)) {
+ Key key = k;
+ MOZ_ASSERT(IsAboutToBeFinalizedUnbarriered(key));
+ }
+ MOZ_ASSERT(gc::HasUniqueId(l));
+#endif
+
+ uint64_t keyId;
+ if (!gc::MaybeGetUniqueId(k, &keyId)) {
+ // Key is dead and cannot match lookup which must be live.
+ return false;
+ }
+
+ return keyId == gc::GetUniqueIdInfallible(l);
+}
+
+} // namespace js
+
+#endif // gc_StableCellHasher_inl_h
diff --git a/js/src/gc/StableCellHasher.h b/js/src/gc/StableCellHasher.h
new file mode 100644
index 0000000000..80b1856976
--- /dev/null
+++ b/js/src/gc/StableCellHasher.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_StableCellHasher_h
+#define gc_StableCellHasher_h
+
+#include "js/RootingAPI.h"
+#include "js/TypeDecls.h"
+
+namespace js {
+
+// StableCellHasher itself is defined in js/public/RootingAPI.h.
+
+namespace gc {
+
+struct Cell;
+
+// Gets an existing UID in |uidp| if one exists.
+[[nodiscard]] bool MaybeGetUniqueId(Cell* cell, uint64_t* uidp);
+
+// Puts an existing UID in |uidp|, or creates a new UID for this Cell and
+// puts that into |uidp|. Returns false on OOM.
+[[nodiscard]] bool GetOrCreateUniqueId(Cell* cell, uint64_t* uidp);
+
+uint64_t GetUniqueIdInfallible(Cell* cell);
+
+// Return true if this cell has a UID associated with it.
+[[nodiscard]] bool HasUniqueId(Cell* cell);
+
+// Transfer an id from another cell. This must only be called on behalf of a
+// moving GC. This method is infallible.
+void TransferUniqueId(Cell* tgt, Cell* src);
+
+// Remove any unique id associated with this Cell.
+void RemoveUniqueId(Cell* cell);
+
+// Used to restore unique ID after JSObject::swap.
+bool SetOrUpdateUniqueId(JSContext* cx, Cell* cell, uint64_t uid);
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_StableCellHasher_h
diff --git a/js/src/gc/Statistics.cpp b/js/src/gc/Statistics.cpp
new file mode 100644
index 0000000000..66b1c40673
--- /dev/null
+++ b/js/src/gc/Statistics.cpp
@@ -0,0 +1,1811 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Statistics.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Sprintf.h"
+#include "mozilla/TimeStamp.h"
+
+#include <algorithm>
+#include <stdarg.h>
+#include <stdio.h>
+#include <type_traits>
+
+#include "gc/GC.h"
+#include "gc/GCInternals.h"
+#include "gc/Memory.h"
+#include "js/Printer.h"
+#include "util/GetPidProvider.h"
+#include "util/Text.h"
+#include "vm/JSONPrinter.h"
+#include "vm/Runtime.h"
+#include "vm/Time.h"
+
+#include "gc/PrivateIterators-inl.h"
+
+using namespace js;
+using namespace js::gc;
+using namespace js::gcstats;
+
+using mozilla::DebugOnly;
+using mozilla::EnumeratedArray;
+using mozilla::Maybe;
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
+static const size_t BYTES_PER_MB = 1024 * 1024;
+
+/*
+ * If this fails, then you can either delete this assertion and allow all
+ * larger-numbered reasons to pile up in the last telemetry bucket, or switch
+ * to GC_REASON_3 and bump the max value.
+ */
+static_assert(JS::GCReason::NUM_TELEMETRY_REASONS >= JS::GCReason::NUM_REASONS);
+
+static inline auto AllPhaseKinds() {
+ return mozilla::MakeEnumeratedRange(PhaseKind::FIRST, PhaseKind::LIMIT);
+}
+
+static inline auto MajorGCPhaseKinds() {
+ return mozilla::MakeEnumeratedRange(PhaseKind::GC_BEGIN,
+ PhaseKind(size_t(PhaseKind::GC_END) + 1));
+}
+
+static const char* ExplainGCOptions(JS::GCOptions options) {
+ switch (options) {
+ case JS::GCOptions::Normal:
+ return "Normal";
+ case JS::GCOptions::Shrink:
+ return "Shrink";
+ case JS::GCOptions::Shutdown:
+ return "Shutdown";
+ }
+
+ MOZ_CRASH("Unexpected GCOptions value");
+}
+
+JS_PUBLIC_API const char* JS::ExplainGCReason(JS::GCReason reason) {
+ switch (reason) {
+#define SWITCH_REASON(name, _) \
+ case JS::GCReason::name: \
+ return #name;
+ GCREASONS(SWITCH_REASON)
+#undef SWITCH_REASON
+
+ case JS::GCReason::NO_REASON:
+ return "NO_REASON";
+
+ default:
+ MOZ_CRASH("bad GC reason");
+ }
+}
+
+JS_PUBLIC_API bool JS::InternalGCReason(JS::GCReason reason) {
+ return reason < JS::GCReason::FIRST_FIREFOX_REASON;
+}
+
+const char* js::gcstats::ExplainAbortReason(GCAbortReason reason) {
+ switch (reason) {
+#define SWITCH_REASON(name, _) \
+ case GCAbortReason::name: \
+ return #name;
+ GC_ABORT_REASONS(SWITCH_REASON)
+
+ default:
+ MOZ_CRASH("bad GC abort reason");
+#undef SWITCH_REASON
+ }
+}
+
+static FILE* MaybeOpenFileFromEnv(const char* env,
+ FILE* defaultFile = nullptr) {
+ const char* value = getenv(env);
+ if (!value) {
+ return defaultFile;
+ }
+
+ FILE* file;
+ if (strcmp(value, "none") == 0) {
+ file = nullptr;
+ } else if (strcmp(value, "stdout") == 0) {
+ file = stdout;
+ } else if (strcmp(value, "stderr") == 0) {
+ file = stderr;
+ } else {
+ char path[300];
+ if (value[0] != '/') {
+ const char* dir = getenv("MOZ_UPLOAD_DIR");
+ if (dir) {
+ SprintfLiteral(path, "%s/%s", dir, value);
+ value = path;
+ }
+ }
+
+ file = fopen(value, "a");
+ if (!file || setvbuf(file, nullptr, _IOLBF, 256) != 0) {
+ perror("Error opening log file");
+ MOZ_CRASH("Failed to open log file.");
+ }
+ }
+
+ return file;
+}
+
+struct PhaseKindInfo {
+ Phase firstPhase;
+ uint8_t telemetryBucket;
+ const char* name;
+};
+
+// PhaseInfo objects form a tree.
+struct PhaseInfo {
+ Phase parent;
+ Phase firstChild;
+ Phase nextSibling;
+ Phase nextWithPhaseKind;
+ PhaseKind phaseKind;
+ uint8_t depth;
+ const char* name;
+ const char* path;
+};
+
+// A table of PhaseInfo indexed by Phase.
+using PhaseTable = EnumeratedArray<Phase, Phase::LIMIT, PhaseInfo>;
+
+// A table of PhaseKindInfo indexed by PhaseKind.
+using PhaseKindTable =
+ EnumeratedArray<PhaseKind, PhaseKind::LIMIT, PhaseKindInfo>;
+
+#include "gc/StatsPhasesGenerated.inc"
+
+// Iterate the phases in a phase kind.
+class PhaseIter {
+ Phase phase;
+
+ public:
+ explicit PhaseIter(PhaseKind kind) : phase(phaseKinds[kind].firstPhase) {}
+ bool done() const { return phase == Phase::NONE; }
+ void next() { phase = phases[phase].nextWithPhaseKind; }
+ Phase get() const { return phase; }
+ operator Phase() const { return phase; }
+};
+
+static double t(TimeDuration duration) { return duration.ToMilliseconds(); }
+
+inline JSContext* Statistics::context() {
+ return gc->rt->mainContextFromOwnThread();
+}
+
+inline Phase Statistics::currentPhase() const {
+ return phaseStack.empty() ? Phase::NONE : phaseStack.back();
+}
+
+PhaseKind Statistics::currentPhaseKind() const {
+ // Public API to get the current phase kind, suppressing the synthetic
+ // PhaseKind::MUTATOR phase.
+
+ Phase phase = currentPhase();
+ MOZ_ASSERT_IF(phase == Phase::MUTATOR, phaseStack.length() == 1);
+ if (phase == Phase::NONE || phase == Phase::MUTATOR) {
+ return PhaseKind::NONE;
+ }
+
+ return phases[phase].phaseKind;
+}
+
+static Phase LookupPhaseWithParent(PhaseKind phaseKind, Phase parentPhase) {
+ for (PhaseIter phase(phaseKind); !phase.done(); phase.next()) {
+ if (phases[phase].parent == parentPhase) {
+ return phase;
+ }
+ }
+
+ return Phase::NONE;
+}
+
+static const char* PhaseKindName(PhaseKind kind) {
+ if (kind == PhaseKind::NONE) {
+ return "NONE";
+ }
+
+ return phaseKinds[kind].name;
+}
+
+Phase Statistics::lookupChildPhase(PhaseKind phaseKind) const {
+ if (phaseKind == PhaseKind::IMPLICIT_SUSPENSION) {
+ return Phase::IMPLICIT_SUSPENSION;
+ }
+ if (phaseKind == PhaseKind::EXPLICIT_SUSPENSION) {
+ return Phase::EXPLICIT_SUSPENSION;
+ }
+
+ MOZ_ASSERT(phaseKind < PhaseKind::LIMIT);
+
+ // Search all expanded phases that correspond to the required
+ // phase to find the one whose parent is the current expanded phase.
+ Phase phase = LookupPhaseWithParent(phaseKind, currentPhase());
+
+ if (phase == Phase::NONE) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "Child phase kind %s not found under current phase kind %s",
+ PhaseKindName(phaseKind), PhaseKindName(currentPhaseKind()));
+ }
+
+ return phase;
+}
+
+inline auto AllPhases() {
+ return mozilla::MakeEnumeratedRange(Phase::FIRST, Phase::LIMIT);
+}
+
+void Statistics::gcDuration(TimeDuration* total, TimeDuration* maxPause) const {
+ *total = *maxPause = 0;
+ for (auto& slice : slices_) {
+ *total += slice.duration();
+ if (slice.duration() > *maxPause) {
+ *maxPause = slice.duration();
+ }
+ }
+ if (*maxPause > maxPauseInInterval) {
+ maxPauseInInterval = *maxPause;
+ }
+}
+
+void Statistics::sccDurations(TimeDuration* total,
+ TimeDuration* maxPause) const {
+ *total = *maxPause = 0;
+ for (size_t i = 0; i < sccTimes.length(); i++) {
+ *total += sccTimes[i];
+ *maxPause = std::max(*maxPause, sccTimes[i]);
+ }
+}
+
+typedef Vector<UniqueChars, 8, SystemAllocPolicy> FragmentVector;
+
+static UniqueChars Join(const FragmentVector& fragments,
+ const char* separator = "") {
+ const size_t separatorLength = strlen(separator);
+ size_t length = 0;
+ for (size_t i = 0; i < fragments.length(); ++i) {
+ length += fragments[i] ? strlen(fragments[i].get()) : 0;
+ if (i < (fragments.length() - 1)) {
+ length += separatorLength;
+ }
+ }
+
+ char* joined = js_pod_malloc<char>(length + 1);
+ if (!joined) {
+ return UniqueChars();
+ }
+
+ joined[length] = '\0';
+ char* cursor = joined;
+ for (size_t i = 0; i < fragments.length(); ++i) {
+ if (fragments[i]) {
+ strcpy(cursor, fragments[i].get());
+ }
+ cursor += fragments[i] ? strlen(fragments[i].get()) : 0;
+ if (i < (fragments.length() - 1)) {
+ if (separatorLength) {
+ strcpy(cursor, separator);
+ }
+ cursor += separatorLength;
+ }
+ }
+
+ return UniqueChars(joined);
+}
+
+static TimeDuration SumChildTimes(Phase phase,
+ const Statistics::PhaseTimes& phaseTimes) {
+ TimeDuration total = 0;
+ for (phase = phases[phase].firstChild; phase != Phase::NONE;
+ phase = phases[phase].nextSibling) {
+ total += phaseTimes[phase];
+ }
+ return total;
+}
+
+UniqueChars Statistics::formatCompactSliceMessage() const {
+ // Skip if we OOM'ed.
+ if (slices_.length() == 0) {
+ return UniqueChars(nullptr);
+ }
+
+ const size_t index = slices_.length() - 1;
+ const SliceData& slice = slices_.back();
+
+ char budgetDescription[200];
+ slice.budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
+
+ const char* format =
+ "GC Slice %u - Pause: %.3fms of %s budget (@ %.3fms); Reason: %s; Reset: "
+ "%s%s; Times: ";
+ char buffer[1024];
+ SprintfLiteral(buffer, format, index, t(slice.duration()), budgetDescription,
+ t(slice.start - slices_[0].start),
+ ExplainGCReason(slice.reason),
+ slice.wasReset() ? "yes - " : "no",
+ slice.wasReset() ? ExplainAbortReason(slice.resetReason) : "");
+
+ FragmentVector fragments;
+ if (!fragments.append(DuplicateString(buffer)) ||
+ !fragments.append(
+ formatCompactSlicePhaseTimes(slices_[index].phaseTimes))) {
+ return UniqueChars(nullptr);
+ }
+ return Join(fragments);
+}
+
+UniqueChars Statistics::formatCompactSummaryMessage() const {
+ FragmentVector fragments;
+ if (!fragments.append(DuplicateString("Summary - "))) {
+ return UniqueChars(nullptr);
+ }
+
+ TimeDuration total, longest;
+ gcDuration(&total, &longest);
+
+ const double mmu20 = computeMMU(TimeDuration::FromMilliseconds(20));
+ const double mmu50 = computeMMU(TimeDuration::FromMilliseconds(50));
+
+ char buffer[1024];
+ if (!nonincremental()) {
+ SprintfLiteral(buffer,
+ "Max Pause: %.3fms; MMU 20ms: %.1f%%; MMU 50ms: %.1f%%; "
+ "Total: %.3fms; ",
+ t(longest), mmu20 * 100., mmu50 * 100., t(total));
+ } else {
+ SprintfLiteral(buffer, "Non-Incremental: %.3fms (%s); ", t(total),
+ ExplainAbortReason(nonincrementalReason_));
+ }
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+
+ SprintfLiteral(buffer,
+ "Zones: %d of %d (-%d); Compartments: %d of %d (-%d); "
+ "HeapSize: %.3f MiB; "
+ "HeapChange (abs): %+d (%u); ",
+ zoneStats.collectedZoneCount, zoneStats.zoneCount,
+ zoneStats.sweptZoneCount, zoneStats.collectedCompartmentCount,
+ zoneStats.compartmentCount, zoneStats.sweptCompartmentCount,
+ double(preTotalHeapBytes) / BYTES_PER_MB,
+ int32_t(counts[COUNT_NEW_CHUNK] - counts[COUNT_DESTROY_CHUNK]),
+ counts[COUNT_NEW_CHUNK] + counts[COUNT_DESTROY_CHUNK]);
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+
+ MOZ_ASSERT_IF(counts[COUNT_ARENA_RELOCATED],
+ gcOptions == JS::GCOptions::Shrink);
+ if (gcOptions == JS::GCOptions::Shrink) {
+ SprintfLiteral(
+ buffer, "Kind: %s; Relocated: %.3f MiB; ", ExplainGCOptions(gcOptions),
+ double(ArenaSize * counts[COUNT_ARENA_RELOCATED]) / BYTES_PER_MB);
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+ }
+
+ return Join(fragments);
+}
+
+UniqueChars Statistics::formatCompactSlicePhaseTimes(
+ const PhaseTimes& phaseTimes) const {
+ static const TimeDuration MaxUnaccountedTime =
+ TimeDuration::FromMicroseconds(100);
+
+ FragmentVector fragments;
+ char buffer[128];
+ for (auto phase : AllPhases()) {
+ DebugOnly<uint8_t> level = phases[phase].depth;
+ MOZ_ASSERT(level < 4);
+
+ TimeDuration ownTime = phaseTimes[phase];
+ TimeDuration childTime = SumChildTimes(phase, phaseTimes);
+ if (ownTime > MaxUnaccountedTime) {
+ SprintfLiteral(buffer, "%s: %.3fms", phases[phase].name, t(ownTime));
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+
+ if (childTime && (ownTime - childTime) > MaxUnaccountedTime) {
+ MOZ_ASSERT(level < 3);
+ SprintfLiteral(buffer, "%s: %.3fms", "Other", t(ownTime - childTime));
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+ }
+ }
+ }
+ return Join(fragments, ", ");
+}
+
+UniqueChars Statistics::formatDetailedMessage() const {
+ FragmentVector fragments;
+
+ if (!fragments.append(formatDetailedDescription())) {
+ return UniqueChars(nullptr);
+ }
+
+ if (!slices_.empty()) {
+ for (unsigned i = 0; i < slices_.length(); i++) {
+ if (!fragments.append(formatDetailedSliceDescription(i, slices_[i]))) {
+ return UniqueChars(nullptr);
+ }
+ if (!fragments.append(formatDetailedPhaseTimes(slices_[i].phaseTimes))) {
+ return UniqueChars(nullptr);
+ }
+ }
+ }
+ if (!fragments.append(formatDetailedTotals())) {
+ return UniqueChars(nullptr);
+ }
+ if (!fragments.append(formatDetailedPhaseTimes(phaseTimes))) {
+ return UniqueChars(nullptr);
+ }
+
+ return Join(fragments);
+}
+
+UniqueChars Statistics::formatDetailedDescription() const {
+ TimeDuration sccTotal, sccLongest;
+ sccDurations(&sccTotal, &sccLongest);
+
+ const double mmu20 = computeMMU(TimeDuration::FromMilliseconds(20));
+ const double mmu50 = computeMMU(TimeDuration::FromMilliseconds(50));
+
+ const char* format =
+ "=================================================================\n\
+ Invocation Kind: %s\n\
+ Reason: %s\n\
+ Incremental: %s%s\n\
+ Zones Collected: %d of %d (-%d)\n\
+ Compartments Collected: %d of %d (-%d)\n\
+ MinorGCs since last GC: %d\n\
+ Store Buffer Overflows: %d\n\
+ MMU 20ms:%.1f%%; 50ms:%.1f%%\n\
+ SCC Sweep Total (MaxPause): %.3fms (%.3fms)\n\
+ HeapSize: %.3f MiB\n\
+ Chunk Delta (magnitude): %+d (%d)\n\
+ Arenas Relocated: %.3f MiB\n\
+";
+
+ char buffer[1024];
+ SprintfLiteral(
+ buffer, format, ExplainGCOptions(gcOptions),
+ ExplainGCReason(slices_[0].reason), nonincremental() ? "no - " : "yes",
+ nonincremental() ? ExplainAbortReason(nonincrementalReason_) : "",
+ zoneStats.collectedZoneCount, zoneStats.zoneCount,
+ zoneStats.sweptZoneCount, zoneStats.collectedCompartmentCount,
+ zoneStats.compartmentCount, zoneStats.sweptCompartmentCount,
+ getCount(COUNT_MINOR_GC), getCount(COUNT_STOREBUFFER_OVERFLOW),
+ mmu20 * 100., mmu50 * 100., t(sccTotal), t(sccLongest),
+ double(preTotalHeapBytes) / BYTES_PER_MB,
+ getCount(COUNT_NEW_CHUNK) - getCount(COUNT_DESTROY_CHUNK),
+ getCount(COUNT_NEW_CHUNK) + getCount(COUNT_DESTROY_CHUNK),
+ double(ArenaSize * getCount(COUNT_ARENA_RELOCATED)) / BYTES_PER_MB);
+
+ return DuplicateString(buffer);
+}
+
+UniqueChars Statistics::formatDetailedSliceDescription(
+ unsigned i, const SliceData& slice) const {
+ char budgetDescription[200];
+ slice.budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
+
+ const char* format =
+ "\
+ ---- Slice %u ----\n\
+ Reason: %s\n\
+ Trigger: %s\n\
+ Reset: %s%s\n\
+ State: %s -> %s\n\
+ Page Faults: %" PRIu64
+ "\n\
+ Pause: %.3fms of %s budget (@ %.3fms)\n\
+";
+
+ char triggerBuffer[100] = "n/a";
+ if (slice.trigger) {
+ Trigger trigger = slice.trigger.value();
+ SprintfLiteral(triggerBuffer, "%.3f MiB of %.3f MiB threshold\n",
+ double(trigger.amount) / BYTES_PER_MB,
+ double(trigger.threshold) / BYTES_PER_MB);
+ }
+
+ char buffer[1024];
+ SprintfLiteral(
+ buffer, format, i, ExplainGCReason(slice.reason), triggerBuffer,
+ slice.wasReset() ? "yes - " : "no",
+ slice.wasReset() ? ExplainAbortReason(slice.resetReason) : "",
+ gc::StateName(slice.initialState), gc::StateName(slice.finalState),
+ uint64_t(slice.endFaults - slice.startFaults), t(slice.duration()),
+ budgetDescription, t(slice.start - slices_[0].start));
+ return DuplicateString(buffer);
+}
+
+static bool IncludePhase(TimeDuration duration) {
+ // Don't include durations that will print as "0.000ms".
+ return duration.ToMilliseconds() >= 0.001;
+}
+
+UniqueChars Statistics::formatDetailedPhaseTimes(
+ const PhaseTimes& phaseTimes) const {
+ static const TimeDuration MaxUnaccountedChildTime =
+ TimeDuration::FromMicroseconds(50);
+
+ FragmentVector fragments;
+ char buffer[128];
+ for (auto phase : AllPhases()) {
+ uint8_t level = phases[phase].depth;
+ TimeDuration ownTime = phaseTimes[phase];
+ TimeDuration childTime = SumChildTimes(phase, phaseTimes);
+ if (IncludePhase(ownTime)) {
+ SprintfLiteral(buffer, " %*s%s: %.3fms\n", level * 2, "",
+ phases[phase].name, t(ownTime));
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+
+ if (childTime && (ownTime - childTime) > MaxUnaccountedChildTime) {
+ SprintfLiteral(buffer, " %*s%s: %.3fms\n", (level + 1) * 2, "",
+ "Other", t(ownTime - childTime));
+ if (!fragments.append(DuplicateString(buffer))) {
+ return UniqueChars(nullptr);
+ }
+ }
+ }
+ }
+ return Join(fragments);
+}
+
+UniqueChars Statistics::formatDetailedTotals() const {
+ TimeDuration total, longest;
+ gcDuration(&total, &longest);
+
+ const char* format =
+ "\
+ ---- Totals ----\n\
+ Total Time: %.3fms\n\
+ Max Pause: %.3fms\n\
+";
+ char buffer[1024];
+ SprintfLiteral(buffer, format, t(total), t(longest));
+ return DuplicateString(buffer);
+}
+
+void Statistics::formatJsonSlice(size_t sliceNum, JSONPrinter& json) const {
+ /*
+ * We number each of the slice properties to keep the code in
+ * GCTelemetry.jsm in sync. See MAX_SLICE_KEYS.
+ */
+ json.beginObject();
+ formatJsonSliceDescription(sliceNum, slices_[sliceNum], json); // # 1-11
+
+ json.beginObjectProperty("times"); // # 12
+ formatJsonPhaseTimes(slices_[sliceNum].phaseTimes, json);
+ json.endObject();
+
+ json.endObject();
+}
+
+UniqueChars Statistics::renderJsonSlice(size_t sliceNum) const {
+ Sprinter printer(nullptr, false);
+ if (!printer.init()) {
+ return UniqueChars(nullptr);
+ }
+ JSONPrinter json(printer, false);
+
+ formatJsonSlice(sliceNum, json);
+ return printer.release();
+}
+
+UniqueChars Statistics::renderNurseryJson() const {
+ Sprinter printer(nullptr, false);
+ if (!printer.init()) {
+ return UniqueChars(nullptr);
+ }
+ JSONPrinter json(printer, false);
+ gc->nursery().renderProfileJSON(json);
+ return printer.release();
+}
+
+#ifdef DEBUG
+void Statistics::log(const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ if (gcDebugFile) {
+ TimeDuration sinceStart = TimeStamp::Now() - TimeStamp::FirstTimeStamp();
+ fprintf(gcDebugFile, "%12.3f: ", sinceStart.ToMicroseconds());
+ vfprintf(gcDebugFile, fmt, args);
+ fprintf(gcDebugFile, "\n");
+ fflush(gcDebugFile);
+ }
+ va_end(args);
+}
+#endif
+
+UniqueChars Statistics::renderJsonMessage() const {
+ /*
+ * The format of the JSON message is specified by the GCMajorMarkerPayload
+ * type in profiler.firefox.com
+ * https://github.com/firefox-devtools/profiler/blob/master/src/types/markers.js#L62
+ *
+ * All the properties listed here are created within the timings property
+ * of the GCMajor marker.
+ */
+ if (aborted) {
+ return DuplicateString("{status:\"aborted\"}"); // May return nullptr
+ }
+
+ Sprinter printer(nullptr, false);
+ if (!printer.init()) {
+ return UniqueChars(nullptr);
+ }
+ JSONPrinter json(printer, false);
+
+ json.beginObject();
+ json.property("status", "completed");
+ formatJsonDescription(json);
+
+ json.beginObjectProperty("totals");
+ formatJsonPhaseTimes(phaseTimes, json);
+ json.endObject();
+
+ json.endObject();
+
+ return printer.release();
+}
+
+void Statistics::formatJsonDescription(JSONPrinter& json) const {
+ // If you change JSON properties here, please update:
+ // Firefox Profiler:
+ // https://github.com/firefox-devtools/profiler
+
+ TimeDuration total, longest;
+ gcDuration(&total, &longest);
+ json.property("max_pause", longest, JSONPrinter::MILLISECONDS);
+ json.property("total_time", total, JSONPrinter::MILLISECONDS);
+ // We might be able to omit reason if profiler.firefox.com was able to retrieve
+ // it from the first slice. But it doesn't do this yet.
+ json.property("reason", ExplainGCReason(slices_[0].reason));
+ json.property("zones_collected", zoneStats.collectedZoneCount);
+ json.property("total_zones", zoneStats.zoneCount);
+ json.property("total_compartments", zoneStats.compartmentCount);
+ json.property("minor_gcs", getCount(COUNT_MINOR_GC));
+ json.property("minor_gc_number", gc->minorGCCount());
+ json.property("major_gc_number", gc->majorGCCount());
+ uint32_t storebufferOverflows = getCount(COUNT_STOREBUFFER_OVERFLOW);
+ if (storebufferOverflows) {
+ json.property("store_buffer_overflows", storebufferOverflows);
+ }
+ json.property("slices", slices_.length());
+
+ const double mmu20 = computeMMU(TimeDuration::FromMilliseconds(20));
+ const double mmu50 = computeMMU(TimeDuration::FromMilliseconds(50));
+ json.property("mmu_20ms", int(mmu20 * 100));
+ json.property("mmu_50ms", int(mmu50 * 100));
+
+ TimeDuration sccTotal, sccLongest;
+ sccDurations(&sccTotal, &sccLongest);
+ json.property("scc_sweep_total", sccTotal, JSONPrinter::MILLISECONDS);
+ json.property("scc_sweep_max_pause", sccLongest, JSONPrinter::MILLISECONDS);
+
+ if (nonincrementalReason_ != GCAbortReason::None) {
+ json.property("nonincremental_reason",
+ ExplainAbortReason(nonincrementalReason_));
+ }
+ json.property("allocated_bytes", preTotalHeapBytes);
+ json.property("post_heap_size", postTotalHeapBytes);
+
+ uint32_t addedChunks = getCount(COUNT_NEW_CHUNK);
+ if (addedChunks) {
+ json.property("added_chunks", addedChunks);
+ }
+ uint32_t removedChunks = getCount(COUNT_DESTROY_CHUNK);
+ if (removedChunks) {
+ json.property("removed_chunks", removedChunks);
+ }
+ json.property("major_gc_number", startingMajorGCNumber);
+ json.property("minor_gc_number", startingMinorGCNumber);
+ json.property("slice_number", startingSliceNumber);
+}
+
+void Statistics::formatJsonSliceDescription(unsigned i, const SliceData& slice,
+ JSONPrinter& json) const {
+ // If you change JSON properties here, please update:
+ // Firefox Profiler:
+ // https://github.com/firefox-devtools/profiler
+ //
+ char budgetDescription[200];
+ slice.budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
+ TimeStamp originTime = TimeStamp::ProcessCreation();
+
+ json.property("slice", i);
+ json.property("pause", slice.duration(), JSONPrinter::MILLISECONDS);
+ json.property("reason", ExplainGCReason(slice.reason));
+ json.property("initial_state", gc::StateName(slice.initialState));
+ json.property("final_state", gc::StateName(slice.finalState));
+ json.property("budget", budgetDescription);
+ json.property("major_gc_number", startingMajorGCNumber);
+ if (slice.trigger) {
+ Trigger trigger = slice.trigger.value();
+ json.property("trigger_amount", trigger.amount);
+ json.property("trigger_threshold", trigger.threshold);
+ }
+ int64_t numFaults = slice.endFaults - slice.startFaults;
+ if (numFaults != 0) {
+ json.property("page_faults", numFaults);
+ }
+ json.property("start_timestamp", slice.start - originTime,
+ JSONPrinter::SECONDS);
+}
+
+void Statistics::formatJsonPhaseTimes(const PhaseTimes& phaseTimes,
+ JSONPrinter& json) const {
+ for (auto phase : AllPhases()) {
+ TimeDuration ownTime = phaseTimes[phase];
+ if (!ownTime.IsZero()) {
+ json.property(phases[phase].path, ownTime, JSONPrinter::MILLISECONDS);
+ }
+ }
+}
+
+Statistics::Statistics(GCRuntime* gc)
+ : gc(gc),
+ gcTimerFile(nullptr),
+ gcDebugFile(nullptr),
+ nonincrementalReason_(GCAbortReason::None),
+ creationTime_(TimeStamp::Now()),
+ tenuredAllocsSinceMinorGC(0),
+ preTotalHeapBytes(0),
+ postTotalHeapBytes(0),
+ preCollectedHeapBytes(0),
+ startingMinorGCNumber(0),
+ startingMajorGCNumber(0),
+ startingSliceNumber(0),
+ maxPauseInInterval(0),
+ sliceCallback(nullptr),
+ nurseryCollectionCallback(nullptr),
+ aborted(false),
+ enableProfiling_(false),
+ sliceCount_(0) {
+ for (auto& count : counts) {
+ count = 0;
+ }
+
+ for (auto& stat : stats) {
+ stat = 0;
+ }
+
+#ifdef DEBUG
+ for (const auto& duration : totalTimes_) {
+ using ElementType = std::remove_reference_t<decltype(duration)>;
+ static_assert(!std::is_trivially_constructible_v<ElementType>,
+ "Statistics::Statistics will only initialize "
+ "totalTimes_'s elements if their default constructor is "
+ "non-trivial");
+ MOZ_ASSERT(duration.IsZero(),
+ "totalTimes_ default-initialization should have "
+ "default-initialized every element of totalTimes_ to zero");
+ }
+#endif
+
+ MOZ_ALWAYS_TRUE(phaseStack.reserve(MAX_PHASE_NESTING));
+ MOZ_ALWAYS_TRUE(suspendedPhases.reserve(MAX_SUSPENDED_PHASES));
+
+ gcTimerFile = MaybeOpenFileFromEnv("MOZ_GCTIMER");
+ gcDebugFile = MaybeOpenFileFromEnv("JS_GC_DEBUG");
+ gcProfileFile = MaybeOpenFileFromEnv("JS_GC_PROFILE_FILE", stderr);
+
+ gc::ReadProfileEnv("JS_GC_PROFILE",
+ "Report major GCs taking more than N milliseconds for "
+ "all or just the main runtime\n",
+ &enableProfiling_, &profileWorkers_, &profileThreshold_);
+}
+
+Statistics::~Statistics() {
+ if (gcTimerFile && gcTimerFile != stdout && gcTimerFile != stderr) {
+ fclose(gcTimerFile);
+ }
+ if (gcDebugFile && gcDebugFile != stdout && gcDebugFile != stderr) {
+ fclose(gcDebugFile);
+ }
+}
+
+/* static */
+bool Statistics::initialize() {
+#ifdef DEBUG
+ // Sanity check generated tables.
+ for (auto i : AllPhases()) {
+ auto parent = phases[i].parent;
+ if (parent != Phase::NONE) {
+ MOZ_ASSERT(phases[i].depth == phases[parent].depth + 1);
+ }
+ auto firstChild = phases[i].firstChild;
+ if (firstChild != Phase::NONE) {
+ MOZ_ASSERT(i == phases[firstChild].parent);
+ MOZ_ASSERT(phases[i].depth == phases[firstChild].depth - 1);
+ }
+ auto nextSibling = phases[i].nextSibling;
+ if (nextSibling != Phase::NONE) {
+ MOZ_ASSERT(parent == phases[nextSibling].parent);
+ MOZ_ASSERT(phases[i].depth == phases[nextSibling].depth);
+ }
+ auto nextWithPhaseKind = phases[i].nextWithPhaseKind;
+ if (nextWithPhaseKind != Phase::NONE) {
+ MOZ_ASSERT(phases[i].phaseKind == phases[nextWithPhaseKind].phaseKind);
+ MOZ_ASSERT(parent != phases[nextWithPhaseKind].parent);
+ }
+ }
+ for (auto i : AllPhaseKinds()) {
+ MOZ_ASSERT(phases[phaseKinds[i].firstPhase].phaseKind == i);
+ for (auto j : AllPhaseKinds()) {
+ MOZ_ASSERT_IF(i != j, phaseKinds[i].telemetryBucket !=
+ phaseKinds[j].telemetryBucket);
+ }
+ }
+#endif
+
+ return true;
+}
+
+JS::GCSliceCallback Statistics::setSliceCallback(
+ JS::GCSliceCallback newCallback) {
+ JS::GCSliceCallback oldCallback = sliceCallback;
+ sliceCallback = newCallback;
+ return oldCallback;
+}
+
+JS::GCNurseryCollectionCallback Statistics::setNurseryCollectionCallback(
+ JS::GCNurseryCollectionCallback newCallback) {
+ auto oldCallback = nurseryCollectionCallback;
+ nurseryCollectionCallback = newCallback;
+ return oldCallback;
+}
+
+TimeDuration Statistics::clearMaxGCPauseAccumulator() {
+ TimeDuration prior = maxPauseInInterval;
+ maxPauseInInterval = 0;
+ return prior;
+}
+
+TimeDuration Statistics::getMaxGCPauseSinceClear() {
+ return maxPauseInInterval;
+}
+
+// Sum up the time for a phase, including instances of the phase with different
+// parents.
+static TimeDuration SumPhase(PhaseKind phaseKind,
+ const Statistics::PhaseTimes& times) {
+ TimeDuration sum;
+ for (PhaseIter phase(phaseKind); !phase.done(); phase.next()) {
+ sum += times[phase];
+ }
+ return sum;
+}
+
+static bool CheckSelfTime(Phase parent, Phase child,
+ const Statistics::PhaseTimes& times,
+ const Statistics::PhaseTimes& selfTimes,
+ TimeDuration childTime) {
+ if (selfTimes[parent] < childTime) {
+ fprintf(
+ stderr,
+ "Parent %s time = %.3fms with %.3fms remaining, child %s time %.3fms\n",
+ phases[parent].name, times[parent].ToMilliseconds(),
+ selfTimes[parent].ToMilliseconds(), phases[child].name,
+ childTime.ToMilliseconds());
+ fflush(stderr);
+ return false;
+ }
+
+ return true;
+}
+
+static PhaseKind FindLongestPhaseKind(const Statistics::PhaseKindTimes& times) {
+ TimeDuration longestTime;
+ PhaseKind phaseKind = PhaseKind::NONE;
+ for (auto i : MajorGCPhaseKinds()) {
+ if (times[i] > longestTime) {
+ longestTime = times[i];
+ phaseKind = i;
+ }
+ }
+
+ return phaseKind;
+}
+
+static PhaseKind LongestPhaseSelfTimeInMajorGC(
+ const Statistics::PhaseTimes& times) {
+ // Start with total times per expanded phase, including children's times.
+ Statistics::PhaseTimes selfTimes(times);
+
+ // We have the total time spent in each phase, including descendant times.
+ // Loop over the children and subtract their times from their parent's self
+ // time.
+ for (auto i : AllPhases()) {
+ Phase parent = phases[i].parent;
+ if (parent != Phase::NONE) {
+ bool ok = CheckSelfTime(parent, i, times, selfTimes, times[i]);
+
+ // This happens very occasionally in release builds and frequently
+ // in Windows debug builds. Skip collecting longest phase telemetry
+ // if it does.
+#ifndef XP_WIN
+ MOZ_ASSERT(ok, "Inconsistent time data; see bug 1400153");
+#endif
+ if (!ok) {
+ return PhaseKind::NONE;
+ }
+
+ selfTimes[parent] -= times[i];
+ }
+ }
+
+ // Sum expanded phases corresponding to the same phase.
+ Statistics::PhaseKindTimes phaseKindTimes;
+ for (auto i : AllPhaseKinds()) {
+ phaseKindTimes[i] = SumPhase(i, selfTimes);
+ }
+
+ return FindLongestPhaseKind(phaseKindTimes);
+}
+
+void Statistics::printStats() {
+ if (aborted) {
+ fprintf(gcTimerFile,
+ "OOM during GC statistics collection. The report is unavailable "
+ "for this GC.\n");
+ } else {
+ UniqueChars msg = formatDetailedMessage();
+ if (msg) {
+ double secSinceStart =
+ (slices_[0].start - TimeStamp::ProcessCreation()).ToSeconds();
+ fprintf(gcTimerFile, "GC(T+%.3fs) %s\n", secSinceStart, msg.get());
+ }
+ }
+ fflush(gcTimerFile);
+}
+
+void Statistics::beginGC(JS::GCOptions options, const TimeStamp& currentTime) {
+ slices_.clearAndFree();
+ sccTimes.clearAndFree();
+ gcOptions = options;
+ nonincrementalReason_ = GCAbortReason::None;
+
+ preTotalHeapBytes = gc->heapSize.bytes();
+
+ preCollectedHeapBytes = 0;
+
+ startingMajorGCNumber = gc->majorGCCount();
+ startingSliceNumber = gc->gcNumber();
+
+ if (gc->lastGCEndTime()) {
+ timeSinceLastGC = currentTime - gc->lastGCEndTime();
+ }
+
+ totalGCTime_ = TimeDuration();
+}
+
+void Statistics::measureInitialHeapSize() {
+ MOZ_ASSERT(preCollectedHeapBytes == 0);
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ preCollectedHeapBytes += zone->gcHeapSize.bytes();
+ }
+}
+
+void Statistics::endGC() {
+ postTotalHeapBytes = gc->heapSize.bytes();
+
+ sendGCTelemetry();
+}
+
+TimeDuration Statistics::sumTotalParallelTime(PhaseKind phaseKind) const {
+ TimeDuration total;
+ for (const SliceData& slice : slices_) {
+ total += slice.totalParallelTimes[phaseKind];
+ }
+ return total;
+}
+
+void Statistics::sendGCTelemetry() {
+ JSRuntime* runtime = gc->rt;
+ // NOTE: "Compartmental" is term that was deprecated with the
+ // introduction of zone-based GC, but the old telemetry probe
+ // continues to be used.
+ runtime->metrics().GC_IS_COMPARTMENTAL(!gc->fullGCRequested);
+ runtime->metrics().GC_ZONE_COUNT(zoneStats.zoneCount);
+ runtime->metrics().GC_ZONES_COLLECTED(zoneStats.collectedZoneCount);
+
+ TimeDuration prepareTotal = phaseTimes[Phase::PREPARE];
+ TimeDuration markTotal = SumPhase(PhaseKind::MARK, phaseTimes);
+ TimeDuration markRootsTotal = SumPhase(PhaseKind::MARK_ROOTS, phaseTimes);
+
+ // Gray and weak marking time is counted under MARK_WEAK and not MARK_GRAY.
+ TimeDuration markWeakTotal = SumPhase(PhaseKind::MARK_WEAK, phaseTimes);
+ TimeDuration markGrayNotWeak =
+ SumPhase(PhaseKind::MARK_GRAY, phaseTimes) +
+ SumPhase(PhaseKind::MARK_INCOMING_GRAY, phaseTimes);
+ TimeDuration markGrayWeak = SumPhase(PhaseKind::MARK_GRAY_WEAK, phaseTimes);
+ TimeDuration markGrayTotal = markGrayNotWeak + markGrayWeak;
+ TimeDuration markNotGrayOrWeak = markTotal - markGrayNotWeak - markWeakTotal;
+ if (markNotGrayOrWeak < TimeDuration::FromMilliseconds(0)) {
+ markNotGrayOrWeak = TimeDuration();
+ }
+
+ size_t markCount = getCount(COUNT_CELLS_MARKED);
+
+ runtime->metrics().GC_PREPARE_MS(prepareTotal);
+ runtime->metrics().GC_MARK_MS(markNotGrayOrWeak);
+ if (markTotal >= TimeDuration::FromMicroseconds(1)) {
+ double markRate = double(markCount) / t(markTotal);
+ runtime->metrics().GC_MARK_RATE_2(uint32_t(markRate));
+ }
+ runtime->metrics().GC_SWEEP_MS(phaseTimes[Phase::SWEEP]);
+ if (gc->didCompactZones()) {
+ runtime->metrics().GC_COMPACT_MS(phaseTimes[Phase::COMPACT]);
+ }
+ runtime->metrics().GC_MARK_ROOTS_US(markRootsTotal);
+ runtime->metrics().GC_MARK_GRAY_MS_2(markGrayTotal);
+ runtime->metrics().GC_MARK_WEAK_MS(markWeakTotal);
+ runtime->metrics().GC_NON_INCREMENTAL(nonincremental());
+ if (nonincremental()) {
+ runtime->metrics().GC_NON_INCREMENTAL_REASON(
+ uint32_t(nonincrementalReason_));
+ }
+
+#ifdef DEBUG
+ // Reset happens non-incrementally, so only the last slice can be reset.
+ for (size_t i = 0; i < slices_.length() - 1; i++) {
+ MOZ_ASSERT(!slices_[i].wasReset());
+ }
+#endif
+ const auto& lastSlice = slices_.back();
+ runtime->metrics().GC_RESET(lastSlice.wasReset());
+ if (lastSlice.wasReset()) {
+ runtime->metrics().GC_RESET_REASON(uint32_t(lastSlice.resetReason));
+ }
+
+ TimeDuration total, longest;
+ gcDuration(&total, &longest);
+
+ runtime->metrics().GC_MS(total);
+ runtime->metrics().GC_MAX_PAUSE_MS_2(longest);
+
+ const double mmu50 = computeMMU(TimeDuration::FromMilliseconds(50));
+ runtime->metrics().GC_MMU_50(mmu50 * 100);
+
+ // Record scheduling telemetry for the main runtime but not for workers, which
+ // are scheduled differently.
+ if (!runtime->parentRuntime && timeSinceLastGC) {
+ runtime->metrics().GC_TIME_BETWEEN_S(timeSinceLastGC);
+ if (!nonincremental()) {
+ runtime->metrics().GC_SLICE_COUNT(slices_.length());
+ }
+ }
+
+ if (!lastSlice.wasReset() && preCollectedHeapBytes != 0) {
+ size_t bytesSurvived = 0;
+ for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
+ if (zone->wasCollected()) {
+ bytesSurvived += zone->gcHeapSize.retainedBytes();
+ }
+ }
+
+ MOZ_ASSERT(preCollectedHeapBytes >= bytesSurvived);
+ double survivalRate =
+ 100.0 * double(bytesSurvived) / double(preCollectedHeapBytes);
+ runtime->metrics().GC_TENURED_SURVIVAL_RATE(uint32_t(survivalRate));
+
+ // Calculate 'effectiveness' in MB / second, on main thread only for now.
+ if (!runtime->parentRuntime) {
+ size_t bytesFreed = preCollectedHeapBytes - bytesSurvived;
+ TimeDuration clampedTotal =
+ TimeDuration::Max(total, TimeDuration::FromMilliseconds(1));
+ double effectiveness =
+ (double(bytesFreed) / BYTES_PER_MB) / clampedTotal.ToSeconds();
+ runtime->metrics().GC_EFFECTIVENESS(uint32_t(effectiveness));
+ }
+ }
+
+ // Parallel marking stats.
+ if (gc->isParallelMarkingEnabled()) {
+ TimeDuration wallTime = SumPhase(PhaseKind::PARALLEL_MARK, phaseTimes);
+ TimeDuration parallelRunTime =
+ sumTotalParallelTime(PhaseKind::PARALLEL_MARK) -
+ sumTotalParallelTime(PhaseKind::PARALLEL_MARK_WAIT);
+ TimeDuration parallelMarkTime =
+ sumTotalParallelTime(PhaseKind::PARALLEL_MARK_MARK);
+ if (wallTime && parallelMarkTime) {
+ uint32_t threadCount = gc->markers.length();
+ double speedup = parallelMarkTime / wallTime;
+ double utilization = parallelRunTime / (wallTime * threadCount);
+ runtime->metrics().GC_PARALLEL_MARK_SPEEDUP(uint32_t(speedup * 100.0));
+ runtime->metrics().GC_PARALLEL_MARK_UTILIZATION(
+ std::clamp<uint32_t>(utilization * 100.0, 0, 100));
+ runtime->metrics().GC_PARALLEL_MARK_INTERRUPTIONS(
+ getCount(COUNT_PARALLEL_MARK_INTERRUPTIONS));
+ }
+ }
+}
+
+void Statistics::beginNurseryCollection(JS::GCReason reason) {
+ count(COUNT_MINOR_GC);
+ startingMinorGCNumber = gc->minorGCCount();
+ if (nurseryCollectionCallback) {
+ (*nurseryCollectionCallback)(
+ context(), JS::GCNurseryProgress::GC_NURSERY_COLLECTION_START, reason);
+ }
+}
+
+void Statistics::endNurseryCollection(JS::GCReason reason) {
+ if (nurseryCollectionCallback) {
+ (*nurseryCollectionCallback)(
+ context(), JS::GCNurseryProgress::GC_NURSERY_COLLECTION_END, reason);
+ }
+
+ tenuredAllocsSinceMinorGC = 0;
+}
+
+Statistics::SliceData::SliceData(const SliceBudget& budget,
+ Maybe<Trigger> trigger, JS::GCReason reason,
+ TimeStamp start, size_t startFaults,
+ gc::State initialState)
+ : budget(budget),
+ reason(reason),
+ trigger(trigger),
+ initialState(initialState),
+ start(start),
+ startFaults(startFaults) {}
+
+void Statistics::beginSlice(const ZoneGCStats& zoneStats, JS::GCOptions options,
+ const SliceBudget& budget, JS::GCReason reason,
+ bool budgetWasIncreased) {
+ MOZ_ASSERT(phaseStack.empty() ||
+ (phaseStack.length() == 1 && phaseStack[0] == Phase::MUTATOR));
+
+ this->zoneStats = zoneStats;
+
+ TimeStamp currentTime = TimeStamp::Now();
+
+ bool first = !gc->isIncrementalGCInProgress();
+ if (first) {
+ beginGC(options, currentTime);
+ }
+
+ JSRuntime* runtime = gc->rt;
+ if (!runtime->parentRuntime && !slices_.empty()) {
+ TimeDuration timeSinceLastSlice = currentTime - slices_.back().end;
+ runtime->metrics().GC_TIME_BETWEEN_SLICES_MS(timeSinceLastSlice);
+ }
+
+ Maybe<Trigger> trigger = recordedTrigger;
+ recordedTrigger.reset();
+
+ if (!slices_.emplaceBack(budget, trigger, reason, currentTime,
+ GetPageFaultCount(), gc->state())) {
+ // If we are OOM, set a flag to indicate we have missing slice data.
+ aborted = true;
+ return;
+ }
+
+ runtime->metrics().GC_REASON_2(uint32_t(reason));
+ runtime->metrics().GC_BUDGET_WAS_INCREASED(budgetWasIncreased);
+
+ // Slice callbacks should only fire for the outermost level.
+ if (sliceCallback) {
+ JSContext* cx = context();
+ JS::GCDescription desc(!gc->fullGCRequested, false, options, reason);
+ if (first) {
+ (*sliceCallback)(cx, JS::GC_CYCLE_BEGIN, desc);
+ }
+ (*sliceCallback)(cx, JS::GC_SLICE_BEGIN, desc);
+ }
+
+ log("begin slice");
+}
+
+void Statistics::endSlice() {
+ MOZ_ASSERT(phaseStack.empty() ||
+ (phaseStack.length() == 1 && phaseStack[0] == Phase::MUTATOR));
+
+ if (!aborted) {
+ auto& slice = slices_.back();
+ slice.end = TimeStamp::Now();
+ slice.endFaults = GetPageFaultCount();
+ slice.finalState = gc->state();
+
+ log("end slice");
+
+ sendSliceTelemetry(slice);
+
+ sliceCount_++;
+
+ totalGCTime_ += slice.end - slice.start;
+ }
+
+ bool last = !gc->isIncrementalGCInProgress();
+ if (last) {
+ if (gcTimerFile) {
+ printStats();
+ }
+
+ if (!aborted) {
+ endGC();
+ }
+ }
+
+ if (!aborted &&
+ ShouldPrintProfile(gc->rt, enableProfiling_, profileWorkers_,
+ profileThreshold_, slices_.back().duration())) {
+ printSliceProfile();
+ }
+
+ // Slice callbacks should only fire for the outermost level.
+ if (!aborted) {
+ if (sliceCallback) {
+ JSContext* cx = context();
+ JS::GCDescription desc(!gc->fullGCRequested, last, gcOptions,
+ slices_.back().reason);
+ (*sliceCallback)(cx, JS::GC_SLICE_END, desc);
+ if (last) {
+ (*sliceCallback)(cx, JS::GC_CYCLE_END, desc);
+ }
+ }
+ }
+
+ // Do this after the slice callback since it uses these values.
+ if (last) {
+ for (auto& count : counts) {
+ count = 0;
+ }
+
+ // Clear the timers at the end of a GC, preserving the data for
+ // PhaseKind::MUTATOR.
+ auto mutatorStartTime = phaseStartTimes[Phase::MUTATOR];
+ auto mutatorTime = phaseTimes[Phase::MUTATOR];
+
+ phaseStartTimes = PhaseTimeStamps();
+#ifdef DEBUG
+ phaseEndTimes = PhaseTimeStamps();
+#endif
+ phaseTimes = PhaseTimes();
+
+ phaseStartTimes[Phase::MUTATOR] = mutatorStartTime;
+ phaseTimes[Phase::MUTATOR] = mutatorTime;
+ }
+
+ aborted = false;
+}
+
+void Statistics::sendSliceTelemetry(const SliceData& slice) {
+ JSRuntime* runtime = gc->rt;
+ TimeDuration sliceTime = slice.end - slice.start;
+ runtime->metrics().GC_SLICE_MS(sliceTime);
+
+ if (slice.budget.isTimeBudget()) {
+ TimeDuration budgetDuration = slice.budget.timeBudgetDuration();
+ runtime->metrics().GC_BUDGET_MS_2(budgetDuration);
+
+ if (IsCurrentlyAnimating(runtime->lastAnimationTime, slice.end)) {
+ runtime->metrics().GC_ANIMATION_MS(sliceTime);
+ }
+
+ bool wasLongSlice = false;
+ if (sliceTime > budgetDuration) {
+ // Record how long we went over budget.
+ TimeDuration overrun = sliceTime - budgetDuration;
+ runtime->metrics().GC_BUDGET_OVERRUN(overrun);
+
+ // Long GC slices are those that go 50% or 5ms over their budget.
+ wasLongSlice = (overrun > TimeDuration::FromMilliseconds(5)) ||
+ (overrun > (budgetDuration / int64_t(2)));
+
+ // Record the longest phase in any long slice.
+ if (wasLongSlice) {
+ PhaseKind longest = LongestPhaseSelfTimeInMajorGC(slice.phaseTimes);
+ reportLongestPhaseInMajorGC(longest, [runtime](auto sample) {
+ runtime->metrics().GC_SLOW_PHASE(sample);
+ });
+
+ // If the longest phase was waiting for parallel tasks then record the
+ // longest task.
+ if (longest == PhaseKind::JOIN_PARALLEL_TASKS) {
+ PhaseKind longestParallel =
+ FindLongestPhaseKind(slice.maxParallelTimes);
+ reportLongestPhaseInMajorGC(longestParallel, [runtime](auto sample) {
+ runtime->metrics().GC_SLOW_TASK(sample);
+ });
+ }
+ }
+ }
+
+ // Record `wasLongSlice` for all TimeBudget slices.
+ runtime->metrics().GC_SLICE_WAS_LONG(wasLongSlice);
+ }
+}
+
+template <typename Fn>
+void Statistics::reportLongestPhaseInMajorGC(PhaseKind longest, Fn reportFn) {
+ if (longest != PhaseKind::NONE) {
+ uint8_t bucket = phaseKinds[longest].telemetryBucket;
+ reportFn(bucket);
+ }
+}
+
+bool Statistics::startTimingMutator() {
+ if (phaseStack.length() != 0) {
+ // Should only be called from outside of GC.
+ MOZ_ASSERT(phaseStack.length() == 1);
+ MOZ_ASSERT(phaseStack[0] == Phase::MUTATOR);
+ return false;
+ }
+
+ MOZ_ASSERT(suspendedPhases.empty());
+
+ timedGCTime = 0;
+ phaseStartTimes[Phase::MUTATOR] = TimeStamp();
+ phaseTimes[Phase::MUTATOR] = 0;
+ timedGCStart = TimeStamp();
+
+ beginPhase(PhaseKind::MUTATOR);
+ return true;
+}
+
+bool Statistics::stopTimingMutator(double& mutator_ms, double& gc_ms) {
+ // This should only be called from outside of GC, while timing the mutator.
+ if (phaseStack.length() != 1 || phaseStack[0] != Phase::MUTATOR) {
+ return false;
+ }
+
+ endPhase(PhaseKind::MUTATOR);
+ mutator_ms = t(phaseTimes[Phase::MUTATOR]);
+ gc_ms = t(timedGCTime);
+
+ return true;
+}
+
+void Statistics::suspendPhases(PhaseKind suspension) {
+ MOZ_ASSERT(suspension == PhaseKind::EXPLICIT_SUSPENSION ||
+ suspension == PhaseKind::IMPLICIT_SUSPENSION);
+ while (!phaseStack.empty()) {
+ MOZ_ASSERT(suspendedPhases.length() < MAX_SUSPENDED_PHASES);
+ Phase parent = phaseStack.back();
+ suspendedPhases.infallibleAppend(parent);
+ recordPhaseEnd(parent);
+ }
+ suspendedPhases.infallibleAppend(lookupChildPhase(suspension));
+}
+
+void Statistics::resumePhases() {
+ MOZ_ASSERT(suspendedPhases.back() == Phase::EXPLICIT_SUSPENSION ||
+ suspendedPhases.back() == Phase::IMPLICIT_SUSPENSION);
+ suspendedPhases.popBack();
+
+ while (!suspendedPhases.empty() &&
+ suspendedPhases.back() != Phase::EXPLICIT_SUSPENSION &&
+ suspendedPhases.back() != Phase::IMPLICIT_SUSPENSION) {
+ Phase resumePhase = suspendedPhases.popCopy();
+ if (resumePhase == Phase::MUTATOR) {
+ timedGCTime += TimeStamp::Now() - timedGCStart;
+ }
+ recordPhaseBegin(resumePhase);
+ }
+}
+
+void Statistics::beginPhase(PhaseKind phaseKind) {
+ // No longer timing these phases. We should never see these.
+ MOZ_ASSERT(phaseKind != PhaseKind::GC_BEGIN &&
+ phaseKind != PhaseKind::GC_END);
+
+ // PhaseKind::MUTATOR is suspended while performing GC.
+ if (currentPhase() == Phase::MUTATOR) {
+ suspendPhases(PhaseKind::IMPLICIT_SUSPENSION);
+ }
+
+ recordPhaseBegin(lookupChildPhase(phaseKind));
+}
+
+void Statistics::recordPhaseBegin(Phase phase) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(gc->rt));
+
+ // Guard against any other re-entry.
+ MOZ_ASSERT(!phaseStartTimes[phase]);
+
+ MOZ_ASSERT(phaseStack.length() < MAX_PHASE_NESTING);
+
+ Phase current = currentPhase();
+ MOZ_ASSERT(phases[phase].parent == current);
+
+ TimeStamp now = TimeStamp::Now();
+
+ if (current != Phase::NONE) {
+ MOZ_ASSERT(now >= phaseStartTimes[currentPhase()],
+ "Inconsistent time data; see bug 1400153");
+ if (now < phaseStartTimes[currentPhase()]) {
+ now = phaseStartTimes[currentPhase()];
+ aborted = true;
+ }
+ }
+
+ phaseStack.infallibleAppend(phase);
+ phaseStartTimes[phase] = now;
+ log("begin: %s", phases[phase].path);
+}
+
+void Statistics::recordPhaseEnd(Phase phase) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(gc->rt));
+
+ MOZ_ASSERT(phaseStartTimes[phase]);
+
+ TimeStamp now = TimeStamp::Now();
+
+ // Make sure this phase ends after it starts.
+ MOZ_ASSERT(now >= phaseStartTimes[phase],
+ "Inconsistent time data; see bug 1400153");
+
+#ifdef DEBUG
+ // Make sure this phase ends after all of its children. Note that some
+ // children might not have run in this instance, in which case they will
+ // have run in a previous instance of this parent or not at all.
+ for (Phase kid = phases[phase].firstChild; kid != Phase::NONE;
+ kid = phases[kid].nextSibling) {
+ if (phaseEndTimes[kid].IsNull()) {
+ continue;
+ }
+ if (phaseEndTimes[kid] > now) {
+ fprintf(stderr,
+ "Parent %s ended at %.3fms, before child %s ended at %.3fms?\n",
+ phases[phase].name, t(now - TimeStamp::FirstTimeStamp()),
+ phases[kid].name,
+ t(phaseEndTimes[kid] - TimeStamp::FirstTimeStamp()));
+ }
+ MOZ_ASSERT(phaseEndTimes[kid] <= now,
+ "Inconsistent time data; see bug 1400153");
+ }
+#endif
+
+ if (now < phaseStartTimes[phase]) {
+ now = phaseStartTimes[phase];
+ aborted = true;
+ }
+
+ if (phase == Phase::MUTATOR) {
+ timedGCStart = now;
+ }
+
+ phaseStack.popBack();
+
+ TimeDuration t = now - phaseStartTimes[phase];
+ if (!slices_.empty()) {
+ slices_.back().phaseTimes[phase] += t;
+ }
+ phaseTimes[phase] += t;
+ phaseStartTimes[phase] = TimeStamp();
+
+#ifdef DEBUG
+ phaseEndTimes[phase] = now;
+ log("end: %s", phases[phase].path);
+#endif
+}
+
+void Statistics::endPhase(PhaseKind phaseKind) {
+ Phase phase = currentPhase();
+ MOZ_ASSERT(phase != Phase::NONE);
+ MOZ_ASSERT(phases[phase].phaseKind == phaseKind);
+
+ recordPhaseEnd(phase);
+
+ // When emptying the stack, we may need to return to timing the mutator
+ // (PhaseKind::MUTATOR).
+ if (phaseStack.empty() && !suspendedPhases.empty() &&
+ suspendedPhases.back() == Phase::IMPLICIT_SUSPENSION) {
+ resumePhases();
+ }
+}
+
+void Statistics::recordParallelPhase(PhaseKind phaseKind,
+ TimeDuration duration) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(gc->rt));
+
+ if (aborted) {
+ return;
+ }
+
+ slices_.back().totalParallelTimes[phaseKind] += duration;
+
+ // Also record the maximum task time for each phase. Don't record times for
+ // parent phases.
+ TimeDuration& maxTime = slices_.back().maxParallelTimes[phaseKind];
+ maxTime = std::max(maxTime, duration);
+}
+
+TimeStamp Statistics::beginSCC() { return TimeStamp::Now(); }
+
+void Statistics::endSCC(unsigned scc, TimeStamp start) {
+ if (scc >= sccTimes.length() && !sccTimes.resize(scc + 1)) {
+ return;
+ }
+
+ sccTimes[scc] += TimeStamp::Now() - start;
+}
+
+/*
+ * MMU (minimum mutator utilization) is a measure of how much garbage collection
+ * is affecting the responsiveness of the system. MMU measurements are given
+ * with respect to a certain window size. If we report MMU(50ms) = 80%, then
+ * that means that, for any 50ms window of time, at least 80% of the window is
+ * devoted to the mutator. In other words, the GC is running for at most 20% of
+ * the window, or 10ms. The GC can run multiple slices during the 50ms window
+ * as long as the total time it spends is at most 10ms.
+ */
+double Statistics::computeMMU(TimeDuration window) const {
+ MOZ_ASSERT(!slices_.empty());
+
+ TimeDuration gc = slices_[0].end - slices_[0].start;
+ TimeDuration gcMax = gc;
+
+ if (gc >= window) {
+ return 0.0;
+ }
+
+ int startIndex = 0;
+ for (size_t endIndex = 1; endIndex < slices_.length(); endIndex++) {
+ auto* startSlice = &slices_[startIndex];
+ auto& endSlice = slices_[endIndex];
+ gc += endSlice.end - endSlice.start;
+
+ while (endSlice.end - startSlice->end >= window) {
+ gc -= startSlice->end - startSlice->start;
+ startSlice = &slices_[++startIndex];
+ }
+
+ TimeDuration cur = gc;
+ if (endSlice.end - startSlice->start > window) {
+ cur -= (endSlice.end - startSlice->start - window);
+ }
+ if (cur > gcMax) {
+ gcMax = cur;
+ }
+ }
+
+ return double((window - gcMax) / window);
+}
+
+void Statistics::maybePrintProfileHeaders() {
+ static int printedHeader = 0;
+ if ((printedHeader++ % 200) == 0) {
+ printProfileHeader();
+ if (gc->nursery().enableProfiling()) {
+ gc->nursery().printProfileHeader();
+ }
+ }
+}
+
+// The following macros define GC profile metadata fields that are printed
+// before the timing information defined by FOR_EACH_GC_PROFILE_TIME.
+
+#define FOR_EACH_GC_PROFILE_COMMON_METADATA(_) \
+ _("PID", 7, "%7zu", pid) \
+ _("Runtime", 14, "0x%12p", runtime)
+
+#define FOR_EACH_GC_PROFILE_SLICE_METADATA(_) \
+ _("Timestamp", 10, "%10.6f", timestamp.ToSeconds()) \
+ _("Reason", 20, "%-20.20s", reason) \
+ _("States", 6, "%6s", formatGCStates(slice)) \
+ _("FSNR", 4, "%4s", formatGCFlags(slice)) \
+ _("SizeKB", 8, "%8zu", sizeKB) \
+ _("Budget", 6, "%6s", formatBudget(slice))
+
+#define FOR_EACH_GC_PROFILE_METADATA(_) \
+ FOR_EACH_GC_PROFILE_COMMON_METADATA(_) \
+ FOR_EACH_GC_PROFILE_SLICE_METADATA(_)
+
+void Statistics::printProfileHeader() {
+ if (!enableProfiling_) {
+ return;
+ }
+
+ Sprinter sprinter;
+ if (!sprinter.init() || !sprinter.put(MajorGCProfilePrefix)) {
+ return;
+ }
+
+#define PRINT_METADATA_NAME(name, width, _1, _2) \
+ if (!sprinter.jsprintf(" %-*s", width, name)) { \
+ return; \
+ }
+ FOR_EACH_GC_PROFILE_METADATA(PRINT_METADATA_NAME)
+#undef PRINT_METADATA_NAME
+
+#define PRINT_PROFILE_NAME(_1, text, _2) \
+ if (!sprinter.jsprintf(" %-6.6s", text)) { \
+ return; \
+ }
+ FOR_EACH_GC_PROFILE_TIME(PRINT_PROFILE_NAME)
+#undef PRINT_PROFILE_NAME
+
+ if (!sprinter.put("\n")) {
+ return;
+ }
+
+ fputs(sprinter.string(), profileFile());
+}
+
+static TimeDuration SumAllPhaseKinds(const Statistics::PhaseKindTimes& times) {
+ TimeDuration sum;
+ for (PhaseKind kind : AllPhaseKinds()) {
+ sum += times[kind];
+ }
+ return sum;
+}
+
+void Statistics::printSliceProfile() {
+ maybePrintProfileHeaders();
+
+ const SliceData& slice = slices_.back();
+ ProfileDurations times = getProfileTimes(slice);
+ updateTotalProfileTimes(times);
+
+ Sprinter sprinter;
+ if (!sprinter.init() || !sprinter.put(MajorGCProfilePrefix)) {
+ return;
+ }
+
+ size_t pid = getpid();
+ JSRuntime* runtime = gc->rt;
+ TimeDuration timestamp = slice.end - creationTime();
+ const char* reason = ExplainGCReason(slice.reason);
+ size_t sizeKB = gc->heapSize.bytes() / 1024;
+
+#define PRINT_FIELD_VALUE(_1, _2, format, value) \
+ if (!sprinter.jsprintf(" " format, value)) { \
+ return; \
+ }
+ FOR_EACH_GC_PROFILE_METADATA(PRINT_FIELD_VALUE)
+#undef PRINT_FIELD_VALUE
+
+ if (!printProfileTimes(times, sprinter)) {
+ return;
+ }
+
+ fputs(sprinter.string(), profileFile());
+}
+
+Statistics::ProfileDurations Statistics::getProfileTimes(
+ const SliceData& slice) const {
+ ProfileDurations times;
+
+ times[ProfileKey::Total] = slice.duration();
+ times[ProfileKey::Background] = SumAllPhaseKinds(slice.totalParallelTimes);
+
+#define GET_PROFILE_TIME(name, text, phase) \
+ if (phase != PhaseKind::NONE) { \
+ times[ProfileKey::name] = SumPhase(phase, slice.phaseTimes); \
+ }
+ FOR_EACH_GC_PROFILE_TIME(GET_PROFILE_TIME)
+#undef GET_PROFILE_TIME
+
+ return times;
+}
+
+void Statistics::updateTotalProfileTimes(const ProfileDurations& times) {
+#define UPDATE_PROFILE_TIME(name, _, phase) \
+ totalTimes_[ProfileKey::name] += times[ProfileKey::name];
+ FOR_EACH_GC_PROFILE_TIME(UPDATE_PROFILE_TIME)
+#undef UPDATE_PROFILE_TIME
+}
+
+const char* Statistics::formatGCStates(const SliceData& slice) {
+ DebugOnly<int> r =
+ SprintfLiteral(formatBuffer_, "%1d -> %1d", int(slice.initialState),
+ int(slice.finalState));
+ MOZ_ASSERT(r > 0 && r < FormatBufferLength);
+ return formatBuffer_;
+}
+
+const char* Statistics::formatGCFlags(const SliceData& slice) {
+ bool fullGC = gc->fullGCRequested;
+ bool shrinkingGC = gcOptions == JS::GCOptions::Shrink;
+ bool nonIncrementalGC = nonincrementalReason_ != GCAbortReason::None;
+ bool wasReset = slice.resetReason != GCAbortReason::None;
+
+ MOZ_ASSERT(FormatBufferLength >= 5);
+ formatBuffer_[0] = fullGC ? 'F' : ' ';
+ formatBuffer_[1] = shrinkingGC ? 'S' : ' ';
+ formatBuffer_[2] = nonIncrementalGC ? 'N' : ' ';
+ formatBuffer_[3] = wasReset ? 'R' : ' ';
+ formatBuffer_[4] = '\0';
+
+ return formatBuffer_;
+}
+
+const char* Statistics::formatBudget(const SliceData& slice) {
+ if (nonincrementalReason_ != GCAbortReason::None ||
+ !slice.budget.isTimeBudget()) {
+ formatBuffer_[0] = '\0';
+ return formatBuffer_;
+ }
+
+ DebugOnly<int> r =
+ SprintfLiteral(formatBuffer_, " %6" PRIi64, slice.budget.timeBudget());
+ MOZ_ASSERT(r > 0 && r < FormatBufferLength);
+ return formatBuffer_;
+}
+
+/* static */
+bool Statistics::printProfileTimes(const ProfileDurations& times,
+ Sprinter& sprinter) {
+ for (auto time : times) {
+ int64_t millis = int64_t(time.ToMilliseconds());
+ if (!sprinter.jsprintf(" %6" PRIi64, millis)) {
+ return false;
+ }
+ }
+
+ return sprinter.put("\n");
+}
+
+constexpr size_t SliceMetadataFormatWidth() {
+ size_t fieldCount = 0;
+ size_t totalWidth = 0;
+
+#define UPDATE_COUNT_AND_WIDTH(_1, width, _2, _3) \
+ fieldCount++; \
+ totalWidth += width;
+ FOR_EACH_GC_PROFILE_SLICE_METADATA(UPDATE_COUNT_AND_WIDTH)
+#undef UPDATE_COUNT_AND_WIDTH
+
+ // Add padding between fields.
+ totalWidth += fieldCount - 1;
+
+ return totalWidth;
+}
+
+void Statistics::printTotalProfileTimes() {
+ if (!enableProfiling_) {
+ return;
+ }
+
+ Sprinter sprinter;
+ if (!sprinter.init() || !sprinter.put(MajorGCProfilePrefix)) {
+ return;
+ }
+
+ size_t pid = getpid();
+ JSRuntime* runtime = gc->rt;
+
+#define PRINT_FIELD_VALUE(_1, _2, format, value) \
+ if (!sprinter.jsprintf(" " format, value)) { \
+ return; \
+ }
+ FOR_EACH_GC_PROFILE_COMMON_METADATA(PRINT_FIELD_VALUE)
+#undef PRINT_FIELD_VALUE
+
+ // Use whole width of per-slice metadata to print total slices so the profile
+ // totals that follow line up.
+ size_t width = SliceMetadataFormatWidth();
+ if (!sprinter.jsprintf(" %-*s", int(width), formatTotalSlices())) {
+ return;
+ }
+
+ if (!printProfileTimes(totalTimes_, sprinter)) {
+ return;
+ }
+
+ fputs(sprinter.string(), profileFile());
+}
+
+const char* Statistics::formatTotalSlices() {
+ DebugOnly<int> r = SprintfLiteral(
+ formatBuffer_, "TOTALS: %7" PRIu64 " slices:", sliceCount_);
+ MOZ_ASSERT(r > 0 && r < FormatBufferLength);
+ return formatBuffer_;
+}
diff --git a/js/src/gc/Statistics.h b/js/src/gc/Statistics.h
new file mode 100644
index 0000000000..0e45cd651a
--- /dev/null
+++ b/js/src/gc/Statistics.h
@@ -0,0 +1,606 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Statistics_h
+#define gc_Statistics_h
+
+#include "mozilla/Array.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/TimeStamp.h"
+
+#include "jspubtd.h"
+#include "NamespaceImports.h"
+
+#include "gc/GCEnum.h"
+#include "js/AllocPolicy.h"
+#include "js/GCAPI.h"
+#include "js/SliceBudget.h"
+#include "js/Vector.h"
+
+namespace js {
+
+class JS_PUBLIC_API Sprinter;
+class JSONPrinter;
+
+namespace gcstats {
+
+// Phase data is generated by a script. If you need to add phases, edit
+// js/src/gc/GenerateStatsPhases.py
+
+#include "gc/StatsPhasesGenerated.h"
+
+// Counts can be incremented with Statistics::count(). They're reset at the end
+// of a Major GC.
+enum Count {
+ COUNT_NEW_CHUNK,
+ COUNT_DESTROY_CHUNK,
+ COUNT_MINOR_GC,
+
+ // Number of times a 'put' into a storebuffer overflowed, triggering a
+ // compaction
+ COUNT_STOREBUFFER_OVERFLOW,
+
+ // Number of arenas relocated by compacting GC.
+ COUNT_ARENA_RELOCATED,
+
+ // Number of cells marked during the marking phase. Excludes atoms marked when
+ // not collecting the atoms zone.
+ COUNT_CELLS_MARKED,
+
+ // Number of times work was donated to a requesting thread during parallel
+ // marking.
+ COUNT_PARALLEL_MARK_INTERRUPTIONS,
+
+ COUNT_LIMIT
+};
+
+// Stats can be set with Statistics::setStat(). They're not reset automatically.
+enum Stat {
+ // Number of strings tenured.
+ STAT_STRINGS_TENURED,
+
+ // Number of strings deduplicated.
+ STAT_STRINGS_DEDUPLICATED,
+
+ // Number of BigInts tenured.
+ STAT_BIGINTS_TENURED,
+
+ STAT_LIMIT
+};
+
+struct ZoneGCStats {
+ /* Number of zones collected in this GC. */
+ int collectedZoneCount = 0;
+
+ /* Total number of zones in the Runtime at the start of this GC. */
+ int zoneCount = 0;
+
+ /* Number of zones swept in this GC. */
+ int sweptZoneCount = 0;
+
+ /* Total number of compartments in all zones collected. */
+ int collectedCompartmentCount = 0;
+
+ /* Total number of compartments in the Runtime at the start of this GC. */
+ int compartmentCount = 0;
+
+ /* Total number of compartments swept by this GC. */
+ int sweptCompartmentCount = 0;
+
+ ZoneGCStats() = default;
+};
+
+struct Trigger {
+ size_t amount = 0;
+ size_t threshold = 0;
+};
+
+#define FOR_EACH_GC_PROFILE_TIME(_) \
+ _(Total, "total", PhaseKind::NONE) \
+ _(Background, "bgwrk", PhaseKind::NONE) \
+ _(BeginCallback, "bgnCB", PhaseKind::GC_BEGIN) \
+ _(MinorForMajor, "evct4m", PhaseKind::EVICT_NURSERY_FOR_MAJOR_GC) \
+ _(WaitBgThread, "waitBG", PhaseKind::WAIT_BACKGROUND_THREAD) \
+ _(Prepare, "prep", PhaseKind::PREPARE) \
+ _(Mark, "mark", PhaseKind::MARK) \
+ _(Sweep, "sweep", PhaseKind::SWEEP) \
+ _(Compact, "cmpct", PhaseKind::COMPACT) \
+ _(EndCallback, "endCB", PhaseKind::GC_END) \
+ _(MinorGC, "minor", PhaseKind::MINOR_GC) \
+ _(EvictNursery, "evict", PhaseKind::EVICT_NURSERY)
+
+static const char* const MajorGCProfilePrefix = "MajorGC:";
+static const char* const MinorGCProfilePrefix = "MinorGC:";
+
+const char* ExplainAbortReason(GCAbortReason reason);
+
+/*
+ * Struct for collecting timing statistics on a "phase tree". The tree is
+ * specified as a limited DAG, but the timings are collected for the whole tree
+ * that you would get by expanding out the DAG by duplicating subtrees rooted
+ * at nodes with multiple parents.
+ *
+ * During execution, a child phase can be activated multiple times, and the
+ * total time will be accumulated. (So for example, you can start and end
+ * PhaseKind::MARK_ROOTS multiple times before completing the parent phase.)
+ *
+ * Incremental GC is represented by recording separate timing results for each
+ * slice within the overall GC.
+ */
+struct Statistics {
+ template <typename T, size_t Length>
+ using Array = mozilla::Array<T, Length>;
+
+ template <typename IndexType, IndexType SizeAsEnumValue, typename ValueType>
+ using EnumeratedArray =
+ mozilla::EnumeratedArray<IndexType, SizeAsEnumValue, ValueType>;
+
+ using TimeDuration = mozilla::TimeDuration;
+ using TimeStamp = mozilla::TimeStamp;
+
+ // Create types for tables of times, by phase and phase kind.
+ using PhaseTimes = EnumeratedArray<Phase, Phase::LIMIT, TimeDuration>;
+ using PhaseKindTimes =
+ EnumeratedArray<PhaseKind, PhaseKind::LIMIT, TimeDuration>;
+
+ using PhaseTimeStamps = EnumeratedArray<Phase, Phase::LIMIT, TimeStamp>;
+
+ [[nodiscard]] static bool initialize();
+
+ explicit Statistics(gc::GCRuntime* gc);
+ ~Statistics();
+
+ Statistics(const Statistics&) = delete;
+ Statistics& operator=(const Statistics&) = delete;
+
+ void beginPhase(PhaseKind phaseKind);
+ void endPhase(PhaseKind phaseKind);
+ void recordParallelPhase(PhaseKind phaseKind, TimeDuration duration);
+
+ // Occasionally, we may be in the middle of something that is tracked by
+ // this class, and we need to do something unusual (eg evict the nursery)
+ // that doesn't normally nest within the current phase. Suspend the
+ // currently tracked phase stack, at which time the caller is free to do
+ // other tracked operations.
+ //
+ // This also happens internally with the PhaseKind::MUTATOR "phase". While in
+ // this phase, any beginPhase will automatically suspend the non-GC phase,
+ // until that inner stack is complete, at which time it will automatically
+ // resume the non-GC phase. Explicit suspensions do not get auto-resumed.
+ void suspendPhases(PhaseKind suspension = PhaseKind::EXPLICIT_SUSPENSION);
+
+ // Resume a suspended stack of phases.
+ void resumePhases();
+
+ void beginSlice(const ZoneGCStats& zoneStats, JS::GCOptions options,
+ const SliceBudget& budget, JS::GCReason reason,
+ bool budgetWasIncreased);
+ void endSlice();
+
+ [[nodiscard]] bool startTimingMutator();
+ [[nodiscard]] bool stopTimingMutator(double& mutator_ms, double& gc_ms);
+
+ // Note when we sweep a zone or compartment.
+ void sweptZone() { ++zoneStats.sweptZoneCount; }
+ void sweptCompartment() { ++zoneStats.sweptCompartmentCount; }
+
+ void reset(GCAbortReason reason) {
+ MOZ_ASSERT(reason != GCAbortReason::None);
+ if (!aborted) {
+ slices_.back().resetReason = reason;
+ }
+ }
+
+ void measureInitialHeapSize();
+
+ void nonincremental(GCAbortReason reason) {
+ MOZ_ASSERT(reason != GCAbortReason::None);
+ nonincrementalReason_ = reason;
+ log("Non-incremental reason: %s", nonincrementalReason());
+ }
+
+ bool nonincremental() const {
+ return nonincrementalReason_ != GCAbortReason::None;
+ }
+
+ const char* nonincrementalReason() const {
+ return ExplainAbortReason(nonincrementalReason_);
+ }
+
+ void count(Count s) { counts[s]++; }
+ void addCount(Count s, uint32_t count) { counts[s] += count; }
+
+ uint32_t getCount(Count s) const { return uint32_t(counts[s]); }
+
+ void setStat(Stat s, uint32_t value) { stats[s] = value; }
+
+ uint32_t getStat(Stat s) const { return stats[s]; }
+
+ void recordTrigger(size_t amount, size_t threshold) {
+ recordedTrigger = mozilla::Some(Trigger{amount, threshold});
+ }
+ bool hasTrigger() const { return recordedTrigger.isSome(); }
+
+ // tenured allocs don't include nursery evictions.
+ void setAllocsSinceMinorGCTenured(uint32_t allocs) {
+ tenuredAllocsSinceMinorGC = allocs;
+ }
+
+ uint32_t allocsSinceMinorGCTenured() { return tenuredAllocsSinceMinorGC; }
+
+ void beginNurseryCollection(JS::GCReason reason);
+ void endNurseryCollection(JS::GCReason reason);
+
+ TimeStamp beginSCC();
+ void endSCC(unsigned scc, TimeStamp start);
+
+ UniqueChars formatCompactSliceMessage() const;
+ UniqueChars formatCompactSummaryMessage() const;
+ UniqueChars formatDetailedMessage() const;
+
+ JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback);
+ JS::GCNurseryCollectionCallback setNurseryCollectionCallback(
+ JS::GCNurseryCollectionCallback callback);
+
+ TimeDuration clearMaxGCPauseAccumulator();
+ TimeDuration getMaxGCPauseSinceClear();
+
+ PhaseKind currentPhaseKind() const;
+
+ static const size_t MAX_SUSPENDED_PHASES = MAX_PHASE_NESTING * 3;
+
+ struct SliceData {
+ SliceData(const SliceBudget& budget, mozilla::Maybe<Trigger> trigger,
+ JS::GCReason reason, TimeStamp start, size_t startFaults,
+ gc::State initialState);
+
+ SliceBudget budget;
+ JS::GCReason reason = JS::GCReason::NO_REASON;
+ mozilla::Maybe<Trigger> trigger;
+ gc::State initialState = gc::State::NotActive;
+ gc::State finalState = gc::State::NotActive;
+ GCAbortReason resetReason = GCAbortReason::None;
+ TimeStamp start;
+ TimeStamp end;
+ size_t startFaults = 0;
+ size_t endFaults = 0;
+ PhaseTimes phaseTimes;
+ PhaseKindTimes totalParallelTimes;
+ PhaseKindTimes maxParallelTimes;
+
+ TimeDuration duration() const { return end - start; }
+ bool wasReset() const { return resetReason != GCAbortReason::None; }
+ };
+
+ typedef Vector<SliceData, 8, SystemAllocPolicy> SliceDataVector;
+
+ const SliceDataVector& slices() const { return slices_; }
+
+ const SliceData* lastSlice() const {
+ if (slices_.length() == 0) {
+ return nullptr;
+ }
+
+ return &slices_.back();
+ }
+
+ TimeStamp start() const { return slices_[0].start; }
+
+ TimeStamp end() const { return slices_.back().end; }
+
+ TimeStamp creationTime() const { return creationTime_; }
+
+ TimeDuration totalGCTime() const { return totalGCTime_; }
+ size_t initialCollectedBytes() const { return preCollectedHeapBytes; }
+
+ // File to write profiling information to, either stderr or file specified
+ // with JS_GC_PROFILE_FILE.
+ FILE* profileFile() const { return gcProfileFile; }
+
+ // Occasionally print header lines for profiling information.
+ void maybePrintProfileHeaders();
+
+ // Print header line for profile times.
+ void printProfileHeader();
+
+ // Print total profile times on shutdown.
+ void printTotalProfileTimes();
+
+ // These JSON strings are used by the firefox profiler to display the GC
+ // markers.
+
+ // Return JSON for a whole major GC
+ UniqueChars renderJsonMessage() const;
+
+ // Return JSON for the timings of just the given slice.
+ UniqueChars renderJsonSlice(size_t sliceNum) const;
+
+ // Return JSON for the previous nursery collection.
+ UniqueChars renderNurseryJson() const;
+
+#ifdef DEBUG
+ // Print a logging message.
+ void log(const char* fmt, ...);
+#else
+ void log(const char* fmt, ...){};
+#endif
+
+ private:
+ gc::GCRuntime* const gc;
+
+ /* File used for MOZ_GCTIMER output. */
+ FILE* gcTimerFile;
+
+ /* File used for JS_GC_DEBUG output. */
+ FILE* gcDebugFile;
+
+ /* File used for JS_GC_PROFILE output. */
+ FILE* gcProfileFile;
+
+ ZoneGCStats zoneStats;
+
+ JS::GCOptions gcOptions;
+
+ GCAbortReason nonincrementalReason_;
+
+ SliceDataVector slices_;
+
+ /* Most recent time when the given phase started. */
+ PhaseTimeStamps phaseStartTimes;
+
+#ifdef DEBUG
+ /* Most recent time when the given phase ended. */
+ PhaseTimeStamps phaseEndTimes;
+#endif
+
+ TimeStamp creationTime_;
+
+ /* Bookkeeping for GC timings when timingMutator is true */
+ TimeStamp timedGCStart;
+ TimeDuration timedGCTime;
+
+ /* Total main thread time in a given phase for this GC. */
+ PhaseTimes phaseTimes;
+
+ /* Total main thread time for this GC. */
+ TimeDuration totalGCTime_;
+
+ /* Number of events of this type for this GC. */
+ EnumeratedArray<Count, COUNT_LIMIT,
+ mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire>>
+ counts;
+
+ /* Other GC statistics. */
+ EnumeratedArray<Stat, STAT_LIMIT, uint32_t> stats;
+
+ /*
+ * These events cannot be kept in the above array, we need to take their
+ * address.
+ */
+ uint32_t tenuredAllocsSinceMinorGC;
+
+ /* Total GC heap size before and after the GC ran. */
+ size_t preTotalHeapBytes;
+ size_t postTotalHeapBytes;
+
+ /* GC heap size for collected zones before GC ran. */
+ size_t preCollectedHeapBytes;
+
+ /*
+ * If a GC slice was triggered by exceeding some threshold, record the
+ * threshold and the value that exceeded it. This happens before the slice
+ * starts so this is recorded here first and then transferred to SliceData.
+ */
+ mozilla::Maybe<Trigger> recordedTrigger;
+
+ /* GC numbers as of the beginning of the collection. */
+ uint64_t startingMinorGCNumber;
+ uint64_t startingMajorGCNumber;
+ uint64_t startingSliceNumber;
+
+ /* Records the maximum GC pause in an API-controlled interval. */
+ mutable TimeDuration maxPauseInInterval;
+
+ /* Phases that are currently on stack. */
+ Vector<Phase, MAX_PHASE_NESTING, SystemAllocPolicy> phaseStack;
+
+ /*
+ * Certain phases can interrupt the phase stack, eg callback phases. When
+ * this happens, we move the suspended phases over to a separate list,
+ * terminated by a dummy PhaseKind::SUSPENSION phase (so that we can nest
+ * suspensions by suspending multiple stacks with a PhaseKind::SUSPENSION in
+ * between).
+ */
+ Vector<Phase, MAX_SUSPENDED_PHASES, SystemAllocPolicy> suspendedPhases;
+
+ /* Sweep times for SCCs of compartments. */
+ Vector<TimeDuration, 0, SystemAllocPolicy> sccTimes;
+
+ TimeDuration timeSinceLastGC;
+
+ JS::GCSliceCallback sliceCallback;
+ JS::GCNurseryCollectionCallback nurseryCollectionCallback;
+
+ /*
+ * True if we saw an OOM while allocating slices or we saw an impossible
+ * timestamp. The statistics for this GC will be invalid.
+ */
+ bool aborted;
+
+ /* Profiling data. */
+
+ enum class ProfileKey {
+#define DEFINE_PROFILE_KEY(name, _1, _2) name,
+ FOR_EACH_GC_PROFILE_TIME(DEFINE_PROFILE_KEY)
+#undef DEFINE_PROFILE_KEY
+ KeyCount
+ };
+
+ using ProfileDurations =
+ EnumeratedArray<ProfileKey, ProfileKey::KeyCount, TimeDuration>;
+
+ bool enableProfiling_;
+ bool profileWorkers_;
+ TimeDuration profileThreshold_;
+ ProfileDurations totalTimes_;
+ uint64_t sliceCount_;
+
+ char formatBuffer_[32];
+ static constexpr int FormatBufferLength = sizeof(formatBuffer_);
+
+ JSContext* context();
+
+ Phase currentPhase() const;
+ Phase lookupChildPhase(PhaseKind phaseKind) const;
+
+ void beginGC(JS::GCOptions options, const TimeStamp& currentTime);
+ void endGC();
+
+ void sendGCTelemetry();
+ void sendSliceTelemetry(const SliceData& slice);
+
+ TimeDuration sumTotalParallelTime(PhaseKind phaseKind) const;
+
+ void recordPhaseBegin(Phase phase);
+ void recordPhaseEnd(Phase phase);
+
+ void gcDuration(TimeDuration* total, TimeDuration* maxPause) const;
+ void sccDurations(TimeDuration* total, TimeDuration* maxPause) const;
+ void printStats();
+
+ template <typename Fn>
+ void reportLongestPhaseInMajorGC(PhaseKind longest, Fn reportFn);
+
+ UniqueChars formatCompactSlicePhaseTimes(const PhaseTimes& phaseTimes) const;
+
+ UniqueChars formatDetailedDescription() const;
+ UniqueChars formatDetailedSliceDescription(unsigned i,
+ const SliceData& slice) const;
+ UniqueChars formatDetailedPhaseTimes(const PhaseTimes& phaseTimes) const;
+ UniqueChars formatDetailedTotals() const;
+
+ void formatJsonDescription(JSONPrinter&) const;
+ void formatJsonSliceDescription(unsigned i, const SliceData& slice,
+ JSONPrinter&) const;
+ void formatJsonPhaseTimes(const PhaseTimes& phaseTimes, JSONPrinter&) const;
+ void formatJsonSlice(size_t sliceNum, JSONPrinter&) const;
+
+ double computeMMU(TimeDuration resolution) const;
+
+ void printSliceProfile();
+ ProfileDurations getProfileTimes(const SliceData& slice) const;
+ void updateTotalProfileTimes(const ProfileDurations& times);
+ const char* formatGCStates(const SliceData& slice);
+ const char* formatGCFlags(const SliceData& slice);
+ const char* formatBudget(const SliceData& slice);
+ const char* formatTotalSlices();
+ static bool printProfileTimes(const ProfileDurations& times,
+ Sprinter& sprinter);
+};
+
+// RAII helper: brackets a GC slice, calling Statistics::beginSlice on
+// construction and Statistics::endSlice on destruction.
+struct MOZ_RAII AutoGCSlice {
+ AutoGCSlice(Statistics& stats, const ZoneGCStats& zoneStats,
+ JS::GCOptions options, const SliceBudget& budget,
+ JS::GCReason reason, bool budgetWasIncreased)
+ : stats(stats) {
+ stats.beginSlice(zoneStats, options, budget, reason, budgetWasIncreased);
+ }
+ ~AutoGCSlice() { stats.endSlice(); }
+
+ Statistics& stats;
+};
+
+// RAII helper: times a GC phase by pairing beginPhase/endPhase. The
+// two-argument form always times; the |condition| form only times when
+// the condition is true (endPhase is then skipped too).
+struct MOZ_RAII AutoPhase {
+ AutoPhase(Statistics& stats, PhaseKind phaseKind)
+ : stats(stats), phaseKind(phaseKind), enabled(true) {
+ stats.beginPhase(phaseKind);
+ }
+
+ AutoPhase(Statistics& stats, bool condition, PhaseKind phaseKind)
+ : stats(stats), phaseKind(phaseKind), enabled(condition) {
+ if (enabled) {
+ stats.beginPhase(phaseKind);
+ }
+ }
+
+ ~AutoPhase() {
+ if (enabled) {
+ stats.endPhase(phaseKind);
+ }
+ }
+
+ Statistics& stats;
+ PhaseKind phaseKind;
+ bool enabled;
+};
+
+// RAII helper: times the sweeping of one strongly-connected component
+// of compartments, recording the interval via beginSCC/endSCC.
+struct MOZ_RAII AutoSCC {
+ AutoSCC(Statistics& stats, unsigned scc) : stats(stats), scc(scc) {
+ start = stats.beginSCC();
+ }
+ ~AutoSCC() { stats.endSCC(scc, start); }
+
+ Statistics& stats;
+ unsigned scc;
+ mozilla::TimeStamp start;
+};
+
+void ReadProfileEnv(const char* envName, const char* helpText, bool* enableOut,
+ bool* workersOut, mozilla::TimeDuration* thresholdOut);
+
+} /* namespace gcstats */
+
+struct StringStats {
+ // number of strings that were deduplicated, and their sizes in characters
+ // and bytes
+ uint64_t deduplicatedStrings = 0;
+ uint64_t deduplicatedChars = 0;
+ uint64_t deduplicatedBytes = 0;
+
+ // number of live nursery strings at the start of a nursery collection
+ uint64_t liveNurseryStrings = 0;
+
+ // number of new strings added to the tenured heap
+ uint64_t tenuredStrings = 0;
+
+ // Currently, liveNurseryStrings = tenuredStrings + deduplicatedStrings (but
+ // in the future we may do more transformation during tenuring, eg
+ // atomizing.)
+
+ // number of malloced bytes associated with tenured strings (the actual
+ // malloc will have happened when the strings were allocated in the nursery;
+ // the ownership of the bytes will be transferred to the tenured strings)
+ uint64_t tenuredBytes = 0;
+
+ StringStats& operator+=(const StringStats& other) {
+ deduplicatedStrings += other.deduplicatedStrings;
+ deduplicatedChars += other.deduplicatedChars;
+ deduplicatedBytes += other.deduplicatedBytes;
+ liveNurseryStrings += other.liveNurseryStrings;
+ tenuredStrings += other.tenuredStrings;
+ tenuredBytes += other.tenuredBytes;
+ return *this;
+ }
+
+ void noteTenured(size_t mallocBytes) {
+ liveNurseryStrings++;
+ tenuredStrings++;
+ tenuredBytes += mallocBytes;
+ }
+
+ void noteDeduplicated(size_t numChars, size_t mallocBytes) {
+ liveNurseryStrings++;
+ deduplicatedStrings++;
+ deduplicatedChars += numChars;
+ deduplicatedBytes += mallocBytes;
+ }
+};
+
+} /* namespace js */
+
+#endif /* gc_Statistics_h */
diff --git a/js/src/gc/StoreBuffer-inl.h b/js/src/gc/StoreBuffer-inl.h
new file mode 100644
index 0000000000..1c97654761
--- /dev/null
+++ b/js/src/gc/StoreBuffer-inl.h
@@ -0,0 +1,97 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_StoreBuffer_inl_h
+#define gc_StoreBuffer_inl_h
+
+#include "gc/StoreBuffer.h"
+
+#include "gc/Cell.h"
+#include "gc/Heap.h"
+
+#include "gc/Heap-inl.h"
+
+namespace js {
+namespace gc {
+
+// Map a tenured cell to its bit index within the per-arena cell bitmap:
+// the cell's byte offset inside its arena divided by the index
+// granularity. Asserts the cell is aligned to that granularity.
+inline /* static */ size_t ArenaCellSet::getCellIndex(const TenuredCell* cell) {
+ uintptr_t cellOffset = uintptr_t(cell) & ArenaMask;
+ MOZ_ASSERT(cellOffset % ArenaCellIndexBytes == 0);
+ return cellOffset / ArenaCellIndexBytes;
+}
+
+// Translate a cell index into its word index and bit mask within the
+// underlying BitArray; delegates to BitArray's own mapping so the two
+// stay in sync.
+inline /* static */ void ArenaCellSet::getWordIndexAndMask(size_t cellIndex,
+ size_t* wordp,
+ uint32_t* maskp) {
+ BitArray<MaxArenaCellIndex>::getIndexAndMask(cellIndex, wordp, maskp);
+}
+
+// Test whether the cell at |cellIndex| has been recorded in this set.
+inline bool ArenaCellSet::hasCell(size_t cellIndex) const {
+ // The bitmap holds exactly MaxArenaCellIndex entries per arena.
+ MOZ_ASSERT(cellIndex < MaxArenaCellIndex);
+ const bool present = bits.get(cellIndex);
+ return present;
+}
+
+// Record the cell at |cellIndex| in this set. Must not be called on the
+// shared empty sentinel (hence the |arena| assertion); runs the debug
+// invariant check afterwards.
+inline void ArenaCellSet::putCell(size_t cellIndex) {
+ MOZ_ASSERT(cellIndex < MaxArenaCellIndex);
+ MOZ_ASSERT(arena);
+
+ bits.set(cellIndex);
+ check();
+}
+
+// Debug-only consistency check: an empty set must have an all-clear
+// bitmap and no arena; a non-empty set must be the one its arena points
+// back to and must have been created during the current minor GC cycle.
+// No-op in release builds.
+inline void ArenaCellSet::check() const {
+#ifdef DEBUG
+ bool bitsZero = bits.isAllClear();
+ MOZ_ASSERT(isEmpty() == bitsZero);
+ MOZ_ASSERT(isEmpty() == !arena);
+ if (!isEmpty()) {
+ MOZ_ASSERT(IsCellPointerValid(arena));
+ MOZ_ASSERT(arena->bufferedCells() == this);
+ JSRuntime* runtime = arena->zone->runtimeFromMainThread();
+ MOZ_ASSERT(runtime->gc.minorGCCount() == minorGCNumberAtCreation);
+ }
+#endif
+}
+
+// Buffer |cell| as a whole-cell edge. A one-entry cache (|last_|)
+// filters out back-to-back insertions of the same cell cheaply.
+inline void StoreBuffer::WholeCellBuffer::put(const Cell* cell) {
+ if (cell == last_) {
+ return;
+ }
+ putDontCheckLast(cell);
+}
+
+// Buffer |cell| without consulting the |last_| duplicate filter (the
+// filter is updated on the way out so subsequent put() calls still
+// benefit). Allocates the arena's cell set on first use; allocation
+// failure (nursery disabled) silently drops the edge.
+inline void StoreBuffer::WholeCellBuffer::putDontCheckLast(const Cell* cell) {
+ // This can still be called when |cell == last_| if the caller didn't check
+ // and that's OK.
+
+ MOZ_ASSERT(cell->isTenured());
+
+ // BigInts don't have any children, so shouldn't show up here.
+ MOZ_ASSERT(cell->getTraceKind() != JS::TraceKind::BigInt);
+
+ Arena* arena = cell->asTenured().arena();
+ ArenaCellSet* cells = arena->bufferedCells();
+ if (cells->isEmpty()) {
+ cells = allocateCellSet(arena);
+ if (!cells) {
+ return;
+ }
+ }
+
+ cells->putCell(&cell->asTenured());
+ cells->check();
+
+ last_ = cell;
+}
+
+// Thin forwarding wrappers from StoreBuffer to its whole-cell buffer.
+inline void StoreBuffer::putWholeCell(Cell* cell) { bufferWholeCell.put(cell); }
+inline void StoreBuffer::putWholeCellDontCheckLast(Cell* cell) {
+ bufferWholeCell.putDontCheckLast(cell);
+}
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_StoreBuffer_inl_h
diff --git a/js/src/gc/StoreBuffer.cpp b/js/src/gc/StoreBuffer.cpp
new file mode 100644
index 0000000000..4f6240d48b
--- /dev/null
+++ b/js/src/gc/StoreBuffer.cpp
@@ -0,0 +1,255 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/StoreBuffer-inl.h"
+
+#include "mozilla/Assertions.h"
+
+#include "gc/Statistics.h"
+#include "vm/MutexIDs.h"
+#include "vm/Runtime.h"
+
+using namespace js;
+using namespace js::gc;
+
+// Public-API wrappers that take/release the store buffer's internal
+// mutex; |sb| must be non-null.
+JS_PUBLIC_API void js::gc::LockStoreBuffer(StoreBuffer* sb) {
+ MOZ_ASSERT(sb);
+ sb->lock();
+}
+
+JS_PUBLIC_API void js::gc::UnlockStoreBuffer(StoreBuffer* sb) {
+ MOZ_ASSERT(sb);
+ sb->unlock();
+}
+
+#ifdef DEBUG
+// Debug-only access-policy assertion: during a collection (heap not
+// idle) accessors must hold the lock and must not be a marking thread;
+// when idle, only the main thread may touch the buffer.
+void StoreBuffer::checkAccess() const {
+ // The GC runs tasks that may access the storebuffer in parallel and so must
+ // take a lock. The mutator may only access the storebuffer from the main
+ // thread.
+ if (runtime_->heapState() != JS::HeapState::Idle) {
+ MOZ_ASSERT(!CurrentThreadIsGCMarking());
+ lock_.assertOwnedByCurrentThread();
+ } else {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+ }
+}
+#endif
+
+// Lazily create the LifoAlloc backing store (reused across enables) and
+// reset the buffer to its empty state. Returns false on OOM.
+bool StoreBuffer::WholeCellBuffer::init() {
+ MOZ_ASSERT(!stringHead_);
+ MOZ_ASSERT(!nonStringHead_);
+ if (!storage_) {
+ storage_ = MakeUnique<LifoAlloc>(LifoAllocBlockSize);
+ // This prevents LifoAlloc::Enum from crashing with a release
+ // assertion if we ever allocate one entry larger than
+ // LifoAllocBlockSize.
+ if (storage_) {
+ storage_->disableOversize();
+ }
+ }
+ clear();
+ return bool(storage_);
+}
+
+// Create the generic buffer's backing LifoAlloc on first use (it is
+// kept across enable/disable cycles), then reset to empty. Returns
+// false if allocation failed.
+bool StoreBuffer::GenericBuffer::init() {
+ if (!storage_) {
+ storage_ = MakeUnique<LifoAlloc>(LifoAllocBlockSize);
+ }
+ clear();
+ return storage_ != nullptr;
+}
+
+// Trace every BufferableRef stored in the generic buffer. Entries are
+// laid out in the LifoAlloc as a size word followed by |size| bytes of
+// a BufferableRef-derived object; the enumerator consumes them in
+// insertion order. Reentrancy is guarded in debug builds.
+void StoreBuffer::GenericBuffer::trace(JSTracer* trc) {
+ mozilla::ReentrancyGuard g(*owner_);
+ MOZ_ASSERT(owner_->isEnabled());
+ if (!storage_) {
+ return;
+ }
+
+ for (LifoAlloc::Enum e(*storage_); !e.empty();) {
+ unsigned size = *e.read<unsigned>();
+ BufferableRef* edge = e.read<BufferableRef>(size);
+ edge->trace(trc);
+ }
+}
+
+// Construct a disabled store buffer. Each monotype sub-buffer is given
+// the GC reason it reports when it fills up and triggers a minor GC.
+// enable() must be called before the buffer accepts edges.
+StoreBuffer::StoreBuffer(JSRuntime* rt, const Nursery& nursery)
+ : lock_(mutexid::StoreBuffer),
+ bufferVal(this, JS::GCReason::FULL_VALUE_BUFFER),
+ bufStrCell(this, JS::GCReason::FULL_CELL_PTR_STR_BUFFER),
+ bufBigIntCell(this, JS::GCReason::FULL_CELL_PTR_BIGINT_BUFFER),
+ bufObjCell(this, JS::GCReason::FULL_CELL_PTR_OBJ_BUFFER),
+ bufferSlot(this, JS::GCReason::FULL_SLOT_BUFFER),
+ bufferWholeCell(this),
+ bufferGeneric(this),
+ runtime_(rt),
+ nursery_(nursery),
+ aboutToOverflow_(false),
+ enabled_(false),
+ mayHavePointersToDeadCells_(false)
+#ifdef DEBUG
+ ,
+ mEntered(false),
+ markingNondeduplicatable(false)
+#endif
+{
+}
+
+void StoreBuffer::checkEmpty() const { MOZ_ASSERT(isEmpty()); }
+
+// True only when every sub-buffer holds no edges; sub-buffers are
+// consulted in declaration order.
+bool StoreBuffer::isEmpty() const {
+ if (!bufferVal.isEmpty() || !bufStrCell.isEmpty()) {
+ return false;
+ }
+ if (!bufBigIntCell.isEmpty() || !bufObjCell.isEmpty()) {
+ return false;
+ }
+ if (!bufferSlot.isEmpty() || !bufferWholeCell.isEmpty()) {
+ return false;
+ }
+ return bufferGeneric.isEmpty();
+}
+
+// Enable the store buffer so writes start being recorded. Idempotent.
+// Only the whole-cell and generic buffers need explicit init (they own
+// LifoAlloc storage); returns false on OOM, leaving the buffer
+// disabled.
+bool StoreBuffer::enable() {
+ if (enabled_) {
+ return true;
+ }
+
+ checkEmpty();
+
+ if (!bufferWholeCell.init() || !bufferGeneric.init()) {
+ return false;
+ }
+
+ enabled_ = true;
+ return true;
+}
+
+// Disable the store buffer. The buffer must already be empty — callers
+// may not drop buffered edges by disabling. Idempotent.
+void StoreBuffer::disable() {
+ checkEmpty();
+
+ if (enabled_) {
+ aboutToOverflow_ = false;
+ enabled_ = false;
+ }
+}
+
+// Discard all buffered edges and reset the overflow/dead-cell flags.
+// No-op while disabled.
+void StoreBuffer::clear() {
+ if (!enabled_) {
+ return;
+ }
+
+ aboutToOverflow_ = false;
+ mayHavePointersToDeadCells_ = false;
+
+ bufferVal.clear();
+ bufStrCell.clear();
+ bufBigIntCell.clear();
+ bufObjCell.clear();
+ bufferSlot.clear();
+ bufferWholeCell.clear();
+ bufferGeneric.clear();
+}
+
+// Note that some sub-buffer is nearly full: bump the overflow counter
+// once per overflow episode and ask the nursery for a minor GC with the
+// given reason (the request itself is repeated on every call).
+void StoreBuffer::setAboutToOverflow(JS::GCReason reason) {
+ if (!aboutToOverflow_) {
+ aboutToOverflow_ = true;
+ runtime_->gc.stats().count(gcstats::COUNT_STOREBUFFER_OVERFLOW);
+ }
+ nursery_.requestMinorGC(reason);
+}
+
+// Memory-reporting hook: accumulate each sub-buffer's malloc footprint
+// into the matching JS::GCSizes field (the three cell-pointer buffers
+// share one field).
+void StoreBuffer::addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::GCSizes* sizes) {
+ sizes->storeBufferVals += bufferVal.sizeOfExcludingThis(mallocSizeOf);
+ sizes->storeBufferCells += bufStrCell.sizeOfExcludingThis(mallocSizeOf) +
+ bufBigIntCell.sizeOfExcludingThis(mallocSizeOf) +
+ bufObjCell.sizeOfExcludingThis(mallocSizeOf);
+ sizes->storeBufferSlots += bufferSlot.sizeOfExcludingThis(mallocSizeOf);
+ sizes->storeBufferWholeCells +=
+ bufferWholeCell.sizeOfExcludingThis(mallocSizeOf);
+ sizes->storeBufferGenerics += bufferGeneric.sizeOfExcludingThis(mallocSizeOf);
+}
+
+ArenaCellSet ArenaCellSet::Empty;
+
+// Construct a cell set for |arena|, linked onto the buffer's list via
+// |next|. In debug builds the current minor-GC count is captured so
+// check() can detect stale sets surviving across collections.
+ArenaCellSet::ArenaCellSet(Arena* arena, ArenaCellSet* next)
+ : arena(arena),
+ next(next)
+#ifdef DEBUG
+ ,
+ minorGCNumberAtCreation(
+ arena->zone->runtimeFromMainThread()->gc.minorGCCount())
+#endif
+{
+ MOZ_ASSERT(arena);
+ MOZ_ASSERT(bits.isAllClear());
+}
+
+// Allocate and link a new ArenaCellSet for |arena|. Returns nullptr
+// when the nursery is disabled (no remembered set is needed then).
+// Allocation happens in an OOM-unsafe region: failure crashes rather
+// than silently losing a remembered-set entry. May request a minor GC
+// if the buffer is close to full.
+ArenaCellSet* StoreBuffer::WholeCellBuffer::allocateCellSet(Arena* arena) {
+ Zone* zone = arena->zone;
+ JSRuntime* rt = zone->runtimeFromMainThread();
+ if (!rt->gc.nursery().isEnabled()) {
+ return nullptr;
+ }
+
+ // Maintain separate lists for strings and non-strings, so that all buffered
+ // string whole cells will be processed before anything else (to prevent them
+ // from being deduplicated when their chars are used by a tenured string.)
+ bool isString =
+ MapAllocToTraceKind(arena->getAllocKind()) == JS::TraceKind::String;
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ ArenaCellSet*& head = isString ? stringHead_ : nonStringHead_;
+ auto cells = storage_->new_<ArenaCellSet>(arena, head);
+ if (!cells) {
+ oomUnsafe.crash("Failed to allocate ArenaCellSet");
+ }
+
+ // Link the new set both from the arena and at the head of the
+ // appropriate list.
+ arena->bufferedCells() = cells;
+ head = cells;
+
+ if (isAboutToOverflow()) {
+ rt->gc.storeBuffer().setAboutToOverflow(
+ JS::GCReason::FULL_WHOLE_CELL_BUFFER);
+ }
+
+ return cells;
+}
+
+// Out-of-line post-write barrier for a JSObject* stored in a cell
+// header; forwards to the generic InternalBarrierMethods
+// implementation.
+void gc::CellHeaderPostWriteBarrier(JSObject** ptr, JSObject* prev,
+ JSObject* next) {
+ InternalBarrierMethods<JSObject*>::postBarrier(ptr, prev, next);
+}
+
+// Reset the whole-cell buffer: detach every buffered arena (pointing it
+// back at the shared empty sentinel), drop both list heads, recycle the
+// LifoAlloc storage, and forget the duplicate-filter cache.
+void StoreBuffer::WholeCellBuffer::clear() {
+ for (auto** headPtr : {&stringHead_, &nonStringHead_}) {
+ for (auto* set = *headPtr; set; set = set->next) {
+ set->arena->bufferedCells() = &ArenaCellSet::Empty;
+ }
+ *headPtr = nullptr;
+ }
+
+ if (storage_) {
+ // releaseAll keeps the chunks for reuse when anything was allocated;
+ // freeAll returns them to the system otherwise.
+ storage_->used() ? storage_->releaseAll() : storage_->freeAll();
+ }
+
+ last_ = nullptr;
+}
+
+template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::ValueEdge>;
+template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::SlotsEdge>;
+
+// Generic whole-cell post-write barrier for an edge in tenured |cell|
+// changing from |prev| to |next|. Buffers the cell only when |next| is
+// a nursery-allocated value (has a store buffer) and the edge wasn't
+// already covered because |prev| was also in the nursery.
+void js::gc::PostWriteBarrierCell(Cell* cell, Cell* prev, Cell* next) {
+ if (!next || !cell->isTenured()) {
+ return;
+ }
+
+ StoreBuffer* buffer = next->storeBuffer();
+ if (!buffer || (prev && prev->storeBuffer())) {
+ return;
+ }
+
+ buffer->putWholeCell(cell);
+}
diff --git a/js/src/gc/StoreBuffer.h b/js/src/gc/StoreBuffer.h
new file mode 100644
index 0000000000..4700392074
--- /dev/null
+++ b/js/src/gc/StoreBuffer.h
@@ -0,0 +1,662 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_StoreBuffer_h
+#define gc_StoreBuffer_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/ReentrancyGuard.h"
+
+#include <algorithm>
+
+#include "ds/BitArray.h"
+#include "ds/LifoAlloc.h"
+#include "gc/Cell.h"
+#include "gc/Nursery.h"
+#include "gc/TraceKind.h"
+#include "js/AllocPolicy.h"
+#include "js/UniquePtr.h"
+#include "threading/Mutex.h"
+
+namespace JS {
+struct GCSizes;
+}
+
+namespace js {
+
+class NativeObject;
+
+#ifdef DEBUG
+extern bool CurrentThreadIsGCMarking();
+#endif
+
+namespace gc {
+
+class Arena;
+class ArenaCellSet;
+
+#ifdef DEBUG
+extern bool CurrentThreadHasLockedGC();
+#endif
+
+/*
+ * BufferableRef represents an abstract reference for use in the generational
+ * GC's remembered set. Entries in the store buffer that cannot be represented
+ * with the simple pointer-to-a-pointer scheme must derive from this class and
+ * use the generic store buffer interface.
+ *
+ * A single BufferableRef entry in the generic buffer can represent many entries
+ * in the remembered set. For example js::OrderedHashTableRef represents all
+ * the incoming edges corresponding to keys in an ordered hash table.
+ */
+class BufferableRef {
+ public:
+ virtual void trace(JSTracer* trc) = 0;
+ bool maybeInRememberedSet(const Nursery&) const { return true; }
+};
+
+typedef HashSet<void*, PointerHasher<void*>, SystemAllocPolicy> EdgeSet;
+
+/* The size of a single block of store buffer storage space. */
+static const size_t LifoAllocBlockSize = 8 * 1024;
+
+/*
+ * The StoreBuffer observes all writes that occur in the system and performs
+ * efficient filtering of them to derive a remembered set for nursery GC.
+ */
+class StoreBuffer {
+ friend class mozilla::ReentrancyGuard;
+
+ /* The size at which a block is about to overflow for the generic buffer. */
+ static const size_t GenericBufferLowAvailableThreshold =
+ LifoAllocBlockSize / 2;
+
+ /* The size at which other store buffers are about to overflow. */
+ static const size_t BufferOverflowThresholdBytes = 128 * 1024;
+
+ /*
+ * This buffer holds only a single type of edge. Using this buffer is more
+ * efficient than the generic buffer when many writes will be to the same
+ * type of edge: e.g. Value or Cell*.
+ */
+ template <typename T>
+ struct MonoTypeBuffer {
+ /* The canonical set of stores. */
+ typedef HashSet<T, typename T::Hasher, SystemAllocPolicy> StoreSet;
+ StoreSet stores_;
+
+ /*
+ * A one element cache in front of the canonical set to speed up
+ * temporary instances of HeapPtr.
+ */
+ T last_;
+
+ StoreBuffer* owner_;
+
+ JS::GCReason gcReason_;
+
+ /* Maximum number of entries before we request a minor GC. */
+ const static size_t MaxEntries = BufferOverflowThresholdBytes / sizeof(T);
+
+ explicit MonoTypeBuffer(StoreBuffer* owner, JS::GCReason reason)
+ : last_(T()), owner_(owner), gcReason_(reason) {}
+
+ void clear() {
+ last_ = T();
+ stores_.clear();
+ }
+
+ /* Add one item to the buffer. */
+ void put(const T& t) {
+ sinkStore();
+ last_ = t;
+ }
+
+ /* Remove an item from the store buffer. */
+ void unput(const T& v) {
+ // Fast, hashless remove of last put.
+ if (last_ == v) {
+ last_ = T();
+ return;
+ }
+ stores_.remove(v);
+ }
+
+ /* Move any buffered stores to the canonical store set. */
+ void sinkStore() {
+ if (last_) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!stores_.put(last_)) {
+ oomUnsafe.crash("Failed to allocate for MonoTypeBuffer::put.");
+ }
+ }
+ last_ = T();
+
+ if (MOZ_UNLIKELY(stores_.count() > MaxEntries)) {
+ owner_->setAboutToOverflow(gcReason_);
+ }
+ }
+
+ /* Trace the source of all edges in the store buffer. */
+ void trace(TenuringTracer& mover);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return stores_.shallowSizeOfExcludingThis(mallocSizeOf);
+ }
+
+ bool isEmpty() const { return last_ == T() && stores_.empty(); }
+
+ private:
+ MonoTypeBuffer(const MonoTypeBuffer& other) = delete;
+ MonoTypeBuffer& operator=(const MonoTypeBuffer& other) = delete;
+ };
+
+ struct WholeCellBuffer {
+ UniquePtr<LifoAlloc> storage_;
+ ArenaCellSet* stringHead_ = nullptr;
+ ArenaCellSet* nonStringHead_ = nullptr;
+ const Cell* last_ = nullptr;
+ StoreBuffer* owner_;
+
+ explicit WholeCellBuffer(StoreBuffer* owner) : owner_(owner) {}
+
+ [[nodiscard]] bool init();
+
+ void clear();
+
+ bool isAboutToOverflow() const {
+ return !storage_->isEmpty() &&
+ storage_->used() > BufferOverflowThresholdBytes;
+ }
+
+ void trace(TenuringTracer& mover);
+
+ inline void put(const Cell* cell);
+ inline void putDontCheckLast(const Cell* cell);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return storage_ ? storage_->sizeOfIncludingThis(mallocSizeOf) : 0;
+ }
+
+ bool isEmpty() const {
+ MOZ_ASSERT_IF(!stringHead_ && !nonStringHead_,
+ !storage_ || storage_->isEmpty());
+ return !stringHead_ && !nonStringHead_;
+ }
+
+ const Cell** lastBufferedPtr() { return &last_; }
+
+ private:
+ ArenaCellSet* allocateCellSet(Arena* arena);
+
+ WholeCellBuffer(const WholeCellBuffer& other) = delete;
+ WholeCellBuffer& operator=(const WholeCellBuffer& other) = delete;
+ };
+
+ struct GenericBuffer {
+ UniquePtr<LifoAlloc> storage_;
+ StoreBuffer* owner_;
+
+ explicit GenericBuffer(StoreBuffer* owner)
+ : storage_(nullptr), owner_(owner) {}
+
+ [[nodiscard]] bool init();
+
+ void clear() {
+ if (storage_) {
+ storage_->used() ? storage_->releaseAll() : storage_->freeAll();
+ }
+ }
+
+ bool isAboutToOverflow() const {
+ return !storage_->isEmpty() && storage_->availableInCurrentChunk() <
+ GenericBufferLowAvailableThreshold;
+ }
+
+ /* Trace all generic edges. */
+ void trace(JSTracer* trc);
+
+ template <typename T>
+ void put(const T& t) {
+ MOZ_ASSERT(storage_);
+
+ /* Ensure T is derived from BufferableRef. */
+ (void)static_cast<const BufferableRef*>(&t);
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ unsigned size = sizeof(T);
+ unsigned* sizep = storage_->pod_malloc<unsigned>();
+ if (!sizep) {
+ oomUnsafe.crash("Failed to allocate for GenericBuffer::put.");
+ }
+ *sizep = size;
+
+ T* tp = storage_->new_<T>(t);
+ if (!tp) {
+ oomUnsafe.crash("Failed to allocate for GenericBuffer::put.");
+ }
+
+ if (isAboutToOverflow()) {
+ owner_->setAboutToOverflow(JS::GCReason::FULL_GENERIC_BUFFER);
+ }
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return storage_ ? storage_->sizeOfIncludingThis(mallocSizeOf) : 0;
+ }
+
+ bool isEmpty() const { return !storage_ || storage_->isEmpty(); }
+
+ private:
+ GenericBuffer(const GenericBuffer& other) = delete;
+ GenericBuffer& operator=(const GenericBuffer& other) = delete;
+ };
+
+ template <typename Edge>
+ struct PointerEdgeHasher {
+ using Lookup = Edge;
+ static HashNumber hash(const Lookup& l) {
+ return mozilla::HashGeneric(l.edge);
+ }
+ static bool match(const Edge& k, const Lookup& l) { return k == l; }
+ };
+
+ template <typename T>
+ struct CellPtrEdge {
+ T** edge = nullptr;
+
+ CellPtrEdge() = default;
+ explicit CellPtrEdge(T** v) : edge(v) {}
+ bool operator==(const CellPtrEdge& other) const {
+ return edge == other.edge;
+ }
+ bool operator!=(const CellPtrEdge& other) const {
+ return edge != other.edge;
+ }
+
+ bool maybeInRememberedSet(const Nursery& nursery) const {
+ MOZ_ASSERT(IsInsideNursery(*edge));
+ return !nursery.isInside(edge);
+ }
+
+ void trace(TenuringTracer& mover) const;
+
+ explicit operator bool() const { return edge != nullptr; }
+
+ using Hasher = PointerEdgeHasher<CellPtrEdge<T>>;
+ };
+
+ using ObjectPtrEdge = CellPtrEdge<JSObject>;
+ using StringPtrEdge = CellPtrEdge<JSString>;
+ using BigIntPtrEdge = CellPtrEdge<JS::BigInt>;
+
+ struct ValueEdge {
+ JS::Value* edge;
+
+ ValueEdge() : edge(nullptr) {}
+ explicit ValueEdge(JS::Value* v) : edge(v) {}
+ bool operator==(const ValueEdge& other) const { return edge == other.edge; }
+ bool operator!=(const ValueEdge& other) const { return edge != other.edge; }
+
+ Cell* deref() const {
+ return edge->isGCThing() ? static_cast<Cell*>(edge->toGCThing())
+ : nullptr;
+ }
+
+ bool maybeInRememberedSet(const Nursery& nursery) const {
+ MOZ_ASSERT(IsInsideNursery(deref()));
+ return !nursery.isInside(edge);
+ }
+
+ void trace(TenuringTracer& mover) const;
+
+ explicit operator bool() const { return edge != nullptr; }
+
+ using Hasher = PointerEdgeHasher<ValueEdge>;
+ };
+
+ struct SlotsEdge {
+ // These definitions must match those in HeapSlot::Kind.
+ const static int SlotKind = 0;
+ const static int ElementKind = 1;
+
+ uintptr_t objectAndKind_; // NativeObject* | Kind
+ uint32_t start_;
+ uint32_t count_;
+
+ SlotsEdge() : objectAndKind_(0), start_(0), count_(0) {}
+ SlotsEdge(NativeObject* object, int kind, uint32_t start, uint32_t count)
+ : objectAndKind_(uintptr_t(object) | kind),
+ start_(start),
+ count_(count) {
+ MOZ_ASSERT((uintptr_t(object) & 1) == 0);
+ MOZ_ASSERT(kind <= 1);
+ MOZ_ASSERT(count > 0);
+ MOZ_ASSERT(start + count > start);
+ }
+
+ NativeObject* object() const {
+ return reinterpret_cast<NativeObject*>(objectAndKind_ & ~1);
+ }
+ int kind() const { return (int)(objectAndKind_ & 1); }
+
+ bool operator==(const SlotsEdge& other) const {
+ return objectAndKind_ == other.objectAndKind_ && start_ == other.start_ &&
+ count_ == other.count_;
+ }
+
+ bool operator!=(const SlotsEdge& other) const { return !(*this == other); }
+
+ // True if this SlotsEdge range overlaps with the other SlotsEdge range,
+ // false if they do not overlap.
+ bool overlaps(const SlotsEdge& other) const {
+ if (objectAndKind_ != other.objectAndKind_) {
+ return false;
+ }
+
+ // Widen our range by one on each side so that we consider
+ // adjacent-but-not-actually-overlapping ranges as overlapping. This
+ // is particularly useful for coalescing a series of increasing or
+ // decreasing single index writes 0, 1, 2, ..., N into a SlotsEdge
+ // range of elements [0, N].
+ uint32_t end = start_ + count_ + 1;
+ uint32_t start = start_ > 0 ? start_ - 1 : 0;
+ MOZ_ASSERT(start < end);
+
+ uint32_t otherEnd = other.start_ + other.count_;
+ MOZ_ASSERT(other.start_ <= otherEnd);
+ return (start <= other.start_ && other.start_ <= end) ||
+ (start <= otherEnd && otherEnd <= end);
+ }
+
+ // Destructively make this SlotsEdge range the union of the other
+ // SlotsEdge range and this one. A precondition is that the ranges must
+ // overlap.
+ void merge(const SlotsEdge& other) {
+ MOZ_ASSERT(overlaps(other));
+ uint32_t end = std::max(start_ + count_, other.start_ + other.count_);
+ start_ = std::min(start_, other.start_);
+ count_ = end - start_;
+ }
+
+ bool maybeInRememberedSet(const Nursery& n) const {
+ return !IsInsideNursery(reinterpret_cast<Cell*>(object()));
+ }
+
+ void trace(TenuringTracer& mover) const;
+
+ explicit operator bool() const { return objectAndKind_ != 0; }
+
+ typedef struct Hasher {
+ using Lookup = SlotsEdge;
+ static HashNumber hash(const Lookup& l) {
+ return mozilla::HashGeneric(l.objectAndKind_, l.start_, l.count_);
+ }
+ static bool match(const SlotsEdge& k, const Lookup& l) { return k == l; }
+ } Hasher;
+ };
+
+#ifdef DEBUG
+ void checkAccess() const;
+#else
+ void checkAccess() const {}
+#endif
+
+ template <typename Buffer, typename Edge>
+ void unput(Buffer& buffer, const Edge& edge) {
+ checkAccess();
+ if (!isEnabled()) {
+ return;
+ }
+ mozilla::ReentrancyGuard g(*this);
+ buffer.unput(edge);
+ }
+
+ template <typename Buffer, typename Edge>
+ void put(Buffer& buffer, const Edge& edge) {
+ checkAccess();
+ if (!isEnabled()) {
+ return;
+ }
+ mozilla::ReentrancyGuard g(*this);
+ if (edge.maybeInRememberedSet(nursery_)) {
+ buffer.put(edge);
+ }
+ }
+
+ Mutex lock_ MOZ_UNANNOTATED;
+
+ MonoTypeBuffer<ValueEdge> bufferVal;
+ MonoTypeBuffer<StringPtrEdge> bufStrCell;
+ MonoTypeBuffer<BigIntPtrEdge> bufBigIntCell;
+ MonoTypeBuffer<ObjectPtrEdge> bufObjCell;
+ MonoTypeBuffer<SlotsEdge> bufferSlot;
+ WholeCellBuffer bufferWholeCell;
+ GenericBuffer bufferGeneric;
+
+ JSRuntime* runtime_;
+ const Nursery& nursery_;
+
+ bool aboutToOverflow_;
+ bool enabled_;
+ bool mayHavePointersToDeadCells_;
+#ifdef DEBUG
+ bool mEntered; /* For ReentrancyGuard. */
+#endif
+
+ public:
+#ifdef DEBUG
+ bool markingNondeduplicatable;
+#endif
+
+ explicit StoreBuffer(JSRuntime* rt, const Nursery& nursery);
+ [[nodiscard]] bool enable();
+
+ void disable();
+ bool isEnabled() const { return enabled_; }
+
+ bool isEmpty() const;
+ void clear();
+
+ const Nursery& nursery() const { return nursery_; }
+
+ /* Get the overflowed status. */
+ bool isAboutToOverflow() const { return aboutToOverflow_; }
+
+ /*
+ * Brain transplants may add whole cell buffer entires for dead cells. We must
+ * evict the nursery prior to sweeping arenas if any such entries are present.
+ */
+ bool mayHavePointersToDeadCells() const {
+ return mayHavePointersToDeadCells_;
+ }
+
+ /* Insert a single edge into the buffer/remembered set. */
+ void putValue(JS::Value* vp) { put(bufferVal, ValueEdge(vp)); }
+ void unputValue(JS::Value* vp) { unput(bufferVal, ValueEdge(vp)); }
+
+ void putCell(JSString** strp) { put(bufStrCell, StringPtrEdge(strp)); }
+ void unputCell(JSString** strp) { unput(bufStrCell, StringPtrEdge(strp)); }
+
+ void putCell(JS::BigInt** bip) { put(bufBigIntCell, BigIntPtrEdge(bip)); }
+ void unputCell(JS::BigInt** bip) { unput(bufBigIntCell, BigIntPtrEdge(bip)); }
+
+ void putCell(JSObject** strp) { put(bufObjCell, ObjectPtrEdge(strp)); }
+ void unputCell(JSObject** strp) { unput(bufObjCell, ObjectPtrEdge(strp)); }
+
+ void putSlot(NativeObject* obj, int kind, uint32_t start, uint32_t count) {
+ SlotsEdge edge(obj, kind, start, count);
+ if (bufferSlot.last_.overlaps(edge)) {
+ bufferSlot.last_.merge(edge);
+ } else {
+ put(bufferSlot, edge);
+ }
+ }
+
+ inline void putWholeCell(Cell* cell);
+ inline void putWholeCellDontCheckLast(Cell* cell);
+ const void* addressOfLastBufferedWholeCell() {
+ return bufferWholeCell.lastBufferedPtr();
+ }
+
+ /* Insert an entry into the generic buffer. */
+ template <typename T>
+ void putGeneric(const T& t) {
+ put(bufferGeneric, t);
+ }
+
+ void setMayHavePointersToDeadCells() { mayHavePointersToDeadCells_ = true; }
+
+ /* Methods to trace the source of all edges in the store buffer. */
+ void traceValues(TenuringTracer& mover) { bufferVal.trace(mover); }
+ void traceCells(TenuringTracer& mover) {
+ bufStrCell.trace(mover);
+ bufBigIntCell.trace(mover);
+ bufObjCell.trace(mover);
+ }
+ void traceSlots(TenuringTracer& mover) { bufferSlot.trace(mover); }
+ void traceWholeCells(TenuringTracer& mover) { bufferWholeCell.trace(mover); }
+ void traceGenericEntries(JSTracer* trc) { bufferGeneric.trace(trc); }
+
+ /* For use by our owned buffers and for testing. */
+ void setAboutToOverflow(JS::GCReason);
+
+ void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::GCSizes* sizes);
+
+ void checkEmpty() const;
+
+ // For use by the GC only.
+ void lock() { lock_.lock(); }
+ void unlock() { lock_.unlock(); }
+};
+
+// A set of cells in an arena used to implement the whole cell store buffer.
+class ArenaCellSet {
+ friend class StoreBuffer;
+
+ using ArenaCellBits = BitArray<MaxArenaCellIndex>;
+
+ // The arena this relates to.
+ Arena* arena;
+
+ // Pointer to next set forming a linked list.
+ ArenaCellSet* next;
+
+ // Bit vector for each possible cell start position.
+ ArenaCellBits bits;
+
+#ifdef DEBUG
+ // The minor GC number when this was created. This object should not survive
+ // past the next minor collection.
+ const uint64_t minorGCNumberAtCreation;
+#endif
+
+ // Construct the empty sentinel object.
+ constexpr ArenaCellSet()
+ : arena(nullptr),
+ next(nullptr)
+#ifdef DEBUG
+ ,
+ minorGCNumberAtCreation(0)
+#endif
+ {
+ }
+
+ public:
+ using WordT = ArenaCellBits::WordT;
+ const size_t BitsPerWord = ArenaCellBits::bitsPerElement;
+ const size_t NumWords = ArenaCellBits::numSlots;
+
+ ArenaCellSet(Arena* arena, ArenaCellSet* next);
+
+ bool hasCell(const TenuredCell* cell) const {
+ return hasCell(getCellIndex(cell));
+ }
+
+ void putCell(const TenuredCell* cell) { putCell(getCellIndex(cell)); }
+
+ bool isEmpty() const { return this == &Empty; }
+
+ bool hasCell(size_t cellIndex) const;
+
+ void putCell(size_t cellIndex);
+
+ void check() const;
+
+ WordT getWord(size_t wordIndex) const { return bits.getWord(wordIndex); }
+
+ void trace(TenuringTracer& mover);
+
+ // Sentinel object used for all empty sets.
+ //
+ // We use a sentinel because it simplifies the JIT code slightly as we can
+ // assume all arenas have a cell set.
+ static ArenaCellSet Empty;
+
+ static size_t getCellIndex(const TenuredCell* cell);
+ static void getWordIndexAndMask(size_t cellIndex, size_t* wordp,
+ uint32_t* maskp);
+
+ // Attempt to trigger a minor GC if free space in the nursery (where these
+ // objects are allocated) falls below this threshold.
+ static const size_t NurseryFreeThresholdBytes = 64 * 1024;
+
+ static size_t offsetOfArena() { return offsetof(ArenaCellSet, arena); }
+ static size_t offsetOfBits() { return offsetof(ArenaCellSet, bits); }
+};
+
+// Post-write barrier implementation for GC cells.
+
+// Implement the post-write barrier for nursery allocateable cell type |T|. Call
+// this from |T::postWriteBarrier|.
+template <typename T>
+MOZ_ALWAYS_INLINE void PostWriteBarrierImpl(void* cellp, T* prev, T* next) {
+ MOZ_ASSERT(cellp);
+
+ // If the target needs an entry, add it.
+ StoreBuffer* buffer;
+ if (next && (buffer = next->storeBuffer())) {
+ // If we know that the prev has already inserted an entry, we can skip
+ // doing the lookup to add the new entry. Note that we cannot safely
+ // assert the presence of the entry because it may have been added
+ // via a different store buffer.
+ if (prev && prev->storeBuffer()) {
+ return;
+ }
+ buffer->putCell(static_cast<T**>(cellp));
+ return;
+ }
+
+ // Remove the prev entry if the new value does not need it. There will only
+ // be a prev entry if the prev value was in the nursery.
+ if (prev && (buffer = prev->storeBuffer())) {
+ buffer->unputCell(static_cast<T**>(cellp));
+ }
+}
+
+template <typename T>
+MOZ_ALWAYS_INLINE void PostWriteBarrier(T** vp, T* prev, T* next) {
+ static_assert(std::is_base_of_v<Cell, T>);
+ static_assert(!std::is_same_v<Cell, T> && !std::is_same_v<TenuredCell, T>);
+
+ if constexpr (!GCTypeIsTenured<T>()) {
+ using BaseT = typename BaseGCType<T>::type;
+ PostWriteBarrierImpl<BaseT>(vp, prev, next);
+ return;
+ }
+
+ MOZ_ASSERT_IF(next, !IsInsideNursery(next));
+}
+
+// Used when we don't have a specific edge to put in the store buffer.
+void PostWriteBarrierCell(Cell* cell, Cell* prev, Cell* next);
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_StoreBuffer_h */
diff --git a/js/src/gc/Sweeping.cpp b/js/src/gc/Sweeping.cpp
new file mode 100644
index 0000000000..80f4a20e74
--- /dev/null
+++ b/js/src/gc/Sweeping.cpp
@@ -0,0 +1,2383 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Implementation of GC sweeping.
+ *
+ * In the SpiderMonkey GC, 'sweeping' is used to mean two things:
+ * - updating data structures to remove pointers to dead GC things and updating
+ * pointers to moved GC things
+ * - finalizing dead GC things
+ *
+ * Furthermore, the GC carries out gray and weak marking after the start of the
+ * sweep phase. This is also implemented in this file.
+ */
+
+#include "mozilla/Maybe.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/TimeStamp.h"
+
+#include "builtin/FinalizationRegistryObject.h"
+#include "builtin/WeakRefObject.h"
+#include "debugger/DebugAPI.h"
+#include "gc/AllocKind.h"
+#include "gc/FinalizationObservers.h"
+#include "gc/GCInternals.h"
+#include "gc/GCLock.h"
+#include "gc/GCProbes.h"
+#include "gc/GCRuntime.h"
+#include "gc/ParallelWork.h"
+#include "gc/Statistics.h"
+#include "gc/TraceKind.h"
+#include "gc/WeakMap.h"
+#include "gc/Zone.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitZone.h"
+#include "proxy/DeadObjectProxy.h"
+#include "vm/BigIntType.h"
+#include "vm/HelperThreads.h"
+#include "vm/JSContext.h"
+#include "vm/Time.h"
+#include "vm/WrapperObject.h"
+
+#include "gc/PrivateIterators-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/PropMap-inl.h"
+#include "vm/Shape-inl.h"
+#include "vm/StringType-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::TimeStamp;
+
+struct js::gc::FinalizePhase {
+ gcstats::PhaseKind statsPhase;
+ AllocKinds kinds;
+};
+
+/*
+ * Finalization order for objects swept incrementally on the main thread.
+ */
+static constexpr FinalizePhase ForegroundObjectFinalizePhase = {
+ gcstats::PhaseKind::FINALIZE_OBJECT,
+ {AllocKind::OBJECT0, AllocKind::OBJECT2, AllocKind::OBJECT4,
+ AllocKind::OBJECT8, AllocKind::OBJECT12, AllocKind::OBJECT16}};
+
+/*
+ * Finalization order for GC things swept incrementally on the main thread.
+ */
+static constexpr FinalizePhase ForegroundNonObjectFinalizePhase = {
+ gcstats::PhaseKind::FINALIZE_NON_OBJECT,
+ {AllocKind::SCRIPT, AllocKind::JITCODE}};
+
+/*
+ * Finalization order for GC things swept on the background thread.
+ */
+static constexpr FinalizePhase BackgroundFinalizePhases[] = {
+ {gcstats::PhaseKind::FINALIZE_OBJECT,
+ {AllocKind::FUNCTION, AllocKind::FUNCTION_EXTENDED,
+ AllocKind::OBJECT0_BACKGROUND, AllocKind::OBJECT2_BACKGROUND,
+ AllocKind::ARRAYBUFFER4, AllocKind::OBJECT4_BACKGROUND,
+ AllocKind::ARRAYBUFFER8, AllocKind::OBJECT8_BACKGROUND,
+ AllocKind::ARRAYBUFFER12, AllocKind::OBJECT12_BACKGROUND,
+ AllocKind::ARRAYBUFFER16, AllocKind::OBJECT16_BACKGROUND}},
+ {gcstats::PhaseKind::FINALIZE_NON_OBJECT,
+ {AllocKind::SCOPE, AllocKind::REGEXP_SHARED, AllocKind::FAT_INLINE_STRING,
+ AllocKind::STRING, AllocKind::EXTERNAL_STRING, AllocKind::FAT_INLINE_ATOM,
+ AllocKind::ATOM, AllocKind::SYMBOL, AllocKind::BIGINT, AllocKind::SHAPE,
+ AllocKind::BASE_SHAPE, AllocKind::GETTER_SETTER,
+ AllocKind::COMPACT_PROP_MAP, AllocKind::NORMAL_PROP_MAP,
+ AllocKind::DICT_PROP_MAP}}};
+
+template <typename T>
+inline size_t Arena::finalize(JS::GCContext* gcx, AllocKind thingKind,
+ size_t thingSize) {
+ /* Enforce requirements on size of T. */
+ MOZ_ASSERT(thingSize % CellAlignBytes == 0);
+ MOZ_ASSERT(thingSize >= MinCellSize);
+ MOZ_ASSERT(thingSize <= 255);
+
+ MOZ_ASSERT(allocated());
+ MOZ_ASSERT(thingKind == getAllocKind());
+ MOZ_ASSERT(thingSize == getThingSize());
+ MOZ_ASSERT(!onDelayedMarkingList_);
+
+ uint_fast16_t firstThing = firstThingOffset(thingKind);
+ uint_fast16_t firstThingOrSuccessorOfLastMarkedThing = firstThing;
+ uint_fast16_t lastThing = ArenaSize - thingSize;
+
+ FreeSpan newListHead;
+ FreeSpan* newListTail = &newListHead;
+ size_t nmarked = 0, nfinalized = 0;
+
+ for (ArenaCellIterUnderFinalize cell(this); !cell.done(); cell.next()) {
+ T* t = cell.as<T>();
+ if (TenuredThingIsMarkedAny(t)) {
+ uint_fast16_t thing = uintptr_t(t) & ArenaMask;
+ if (thing != firstThingOrSuccessorOfLastMarkedThing) {
+ // We just finished passing over one or more free things,
+ // so record a new FreeSpan.
+ newListTail->initBounds(firstThingOrSuccessorOfLastMarkedThing,
+ thing - thingSize, this);
+ newListTail = newListTail->nextSpanUnchecked(this);
+ }
+ firstThingOrSuccessorOfLastMarkedThing = thing + thingSize;
+ nmarked++;
+ } else {
+ t->finalize(gcx);
+ AlwaysPoison(t, JS_SWEPT_TENURED_PATTERN, thingSize,
+ MemCheckKind::MakeUndefined);
+ gcprobes::TenuredFinalize(t);
+ nfinalized++;
+ }
+ }
+
+ if constexpr (std::is_same_v<T, JSObject>) {
+ if (isNewlyCreated_) {
+ zone->pretenuring.updateCellCountsInNewlyCreatedArenas(
+ nmarked + nfinalized, nmarked);
+ }
+ }
+ isNewlyCreated_ = 0;
+
+ if (thingKind == AllocKind::STRING ||
+ thingKind == AllocKind::FAT_INLINE_STRING) {
+ zone->markedStrings += nmarked;
+ zone->finalizedStrings += nfinalized;
+ }
+
+ if (nmarked == 0) {
+ // Do nothing. The caller will update the arena appropriately.
+ MOZ_ASSERT(newListTail == &newListHead);
+ DebugOnlyPoison(data, JS_SWEPT_TENURED_PATTERN, sizeof(data),
+ MemCheckKind::MakeUndefined);
+ return nmarked;
+ }
+
+ MOZ_ASSERT(firstThingOrSuccessorOfLastMarkedThing != firstThing);
+ uint_fast16_t lastMarkedThing =
+ firstThingOrSuccessorOfLastMarkedThing - thingSize;
+ if (lastThing == lastMarkedThing) {
+ // If the last thing was marked, we will have already set the bounds of
+ // the final span, and we just need to terminate the list.
+ newListTail->initAsEmpty();
+ } else {
+ // Otherwise, end the list with a span that covers the final stretch of free
+ // things.
+ newListTail->initFinal(firstThingOrSuccessorOfLastMarkedThing, lastThing,
+ this);
+ }
+
+ firstFreeSpan = newListHead;
+#ifdef DEBUG
+ size_t nfree = numFreeThings(thingSize);
+ MOZ_ASSERT(nfree + nmarked == thingsPerArena(thingKind));
+#endif
+ return nmarked;
+}
+
+// Finalize arenas from src list, releasing empty arenas if keepArenas wasn't
+// specified and inserting the others into the appropriate destination size
+// bins.
+template <typename T>
+static inline bool FinalizeTypedArenas(JS::GCContext* gcx, ArenaList& src,
+ SortedArenaList& dest,
+ AllocKind thingKind,
+ SliceBudget& budget) {
+ MOZ_ASSERT(gcx->isFinalizing());
+
+ size_t thingSize = Arena::thingSize(thingKind);
+ size_t thingsPerArena = Arena::thingsPerArena(thingKind);
+ size_t markCount = 0;
+
+ auto updateMarkCount = mozilla::MakeScopeExit([&] {
+ GCRuntime* gc = &gcx->runtimeFromAnyThread()->gc;
+ gc->stats().addCount(gcstats::COUNT_CELLS_MARKED, markCount);
+ });
+
+ while (Arena* arena = src.takeFirstArena()) {
+ size_t nmarked = arena->finalize<T>(gcx, thingKind, thingSize);
+ size_t nfree = thingsPerArena - nmarked;
+
+ markCount += nmarked;
+
+ if (nmarked) {
+ dest.insertAt(arena, nfree);
+ } else {
+ arena->chunk()->recycleArena(arena, dest, thingsPerArena);
+ }
+
+ budget.step(thingsPerArena);
+ if (budget.isOverBudget()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * Finalize the list of areans.
+ */
+static bool FinalizeArenas(JS::GCContext* gcx, ArenaList& src,
+ SortedArenaList& dest, AllocKind thingKind,
+ SliceBudget& budget) {
+ switch (thingKind) {
+#define EXPAND_CASE(allocKind, traceKind, type, sizedType, bgFinal, nursery, \
+ compact) \
+ case AllocKind::allocKind: \
+ return FinalizeTypedArenas<type>(gcx, src, dest, thingKind, budget);
+ FOR_EACH_ALLOCKIND(EXPAND_CASE)
+#undef EXPAND_CASE
+
+ default:
+ MOZ_CRASH("Invalid alloc kind");
+ }
+}
+
+void GCRuntime::initBackgroundSweep(Zone* zone, JS::GCContext* gcx,
+ const FinalizePhase& phase) {
+ gcstats::AutoPhase ap(stats(), phase.statsPhase);
+ for (auto kind : phase.kinds) {
+ zone->arenas.initBackgroundSweep(kind);
+ }
+}
+
+void ArenaLists::initBackgroundSweep(AllocKind thingKind) {
+ MOZ_ASSERT(IsBackgroundFinalized(thingKind));
+ MOZ_ASSERT(concurrentUse(thingKind) == ConcurrentUse::None);
+
+ if (!collectingArenaList(thingKind).isEmpty()) {
+ concurrentUse(thingKind) = ConcurrentUse::BackgroundFinalize;
+ }
+}
+
+void GCRuntime::backgroundFinalize(JS::GCContext* gcx, Zone* zone,
+ AllocKind kind, Arena** empty) {
+ MOZ_ASSERT(empty);
+
+ ArenaLists* lists = &zone->arenas;
+ ArenaList& arenas = lists->collectingArenaList(kind);
+ if (arenas.isEmpty()) {
+ MOZ_ASSERT(lists->concurrentUse(kind) == ArenaLists::ConcurrentUse::None);
+ return;
+ }
+
+ SortedArenaList finalizedSorted(Arena::thingsPerArena(kind));
+
+ auto unlimited = SliceBudget::unlimited();
+ FinalizeArenas(gcx, arenas, finalizedSorted, kind, unlimited);
+ MOZ_ASSERT(arenas.isEmpty());
+
+ finalizedSorted.extractEmpty(empty);
+
+ // When marking begins, all arenas are moved from arenaLists to
+ // collectingArenaLists. When the mutator runs, new arenas are allocated in
+ // arenaLists. Now that finalization is complete, we want to merge these lists
+ // back together.
+
+ // We must take the GC lock to be able to safely modify the ArenaList;
+ // however, this does not by itself make the changes visible to all threads,
+ // as not all threads take the GC lock to read the ArenaLists.
+ // That safety is provided by the ReleaseAcquire memory ordering of the
+ // background finalize state, which we explicitly set as the final step.
+ {
+ AutoLockGC lock(rt);
+ MOZ_ASSERT(lists->concurrentUse(kind) ==
+ ArenaLists::ConcurrentUse::BackgroundFinalize);
+ lists->mergeFinalizedArenas(kind, finalizedSorted);
+ }
+
+ lists->concurrentUse(kind) = ArenaLists::ConcurrentUse::None;
+}
+
+// After finalizing arenas, merge the following to get the final state of an
+// arena list:
+// - arenas allocated during marking
+// - arenas allocated during sweeping
+// - finalized arenas
+void ArenaLists::mergeFinalizedArenas(AllocKind kind,
+ SortedArenaList& finalizedArenas) {
+#ifdef DEBUG
+ // Updating arena lists off-thread requires taking the GC lock because the
+ // main thread uses these when allocating.
+ if (IsBackgroundFinalized(kind)) {
+ runtimeFromAnyThread()->gc.assertCurrentThreadHasLockedGC();
+ }
+#endif
+
+ ArenaList& arenas = arenaList(kind);
+
+ ArenaList allocatedDuringCollection = std::move(arenas);
+ arenas = finalizedArenas.toArenaList();
+ arenas.insertListWithCursorAtEnd(allocatedDuringCollection);
+
+ collectingArenaList(kind).clear();
+}
+
+void ArenaLists::queueForegroundThingsForSweep() {
+ gcCompactPropMapArenasToUpdate =
+ collectingArenaList(AllocKind::COMPACT_PROP_MAP).head();
+ gcNormalPropMapArenasToUpdate =
+ collectingArenaList(AllocKind::NORMAL_PROP_MAP).head();
+}
+
+void GCRuntime::sweepBackgroundThings(ZoneList& zones) {
+ if (zones.isEmpty()) {
+ return;
+ }
+
+ JS::GCContext* gcx = TlsGCContext.get();
+ MOZ_ASSERT(gcx->isFinalizing());
+
+ // Sweep zones in order. The atoms zone must be finalized last as other
+ // zones may have direct pointers into it.
+ while (!zones.isEmpty()) {
+ Zone* zone = zones.removeFront();
+ MOZ_ASSERT(zone->isGCFinished());
+
+ TimeStamp startTime = TimeStamp::Now();
+
+ Arena* emptyArenas = zone->arenas.takeSweptEmptyArenas();
+
+ // We must finalize thing kinds in the order specified by
+ // BackgroundFinalizePhases.
+ for (auto phase : BackgroundFinalizePhases) {
+ for (auto kind : phase.kinds) {
+ backgroundFinalize(gcx, zone, kind, &emptyArenas);
+ }
+ }
+
+ // Release any arenas that are now empty.
+ //
+ // Empty arenas are only released after everything has been finalized so
+ // that it's still possible to get a thing's zone after the thing has been
+ // finalized. The HeapPtr destructor depends on this, and this allows
+ // HeapPtrs between things of different alloc kind regardless of
+ // finalization order.
+ //
+ // Periodically drop and reaquire the GC lock every so often to avoid
+ // blocking the main thread from allocating chunks.
+ static const size_t LockReleasePeriod = 32;
+
+ while (emptyArenas) {
+ AutoLockGC lock(this);
+ for (size_t i = 0; i < LockReleasePeriod && emptyArenas; i++) {
+ Arena* arena = emptyArenas;
+ emptyArenas = emptyArenas->next;
+ releaseArena(arena, lock);
+ }
+ }
+
+ // Record time spent sweeping this zone.
+ TimeStamp endTime = TimeStamp::Now();
+ zone->perZoneGCTime += endTime - startTime;
+ }
+}
+
+// Debug-only check that no background sweeping work remains: the queue of
+// zones to sweep is empty and every alloc kind has completed background
+// finalization.
+void GCRuntime::assertBackgroundSweepingFinished() {
+#ifdef DEBUG
+  {
+    // The helper thread lock guards the background sweep zone queue.
+    AutoLockHelperThreadState lock;
+    MOZ_ASSERT(backgroundSweepZones.ref().isEmpty());
+  }
+
+  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+    for (auto kind : AllAllocKinds()) {
+      // Collecting arena lists may legitimately be non-empty while a
+      // collection is in progress (Prepare/Mark/Sweep states).
+      MOZ_ASSERT_IF(state() != State::Prepare && state() != State::Mark &&
+                        state() != State::Sweep,
+                    zone->arenas.collectingArenaList(kind).isEmpty());
+      MOZ_ASSERT(zone->arenas.doneBackgroundFinalize(kind));
+    }
+  }
+#endif
+}
+
+// Append |zones| to the background sweep queue and kick off sweeping: on a
+// helper thread when background threads are in use, otherwise synchronously
+// on the main thread.
+void GCRuntime::queueZonesAndStartBackgroundSweep(ZoneList&& zones) {
+  {
+    AutoLockHelperThreadState lock;
+    MOZ_ASSERT(!requestSliceAfterBackgroundTask);
+    backgroundSweepZones.ref().appendList(std::move(zones));
+    if (useBackgroundThreads) {
+      sweepTask.startOrRunIfIdle(lock);
+    }
+  }
+  if (!useBackgroundThreads) {
+    // Wait for any in-flight task first so the two runs cannot overlap.
+    sweepTask.join();
+    sweepTask.runFromMainThread();
+  }
+}
+
+BackgroundSweepTask::BackgroundSweepTask(GCRuntime* gc)
+    : GCParallelTask(gc, gcstats::PhaseKind::SWEEP, GCUse::Finalizing) {}
+
+void BackgroundSweepTask::run(AutoLockHelperThreadState& lock) {
+  gc->sweepFromBackgroundThread(lock);
+}
+
+// Task body for background sweeping: drain the queue of zones to sweep,
+// dropping the helper thread lock while doing the actual work.
+void GCRuntime::sweepFromBackgroundThread(AutoLockHelperThreadState& lock) {
+  do {
+    // Take ownership of the whole queue under the lock.
+    ZoneList zones;
+    zones.appendList(std::move(backgroundSweepZones.ref()));
+
+    AutoUnlockHelperThreadState unlock(lock);
+    sweepBackgroundThings(zones);
+
+    // The main thread may call queueZonesAndStartBackgroundSweep() while this
+    // is running so we must check there is no more work after releasing the
+    // lock.
+  } while (!backgroundSweepZones.ref().isEmpty());
+
+  maybeRequestGCAfterBackgroundTask(lock);
+}
+
+// Block until any in-progress background sweeping completes.
+void GCRuntime::waitBackgroundSweepEnd() {
+  sweepTask.join();
+  if (state() != State::Sweep) {
+    assertBackgroundSweepingFinished();
+  }
+}
+
+void GCRuntime::startBackgroundFree() {
+  AutoLockHelperThreadState lock;
+  freeTask.startOrRunIfIdle(lock);
+}
+
+BackgroundFreeTask::BackgroundFreeTask(GCRuntime* gc)
+    : GCParallelTask(gc, gcstats::PhaseKind::NONE) {
+  // This can occur outside GCs so doesn't have a stats phase.
+}
+
+void BackgroundFreeTask::run(AutoLockHelperThreadState& lock) {
+  gc->freeFromBackgroundThread(lock);
+}
+
+// Task body for background freeing: release queued LifoAlloc blocks and
+// malloced nursery buffers off the main thread.
+void GCRuntime::freeFromBackgroundThread(AutoLockHelperThreadState& lock) {
+  do {
+    // Take ownership of the queued work under the lock, then release the
+    // lock while freeing.
+    LifoAlloc lifoBlocks(JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
+    lifoBlocks.transferFrom(&lifoBlocksToFree.ref());
+
+    Nursery::BufferSet buffers;
+    std::swap(buffers, buffersToFreeAfterMinorGC.ref());
+
+    AutoUnlockHelperThreadState unlock(lock);
+
+    lifoBlocks.freeAll();
+
+    JS::GCContext* gcx = TlsGCContext.get();
+    for (Nursery::BufferSet::Range r = buffers.all(); !r.empty();
+         r.popFront()) {
+      // Malloc memory associated with nursery objects is not tracked as these
+      // are assumed to be short lived.
+      gcx->freeUntracked(r.front());
+    }
+    // More work may have been queued while the lock was released; loop until
+    // both queues are empty.
+  } while (!lifoBlocksToFree.ref().isEmpty() ||
+           !buffersToFreeAfterMinorGC.ref().empty());
+}
+
+void GCRuntime::waitBackgroundFreeEnd() { freeTask.join(); }
+
+// Mark weak references (weak map entries and JitcodeGlobalTable entries) for
+// the zones produced by ZoneIterT, iterating to a fixed point since marking
+// a value may make further keys reachable. Returns NotFinished only when
+// incremental weakmap marking is enabled and the budget is exhausted.
+template <class ZoneIterT>
+IncrementalProgress GCRuntime::markWeakReferences(
+    SliceBudget& incrementalBudget) {
+  MOZ_ASSERT(!marker().isWeakMarking());
+
+  gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::MARK_WEAK);
+
+  // Only limit work by the incremental budget when incremental weakmap
+  // marking is enabled; otherwise this phase must run to completion.
+  auto unlimited = SliceBudget::unlimited();
+  SliceBudget& budget =
+      marker().incrementalWeakMapMarkingEnabled ? incrementalBudget : unlimited;
+
+  // Ensure we don't return to the mutator while we're still in weak marking
+  // mode.
+  auto leaveOnExit =
+      mozilla::MakeScopeExit([&] { marker().leaveWeakMarkingMode(); });
+
+  if (marker().enterWeakMarkingMode()) {
+    // If there was an 'enter-weak-marking-mode' token in the queue, then it
+    // and everything after it will still be in the queue so we can process
+    // them now.
+    while (processTestMarkQueue() == QueueYielded) {
+    };
+
+    // Do not rely on the information about not-yet-marked weak keys that have
+    // been collected by barriers. Clear out the gcEphemeronEdges entries and
+    // rebuild the full table. Note that this is a cross-zone operation;
+    // delegate zone entries will be populated by map zone traversals, so
+    // everything needs to be cleared first, then populated.
+    if (!marker().incrementalWeakMapMarkingEnabled) {
+      for (ZoneIterT zone(this); !zone.done(); zone.next()) {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        if (!zone->gcEphemeronEdges().clear()) {
+          oomUnsafe.crash("clearing weak keys when entering weak marking mode");
+        }
+      }
+    }
+
+    for (ZoneIterT zone(this); !zone.done(); zone.next()) {
+      if (zone->enterWeakMarkingMode(&marker(), budget) == NotFinished) {
+        return NotFinished;
+      }
+    }
+  }
+
+  // Iterate to a fixed point: marking weakmap values can make more keys
+  // reachable, which in turn can mark more entries.
+  bool markedAny = true;
+  while (markedAny) {
+    if (!marker().markUntilBudgetExhausted(budget)) {
+      MOZ_ASSERT(marker().incrementalWeakMapMarkingEnabled);
+      return NotFinished;
+    }
+
+    markedAny = false;
+
+    if (!marker().isWeakMarking()) {
+      for (ZoneIterT zone(this); !zone.done(); zone.next()) {
+        markedAny |= WeakMapBase::markZoneIteratively(zone, &marker());
+      }
+    }
+
+    markedAny |= jit::JitRuntime::MarkJitcodeGlobalTableIteratively(&marker());
+  }
+
+  assertNoMarkingWork();
+
+  return Finished;
+}
+
+IncrementalProgress GCRuntime::markWeakReferencesInCurrentGroup(
+    SliceBudget& budget) {
+  return markWeakReferences<SweepGroupZonesIter>(budget);
+}
+
+// Mark gray roots for the zones produced by ZoneIterT: embedding-supplied
+// gray roots followed by incoming cross-compartment gray edges. The marker
+// must already be set to gray.
+template <class ZoneIterT>
+IncrementalProgress GCRuntime::markGrayRoots(SliceBudget& budget,
+                                             gcstats::PhaseKind phase) {
+  MOZ_ASSERT(marker().markColor() == MarkColor::Gray);
+
+  gcstats::AutoPhase ap(stats(), phase);
+
+  AutoUpdateLiveCompartments updateLive(this);
+  marker().setRootMarkingMode(true);
+  auto guard =
+      mozilla::MakeScopeExit([this]() { marker().setRootMarkingMode(false); });
+
+  // The embedding's gray root tracing can yield on the budget.
+  IncrementalProgress result =
+      traceEmbeddingGrayRoots(marker().tracer(), budget);
+  if (result == NotFinished) {
+    return NotFinished;
+  }
+
+  Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
+      marker().tracer(), Compartment::GrayEdges);
+
+  return Finished;
+}
+
+// Non-incremental variant used to mark weak references for all GC zones.
+IncrementalProgress GCRuntime::markAllWeakReferences() {
+  SliceBudget budget = SliceBudget::unlimited();
+  return markWeakReferences<GCZonesIter>(budget);
+}
+
+// Non-incremental variant: mark gray roots for all GC zones and drain the
+// mark stack.
+void GCRuntime::markAllGrayReferences(gcstats::PhaseKind phase) {
+  SliceBudget budget = SliceBudget::unlimited();
+  markGrayRoots<GCZonesIter>(budget, phase);
+  drainMarkStack();
+}
+
+void GCRuntime::dropStringWrappers() {
+  /*
+   * String "wrappers" are dropped on GC because their presence would require
+   * us to sweep the wrappers in all compartments every time we sweep a
+   * compartment group.
+   */
+  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+    zone->dropStringWrappersOnGC();
+  }
+}
+
+/*
+ * Group zones that must be swept at the same time.
+ *
+ * From the point of view of the mutator, groups of zones transition atomically
+ * from marking to sweeping. If compartment A has an edge to an unmarked object
+ * in compartment B, then we must not start sweeping A in a later slice than we
+ * start sweeping B. That's because a write barrier in A could lead to the
+ * unmarked object in B becoming marked. However, if we had already swept that
+ * object, we would be in trouble.
+ *
+ * If we consider these dependencies as a graph, then all the compartments in
+ * any strongly-connected component of this graph must start sweeping in the
+ * same slice.
+ *
+ * Tarjan's algorithm is used to calculate the components.
+ */
+
+// Add sweep group edges for this compartment's outgoing object wrappers.
+// Returns false on OOM.
+bool Compartment::findSweepGroupEdges() {
+  Zone* source = zone();
+  for (WrappedObjectCompartmentEnum e(this); !e.empty(); e.popFront()) {
+    Compartment* targetComp = e.front();
+    Zone* target = targetComp->zone();
+
+    // Skip zones that aren't being collected and targets we already have an
+    // edge to.
+    if (!target->isGCMarking() || source->hasSweepGroupEdgeTo(target)) {
+      continue;
+    }
+
+    for (ObjectWrapperEnum e(this, targetComp); !e.empty(); e.popFront()) {
+      JSObject* key = e.front().mutableKey();
+      MOZ_ASSERT(key->zone() == target);
+
+      // Add an edge to the wrapped object's zone to ensure that the wrapper
+      // zone is not still being marked when we start sweeping the wrapped zone.
+      // As an optimization, if the wrapped object is already marked black there
+      // is no danger of later marking and we can skip this.
+      if (key->isMarkedBlack()) {
+        continue;
+      }
+
+      if (!source->addSweepGroupEdgeTo(target)) {
+        return false;
+      }
+
+      // We don't need to consider any more wrappers for this target
+      // compartment since we already added an edge.
+      break;
+    }
+  }
+
+  return true;
+}
+
+// Collect all sweep ordering constraints originating from this zone: the
+// implicit edge to the atoms zone, per-compartment wrapper edges and weakmap
+// edges. Returns false on OOM.
+bool Zone::findSweepGroupEdges(Zone* atomsZone) {
+  MOZ_ASSERT_IF(this != atomsZone, !isAtomsZone());
+
+#ifdef DEBUG
+  if (FinalizationObservers* observers = finalizationObservers()) {
+    observers->checkTables();
+  }
+#endif
+
+  // Any zone may have a pointer to an atom in the atoms zone, and these aren't
+  // in the cross compartment map.
+  if (atomsZone->wasGCStarted() && !addSweepGroupEdgeTo(atomsZone)) {
+    return false;
+  }
+
+  for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
+    if (!comp->findSweepGroupEdges()) {
+      return false;
+    }
+  }
+
+  // Weak maps can also constrain the sweep order.
+  return WeakMapBase::findSweepGroupEdgesForZone(this);
+}
+
+bool GCRuntime::addEdgesForMarkQueue() {
+#ifdef DEBUG
+  // For testing only.
+  //
+  // Add edges between all objects mentioned in the test mark queue, since
+  // otherwise they will get marked in a different order than their sweep
+  // groups. Note that this is only done at the beginning of an incremental
+  // collection, so it is possible for objects to be added later that do not
+  // follow the sweep group ordering. These objects will wait until their sweep
+  // group comes up, or will be skipped if their sweep group is already past.
+  JS::Zone* prevZone = nullptr;
+  for (size_t i = 0; i < testMarkQueue.length(); i++) {
+    Value val = testMarkQueue[i].get();
+    if (!val.isObject()) {
+      continue;
+    }
+    JSObject* obj = &val.toObject();
+    JS::Zone* zone = obj->zone();
+    if (!zone->isGCMarking()) {
+      continue;
+    }
+    // Chain an edge from the previously seen zone to this one so queue order
+    // is compatible with sweep group order.
+    if (prevZone && prevZone != zone) {
+      if (!prevZone->addSweepGroupEdgeTo(zone)) {
+        return false;
+      }
+    }
+    prevZone = zone;
+  }
+#endif
+  return true;
+}
+
+// Gather all constraints on the order in which zones may be swept: per-zone
+// edges, test mark queue edges and edges required by the Debugger. Returns
+// false on OOM.
+bool GCRuntime::findSweepGroupEdges() {
+  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+    if (!zone->findSweepGroupEdges(atomsZone())) {
+      return false;
+    }
+  }
+
+  if (!addEdgesForMarkQueue()) {
+    return false;
+  }
+
+  return DebugAPI::findSweepGroupEdges(rt);
+}
+
+// Partition the collected zones into sweep groups (strongly connected
+// components of the sweep group edge graph — see the comment above), falling
+// back to a single group when not incremental or if edge computation fails.
+void GCRuntime::groupZonesForSweeping(JS::GCReason reason) {
+#ifdef DEBUG
+  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+    MOZ_ASSERT(zone->gcSweepGroupEdges().empty());
+  }
+#endif
+
+  JSContext* cx = rt->mainContextFromOwnThread();
+  ZoneComponentFinder finder(cx);
+  if (!isIncremental || !findSweepGroupEdges()) {
+    finder.useOneComponent();
+  }
+
+  // Use one component for two-slice zeal modes.
+  if (useZeal && hasIncrementalTwoSliceZealMode()) {
+    finder.useOneComponent();
+  }
+
+  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+    MOZ_ASSERT(zone->isGCMarking());
+    finder.addNode(zone);
+  }
+  sweepGroups = finder.getResultsList();
+  currentSweepGroup = sweepGroups;
+  sweepGroupIndex = 1;
+
+  // The edges are only needed to compute the groups; drop them now.
+  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+    zone->clearSweepGroupEdges();
+  }
+
+#ifdef DEBUG
+  // Label each zone with the index of its sweep group.
+  unsigned idx = sweepGroupIndex;
+  for (Zone* head = currentSweepGroup; head; head = head->nextGroup()) {
+    for (Zone* zone = head; zone; zone = zone->nextNodeInGroup()) {
+      MOZ_ASSERT(zone->isGCMarking());
+      zone->gcSweepGroupIndex = idx;
+    }
+    idx++;
+  }
+
+  MOZ_ASSERT_IF(!isIncremental, !currentSweepGroup->nextGroup());
+  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+    MOZ_ASSERT(zone->gcSweepGroupEdges().empty());
+  }
+#endif
+}
+
+// Advance to the next sweep group. For non-incremental collections the
+// remaining groups are merged into one. If sweeping was aborted, unwind the
+// remaining groups back to their pre-sweep marking state instead.
+void GCRuntime::getNextSweepGroup() {
+  currentSweepGroup = currentSweepGroup->nextGroup();
+  ++sweepGroupIndex;
+  if (!currentSweepGroup) {
+    abortSweepAfterCurrentGroup = false;
+    return;
+  }
+
+  MOZ_ASSERT_IF(abortSweepAfterCurrentGroup, !isIncremental);
+  if (!isIncremental) {
+    ZoneComponentFinder::mergeGroups(currentSweepGroup);
+  }
+
+  for (Zone* zone = currentSweepGroup; zone; zone = zone->nextNodeInGroup()) {
+    MOZ_ASSERT(zone->gcState() == zone->initialMarkingState());
+    MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
+  }
+
+  if (abortSweepAfterCurrentGroup) {
+    markTask.join();
+
+    // Abort collection of subsequent sweep groups.
+    for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+      MOZ_ASSERT(!zone->gcNextGraphComponent);
+      zone->changeGCState(zone->initialMarkingState(), Zone::NoGC);
+      zone->arenas.unmarkPreMarkedFreeCells();
+      zone->arenas.mergeArenasFromCollectingLists();
+      zone->clearGCSliceThresholds();
+    }
+
+    for (SweepGroupCompartmentsIter comp(rt); !comp.done(); comp.next()) {
+      resetGrayList(comp);
+    }
+
+    abortSweepAfterCurrentGroup = false;
+    currentSweepGroup = nullptr;
+  }
+}
+
+/*
+ * Gray marking:
+ *
+ * At the end of collection, anything reachable from a gray root that has not
+ * otherwise been marked black must be marked gray.
+ *
+ * This means that when marking things gray we must not allow marking to leave
+ * the current compartment group, as that could result in things being marked
+ * gray when they might subsequently be marked black. To achieve this, when we
+ * find a cross compartment pointer we don't mark the referent but add it to a
+ * singly-linked list of incoming gray pointers that is stored with each
+ * compartment.
+ *
+ * The list head is stored in Compartment::gcIncomingGrayPointers and contains
+ * cross compartment wrapper objects. The next pointer is stored in the second
+ * extra slot of the cross compartment wrapper.
+ *
+ * The list is created during gray marking when one of the
+ * MarkCrossCompartmentXXX functions is called for a pointer that leaves the
+ * current compartment group. This calls DelayCrossCompartmentGrayMarking to
+ * push the referring object onto the list.
+ *
+ * The list is traversed and then unlinked in
+ * GCRuntime::markIncomingGrayCrossCompartmentPointers.
+ */
+
+// Only live cross-compartment wrapper objects participate in the gray
+// pointer list.
+static bool IsGrayListObject(JSObject* obj) {
+  MOZ_ASSERT(obj);
+  return obj->is<CrossCompartmentWrapperObject>() && !IsDeadProxyObject(obj);
+}
+
+/* static */
+unsigned ProxyObject::grayLinkReservedSlot(JSObject* obj) {
+  MOZ_ASSERT(IsGrayListObject(obj));
+  return CrossCompartmentWrapperObject::GrayLinkReservedSlot;
+}
+
+#ifdef DEBUG
+// A wrapper is off the list iff its gray link slot is undefined.
+static void AssertNotOnGrayList(JSObject* obj) {
+  MOZ_ASSERT_IF(
+      IsGrayListObject(obj),
+      GetProxyReservedSlot(obj, ProxyObject::grayLinkReservedSlot(obj))
+          .isUndefined());
+}
+#endif
+
+static void AssertNoWrappersInGrayList(JSRuntime* rt) {
+#ifdef DEBUG
+  for (CompartmentsIter c(rt); !c.done(); c.next()) {
+    MOZ_ASSERT(!c->gcIncomingGrayPointers);
+    for (Compartment::ObjectWrapperEnum e(c); !e.empty(); e.popFront()) {
+      AssertNotOnGrayList(e.front().value().unbarrieredGet());
+    }
+  }
+#endif
+}
+
+// Return the object that a cross-compartment wrapper points to.
+static JSObject* CrossCompartmentPointerReferent(JSObject* obj) {
+  MOZ_ASSERT(IsGrayListObject(obj));
+  return &obj->as<ProxyObject>().private_().toObject();
+}
+
+// Walk one step along the gray pointer list threaded through the wrappers'
+// reserved slots, optionally clearing the link as we go.
+static JSObject* NextIncomingCrossCompartmentPointer(JSObject* prev,
+                                                     bool unlink) {
+  unsigned slot = ProxyObject::grayLinkReservedSlot(prev);
+  JSObject* next = GetProxyReservedSlot(prev, slot).toObjectOrNull();
+  MOZ_ASSERT_IF(next, IsGrayListObject(next));
+
+  if (unlink) {
+    SetProxyReservedSlot(prev, slot, UndefinedValue());
+  }
+
+  return next;
+}
+
+// Record that |src|, a gray cross-compartment wrapper, points into another
+// compartment by pushing it onto that compartment's incoming gray pointer
+// list (see the gray marking comment above).
+void js::gc::DelayCrossCompartmentGrayMarking(GCMarker* maybeMarker,
+                                              JSObject* src) {
+  MOZ_ASSERT_IF(!maybeMarker, !JS::RuntimeHeapIsBusy());
+  MOZ_ASSERT(IsGrayListObject(src));
+  MOZ_ASSERT(src->isMarkedGray());
+
+  AutoTouchingGrayThings tgt;
+
+  mozilla::Maybe<AutoLockGC> lock;
+  if (maybeMarker && maybeMarker->isParallelMarking()) {
+    // Synchronize access to JSCompartment::gcIncomingGrayPointers.
+    //
+    // TODO: Instead of building this list we could scan all incoming CCWs and
+    // mark through gray ones when marking gray roots for a sweep group.
+    lock.emplace(maybeMarker->runtime());
+  }
+
+  /* Called from MarkCrossCompartmentXXX functions. */
+  unsigned slot = ProxyObject::grayLinkReservedSlot(src);
+  JSObject* dest = CrossCompartmentPointerReferent(src);
+  Compartment* comp = dest->compartment();
+
+  // Link the wrapper onto the head of the list unless it is already present
+  // (its link slot then holds an object or null rather than undefined).
+  if (GetProxyReservedSlot(src, slot).isUndefined()) {
+    SetProxyReservedSlot(src, slot,
+                         ObjectOrNullValue(comp->gcIncomingGrayPointers));
+    comp->gcIncomingGrayPointers = src;
+  } else {
+    MOZ_ASSERT(GetProxyReservedSlot(src, slot).isObjectOrNull());
+  }
+
+#ifdef DEBUG
+  /*
+   * Assert that the object is in our list, also walking the list to check its
+   * integrity.
+   */
+  JSObject* obj = comp->gcIncomingGrayPointers;
+  bool found = false;
+  while (obj) {
+    if (obj == src) {
+      found = true;
+    }
+    obj = NextIncomingCrossCompartmentPointer(obj, false);
+  }
+  MOZ_ASSERT(found);
+#endif
+}
+
+// Traverse and unlink each compartment's incoming gray pointer list (built
+// by DelayCrossCompartmentGrayMarking), marking the referent of every
+// still-gray wrapper.
+void GCRuntime::markIncomingGrayCrossCompartmentPointers() {
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_INCOMING_GRAY);
+
+  for (SweepGroupCompartmentsIter c(rt); !c.done(); c.next()) {
+    MOZ_ASSERT(c->zone()->isGCMarkingBlackAndGray());
+    MOZ_ASSERT_IF(c->gcIncomingGrayPointers,
+                  IsGrayListObject(c->gcIncomingGrayPointers));
+
+    // Passing unlink=true clears each wrapper's link slot as we walk.
+    for (JSObject* src = c->gcIncomingGrayPointers; src;
+         src = NextIncomingCrossCompartmentPointer(src, true)) {
+      JSObject* dst = CrossCompartmentPointerReferent(src);
+      MOZ_ASSERT(dst->compartment() == c);
+      MOZ_ASSERT_IF(src->asTenured().isMarkedBlack(),
+                    dst->asTenured().isMarkedBlack());
+
+      // Only gray wrappers need their referent marked; a black wrapper
+      // implies a black referent (asserted above).
+      if (src->asTenured().isMarkedGray()) {
+        TraceManuallyBarrieredEdge(marker().tracer(), &dst,
+                                   "cross-compartment gray pointer");
+      }
+    }
+
+    c->gcIncomingGrayPointers = nullptr;
+  }
+}
+
+// Remove |wrapper| from its target compartment's incoming gray pointer list.
+// Returns whether the wrapper was on the list.
+static bool RemoveFromGrayList(JSObject* wrapper) {
+  AutoTouchingGrayThings tgt;
+
+  if (!IsGrayListObject(wrapper)) {
+    return false;
+  }
+
+  unsigned slot = ProxyObject::grayLinkReservedSlot(wrapper);
+  if (GetProxyReservedSlot(wrapper, slot).isUndefined()) {
+    return false; /* Not on our list. */
+  }
+
+  // Unlink the wrapper, remembering the rest of the list that followed it.
+  JSObject* tail = GetProxyReservedSlot(wrapper, slot).toObjectOrNull();
+  SetProxyReservedSlot(wrapper, slot, UndefinedValue());
+
+  Compartment* comp = CrossCompartmentPointerReferent(wrapper)->compartment();
+  JSObject* obj = comp->gcIncomingGrayPointers;
+  if (obj == wrapper) {
+    comp->gcIncomingGrayPointers = tail;
+    return true;
+  }
+
+  // Otherwise walk the list to find the predecessor and splice it to |tail|.
+  while (obj) {
+    unsigned slot = ProxyObject::grayLinkReservedSlot(obj);
+    JSObject* next = GetProxyReservedSlot(obj, slot).toObjectOrNull();
+    if (next == wrapper) {
+      js::detail::SetProxyReservedSlotUnchecked(obj, slot,
+                                                ObjectOrNullValue(tail));
+      return true;
+    }
+    obj = next;
+  }
+
+  MOZ_CRASH("object not found in gray link list");
+}
+
+// Unlink every entry in a compartment's incoming gray pointer list and clear
+// the list head.
+void GCRuntime::resetGrayList(Compartment* comp) {
+  JSObject* src = comp->gcIncomingGrayPointers;
+  while (src) {
+    // Walking with unlink=true clears each wrapper's link slot.
+    src = NextIncomingCrossCompartmentPointer(src, true);
+  }
+  comp->gcIncomingGrayPointers = nullptr;
+}
+
+#ifdef DEBUG
+// Debug helper: does any compartment in the current sweep group still have
+// incoming gray pointers recorded?
+static bool HasIncomingCrossCompartmentPointers(JSRuntime* rt) {
+  for (SweepGroupCompartmentsIter c(rt); !c.done(); c.next()) {
+    if (c->gcIncomingGrayPointers) {
+      return true;
+    }
+  }
+
+  return false;
+}
+#endif
+
+// Called when a cross-compartment wrapper is nuked: drop GC state referring
+// to the wrapper (gray list membership, WeakRef and FinalizationRecord
+// wrapper bookkeeping).
+void js::NotifyGCNukeWrapper(JSContext* cx, JSObject* wrapper) {
+  MOZ_ASSERT(IsCrossCompartmentWrapper(wrapper));
+
+  /*
+   * References to target of wrapper are being removed, we no longer have to
+   * remember to mark it.
+   */
+  RemoveFromGrayList(wrapper);
+
+  /*
+   * Clean up WeakRef maps which might include this wrapper.
+   */
+  JSObject* target = UncheckedUnwrapWithoutExpose(wrapper);
+  if (target->is<WeakRefObject>()) {
+    WeakRefObject* weakRef = &target->as<WeakRefObject>();
+    if (weakRef->target()) {
+      cx->runtime()->gc.nukeWeakRefWrapper(wrapper, weakRef);
+    }
+  }
+
+  /*
+   * Clean up FinalizationRecord record objects which might be the target of
+   * this wrapper.
+   */
+  if (target->is<FinalizationRecordObject>()) {
+    auto* record = &target->as<FinalizationRecordObject>();
+    cx->runtime()->gc.nukeFinalizationRecordWrapper(wrapper, record);
+  }
+}
+
+// Bitset returned by NotifyGCPreSwap describing which of the two objects was
+// removed from its compartment's gray pointer list.
+enum {
+  JS_GC_SWAP_OBJECT_A_REMOVED = 1 << 0,
+  JS_GC_SWAP_OBJECT_B_REMOVED = 1 << 1
+};
+
+unsigned js::NotifyGCPreSwap(JSObject* a, JSObject* b) {
+  /*
+   * Two objects in the same compartment are about to have had their contents
+   * swapped. If either of them are in our gray pointer list, then we remove
+   * them from the lists, returning a bitset indicating what happened.
+   */
+  return (RemoveFromGrayList(a) ? JS_GC_SWAP_OBJECT_A_REMOVED : 0) |
+         (RemoveFromGrayList(b) ? JS_GC_SWAP_OBJECT_B_REMOVED : 0);
+}
+
+void js::NotifyGCPostSwap(JSObject* a, JSObject* b, unsigned removedFlags) {
+  /*
+   * Two objects in the same compartment have had their contents swapped. If
+   * either of them were in our gray pointer list, we re-add them again.
+   */
+  // The re-add is deliberately crossed (a's flag re-adds b and vice versa):
+  // the objects' contents have been exchanged.
+  if (removedFlags & JS_GC_SWAP_OBJECT_A_REMOVED) {
+    DelayCrossCompartmentGrayMarking(nullptr, b);
+  }
+  if (removedFlags & JS_GC_SWAP_OBJECT_B_REMOVED) {
+    DelayCrossCompartmentGrayMarking(nullptr, a);
+  }
+}
+
+// In DEBUG builds always, and in zeal builds when the CheckWeakMapMarking
+// mode is active, verify weakmap marking invariants for every zone in the
+// current sweep group.
+static inline void MaybeCheckWeakMapMarking(GCRuntime* gc) {
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+
+  bool shouldCheck;
+# if defined(DEBUG)
+  shouldCheck = true;
+# else
+  shouldCheck = gc->hasZealMode(ZealMode::CheckWeakMapMarking);
+# endif
+
+  if (shouldCheck) {
+    for (SweepGroupZonesIter zone(gc); !zone.done(); zone.next()) {
+      MOZ_RELEASE_ASSERT(WeakMapBase::checkMarkingForZone(zone));
+    }
+  }
+
+#endif
+}
+
+// Prepare the current sweep group for gray marking: restrict gray marking to
+// this group and mark gray pointers recorded from previously swept groups.
+IncrementalProgress GCRuntime::beginMarkingSweepGroup(JS::GCContext* gcx,
+                                                      SliceBudget& budget) {
+#ifdef DEBUG
+  MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
+  assertNoMarkingWork();
+  for (auto& marker : markers) {
+    MOZ_ASSERT(marker->markColor() == MarkColor::Black);
+  }
+  MOZ_ASSERT(cellsToAssertNotGray.ref().empty());
+#endif
+
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
+
+  // Change state of current group to MarkBlackAndGray to restrict gray marking
+  // to this group. Note that there may be pointers to the atoms zone, and these
+  // will be marked through, as they are not marked with
+  // TraceCrossCompartmentEdge.
+  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+    zone->changeGCState(zone->initialMarkingState(), Zone::MarkBlackAndGray);
+  }
+
+  AutoSetMarkColor setColorGray(marker(), MarkColor::Gray);
+
+  // Mark incoming gray pointers from previously swept compartments.
+  markIncomingGrayCrossCompartmentPointers();
+
+  return Finished;
+}
+
+IncrementalProgress GCRuntime::markGrayRootsInCurrentGroup(
+    JS::GCContext* gcx, SliceBudget& budget) {
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
+
+  // Switch the marker to gray for the duration of this call.
+  AutoSetMarkColor setColorGray(marker(), MarkColor::Gray);
+
+  return markGrayRoots<SweepGroupZonesIter>(budget,
+                                            gcstats::PhaseKind::MARK_GRAY);
+}
+
+// Drain outstanding marking work within the slice budget, possibly using
+// parallel marking.
+IncrementalProgress GCRuntime::markGray(JS::GCContext* gcx,
+                                        SliceBudget& budget) {
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
+
+  if (markUntilBudgetExhausted(budget, AllowParallelMarking) == NotFinished) {
+    return NotFinished;
+  }
+
+  return Finished;
+}
+
+// Finish marking the current sweep group: complete weak reference marking
+// first in black, then again in gray (the second pass runs under
+// AutoSetMarkColor). After this returns Finished it is no longer safe to
+// yield until sweeping of the group has begun.
+IncrementalProgress GCRuntime::endMarkingSweepGroup(JS::GCContext* gcx,
+                                                    SliceBudget& budget) {
+#ifdef DEBUG
+  MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
+  assertNoMarkingWork();
+  for (auto& marker : markers) {
+    MOZ_ASSERT(marker->markColor() == MarkColor::Black);
+  }
+  MOZ_ASSERT(!HasIncomingCrossCompartmentPointers(rt));
+#endif
+
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
+
+  // Black weak marking pass.
+  if (markWeakReferencesInCurrentGroup(budget) == NotFinished) {
+    return NotFinished;
+  }
+
+  AutoSetMarkColor setColorGray(marker(), MarkColor::Gray);
+
+  // Mark transitively inside the current compartment group.
+  if (markWeakReferencesInCurrentGroup(budget) == NotFinished) {
+    return NotFinished;
+  }
+
+  MOZ_ASSERT(marker().isDrained());
+
+  // We must not yield after this point before we start sweeping the group.
+  safeToYield = false;
+
+  MaybeCheckWeakMapMarking(this);
+
+  return Finished;
+}
+
+// Causes the given WeakCache to be swept when run.
+class ImmediateSweepWeakCacheTask : public GCParallelTask {
+  Zone* zone;
+  JS::detail::WeakCacheBase& cache;
+
+  ImmediateSweepWeakCacheTask(const ImmediateSweepWeakCacheTask&) = delete;
+
+ public:
+  ImmediateSweepWeakCacheTask(GCRuntime* gc, Zone* zone,
+                              JS::detail::WeakCacheBase& wc)
+      : GCParallelTask(gc, gcstats::PhaseKind::SWEEP_WEAK_CACHES),
+        zone(zone),
+        cache(wc) {}
+
+  // Move constructor; these tasks are stored in a Vector (see
+  // WeakCacheTaskVector below).
+  ImmediateSweepWeakCacheTask(ImmediateSweepWeakCacheTask&& other)
+      : GCParallelTask(std::move(other)),
+        zone(other.zone),
+        cache(other.cache) {}
+
+  void run(AutoLockHelperThreadState& lock) override {
+    // The actual sweeping happens with the helper thread lock released.
+    AutoUnlockHelperThreadState unlock(lock);
+    AutoSetThreadIsSweeping threadIsSweeping(zone);
+    SweepingTracer trc(gc->rt);
+    cache.traceWeak(&trc, &gc->storeBuffer());
+  }
+};
+
+// Refine per-zone atom marking bitmaps from the chunk mark bits and keep
+// atoms referenced by uncollected zones alive.
+void GCRuntime::updateAtomsBitmap() {
+  DenseBitmap marked;
+  if (atomMarking.computeBitmapFromChunkMarkBits(rt, marked)) {
+    for (GCZonesIter zone(this); !zone.done(); zone.next()) {
+      atomMarking.refineZoneBitmapForCollectedZone(zone, marked);
+    }
+  } else {
+    // Ignore OOM in computeBitmapFromChunkMarkBits. The
+    // refineZoneBitmapForCollectedZone call can only remove atoms from the
+    // zone bitmap, so it is conservative to just not call it.
+  }
+
+  atomMarking.markAtomsUsedByUncollectedZones(rt);
+
+  // For convenience sweep these tables non-incrementally as part of bitmap
+  // sweeping; they are likely to be much smaller than the main atoms table.
+  SweepingTracer trc(rt);
+  rt->symbolRegistry().traceWeak(&trc);
+}
+
+// Sweep cross-compartment wrapper edges for each zone in the current sweep
+// group.
+void GCRuntime::sweepCCWrappers() {
+  SweepingTracer trc(rt);
+  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+    zone->traceWeakCCWEdges(&trc);
+  }
+}
+
+// Sweep each realm's weak edge to its global object.
+void GCRuntime::sweepRealmGlobals() {
+  SweepingTracer trc(rt);
+  for (SweepGroupRealmsIter r(this); !r.done(); r.next()) {
+    AutoSetThreadIsSweeping threadIsSweeping(r->zone());
+    r->traceWeakGlobalEdge(&trc);
+  }
+}
+
+// Sweep miscellaneous weak tables: saved stacks, regexps and native
+// iterators.
+void GCRuntime::sweepMisc() {
+  SweepingTracer trc(rt);
+  for (SweepGroupRealmsIter r(this); !r.done(); r.next()) {
+    AutoSetThreadIsSweeping threadIsSweeping(r->zone());
+    r->traceWeakSavedStacks(&trc);
+    r->traceWeakRegExps(&trc);
+  }
+  for (SweepGroupCompartmentsIter c(this); !c.done(); c.next()) {
+    AutoSetThreadIsSweeping threadIsSweeping(c->zone());
+    c->traceWeakNativeIterators(&trc);
+  }
+}
+
+void GCRuntime::sweepCompressionTasks() {
+  JSRuntime* runtime = rt;
+
+  // Attach finished compression tasks.
+  AutoLockHelperThreadState lock;
+  AttachFinishedCompressions(runtime, lock);
+  SweepPendingCompressions(lock);
+}
+
+// Sweep weak maps for each zone in the current sweep group, first discarding
+// the ephemeron edge tables built up during marking.
+void GCRuntime::sweepWeakMaps() {
+  SweepingTracer trc(rt);
+  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+    /* No need to look up any more weakmap keys from this sweep group. */
+    AutoEnterOOMUnsafeRegion oomUnsafe;
+    if (!zone->gcEphemeronEdges().clear()) {
+      oomUnsafe.crash("clearing weak keys in beginSweepingSweepGroup()");
+    }
+
+    // Lock the storebuffer since this may access it when rehashing or resizing
+    // the tables.
+    AutoLockStoreBuffer lock(&storeBuffer());
+    zone->sweepWeakMaps(&trc);
+  }
+}
+
+// Sweep the cell-to-unique-id tables for each zone in the current group.
+void GCRuntime::sweepUniqueIds() {
+  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+    AutoSetThreadIsSweeping threadIsSweeping(zone);
+    zone->sweepUniqueIds();
+  }
+}
+
+void JS::Zone::sweepUniqueIds() {
+  SweepingTracer trc(runtimeFromAnyThread());
+  uniqueIds().traceWeak(&trc);
+}
+
+/* static */
+bool UniqueIdGCPolicy::traceWeak(JSTracer* trc, Cell** keyp, uint64_t* valuep) {
+  // Since this is only ever used for sweeping, we can optimize it for that
+  // case. (Compacting GC updates this table manually when it moves a cell.)
+  MOZ_ASSERT(trc->kind() == JS::TracerKind::Sweeping);
+  return (*keyp)->isMarkedAny();
+}
+
+void GCRuntime::sweepFinalizationObserversOnMainThread() {
+  // This calls back into the browser which expects to be called from the main
+  // thread.
+  gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
+  gcstats::AutoPhase ap2(stats(),
+                         gcstats::PhaseKind::SWEEP_FINALIZATION_OBSERVERS);
+  SweepingTracer trc(rt);
+  AutoLockStoreBuffer lock(&storeBuffer());
+  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+    traceWeakFinalizationObserverEdges(&trc, zone);
+  }
+}
+
+// Start a parallel task, or run it immediately on the main thread when extra
+// threads are unavailable.
+void GCRuntime::startTask(GCParallelTask& task,
+                          AutoLockHelperThreadState& lock) {
+  if (!CanUseExtraThreads()) {
+    AutoUnlockHelperThreadState unlock(lock);
+    task.runFromMainThread();
+    // Still account the time under the task's parallel phase.
+    stats().recordParallelPhase(task.phaseKind, task.duration());
+    return;
+  }
+
+  task.startWithLockHeld(lock);
+}
+
+// Wait for a parallel task to complete, accounting the wait under
+// JOIN_PARALLEL_TASKS.
+void GCRuntime::joinTask(GCParallelTask& task,
+                         AutoLockHelperThreadState& lock) {
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::JOIN_PARALLEL_TASKS);
+  task.joinWithLockHeld(lock);
+}
+
+// Sweep Debugger-related state. This can modify weakmaps and therefore must
+// run on the main thread before weakmap sweeping.
+void GCRuntime::sweepDebuggerOnMainThread(JS::GCContext* gcx) {
+  SweepingTracer trc(rt);
+  AutoLockStoreBuffer lock(&storeBuffer());
+
+  // Detach unreachable debuggers and global objects from each other.
+  // This can modify weakmaps and so must happen before weakmap sweeping.
+  DebugAPI::sweepAll(gcx);
+
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
+
+  // Sweep debug environment information. This performs lookups in the Zone's
+  // unique IDs table and so must not happen in parallel with sweeping that
+  // table.
+  {
+    gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::SWEEP_MISC);
+    for (SweepGroupRealmsIter r(rt); !r.done(); r.next()) {
+      r->traceWeakDebugEnvironmentEdges(&trc);
+    }
+  }
+}
+
+// Sweep JIT data: cancel off-thread Ion compiles, weakly trace the
+// JitcodeGlobalTable, discard JIT code, then sweep JitZone/JitRealm data.
+void GCRuntime::sweepJitDataOnMainThread(JS::GCContext* gcx) {
+  SweepingTracer trc(rt);
+  {
+    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_JIT_DATA);
+
+    if (initialState != State::NotActive) {
+      // Cancel any active or pending off thread compilations. We also did
+      // this before marking (in DiscardJITCodeForGC) so this is a no-op
+      // for non-incremental GCs.
+      js::CancelOffThreadIonCompile(rt, JS::Zone::Sweep);
+    }
+
+    // Bug 1071218: the following method has not yet been refactored to
+    // work on a single zone-group at once.
+
+    // Sweep entries containing about-to-be-finalized JitCode and
+    // update relocated TypeSet::Types inside the JitcodeGlobalTable.
+    jit::JitRuntime::TraceWeakJitcodeGlobalTable(rt, &trc);
+  }
+
+  if (initialState != State::NotActive) {
+    gcstats::AutoPhase apdc(stats(), gcstats::PhaseKind::SWEEP_DISCARD_CODE);
+    for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+      zone->discardJitCode(gcx);
+    }
+  }
+
+  // JitZone/JitRealm must be swept *after* discarding JIT code, because
+  // Zone::discardJitCode might access CacheIRStubInfos deleted here.
+  {
+    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_JIT_DATA);
+
+    for (SweepGroupRealmsIter r(rt); !r.done(); r.next()) {
+      r->traceWeakEdgesInJitRealm(&trc);
+    }
+
+    for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+      if (jit::JitZone* jitZone = zone->jitZone()) {
+        jitZone->traceWeak(&trc);
+      }
+    }
+  }
+}
+
+using WeakCacheTaskVector =
+    mozilla::Vector<ImmediateSweepWeakCacheTask, 0, SystemAllocPolicy>;
+
+// Call a functor for all weak caches that need to be swept in the current
+// sweep group.
+template <typename Functor>
+static inline bool IterateWeakCaches(JSRuntime* rt, Functor f) {
+  // Per-zone caches first; runtime-wide caches are passed with a null zone.
+  for (SweepGroupZonesIter zone(rt); !zone.done(); zone.next()) {
+    for (JS::detail::WeakCacheBase* cache : zone->weakCaches()) {
+      if (!f(cache, zone.get())) {
+        return false;
+      }
+    }
+  }
+
+  for (JS::detail::WeakCacheBase* cache : rt->weakCaches()) {
+    if (!f(cache, nullptr)) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+static bool PrepareWeakCacheTasks(JSRuntime* rt,
+                                  WeakCacheTaskVector* immediateTasks) {
+  // Start incremental sweeping for caches that support it or add to a vector
+  // of sweep tasks to run on a helper thread.
+
+  MOZ_ASSERT(immediateTasks->empty());
+
+  GCRuntime* gc = &rt->gc;
+  bool ok =
+      IterateWeakCaches(rt, [&](JS::detail::WeakCacheBase* cache, Zone* zone) {
+        if (cache->empty()) {
+          return true;
+        }
+
+        // Caches that support incremental sweeping will be swept later.
+        if (zone && cache->setIncrementalBarrierTracer(&gc->sweepingTracer)) {
+          return true;
+        }
+
+        return immediateTasks->emplaceBack(gc, zone, *cache);
+      });
+
+  // Don't leave a partial set of tasks behind on failure (OOM).
+  if (!ok) {
+    immediateTasks->clearAndFree();
+  }
+
+  return ok;
+}
+
+static void SweepAllWeakCachesOnMainThread(JSRuntime* rt) {
+  // If we ran out of memory, do all the work on the main thread.
+  gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::SWEEP_WEAK_CACHES);
+  SweepingTracer trc(rt);
+  IterateWeakCaches(rt, [&](JS::detail::WeakCacheBase* cache, Zone* zone) {
+    // Undo any incremental barrier set up by PrepareWeakCacheTasks.
+    if (cache->needsIncrementalBarrier()) {
+      cache->setIncrementalBarrierTracer(nullptr);
+    }
+    cache->traceWeak(&trc, &rt->gc.storeBuffer());
+    return true;
+  });
+}
+
+// Notify the embedding about weak pointers for the current sweep group: run
+// the GROUP_PREPARE finalize callbacks, then the per-runtime and
+// per-compartment weak pointer callbacks, then the GROUP_START callbacks.
+void GCRuntime::sweepEmbeddingWeakPointers(JS::GCContext* gcx) {
+  using namespace gcstats;
+
+  // Callbacks may touch the store buffer; hold its lock for the duration.
+  AutoLockStoreBuffer lock(&storeBuffer());
+
+  AutoPhase ap(stats(), PhaseKind::FINALIZE_START);
+  callFinalizeCallbacks(gcx, JSFINALIZE_GROUP_PREPARE);
+  {
+    AutoPhase ap2(stats(), PhaseKind::WEAK_ZONES_CALLBACK);
+    callWeakPointerZonesCallbacks(&sweepingTracer);
+  }
+  {
+    AutoPhase ap2(stats(), PhaseKind::WEAK_COMPARTMENT_CALLBACK);
+    for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+      for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+        callWeakPointerCompartmentCallbacks(&sweepingTracer, comp);
+      }
+    }
+  }
+  callFinalizeCallbacks(gcx, JSFINALIZE_GROUP_START);
+}
+
+IncrementalProgress GCRuntime::beginSweepingSweepGroup(JS::GCContext* gcx,
+                                                       SliceBudget& budget) {
+  /*
+   * Begin sweeping the group of zones in currentSweepGroup, performing
+   * actions that must be done before yielding to caller.
+   *
+   * Note the ordering constraints documented on the individual steps below:
+   * several of the main-thread sweeps must run before others and before the
+   * parallel tasks are started.
+   */
+
+  using namespace gcstats;
+
+  AutoSCC scc(stats(), sweepGroupIndex);
+
+  bool sweepingAtoms = false;
+  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+    /* Set the GC state to sweeping. */
+    zone->changeGCState(Zone::MarkBlackAndGray, Zone::Sweep);
+
+    /* Purge the ArenaLists before sweeping. */
+    zone->arenas.checkSweepStateNotInUse();
+    zone->arenas.unmarkPreMarkedFreeCells();
+    zone->arenas.clearFreeLists();
+
+    if (zone->isAtomsZone()) {
+      sweepingAtoms = true;
+    }
+  }
+
+#ifdef DEBUG
+  // Verify cells queued for a not-gray assertion, then drop the list.
+  for (auto cell : cellsToAssertNotGray.ref()) {
+    JS::AssertCellIsNotGray(cell);
+  }
+  cellsToAssertNotGray.ref().clearAndFree();
+#endif
+
+  // Updating the atom marking bitmaps. This marks atoms referenced by
+  // uncollected zones so cannot be done in parallel with the other sweeping
+  // work below.
+  if (sweepingAtoms) {
+    AutoPhase ap(stats(), PhaseKind::UPDATE_ATOMS_BITMAP);
+    updateAtomsBitmap();
+  }
+
+#ifdef JS_GC_ZEAL
+  validateIncrementalMarking();
+#endif
+
+  AutoSetThreadIsSweeping threadIsSweeping;
+
+  // This must happen before sweeping realm globals.
+  sweepDebuggerOnMainThread(gcx);
+
+  // FinalizationRegistry sweeping touches weak maps and so must not run in
+  // parallel with that. This triggers a read barrier and can add marking work
+  // for zones that are still marking. Must happen before sweeping realm
+  // globals.
+  sweepFinalizationObserversOnMainThread();
+
+  // This must happen before updating embedding weak pointers.
+  sweepRealmGlobals();
+
+  sweepEmbeddingWeakPointers(gcx);
+
+  {
+    AutoLockHelperThreadState lock;
+
+    AutoPhase ap(stats(), PhaseKind::SWEEP_COMPARTMENTS);
+
+    // Kick off the parallel sweeping tasks; each AutoRunParallelTask joins
+    // its task when it goes out of scope at the end of this block.
+    AutoRunParallelTask sweepCCWrappers(this, &GCRuntime::sweepCCWrappers,
+                                        PhaseKind::SWEEP_CC_WRAPPER,
+                                        GCUse::Sweeping, lock);
+    AutoRunParallelTask sweepMisc(this, &GCRuntime::sweepMisc,
+                                  PhaseKind::SWEEP_MISC, GCUse::Sweeping, lock);
+    AutoRunParallelTask sweepCompTasks(this, &GCRuntime::sweepCompressionTasks,
+                                       PhaseKind::SWEEP_COMPRESSION,
+                                       GCUse::Sweeping, lock);
+    AutoRunParallelTask sweepWeakMaps(this, &GCRuntime::sweepWeakMaps,
+                                      PhaseKind::SWEEP_WEAKMAPS,
+                                      GCUse::Sweeping, lock);
+    AutoRunParallelTask sweepUniqueIds(this, &GCRuntime::sweepUniqueIds,
+                                       PhaseKind::SWEEP_UNIQUEIDS,
+                                       GCUse::Sweeping, lock);
+
+    // Weak caches are either queued for incremental sweeping or swept by
+    // helper-thread tasks; on OOM everything is swept on the main thread.
+    WeakCacheTaskVector sweepCacheTasks;
+    bool canSweepWeakCachesOffThread =
+        PrepareWeakCacheTasks(rt, &sweepCacheTasks);
+    if (canSweepWeakCachesOffThread) {
+      weakCachesToSweep.ref().emplace(currentSweepGroup);
+      for (auto& task : sweepCacheTasks) {
+        startTask(task, lock);
+      }
+    }
+
+    {
+      // Drop the helper-thread lock while doing main-thread-only work.
+      AutoUnlockHelperThreadState unlock(lock);
+      sweepJitDataOnMainThread(gcx);
+
+      if (!canSweepWeakCachesOffThread) {
+        MOZ_ASSERT(sweepCacheTasks.empty());
+        SweepAllWeakCachesOnMainThread(rt);
+      }
+    }
+
+    for (auto& task : sweepCacheTasks) {
+      joinTask(task, lock);
+    }
+  }
+
+  if (sweepingAtoms) {
+    startSweepingAtomsTable();
+  }
+
+  // Queue all GC things in all zones for sweeping, either on the foreground
+  // or on the background thread.
+
+  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+    for (const auto& phase : BackgroundFinalizePhases) {
+      initBackgroundSweep(zone, gcx, phase);
+    }
+
+    zone->arenas.queueForegroundThingsForSweep();
+  }
+
+  MOZ_ASSERT(!sweepZone);
+
+  safeToYield = true;
+  markOnBackgroundThreadDuringSweeping = CanUseExtraThreads();
+
+  return Finished;
+}
+
+#ifdef JS_GC_ZEAL
+// Return whether an incremental slice should yield at the point associated
+// with |mode|, based on the configured zeal settings.
+bool GCRuntime::shouldYieldForZeal(ZealMode mode) {
+  bool yield = useZeal && hasZealMode(mode);
+
+  // Only yield on the first sweep slice for this mode.
+  bool firstSweepSlice = initialState != State::Sweep;
+  if (mode == ZealMode::IncrementalMultipleSlices && !firstSweepSlice) {
+    yield = false;
+  }
+
+  return yield;
+}
+#endif
+
+// Finish sweeping the current sweep group: run the GROUP_END finalize
+// callbacks, mark swept zones as finished, and queue them for background
+// sweeping. Returns NotFinished if the background mark task could not be
+// joined within the current slice.
+IncrementalProgress GCRuntime::endSweepingSweepGroup(JS::GCContext* gcx,
+                                                     SliceBudget& budget) {
+  // This is to prevent a race between markTask checking the zone state and
+  // us changing it below.
+  if (joinBackgroundMarkTask() == NotFinished) {
+    return NotFinished;
+  }
+
+  assertNoMarkingWork();
+
+  // Disable background marking during sweeping until we start sweeping the next
+  // zone group.
+  markOnBackgroundThreadDuringSweeping = false;
+
+  {
+    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
+    AutoLockStoreBuffer lock(&storeBuffer());
+    callFinalizeCallbacks(gcx, JSFINALIZE_GROUP_END);
+  }
+
+  /* Free LIFO blocks on a background thread if possible. */
+  startBackgroundFree();
+
+  /* Update the GC state for zones we have swept. */
+  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+    if (jit::JitZone* jitZone = zone->jitZone()) {
+      // Clear out any small pools that we're hanging on to.
+      jitZone->execAlloc().purge();
+    }
+    AutoLockGC lock(this);
+    zone->changeGCState(Zone::Sweep, Zone::Finished);
+    zone->arenas.unmarkPreMarkedFreeCells();
+    zone->arenas.checkNoArenasToUpdate();
+    zone->pretenuring.clearCellCountsInNewlyCreatedArenas();
+  }
+
+  /*
+   * Start background thread to sweep zones if required, sweeping any atoms
+   * zones last if present. (Atoms zones are appended; others are prepended.)
+   */
+  ZoneList zones;
+  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+    if (zone->isAtomsZone()) {
+      zones.append(zone);
+    } else {
+      zones.prepend(zone);
+    }
+  }
+
+  queueZonesAndStartBackgroundSweep(std::move(zones));
+
+  return Finished;
+}
+
+// Drain outstanding marking work during the sweep phase, either by handing it
+// to the background mark task (when allowed) or by marking on the main thread
+// within |budget|.
+IncrementalProgress GCRuntime::markDuringSweeping(JS::GCContext* gcx,
+                                                  SliceBudget& budget) {
+  MOZ_ASSERT(markTask.isIdle());
+
+  if (markOnBackgroundThreadDuringSweeping) {
+    // Only start the task if there is actually marking work pending.
+    if (!marker().isDrained() || hasDelayedMarking()) {
+      AutoLockHelperThreadState lock;
+      MOZ_ASSERT(markTask.isIdle(lock));
+      markTask.setBudget(budget);
+      markTask.startOrRunIfIdle(lock);
+    }
+    return Finished;  // This means don't yield to the mutator here.
+  }
+
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
+  return markUntilBudgetExhausted(budget, AllowParallelMarking);
+}
+
+void GCRuntime::beginSweepPhase(JS::GCReason reason, AutoGCSession& session) {
+  /*
+   * Sweep phase.
+   *
+   * Finalize as we sweep, outside of lock but with RuntimeHeapIsBusy()
+   * true so that any attempt to allocate a GC-thing from a finalizer will
+   * fail, rather than nest badly and leave the unmarked newborn to be swept.
+   *
+   * This only performs the one-time setup for sweeping (grouping zones into
+   * sweep groups); the actual sweeping is driven by performSweepActions.
+   */
+
+  MOZ_ASSERT(!abortSweepAfterCurrentGroup);
+  MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
+
+#ifdef DEBUG
+  releaseHeldRelocatedArenas();
+  verifyAllChunks();
+#endif
+
+#ifdef JS_GC_ZEAL
+  computeNonIncrementalMarkingForValidation(session);
+#endif
+
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);
+
+  AssertNoWrappersInGrayList(rt);
+  dropStringWrappers();
+
+  groupZonesForSweeping(reason);
+
+  // The sweep action state machine must be at its start position.
+  sweepActions->assertFinished();
+}
+
+// Finalize arenas of |thingKind| in |zone| on the main thread, within
+// |sliceBudget|. Returns false if the budget was exhausted before finishing;
+// in that case partially-swept arenas are published via
+// setIncrementalSweptArenas so iterators can still find them.
+bool GCRuntime::foregroundFinalize(JS::GCContext* gcx, Zone* zone,
+                                   AllocKind thingKind,
+                                   SliceBudget& sliceBudget,
+                                   SortedArenaList& sweepList) {
+  ArenaLists& lists = zone->arenas;
+  lists.checkNoArenasToUpdateForKind(thingKind);
+
+  // Non-empty arenas are reused for new allocations as soon as the
+  // finalizers for that allocation kind have run. Empty arenas are only
+  // released when everything in the zone has been swept (see
+  // GCRuntime::sweepBackgroundThings for more details).
+  if (!FinalizeArenas(gcx, lists.collectingArenaList(thingKind), sweepList,
+                      thingKind, sliceBudget)) {
+    // Copy the current contents of sweepList so that ArenaIter can find them.
+    lists.setIncrementalSweptArenas(thingKind, sweepList);
+    return false;
+  }
+
+  sweepList.extractEmpty(&lists.savedEmptyArenas.ref());
+  lists.mergeFinalizedArenas(thingKind, sweepList);
+  lists.clearIncrementalSweptArenas();
+
+  return true;
+}
+
+// Helper-thread task that continues marking during sweeping. The budget is
+// set per-slice via setBudget before the task is started.
+BackgroundMarkTask::BackgroundMarkTask(GCRuntime* gc)
+    : GCParallelTask(gc, gcstats::PhaseKind::MARK, GCUse::Marking),
+      budget(SliceBudget::unlimited()) {}
+
+void js::gc::BackgroundMarkTask::run(AutoLockHelperThreadState& lock) {
+  // Marking runs without the helper-thread lock held.
+  AutoUnlockHelperThreadState unlock(lock);
+
+  // Time reporting is handled separately for parallel tasks.
+  gc->sweepMarkResult = gc->markUntilBudgetExhausted(
+      this->budget, GCRuntime::SingleThreadedMarking, DontReportMarkTime);
+}
+
+// Wait for the background mark task (if running) and return the progress it
+// recorded in sweepMarkResult, resetting that state for the next slice.
+IncrementalProgress GCRuntime::joinBackgroundMarkTask() {
+  AutoLockHelperThreadState lock;
+  if (markTask.isIdle(lock)) {
+    return Finished;
+  }
+
+  joinTask(markTask, lock);
+
+  IncrementalProgress result = sweepMarkResult;
+  sweepMarkResult = Finished;
+  return result;
+}
+
+// Call T::sweep on |thing| if it was not marked during this collection.
+template <typename T>
+static void SweepThing(JS::GCContext* gcx, T* thing) {
+  if (!TenuredThingIsMarkedAny(thing)) {
+    thing->sweep(gcx);
+  }
+}
+
+// Sweep a linked list of arenas of cells of type T, updating *arenasToSweep
+// as arenas are completed so the work can resume from the same point.
+// Returns false if |sliceBudget| was exhausted before the list was finished.
+template <typename T>
+static bool SweepArenaList(JS::GCContext* gcx, Arena** arenasToSweep,
+                           SliceBudget& sliceBudget) {
+  while (Arena* arena = *arenasToSweep) {
+    MOZ_ASSERT(arena->zone->isGCSweeping());
+
+    for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
+      SweepThing(gcx, cell.as<T>());
+    }
+
+    // Advance the cursor before checking the budget so a completed arena is
+    // never reprocessed.
+    Arena* next = arena->next;
+    MOZ_ASSERT_IF(next, next->zone == arena->zone);
+    *arenasToSweep = next;
+
+    AllocKind kind = MapTypeToAllocKind<T>::kind;
+    sliceBudget.step(Arena::thingsPerArena(kind));
+    if (sliceBudget.isOverBudget()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Set up incremental sweeping of the atoms table, storing the in-progress
+// state in maybeAtomsToSweep. If the incremental setup fails (OOM), the table
+// is swept non-incrementally right here instead.
+void GCRuntime::startSweepingAtomsTable() {
+  auto& maybeAtoms = maybeAtomsToSweep.ref();
+  MOZ_ASSERT(maybeAtoms.isNothing());
+
+  AtomsTable* atomsTable = rt->atomsForSweeping();
+  if (!atomsTable) {
+    return;
+  }
+
+  // Create secondary tables to hold new atoms added while we're sweeping the
+  // main tables incrementally.
+  if (!atomsTable->startIncrementalSweep(maybeAtoms)) {
+    SweepingTracer trc(rt);
+    atomsTable->traceWeak(&trc);
+  }
+}
+
+// Sweep action: continue incremental sweeping of the atoms table within
+// |budget|. Trivially Finished when the atoms zone is not being swept or no
+// incremental sweep is in progress.
+IncrementalProgress GCRuntime::sweepAtomsTable(JS::GCContext* gcx,
+                                               SliceBudget& budget) {
+  if (!atomsZone()->isGCSweeping()) {
+    return Finished;
+  }
+
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_ATOMS_TABLE);
+
+  auto& maybeAtoms = maybeAtomsToSweep.ref();
+  if (!maybeAtoms) {
+    return Finished;
+  }
+
+  if (!rt->atomsForSweeping()->sweepIncrementally(maybeAtoms.ref(), budget)) {
+    return NotFinished;
+  }
+
+  maybeAtoms.reset();
+
+  return Finished;
+}
+
+// Sweep a single weak cache that was queued for incremental sweeping, then
+// clear its barrier tracer. Returns the number of steps performed, used to
+// account against the slice budget by the parallel-work driver.
+static size_t IncrementalSweepWeakCache(GCRuntime* gc,
+                                        const WeakCacheToSweep& item) {
+  AutoSetThreadIsSweeping threadIsSweeping(item.zone);
+
+  JS::detail::WeakCacheBase* cache = item.cache;
+  MOZ_ASSERT(cache->needsIncrementalBarrier());
+
+  SweepingTracer trc(gc->rt);
+  size_t steps = cache->traceWeak(&trc, &gc->storeBuffer());
+  cache->setIncrementalBarrierTracer(nullptr);
+
+  return steps;
+}
+
+// Iterate over the weak caches in a sweep group that still need their
+// incremental barrier swept, walking zones via nextNodeInGroup.
+WeakCacheSweepIterator::WeakCacheSweepIterator(JS::Zone* sweepGroup)
+    : sweepZone(sweepGroup), sweepCache(sweepZone->weakCaches().getFirst()) {
+  settle();
+}
+
+bool WeakCacheSweepIterator::done() const { return !sweepZone; }
+
+WeakCacheToSweep WeakCacheSweepIterator::get() const {
+  MOZ_ASSERT(!done());
+
+  return {sweepCache, sweepZone};
+}
+
+void WeakCacheSweepIterator::next() {
+  MOZ_ASSERT(!done());
+
+  sweepCache = sweepCache->getNext();
+  settle();
+}
+
+// Advance past caches that don't need an incremental barrier, moving to the
+// next zone in the group when the current zone's cache list is exhausted.
+// Leaves the iterator either on a cache needing the barrier or at the end.
+void WeakCacheSweepIterator::settle() {
+  while (sweepZone) {
+    while (sweepCache && !sweepCache->needsIncrementalBarrier()) {
+      sweepCache = sweepCache->getNext();
+    }
+
+    if (sweepCache) {
+      break;
+    }
+
+    sweepZone = sweepZone->nextNodeInGroup();
+    if (sweepZone) {
+      sweepCache = sweepZone->weakCaches().getFirst();
+    }
+  }
+
+  MOZ_ASSERT((!sweepZone && !sweepCache) ||
+             (sweepCache && sweepCache->needsIncrementalBarrier()));
+}
+
+// Sweep action: incrementally sweep the weak caches queued by
+// beginSweepingSweepGroup, distributing the work across helper threads.
+IncrementalProgress GCRuntime::sweepWeakCaches(JS::GCContext* gcx,
+                                               SliceBudget& budget) {
+  if (weakCachesToSweep.ref().isNothing()) {
+    return Finished;
+  }
+
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
+
+  WeakCacheSweepIterator& work = weakCachesToSweep.ref().ref();
+
+  AutoLockHelperThreadState lock;
+
+  {
+    // AutoRunParallelWork joins the spawned work when it leaves this scope;
+    // the lock is released while the work runs.
+    AutoRunParallelWork runWork(this, IncrementalSweepWeakCache,
+                                gcstats::PhaseKind::SWEEP_WEAK_CACHES,
+                                GCUse::Sweeping, work, budget, lock);
+    AutoUnlockHelperThreadState unlock(lock);
+  }
+
+  if (work.done()) {
+    weakCachesToSweep.ref().reset();
+    return Finished;
+  }
+
+  return NotFinished;
+}
+
+// Sweep action: finalize all arenas of sweepAllocKind in sweepZone on the
+// main thread, resuming incrementally across slices via incrementalSweepList.
+IncrementalProgress GCRuntime::finalizeAllocKind(JS::GCContext* gcx,
+                                                 SliceBudget& budget) {
+  MOZ_ASSERT(sweepZone->isGCSweeping());
+
+  // Set the number of things per arena for this AllocKind.
+  size_t thingsPerArena = Arena::thingsPerArena(sweepAllocKind);
+  auto& sweepList = incrementalSweepList.ref();
+  sweepList.setThingsPerArena(thingsPerArena);
+
+  AutoSetThreadIsFinalizing threadIsFinalizing(gcx);
+
+  if (!foregroundFinalize(gcx, sweepZone, sweepAllocKind, budget, sweepList)) {
+    return NotFinished;
+  }
+
+  // Reset the slots of the sweep list that we used.
+  sweepList.reset(thingsPerArena);
+
+  return Finished;
+}
+
+IncrementalProgress GCRuntime::sweepPropMapTree(JS::GCContext* gcx,
+                                                SliceBudget& budget) {
+  // Remove dead SharedPropMaps from the tree. This happens incrementally on
+  // the main thread. PropMaps are finalized later on a background thread.
+
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_PROP_MAP);
+
+  ArenaLists& al = sweepZone->arenas;
+
+  // Sweep each prop-map kind in turn, yielding if the budget runs out.
+  if (!SweepArenaList<CompactPropMap>(
+          gcx, &al.gcCompactPropMapArenasToUpdate.ref(), budget)) {
+    return NotFinished;
+  }
+  if (!SweepArenaList<NormalPropMap>(
+          gcx, &al.gcNormalPropMapArenasToUpdate.ref(), budget)) {
+    return NotFinished;
+  }
+
+  return Finished;
+}
+
+// An iterator for a standard container that provides an STL-like begin()/end()
+// interface. This iterator provides a done()/get()/next() style interface.
+//
+// The container must outlive the iterator; only const iteration is performed.
+template <typename Container>
+class ContainerIter {
+  using Iter = decltype(std::declval<const Container>().begin());
+  using Elem = decltype(*std::declval<Iter>());
+
+  Iter iter;
+  const Iter end;
+
+ public:
+  explicit ContainerIter(const Container& container)
+      : iter(container.begin()), end(container.end()) {}
+
+  bool done() const { return iter == end; }
+
+  Elem get() const { return *iter; }
+
+  void next() {
+    MOZ_ASSERT(!done());
+    ++iter;
+  }
+};
+
+// IncrementalIter is a template class that makes a normal iterator into one
+// that can be used to perform incremental work by using external state that
+// persists between instantiations. The state is only initialised on the first
+// use and subsequent uses carry on from the previous state.
+template <typename Iter>
+struct IncrementalIter {
+  using State = mozilla::Maybe<Iter>;
+  using Elem = decltype(std::declval<Iter>().get());
+
+ private:
+  // External persistent state, owned by the caller.
+  State& maybeIter;
+
+ public:
+  template <typename... Args>
+  explicit IncrementalIter(State& maybeIter, Args&&... args)
+      : maybeIter(maybeIter) {
+    // Only initialise the underlying iterator on first use; otherwise resume.
+    if (maybeIter.isNothing()) {
+      maybeIter.emplace(std::forward<Args>(args)...);
+    }
+  }
+
+  ~IncrementalIter() {
+    // Clear the persistent state once iteration has completed so the next
+    // instantiation starts fresh.
+    if (done()) {
+      maybeIter.reset();
+    }
+  }
+
+  bool done() const { return maybeIter.ref().done(); }
+
+  Elem get() const { return maybeIter.ref().get(); }
+
+  void next() { maybeIter.ref().next(); }
+};
+
+// Iterate through the sweep groups created by
+// GCRuntime::groupZonesForSweeping().
+//
+// NOTE: next() advances the GC's own notion of the current sweep group
+// (getNextSweepGroup), so this iterator has a side effect on GC state.
+class js::gc::SweepGroupsIter {
+  GCRuntime* gc;
+
+ public:
+  explicit SweepGroupsIter(JSRuntime* rt) : gc(&rt->gc) {
+    MOZ_ASSERT(gc->currentSweepGroup);
+  }
+
+  bool done() const { return !gc->currentSweepGroup; }
+
+  Zone* get() const { return gc->currentSweepGroup; }
+
+  void next() {
+    MOZ_ASSERT(!done());
+    gc->getNextSweepGroup();
+  }
+};
+
+namespace sweepaction {
+
+// Implementation of the SweepAction interface that calls a method on GCRuntime.
+class SweepActionCall final : public SweepAction {
+ using Method = IncrementalProgress (GCRuntime::*)(JS::GCContext* gcx,
+ SliceBudget& budget);
+
+ Method method;
+
+ public:
+ explicit SweepActionCall(Method m) : method(m) {}
+ IncrementalProgress run(Args& args) override {
+ return (args.gc->*method)(args.gcx, args.budget);
+ }
+ void assertFinished() const override {}
+};
+
+// Implementation of the SweepAction interface that yields in a specified zeal
+// mode.
+//
+// Yields (returns NotFinished) at most once per pass: the isYielding flag
+// ensures the action completes on the slice after the one it yielded in.
+class SweepActionMaybeYield final : public SweepAction {
+#ifdef JS_GC_ZEAL
+  ZealMode mode;
+  bool isYielding;
+#endif
+
+ public:
+  explicit SweepActionMaybeYield(ZealMode mode)
+#ifdef JS_GC_ZEAL
+      : mode(mode),
+        isYielding(false)
+#endif
+  {
+  }
+
+  IncrementalProgress run(Args& args) override {
+#ifdef JS_GC_ZEAL
+    if (!isYielding && args.gc->shouldYieldForZeal(mode)) {
+      isYielding = true;
+      return NotFinished;
+    }
+
+    isYielding = false;
+#endif
+    return Finished;
+  }
+
+  void assertFinished() const override { MOZ_ASSERT(!isYielding); }
+
+  // These actions should be skipped if GC zeal is not configured.
+#ifndef JS_GC_ZEAL
+  bool shouldSkip() override { return true; }
+#endif
+};
+
+// Implementation of the SweepAction interface that calls a list of actions in
+// sequence.
+//
+// Progress through the sequence is tracked in iterState so a NotFinished
+// result resumes from the same action on the next slice.
+class SweepActionSequence final : public SweepAction {
+  using ActionVector = Vector<UniquePtr<SweepAction>, 0, SystemAllocPolicy>;
+  using Iter = IncrementalIter<ContainerIter<ActionVector>>;
+
+  ActionVector actions;
+  typename Iter::State iterState;
+
+ public:
+  // Take ownership of |count| actions from |acts|. Returns false on OOM or if
+  // any action is null (a previously failed allocation); actions whose
+  // shouldSkip() returns true are dropped.
+  bool init(UniquePtr<SweepAction>* acts, size_t count) {
+    for (size_t i = 0; i < count; i++) {
+      auto& action = acts[i];
+      if (!action) {
+        return false;
+      }
+      if (action->shouldSkip()) {
+        continue;
+      }
+      if (!actions.emplaceBack(std::move(action))) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  IncrementalProgress run(Args& args) override {
+    for (Iter iter(iterState, actions); !iter.done(); iter.next()) {
+      if (iter.get()->run(args) == NotFinished) {
+        return NotFinished;
+      }
+    }
+    return Finished;
+  }
+
+  void assertFinished() const override {
+    MOZ_ASSERT(iterState.isNothing());
+    for (const auto& action : actions) {
+      action->assertFinished();
+    }
+  }
+};
+
+// Implementation of the SweepAction interface that runs a sub-action once per
+// element produced by |Iter|, optionally publishing the current element
+// through |elemOut| while the sub-action runs. Iteration state persists in
+// iterState so a NotFinished sub-action resumes at the same element.
+template <typename Iter, typename Init>
+class SweepActionForEach final : public SweepAction {
+  using Elem = decltype(std::declval<Iter>().get());
+  using IncrIter = IncrementalIter<Iter>;
+
+  Init iterInit;
+  Elem* elemOut;
+  UniquePtr<SweepAction> action;
+  typename IncrIter::State iterState;
+
+ public:
+  SweepActionForEach(const Init& init, Elem* maybeElemOut,
+                     UniquePtr<SweepAction> action)
+      : iterInit(init), elemOut(maybeElemOut), action(std::move(action)) {}
+
+  IncrementalProgress run(Args& args) override {
+    MOZ_ASSERT_IF(elemOut, *elemOut == Elem());
+    // Ensure *elemOut is cleared on every exit path, including early returns.
+    auto clearElem = mozilla::MakeScopeExit([&] { setElem(Elem()); });
+    for (IncrIter iter(iterState, iterInit); !iter.done(); iter.next()) {
+      setElem(iter.get());
+      if (action->run(args) == NotFinished) {
+        return NotFinished;
+      }
+    }
+    return Finished;
+  }
+
+  void assertFinished() const override {
+    MOZ_ASSERT(iterState.isNothing());
+    MOZ_ASSERT_IF(elemOut, *elemOut == Elem());
+    action->assertFinished();
+  }
+
+ private:
+  void setElem(const Elem& value) {
+    if (elemOut) {
+      *elemOut = value;
+    }
+  }
+};
+
+// Factory helpers for building the sweep action tree. Each returns nullptr
+// on OOM; nullptr inputs are propagated so a single failed allocation makes
+// the whole tree null (checked in GCRuntime::initSweepActions).
+
+static UniquePtr<SweepAction> Call(IncrementalProgress (GCRuntime::*method)(
+    JS::GCContext* gcx, SliceBudget& budget)) {
+  return MakeUnique<SweepActionCall>(method);
+}
+
+static UniquePtr<SweepAction> MaybeYield(ZealMode zealMode) {
+  return MakeUnique<SweepActionMaybeYield>(zealMode);
+}
+
+// Combine the given actions into one that runs them in order.
+template <typename... Rest>
+static UniquePtr<SweepAction> Sequence(UniquePtr<SweepAction> first,
+                                       Rest... rest) {
+  UniquePtr<SweepAction> actions[] = {std::move(first), std::move(rest)...};
+  auto seq = MakeUnique<SweepActionSequence>();
+  if (!seq || !seq->init(actions, std::size(actions))) {
+    return nullptr;
+  }
+
+  return UniquePtr<SweepAction>(std::move(seq));
+}
+
+// Run |action| once for every sweep group.
+static UniquePtr<SweepAction> RepeatForSweepGroup(
+    JSRuntime* rt, UniquePtr<SweepAction> action) {
+  if (!action) {
+    return nullptr;
+  }
+
+  using Action = SweepActionForEach<SweepGroupsIter, JSRuntime*>;
+  return js::MakeUnique<Action>(rt, nullptr, std::move(action));
+}
+
+// Run |action| for each zone in the current sweep group, exposing the
+// current zone through *zoneOut.
+static UniquePtr<SweepAction> ForEachZoneInSweepGroup(
+    JSRuntime* rt, Zone** zoneOut, UniquePtr<SweepAction> action) {
+  if (!action) {
+    return nullptr;
+  }
+
+  using Action = SweepActionForEach<SweepGroupZonesIter, JSRuntime*>;
+  return js::MakeUnique<Action>(rt, zoneOut, std::move(action));
+}
+
+// Run |action| for each alloc kind in |kinds|, exposing the current kind
+// through *kindOut.
+static UniquePtr<SweepAction> ForEachAllocKind(AllocKinds kinds,
+                                               AllocKind* kindOut,
+                                               UniquePtr<SweepAction> action) {
+  if (!action) {
+    return nullptr;
+  }
+
+  using Action = SweepActionForEach<ContainerIter<AllocKinds>, AllocKinds>;
+  return js::MakeUnique<Action>(kinds, kindOut, std::move(action));
+}
+
+} // namespace sweepaction
+
+// Build the tree of sweep actions that performSweepActions runs for each
+// sweep group. Returns false on OOM (any allocation failure propagates as a
+// null action through the factory helpers).
+bool GCRuntime::initSweepActions() {
+  using namespace sweepaction;
+  using sweepaction::Call;
+
+  sweepActions.ref() = RepeatForSweepGroup(
+      rt,
+      Sequence(
+          Call(&GCRuntime::beginMarkingSweepGroup),
+          Call(&GCRuntime::markGrayRootsInCurrentGroup),
+          MaybeYield(ZealMode::YieldWhileGrayMarking),
+          Call(&GCRuntime::markGray), Call(&GCRuntime::endMarkingSweepGroup),
+          Call(&GCRuntime::beginSweepingSweepGroup),
+          MaybeYield(ZealMode::IncrementalMultipleSlices),
+          MaybeYield(ZealMode::YieldBeforeSweepingAtoms),
+          Call(&GCRuntime::sweepAtomsTable),
+          MaybeYield(ZealMode::YieldBeforeSweepingCaches),
+          Call(&GCRuntime::sweepWeakCaches),
+          ForEachZoneInSweepGroup(
+              rt, &sweepZone.ref(),
+              Sequence(MaybeYield(ZealMode::YieldBeforeSweepingObjects),
+                       ForEachAllocKind(ForegroundObjectFinalizePhase.kinds,
+                                        &sweepAllocKind.ref(),
+                                        Call(&GCRuntime::finalizeAllocKind)),
+                       MaybeYield(ZealMode::YieldBeforeSweepingNonObjects),
+                       ForEachAllocKind(ForegroundNonObjectFinalizePhase.kinds,
+                                        &sweepAllocKind.ref(),
+                                        Call(&GCRuntime::finalizeAllocKind)),
+                       MaybeYield(ZealMode::YieldBeforeSweepingPropMapTrees),
+                       Call(&GCRuntime::sweepPropMapTree))),
+          Call(&GCRuntime::endSweepingSweepGroup)));
+
+  return sweepActions != nullptr;
+}
+
+// Run one slice of the sweep phase: first drain any pending marking work,
+// then resume the sweep action tree where the previous slice left off.
+IncrementalProgress GCRuntime::performSweepActions(SliceBudget& budget) {
+  AutoMajorGCProfilerEntry s(this);
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);
+
+  JS::GCContext* gcx = rt->gcContext();
+  AutoSetThreadIsSweeping threadIsSweeping(gcx);
+  AutoPoisonFreedJitCode pjc(gcx);
+
+  // Don't trigger pre-barriers when finalizing.
+  AutoDisableBarriers disableBarriers(this);
+
+  // Drain the mark stack, possibly in a parallel task if we're in a part of
+  // sweeping that allows it.
+  //
+  // In the first sweep slice we must not yield to the mutator until we've
+  // started sweeping a sweep group, but in that case the mark stack must be
+  // empty already.
+
+#ifdef DEBUG
+  MOZ_ASSERT(initialState <= State::Sweep);
+  if (initialState != State::Sweep) {
+    assertNoMarkingWork();
+  }
+#endif
+
+  if (initialState == State::Sweep &&
+      markDuringSweeping(gcx, budget) == NotFinished) {
+    return NotFinished;
+  }
+
+  // Then continue running sweep actions.
+
+  SweepAction::Args args{this, gcx, budget};
+  IncrementalProgress sweepProgress = sweepActions->run(args);
+  IncrementalProgress markProgress = joinBackgroundMarkTask();
+
+  if (sweepProgress == Finished && markProgress == Finished) {
+    return Finished;
+  }
+
+  MOZ_ASSERT(isIncremental);
+  return NotFinished;
+}
+
+bool GCRuntime::allCCVisibleZonesWereCollected() {
+  // Calculate whether the gray marking state is now valid.
+  //
+  // The gray bits change from invalid to valid if we finished a full GC from
+  // the point of view of the cycle collector. We ignore the following:
+  //
+  // - Helper thread zones, as these are not reachable from the main heap.
+  // - The atoms zone, since strings and symbols are never marked gray.
+  // - Empty zones.
+  //
+  // These exceptions ensure that when the CC requests a full GC the gray mark
+  // state ends up valid even if we don't collect all of the zones.
+
+  for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
+    if (!zone->isCollecting() && !zone->arenas.arenaListsAreEmpty()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Finish the sweep phase after all sweep groups have been processed: sweep
+// shared script data, run COLLECTION_END finalize callbacks, update the gray
+// bits validity, and (for incremental GCs) find dead compartments.
+void GCRuntime::endSweepPhase(bool destroyingRuntime) {
+  MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
+
+  // All sweep actions must have run to completion.
+  sweepActions->assertFinished();
+
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);
+
+  MOZ_ASSERT_IF(destroyingRuntime, !useBackgroundThreads);
+
+  {
+    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::DESTROY);
+
+    // Sweep shared script bytecode now all zones have been swept and finalizers
+    // for BaseScripts have released their references.
+    SweepScriptData(rt);
+  }
+
+  {
+    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
+    AutoLockStoreBuffer lock(&storeBuffer());
+    callFinalizeCallbacks(rt->gcContext(), JSFINALIZE_COLLECTION_END);
+
+    if (allCCVisibleZonesWereCollected()) {
+      grayBitsValid = true;
+    }
+  }
+
+  if (isIncremental) {
+    findDeadCompartments();
+  }
+
+#ifdef JS_GC_ZEAL
+  finishMarkingValidation();
+#endif
+
+#ifdef DEBUG
+  // Foreground-finalized kinds (and all kinds when not using background
+  // threads) must have empty collecting arena lists by now.
+  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+    for (auto i : AllAllocKinds()) {
+      MOZ_ASSERT_IF(!IsBackgroundFinalized(i) || !useBackgroundThreads,
+                    zone->arenas.collectingArenaList(i).isEmpty());
+    }
+  }
+#endif
+
+  AssertNoWrappersInGrayList(rt);
+}
diff --git a/js/src/gc/Tenuring.cpp b/js/src/gc/Tenuring.cpp
new file mode 100644
index 0000000000..457aa4876c
--- /dev/null
+++ b/js/src/gc/Tenuring.cpp
@@ -0,0 +1,1016 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Implementation of nursery eviction (tenuring).
+ */
+
+#include "gc/Tenuring.h"
+
+#include "mozilla/PodOperations.h"
+
+#include "gc/Cell.h"
+#include "gc/GCInternals.h"
+#include "gc/GCProbes.h"
+#include "gc/Pretenuring.h"
+#include "gc/Zone.h"
+#include "jit/JitCode.h"
+#include "proxy/Proxy.h"
+#include "vm/BigIntType.h"
+#include "vm/JSScript.h"
+#include "vm/NativeObject.h"
+#include "vm/Runtime.h"
+#include "vm/TypedArrayObject.h"
+
+#include "gc/Heap-inl.h"
+#include "gc/Marking-inl.h"
+#include "gc/ObjectKind-inl.h"
+#include "gc/StoreBuffer-inl.h"
+#include "gc/TraceMethods-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/PlainObject-inl.h"
+#ifdef ENABLE_RECORD_TUPLE
+# include "vm/TupleType.h"
+#endif
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::PodCopy;
+
+constexpr size_t MAX_DEDUPLICATABLE_STRING_LENGTH = 500;
+
+// Construct a tracer used to move live cells out of the nursery. The string
+// deduplication set starts out enabled; it is reset on OOM (see moveToTenured).
+TenuringTracer::TenuringTracer(JSRuntime* rt, Nursery* nursery)
+    : JSTracer(rt, JS::TracerKind::Tenuring,
+               JS::WeakMapTraceAction::TraceKeysAndValues),
+      nursery_(*nursery) {
+  stringDeDupSet.emplace();
+}
+
+// Total bytes tenured, including the per-cell nursery header overhead.
+size_t TenuringTracer::getTenuredSize() const {
+  return tenuredSize + tenuredCells * sizeof(NurseryCellHeader);
+}
+
+size_t TenuringTracer::getTenuredCells() const { return tenuredCells; }
+
+// Record that a cell from this allocation site survived a minor GC. This
+// feeds the pretenuring heuristics via the site's tenured count.
+static inline void UpdateAllocSiteOnTenure(Cell* cell) {
+  AllocSite* site = NurseryCellHeader::from(cell)->allocSite();
+  site->incTenuredCount();
+}
+
+// Trace an object edge: tenure the target if it still lives in the nursery,
+// or follow its forwarding pointer if it has already been moved, updating the
+// edge in place either way.
+void TenuringTracer::onObjectEdge(JSObject** objp, const char* name) {
+  JSObject* obj = *objp;
+  if (!IsInsideNursery(obj)) {
+    return;
+  }
+
+  if (obj->isForwarded()) {
+    // Already tenured by an earlier edge; just update this edge to point at
+    // the new location.
+    const gc::RelocationOverlay* overlay = gc::RelocationOverlay::fromCell(obj);
+    *objp = static_cast<JSObject*>(overlay->forwardingAddress());
+    return;
+  }
+
+  UpdateAllocSiteOnTenure(obj);
+
+  // Take a fast path for tenuring a plain object which is by far the most
+  // common case.
+  if (obj->is<PlainObject>()) {
+    *objp = movePlainObjectToTenured(&obj->as<PlainObject>());
+    return;
+  }
+
+  *objp = moveToTenuredSlow(obj);
+}
+
+// Trace a string edge: follow the forwarding pointer if the string was
+// already moved, otherwise tenure it now and update the edge.
+void TenuringTracer::onStringEdge(JSString** strp, const char* name) {
+  JSString* str = *strp;
+  if (!IsInsideNursery(str)) {
+    return;
+  }
+
+  if (str->isForwarded()) {
+    const gc::RelocationOverlay* overlay = gc::RelocationOverlay::fromCell(str);
+    *strp = static_cast<JSString*>(overlay->forwardingAddress());
+    return;
+  }
+
+  UpdateAllocSiteOnTenure(str);
+
+  *strp = moveToTenured(str);
+}
+
+// Trace a BigInt edge; same forward-or-move pattern as onStringEdge.
+void TenuringTracer::onBigIntEdge(JS::BigInt** bip, const char* name) {
+  JS::BigInt* bi = *bip;
+  if (!IsInsideNursery(bi)) {
+    return;
+  }
+
+  if (bi->isForwarded()) {
+    const gc::RelocationOverlay* overlay = gc::RelocationOverlay::fromCell(bi);
+    *bip = static_cast<JS::BigInt*>(overlay->forwardingAddress());
+    return;
+  }
+
+  UpdateAllocSiteOnTenure(bi);
+
+  *bip = moveToTenured(bi);
+}
+
+// The remaining cell kinds are never allocated in the nursery (only objects,
+// strings and BigInts are — see the handlers above), so there is nothing for
+// the tenuring tracer to do for these edges.
+void TenuringTracer::onSymbolEdge(JS::Symbol** symp, const char* name) {}
+void TenuringTracer::onScriptEdge(BaseScript** scriptp, const char* name) {}
+void TenuringTracer::onShapeEdge(Shape** shapep, const char* name) {}
+void TenuringTracer::onRegExpSharedEdge(RegExpShared** sharedp,
+                                        const char* name) {}
+void TenuringTracer::onBaseShapeEdge(BaseShape** basep, const char* name) {}
+void TenuringTracer::onGetterSetterEdge(GetterSetter** gsp, const char* name) {}
+void TenuringTracer::onPropMapEdge(PropMap** mapp, const char* name) {}
+void TenuringTracer::onJitCodeEdge(jit::JitCode** codep, const char* name) {}
+void TenuringTracer::onScopeEdge(Scope** scopep, const char* name) {}
+
+// Trace a Value edge, rewriting it in place if its GC thing gets moved.
+// Only objects, strings and BigInts can live in the nursery; all other
+// GC-thing values are asserted to be tenured already.
+void TenuringTracer::traverse(JS::Value* thingp) {
+  MOZ_ASSERT(!nursery().isInside(thingp));
+
+  Value value = *thingp;
+  CheckTracedThing(this, value);
+
+  // We only care about a few kinds of GC thing here and this generates much
+  // tighter code than using MapGCThingTyped.
+  Value post;
+  if (value.isObject()) {
+    JSObject* obj = &value.toObject();
+    onObjectEdge(&obj, "value");
+    post = JS::ObjectValue(*obj);
+  }
+#ifdef ENABLE_RECORD_TUPLE
+  else if (value.isExtendedPrimitive()) {
+    JSObject* obj = &value.toExtendedPrimitive();
+    onObjectEdge(&obj, "value");
+    post = JS::ExtendedPrimitiveValue(*obj);
+  }
+#endif
+  else if (value.isString()) {
+    JSString* str = value.toString();
+    onStringEdge(&str, "value");
+    post = JS::StringValue(str);
+  } else if (value.isBigInt()) {
+    JS::BigInt* bi = value.toBigInt();
+    onBigIntEdge(&bi, "value");
+    post = JS::BigIntValue(bi);
+  } else {
+    MOZ_ASSERT_IF(value.isGCThing(), !IsInsideNursery(value.toGCThing()));
+    return;
+  }
+
+  // Only write back if the thing moved, to avoid unnecessary stores.
+  if (post != value) {
+    *thingp = post;
+  }
+}
+
+// Trace every edge recorded in this store buffer: the most recent entry
+// (last_) plus all entries in the hash set of stores.
+template <typename T>
+void js::gc::StoreBuffer::MonoTypeBuffer<T>::trace(TenuringTracer& mover) {
+  mozilla::ReentrancyGuard g(*owner_);
+  MOZ_ASSERT(owner_->isEnabled());
+  if (last_) {
+    last_.trace(mover);
+  }
+  for (typename StoreSet::Range r = stores_.all(); !r.empty(); r.popFront()) {
+    r.front().trace(mover);
+  }
+}
+
+// Explicit instantiations for the edge types used during minor GC.
+namespace js {
+namespace gc {
+template void StoreBuffer::MonoTypeBuffer<StoreBuffer::ValueEdge>::trace(
+    TenuringTracer&);
+template void StoreBuffer::MonoTypeBuffer<StoreBuffer::SlotsEdge>::trace(
+    TenuringTracer&);
+template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::StringPtrEdge>;
+template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::BigIntPtrEdge>;
+template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::ObjectPtrEdge>;
+}  // namespace gc
+}  // namespace js
+
+// Trace a recorded range of an object's slots or dense elements. The recorded
+// range may be stale (the object can have shrunk or shifted its elements since
+// the write), so it is clamped to the currently valid range before tracing.
+void js::gc::StoreBuffer::SlotsEdge::trace(TenuringTracer& mover) const {
+  NativeObject* obj = object();
+  MOZ_ASSERT(IsCellPointerValid(obj));
+
+  // Beware JSObject::swap exchanging a native object for a non-native one.
+  if (!obj->is<NativeObject>()) {
+    return;
+  }
+
+  MOZ_ASSERT(!IsInsideNursery(obj), "obj shouldn't live in nursery.");
+
+  if (kind() == ElementKind) {
+    // Adjust the recorded [start_, start_ + count_) range for any elements
+    // that have been shifted off the front, then clamp it to the initialized
+    // length.
+    uint32_t initLen = obj->getDenseInitializedLength();
+    uint32_t numShifted = obj->getElementsHeader()->numShiftedElements();
+    uint32_t clampedStart = start_;
+    clampedStart = numShifted < clampedStart ? clampedStart - numShifted : 0;
+    clampedStart = std::min(clampedStart, initLen);
+    uint32_t clampedEnd = start_ + count_;
+    clampedEnd = numShifted < clampedEnd ? clampedEnd - numShifted : 0;
+    clampedEnd = std::min(clampedEnd, initLen);
+    MOZ_ASSERT(clampedStart <= clampedEnd);
+    mover.traceSlots(
+        static_cast<HeapSlot*>(obj->getDenseElements() + clampedStart)
+            ->unbarrieredAddress(),
+        clampedEnd - clampedStart);
+  } else {
+    // Clamp the recorded slot range to the object's current slot span.
+    uint32_t start = std::min(start_, obj->slotSpan());
+    uint32_t end = std::min(start_ + count_, obj->slotSpan());
+    MOZ_ASSERT(start <= end);
+    mover.traceObjectSlots(obj, start, end);
+  }
+}
+
+// Trace all edges of a tenured object recorded in the whole-cell buffer.
+static inline void TraceWholeCell(TenuringTracer& mover, JSObject* object) {
+  MOZ_ASSERT_IF(object->storeBuffer(),
+                !object->storeBuffer()->markingNondeduplicatable);
+  mover.traceObject(object);
+}
+
+// Non-deduplicatable marking is necessary because of the following 2 reasons:
+//
+// 1. Tenured string chars cannot be updated:
+//
+//    If any of the tenured string's bases were deduplicated during tenuring,
+//    the tenured string's chars pointer would need to be adjusted. This would
+//    then require updating any other tenured strings that are dependent on the
+//    first tenured string, and we have no way to find them without scanning
+//    the entire tenured heap.
+//
+// 2. Tenured string cannot store its nursery base or base's chars:
+//
+//    Tenured strings have no place to stash a pointer to their nursery base or
+//    its chars. You need to be able to traverse any dependent string's chain
+//    of bases up to a nursery "root base" that points to the malloced chars
+//    that the dependent strings started out pointing to, so that you can
+//    calculate the offset of any dependent string and update the ptr+offset if
+//    the root base gets deduplicated to a different allocation. Tenured
+//    strings in this base chain will stop you from reaching the nursery
+//    version of the root base; you can only get to the tenured version, and it
+//    has no place to store the original chars pointer.
+static inline void PreventDeduplicationOfReachableStrings(JSString* str) {
+  MOZ_ASSERT(str->isTenured());
+  MOZ_ASSERT(!str->isForwarded());
+
+  JSLinearString* baseOrRelocOverlay = str->nurseryBaseOrRelocOverlay();
+
+  // Walk along the chain of dependent strings' base string pointers
+  // to mark them all non-deduplicatable.
+  while (true) {
+    // baseOrRelocOverlay can be one of the three cases:
+    // 1. forwarded nursery string:
+    //    The forwarded string still retains the flag that can tell whether
+    //    this string is a dependent string with a base. Its
+    //    StringRelocationOverlay holds a saved pointer to its base in the
+    //    nursery.
+    // 2. not yet forwarded nursery string:
+    //    Retrieve the base field directly from the string.
+    // 3. tenured string:
+    //    The nursery base chain ends here, so stop traversing.
+    if (baseOrRelocOverlay->isForwarded()) {
+      JSLinearString* tenuredBase = Forwarded(baseOrRelocOverlay);
+      if (!tenuredBase->hasBase()) {
+        break;
+      }
+      baseOrRelocOverlay = StringRelocationOverlay::fromCell(baseOrRelocOverlay)
+                               ->savedNurseryBaseOrRelocOverlay();
+    } else {
+      JSLinearString* base = baseOrRelocOverlay;
+      if (base->isTenured()) {
+        break;
+      }
+      if (base->isDeduplicatable()) {
+        base->setNonDeduplicatable();
+      }
+      if (!base->hasBase()) {
+        break;
+      }
+      baseOrRelocOverlay = base->nurseryBaseOrRelocOverlay();
+    }
+  }
+}
+
+// Trace a tenured string from the whole-cell buffer. Strings are traced
+// before all other whole cells (see WholeCellBuffer::trace) so that the
+// non-deduplicatable bits are set before any deduplication can happen.
+static inline void TraceWholeCell(TenuringTracer& mover, JSString* str) {
+  MOZ_ASSERT_IF(str->storeBuffer(),
+                str->storeBuffer()->markingNondeduplicatable);
+
+  // Mark all strings reachable from the tenured string `str` as
+  // non-deduplicatable. These strings are the bases of the tenured dependent
+  // string.
+  if (str->hasBase()) {
+    PreventDeduplicationOfReachableStrings(str);
+  }
+
+  str->traceChildren(&mover);
+}
+
+// Trace a tenured script's children from the whole-cell buffer.
+static inline void TraceWholeCell(TenuringTracer& mover, BaseScript* script) {
+  script->traceChildren(&mover);
+}
+
+// Trace a tenured JitCode cell's children from the whole-cell buffer.
+static inline void TraceWholeCell(TenuringTracer& mover,
+                                  jit::JitCode* jitcode) {
+  jitcode->traceChildren(&mover);
+}
+
+// Trace every cell of type T whose bit is set in the arena's cell set.
+// Iterates the bitmap one word at a time, clearing the lowest set bit on
+// each iteration to enumerate the marked cell indices.
+template <typename T>
+static void TraceBufferedCells(TenuringTracer& mover, Arena* arena,
+                               ArenaCellSet* cells) {
+  for (size_t i = 0; i < MaxArenaCellIndex; i += cells->BitsPerWord) {
+    ArenaCellSet::WordT bitset = cells->getWord(i / cells->BitsPerWord);
+    while (bitset) {
+      size_t bit = i + js::detail::CountTrailingZeroes(bitset);
+      auto cell =
+          reinterpret_cast<T*>(uintptr_t(arena) + ArenaCellIndexBytes * bit);
+      TraceWholeCell(mover, cell);
+      bitset &= bitset - 1;  // Clear the low bit.
+    }
+  }
+}
+
+// Trace all buffered whole cells in this linked list of per-arena cell sets,
+// dispatching on each arena's trace kind. Each arena's buffered-cells pointer
+// is reset to the shared empty set before tracing its cells.
+void ArenaCellSet::trace(TenuringTracer& mover) {
+  for (ArenaCellSet* cells = this; cells; cells = cells->next) {
+    cells->check();
+
+    Arena* arena = cells->arena;
+    arena->bufferedCells() = &ArenaCellSet::Empty;
+
+    JS::TraceKind kind = MapAllocToTraceKind(arena->getAllocKind());
+    switch (kind) {
+      case JS::TraceKind::Object:
+        TraceBufferedCells<JSObject>(mover, arena, cells);
+        break;
+      case JS::TraceKind::String:
+        TraceBufferedCells<JSString>(mover, arena, cells);
+        break;
+      case JS::TraceKind::Script:
+        TraceBufferedCells<BaseScript>(mover, arena, cells);
+        break;
+      case JS::TraceKind::JitCode:
+        TraceBufferedCells<jit::JitCode>(mover, arena, cells);
+        break;
+      default:
+        MOZ_CRASH("Unexpected trace kind");
+    }
+  }
+}
+
+// Trace the whole-cell store buffer: all string cells first (so they can set
+// non-deduplicatable bits before any string is deduplicated), then all other
+// cells. Both lists are emptied afterwards.
+void js::gc::StoreBuffer::WholeCellBuffer::trace(TenuringTracer& mover) {
+  MOZ_ASSERT(owner_->isEnabled());
+
+#ifdef DEBUG
+  // Verify that all string whole cells are traced first before any other
+  // strings are visited for any reason.
+  MOZ_ASSERT(!owner_->markingNondeduplicatable);
+  owner_->markingNondeduplicatable = true;
+#endif
+  // Trace all of the strings to mark the non-deduplicatable bits, then trace
+  // all other whole cells.
+  if (stringHead_) {
+    stringHead_->trace(mover);
+  }
+#ifdef DEBUG
+  owner_->markingNondeduplicatable = false;
+#endif
+  if (nonStringHead_) {
+    nonStringHead_->trace(mover);
+  }
+
+  stringHead_ = nonStringHead_ = nullptr;
+}
+
+// Trace a buffered cell-pointer edge, skipping null edges. T must be a
+// nursery-allocatable cell type.
+template <typename T>
+void js::gc::StoreBuffer::CellPtrEdge<T>::trace(TenuringTracer& mover) const {
+  static_assert(std::is_base_of_v<Cell, T>, "T must be a Cell type");
+  static_assert(!GCTypeIsTenured<T>(), "T must not be a tenured Cell type");
+
+  T* thing = *edge;
+  if (!thing) {
+    return;
+  }
+
+  MOZ_ASSERT(IsCellPointerValid(thing));
+  MOZ_ASSERT(thing->getTraceKind() == JS::MapTypeToTraceKind<T>::kind);
+
+  if (std::is_same_v<JSString, T>) {
+    // Nursery string deduplication requires all tenured string -> nursery
+    // string edges to be registered with the whole cell buffer in order to
+    // correctly set the non-deduplicatable bit.
+    MOZ_ASSERT(!mover.runtime()->gc.isPointerWithinTenuredCell(
+        edge, JS::TraceKind::String));
+  }
+
+  DispatchToOnEdge(&mover, edge, "CellPtrEdge");
+}
+
+// Trace a buffered Value edge if it holds a GC thing.
+void js::gc::StoreBuffer::ValueEdge::trace(TenuringTracer& mover) const {
+  if (deref()) {
+    mover.traverse(edge);
+  }
+}
+
+// Visit all object children of the object and trace them: the class trace
+// hook, then (for native objects) the dense elements and all slots.
+void js::gc::TenuringTracer::traceObject(JSObject* obj) {
+  const JSClass* clasp = obj->getClass();
+  MOZ_ASSERT(clasp);
+
+  if (clasp->hasTrace()) {
+    clasp->doTrace(this, obj);
+  }
+
+  if (!obj->is<NativeObject>()) {
+    return;
+  }
+
+  NativeObject* nobj = &obj->as<NativeObject>();
+  if (!nobj->hasEmptyElements()) {
+    HeapSlotArray elements = nobj->getDenseElements();
+    Value* elems = elements.begin()->unbarrieredAddress();
+    traceSlots(elems, elems + nobj->getDenseInitializedLength());
+  }
+
+  traceObjectSlots(nobj, 0, nobj->slotSpan());
+}
+
+// Trace the object's slots in the range [start, end), visiting each
+// contiguous run of slots (fixed and dynamic) via forEachSlotRange.
+void js::gc::TenuringTracer::traceObjectSlots(NativeObject* nobj,
+                                              uint32_t start, uint32_t end) {
+  auto traceRange = [this](HeapSlot* slotStart, HeapSlot* slotEnd) {
+    traceSlots(slotStart->unbarrieredAddress(), slotEnd->unbarrieredAddress());
+  };
+  nobj->forEachSlotRange(start, end, traceRange);
+}
+
+// Trace each value in [vp, end).
+void js::gc::TenuringTracer::traceSlots(Value* vp, Value* end) {
+  for (; vp != end; ++vp) {
+    traverse(vp);
+  }
+}
+
+inline void js::gc::TenuringTracer::traceSlots(JS::Value* vp, uint32_t nslots) {
+  traceSlots(vp, vp + nslots);
+}
+
+void js::gc::TenuringTracer::traceString(JSString* str) {
+  str->traceChildren(this);
+}
+
+void js::gc::TenuringTracer::traceBigInt(JS::BigInt* bi) {
+  bi->traceChildren(this);
+}
+
+#ifdef DEBUG
+// Debug helpers for asserting that a cell's full contents lie within its
+// chunk before memcpy'ing it.
+static inline uintptr_t OffsetFromChunkStart(void* p) {
+  return uintptr_t(p) & gc::ChunkMask;
+}
+static inline ptrdiff_t OffsetToChunkEnd(void* p) {
+  return ChunkSize - (uintptr_t(p) & gc::ChunkMask);
+}
+#endif
+
+/* Insert the given relocation entry into the list of things to visit. */
+inline void js::gc::TenuringTracer::insertIntoObjectFixupList(
+    RelocationOverlay* entry) {
+  entry->setNext(objHead);
+  objHead = entry;
+}
+
+// Allocate a tenured cell of the given kind in the given zone.
+template <typename T>
+inline T* js::gc::TenuringTracer::allocTenured(Zone* zone, AllocKind kind) {
+  return static_cast<T*>(static_cast<Cell*>(AllocateCellInGC(zone, kind)));
+}
+
+// Allocate tenured storage for |src| and move its contents there, updating
+// the tenured size/cell accounting.
+JSString* js::gc::TenuringTracer::allocTenuredString(JSString* src, Zone* zone,
+                                                     AllocKind dstKind) {
+  JSString* dst = allocTenured<JSString>(zone, dstKind);
+  tenuredSize += moveStringToTenured(dst, src, dstKind);
+  tenuredCells++;
+
+  return dst;
+}
+
+// Move a non-plain nursery object to the tenured heap: allocate a tenured
+// cell, copy the object contents, move any out-of-line slots/elements, run
+// the class's object-moved hook, and leave a forwarding pointer behind.
+// Returns the new tenured object.
+JSObject* js::gc::TenuringTracer::moveToTenuredSlow(JSObject* src) {
+  MOZ_ASSERT(IsInsideNursery(src));
+  MOZ_ASSERT(!src->is<PlainObject>());
+
+  AllocKind dstKind = src->allocKindForTenure(nursery());
+  auto dst = allocTenured<JSObject>(src->nurseryZone(), dstKind);
+
+  size_t srcSize = Arena::thingSize(dstKind);
+
+  // Arrays and Tuples do not necessarily have the same AllocKind between src
+  // and dst. We deal with this by copying elements manually, possibly
+  // re-inlining them if there is adequate room inline in dst.
+  //
+  // For Arrays and Tuples we're reducing tenuredSize to the smaller srcSize
+  // because moveElementsToTenured() accounts for all Array or Tuple elements,
+  // even if they are inlined.
+  if (src->is<TypedArrayObject>()) {
+    TypedArrayObject* tarray = &src->as<TypedArrayObject>();
+    // Typed arrays with inline data do not necessarily have the same
+    // AllocKind between src and dst. The nursery does not allocate an
+    // inline data buffer that has the same size as the slow path will do.
+    // In the slow path, the Typed Array Object stores the inline data
+    // in the allocated space that fits the AllocKind. In the fast path,
+    // the nursery will allocate another buffer that is directly behind the
+    // minimal JSObject. That buffer size plus the JSObject size is not
+    // necessarily as large as the slow path's AllocKind size.
+    if (tarray->hasInlineElements()) {
+      AllocKind srcKind = GetGCObjectKind(TypedArrayObject::FIXED_DATA_START);
+      size_t headerSize = Arena::thingSize(srcKind);
+      srcSize = headerSize + tarray->byteLength();
+    }
+  } else if (src->canHaveFixedElements()) {
+    srcSize = sizeof(NativeObject);
+  }
+
+  tenuredSize += srcSize;
+  tenuredCells++;
+
+  // Copy the Cell contents.
+  MOZ_ASSERT(OffsetFromChunkStart(src) >= sizeof(ChunkBase));
+  MOZ_ASSERT(OffsetToChunkEnd(src) >= ptrdiff_t(srcSize));
+  js_memcpy(dst, src, srcSize);
+
+  // Move the slots and elements, if we need to.
+  if (src->is<NativeObject>()) {
+    NativeObject* ndst = &dst->as<NativeObject>();
+    NativeObject* nsrc = &src->as<NativeObject>();
+    tenuredSize += moveSlotsToTenured(ndst, nsrc);
+    tenuredSize += moveElementsToTenured(ndst, nsrc, dstKind);
+  }
+
+  JSObjectMovedOp op = dst->getClass()->extObjectMovedOp();
+  MOZ_ASSERT_IF(src->is<ProxyObject>(), op == proxy_ObjectMoved);
+  if (op) {
+    // Tell the hazard analysis that the object moved hook can't GC.
+    JS::AutoSuppressGCAnalysis nogc;
+    tenuredSize += op(dst, src);
+  } else {
+    MOZ_ASSERT_IF(src->getClass()->hasFinalize(),
+                  CanNurseryAllocateFinalizedClass(src->getClass()));
+  }
+
+  // Leave a forwarding pointer in the nursery cell and queue the new object
+  // so its children get traced (see collectToObjectFixedPoint).
+  RelocationOverlay* overlay = RelocationOverlay::forwardCell(src, dst);
+  insertIntoObjectFixupList(overlay);
+
+  gcprobes::PromoteToTenured(src, dst);
+  return dst;
+}
+
+inline JSObject* js::gc::TenuringTracer::movePlainObjectToTenured(
+    PlainObject* src) {
+  // Fast path version of moveToTenuredSlow() for specialized for PlainObject.
+  // Plain objects never need the TypedArray/fixed-elements size adjustments
+  // and never have an object-moved hook, so those branches are omitted.
+
+  MOZ_ASSERT(IsInsideNursery(src));
+
+  AllocKind dstKind = src->allocKindForTenure();
+  auto dst = allocTenured<PlainObject>(src->nurseryZone(), dstKind);
+
+  size_t srcSize = Arena::thingSize(dstKind);
+  tenuredSize += srcSize;
+  tenuredCells++;
+
+  // Copy the Cell contents.
+  MOZ_ASSERT(OffsetFromChunkStart(src) >= sizeof(ChunkBase));
+  MOZ_ASSERT(OffsetToChunkEnd(src) >= ptrdiff_t(srcSize));
+  js_memcpy(dst, src, srcSize);
+
+  // Move the slots and elements.
+  tenuredSize += moveSlotsToTenured(dst, src);
+  tenuredSize += moveElementsToTenured(dst, src, dstKind);
+
+  MOZ_ASSERT(!dst->getClass()->extObjectMovedOp());
+
+  // Leave a forwarding pointer and queue the new object for child tracing.
+  RelocationOverlay* overlay = RelocationOverlay::forwardCell(src, dst);
+  insertIntoObjectFixupList(overlay);
+
+  gcprobes::PromoteToTenured(src, dst);
+  return dst;
+}
+
+// Move an object's dynamic slots out of the nursery, returning the number of
+// bytes of tenured-heap memory now attributed to |dst| (0 if the slots were
+// already malloced outside the nursery and can simply be adopted).
+size_t js::gc::TenuringTracer::moveSlotsToTenured(NativeObject* dst,
+                                                  NativeObject* src) {
+  /* Fixed slots have already been copied over. */
+  if (!src->hasDynamicSlots()) {
+    return 0;
+  }
+
+  Zone* zone = src->nurseryZone();
+  size_t count = src->numDynamicSlots();
+
+  // Read the unique id before the slots header is replaced; it is preserved
+  // in the new ObjectSlots header below.
+  uint64_t uid = src->maybeUniqueId();
+
+  size_t allocSize = ObjectSlots::allocSize(count);
+
+  ObjectSlots* srcHeader = src->getSlotsHeader();
+  if (!nursery().isInside(srcHeader)) {
+    // The slots were malloced: keep the existing buffer, just transfer
+    // ownership from the nursery's malloced-buffer set to the cell.
+    AddCellMemory(dst, allocSize, MemoryUse::ObjectSlots);
+    nursery().removeMallocedBufferDuringMinorGC(srcHeader);
+    return 0;
+  }
+
+  {
+    AutoEnterOOMUnsafeRegion oomUnsafe;
+    HeapSlot* allocation =
+        zone->pod_malloc<HeapSlot>(ObjectSlots::allocCount(count));
+    if (!allocation) {
+      oomUnsafe.crash(allocSize, "Failed to allocate slots while tenuring.");
+    }
+
+    ObjectSlots* slotsHeader = new (allocation)
+        ObjectSlots(count, srcHeader->dictionarySlotSpan(), uid);
+    dst->slots_ = slotsHeader->slots();
+  }
+
+  AddCellMemory(dst, allocSize, MemoryUse::ObjectSlots);
+
+  PodCopy(dst->slots_, src->slots_, count);
+  if (count) {
+    // Record where the slots moved so other references can be fixed up.
+    nursery().setSlotsForwardingPointer(src->slots_, dst->slots_, count);
+  }
+
+  return allocSize;
+}
+
+// Move an object's elements out of the nursery, returning the number of
+// bytes of tenured-heap memory now attributed to |dst|. Elements may be
+// adopted in place (already malloced), re-inlined into the tenured cell
+// (Arrays/Tuples with room), or copied to a fresh malloc allocation.
+size_t js::gc::TenuringTracer::moveElementsToTenured(NativeObject* dst,
+                                                     NativeObject* src,
+                                                     AllocKind dstKind) {
+  if (src->hasEmptyElements()) {
+    return 0;
+  }
+
+  Zone* zone = src->nurseryZone();
+
+  ObjectElements* srcHeader = src->getElementsHeader();
+  size_t nslots = srcHeader->numAllocatedElements();
+  size_t allocSize = nslots * sizeof(HeapSlot);
+
+  void* srcAllocatedHeader = src->getUnshiftedElementsHeader();
+
+  /* TODO Bug 874151: Prefer to put element data inline if we have space. */
+  if (!nursery().isInside(srcAllocatedHeader)) {
+    // Elements already malloced outside the nursery: adopt the buffer.
+    MOZ_ASSERT(src->elements_ == dst->elements_);
+    nursery().removeMallocedBufferDuringMinorGC(srcAllocatedHeader);
+
+    AddCellMemory(dst, allocSize, MemoryUse::ObjectElements);
+
+    return 0;
+  }
+
+  // Shifted elements are copied too.
+  uint32_t numShifted = srcHeader->numShiftedElements();
+
+  /* Unlike other objects, Arrays and Tuples can have fixed elements. */
+  if (src->canHaveFixedElements() && nslots <= GetGCKindSlots(dstKind)) {
+    dst->as<NativeObject>().setFixedElements();
+    js_memcpy(dst->getElementsHeader(), srcAllocatedHeader, allocSize);
+    dst->elements_ += numShifted;
+    dst->getElementsHeader()->flags |= ObjectElements::FIXED;
+    nursery().setElementsForwardingPointer(srcHeader, dst->getElementsHeader(),
+                                           srcHeader->capacity);
+    return allocSize;
+  }
+
+  MOZ_ASSERT(nslots >= 2);
+
+  ObjectElements* dstHeader;
+  {
+    AutoEnterOOMUnsafeRegion oomUnsafe;
+    dstHeader =
+        reinterpret_cast<ObjectElements*>(zone->pod_malloc<HeapSlot>(nslots));
+    if (!dstHeader) {
+      oomUnsafe.crash(allocSize, "Failed to allocate elements while tenuring.");
+    }
+  }
+
+  AddCellMemory(dst, allocSize, MemoryUse::ObjectElements);
+
+  js_memcpy(dstHeader, srcAllocatedHeader, allocSize);
+  dst->elements_ = dstHeader->elements() + numShifted;
+  dst->getElementsHeader()->flags &= ~ObjectElements::FIXED;
+  nursery().setElementsForwardingPointer(srcHeader, dst->getElementsHeader(),
+                                         srcHeader->capacity);
+  return allocSize;
+}
+
+// Prepend a string relocation entry onto the list processed by
+// collectToStringFixedPoint.
+inline void js::gc::TenuringTracer::insertIntoStringFixupList(
+    StringRelocationOverlay* entry) {
+  entry->setNext(stringHead);
+  stringHead = entry;
+}
+
+// Move a nursery string to the tenured heap, deduplicating it to an existing
+// tenured string (an atom from the StringToAtomCache, or a previously tenured
+// equal string) when possible. Leaves a forwarding pointer in the nursery
+// cell and returns the tenured (or deduplicated-to) string.
+JSString* js::gc::TenuringTracer::moveToTenured(JSString* src) {
+  MOZ_ASSERT(IsInsideNursery(src));
+  MOZ_ASSERT(!src->isExternal());
+
+  AllocKind dstKind = src->getAllocKind();
+  Zone* zone = src->nurseryZone();
+
+  // If this string is in the StringToAtomCache, try to deduplicate it by using
+  // the atom. Don't do this for dependent strings because they're more
+  // complicated. See StringRelocationOverlay and DeduplicationStringHasher
+  // comments.
+  if (src->isLinear() && src->inStringToAtomCache() &&
+      src->isDeduplicatable() && !src->hasBase()) {
+    JSLinearString* linear = &src->asLinear();
+    JSAtom* atom = runtime()->caches().stringToAtomCache.lookupInMap(linear);
+    MOZ_ASSERT(atom, "Why was the cache purged before minor GC?");
+
+    // Only deduplicate if both strings have the same encoding, to not confuse
+    // dependent strings.
+    if (src->hasTwoByteChars() == atom->hasTwoByteChars()) {
+      // The StringToAtomCache isn't used for inline strings (due to the minimum
+      // length) so canOwnDependentChars must be true for both src and atom.
+      // This means if there are dependent strings floating around using str's
+      // chars, they will be able to use the chars from the atom.
+      static_assert(StringToAtomCache::MinStringLength >
+                    JSFatInlineString::MAX_LENGTH_LATIN1);
+      static_assert(StringToAtomCache::MinStringLength >
+                    JSFatInlineString::MAX_LENGTH_TWO_BYTE);
+      MOZ_ASSERT(src->canOwnDependentChars());
+      MOZ_ASSERT(atom->canOwnDependentChars());
+
+      StringRelocationOverlay::forwardCell(src, atom);
+      gcprobes::PromoteToTenured(src, atom);
+      return atom;
+    }
+  }
+
+  JSString* dst;
+
+  // A live nursery string can only get deduplicated when:
+  // 1. Its length is smaller than MAX_DEDUPLICATABLE_STRING_LENGTH:
+  //    Hashing a long string can affect performance.
+  // 2. It is linear:
+  //    Deduplicating every node in it would end up doing O(n^2) hashing work.
+  // 3. It is deduplicatable:
+  //    The JSString NON_DEDUP_BIT flag is unset.
+  // 4. It matches an entry in stringDeDupSet.
+
+  if (src->length() < MAX_DEDUPLICATABLE_STRING_LENGTH && src->isLinear() &&
+      src->isDeduplicatable() && stringDeDupSet.isSome()) {
+    if (auto p = stringDeDupSet->lookup(src)) {
+      // Deduplicate to the looked-up string!
+      dst = *p;
+      zone->stringStats.ref().noteDeduplicated(src->length(), src->allocSize());
+      StringRelocationOverlay::forwardCell(src, dst);
+      gcprobes::PromoteToTenured(src, dst);
+      return dst;
+    }
+
+    dst = allocTenuredString(src, zone, dstKind);
+
+    if (!stringDeDupSet->putNew(dst)) {
+      // When there is oom caused by the stringDeDupSet, stop deduplicating
+      // strings.
+      stringDeDupSet.reset();
+    }
+  } else {
+    dst = allocTenuredString(src, zone, dstKind);
+    dst->clearNonDeduplicatable();
+  }
+
+  zone->stringStats.ref().noteTenured(src->allocSize());
+
+  auto* overlay = StringRelocationOverlay::forwardCell(src, dst);
+  MOZ_ASSERT(dst->isDeduplicatable());
+
+  if (dst->hasBase() || dst->isRope()) {
+    // dst or one of its leaves might have a base that will be deduplicated.
+    // Insert the overlay into the fixup list to relocate it later.
+    insertIntoStringFixupList(overlay);
+  }
+
+  gcprobes::PromoteToTenured(src, dst);
+  return dst;
+}
+
+// Re-point a tenured dependent string's chars at (and set its base to) the
+// tenured copy of its root base. If the root base has not been forwarded yet,
+// instead reports it via the out-params (*rootBase, *rootBaseNotYetForwarded,
+// *offset) so the caller can finish the relocation after tracing forwards it
+// (see collectToStringFixedPoint).
+template <typename CharT>
+void js::gc::TenuringTracer::relocateDependentStringChars(
+    JSDependentString* tenuredDependentStr, JSLinearString* baseOrRelocOverlay,
+    size_t* offset, bool* rootBaseNotYetForwarded, JSLinearString** rootBase) {
+  MOZ_ASSERT(*offset == 0);
+  MOZ_ASSERT(*rootBaseNotYetForwarded == false);
+  MOZ_ASSERT(*rootBase == nullptr);
+
+  JS::AutoCheckCannotGC nogc;
+
+  const CharT* dependentStrChars =
+      tenuredDependentStr->nonInlineChars<CharT>(nogc);
+
+  // Traverse the dependent string nursery base chain to find the base that
+  // it's using chars from.
+  while (true) {
+    if (baseOrRelocOverlay->isForwarded()) {
+      JSLinearString* tenuredBase = Forwarded(baseOrRelocOverlay);
+      StringRelocationOverlay* relocOverlay =
+          StringRelocationOverlay::fromCell(baseOrRelocOverlay);
+
+      if (!tenuredBase->hasBase()) {
+        // The nursery root base is relocOverlay, it is tenured to tenuredBase.
+        // Relocate tenuredDependentStr chars and reassign the tenured root base
+        // as its base.
+        JSLinearString* tenuredRootBase = tenuredBase;
+        const CharT* rootBaseChars = relocOverlay->savedNurseryChars<CharT>();
+        *offset = dependentStrChars - rootBaseChars;
+        MOZ_ASSERT(*offset < tenuredRootBase->length());
+        tenuredDependentStr->relocateNonInlineChars<const CharT*>(
+            tenuredRootBase->nonInlineChars<CharT>(nogc), *offset);
+        tenuredDependentStr->setBase(tenuredRootBase);
+        return;
+      }
+
+      baseOrRelocOverlay = relocOverlay->savedNurseryBaseOrRelocOverlay();
+
+    } else {
+      JSLinearString* base = baseOrRelocOverlay;
+
+      if (!base->hasBase()) {
+        // The root base is not forwarded yet, it is simply base.
+        *rootBase = base;
+
+        // The root base can be in either the nursery or the tenured heap.
+        // dependentStr chars needs to be relocated after traceString if the
+        // root base is in the nursery.
+        if (!(*rootBase)->isTenured()) {
+          *rootBaseNotYetForwarded = true;
+          const CharT* rootBaseChars = (*rootBase)->nonInlineChars<CharT>(nogc);
+          *offset = dependentStrChars - rootBaseChars;
+          MOZ_ASSERT(*offset < base->length(), "Tenured root base");
+        }
+
+        tenuredDependentStr->setBase(*rootBase);
+
+        return;
+      }
+
+      baseOrRelocOverlay = base->nurseryBaseOrRelocOverlay();
+    }
+  }
+}
+
+// Move a nursery BigInt to the tenured heap, leaving a forwarding pointer in
+// the nursery cell, and return the tenured copy.
+JS::BigInt* js::gc::TenuringTracer::moveToTenured(JS::BigInt* src) {
+  MOZ_ASSERT(IsInsideNursery(src));
+
+  AllocKind dstKind = src->getAllocKind();
+  Zone* zone = src->nurseryZone();
+  zone->tenuredBigInts++;
+
+  JS::BigInt* dst = allocTenured<JS::BigInt>(zone, dstKind);
+  tenuredSize += moveBigIntToTenured(dst, src, dstKind);
+  tenuredCells++;
+
+  RelocationOverlay::forwardCell(src, dst);
+
+  gcprobes::PromoteToTenured(src, dst);
+  return dst;
+}
+
+// Trace the children of every tenured object on the fixup list until no new
+// objects are queued (tracing may tenure more objects, which are prepended).
+void js::gc::TenuringTracer::collectToObjectFixedPoint() {
+  while (RelocationOverlay* p = objHead) {
+    objHead = objHead->next();
+    auto* obj = static_cast<JSObject*>(p->forwardingAddress());
+    traceObject(obj);
+  }
+}
+
+// Trace every tenured string on the fixup list until no new strings are
+// queued, fixing up dependent strings' chars pointers and base references as
+// their (possibly deduplicated) bases get forwarded.
+void js::gc::TenuringTracer::collectToStringFixedPoint() {
+  while (StringRelocationOverlay* p = stringHead) {
+    stringHead = stringHead->next();
+
+    auto* tenuredStr = static_cast<JSString*>(p->forwardingAddress());
+    // To ensure the NON_DEDUP_BIT was reset properly.
+    MOZ_ASSERT(tenuredStr->isDeduplicatable());
+
+    // The nursery root base might not be forwarded before
+    // traceString(tenuredStr). traceString(tenuredStr) will forward the root
+    // base if that's the case. Dependent string chars needs to be relocated
+    // after traceString if root base was not forwarded.
+    size_t offset = 0;
+    bool rootBaseNotYetForwarded = false;
+    JSLinearString* rootBase = nullptr;
+
+    if (tenuredStr->isDependent()) {
+      if (tenuredStr->hasTwoByteChars()) {
+        relocateDependentStringChars<char16_t>(
+            &tenuredStr->asDependent(), p->savedNurseryBaseOrRelocOverlay(),
+            &offset, &rootBaseNotYetForwarded, &rootBase);
+      } else {
+        relocateDependentStringChars<JS::Latin1Char>(
+            &tenuredStr->asDependent(), p->savedNurseryBaseOrRelocOverlay(),
+            &offset, &rootBaseNotYetForwarded, &rootBase);
+      }
+    }
+
+    traceString(tenuredStr);
+
+    if (rootBaseNotYetForwarded) {
+      MOZ_ASSERT(rootBase->isForwarded(),
+                 "traceString() should make it forwarded");
+      JS::AutoCheckCannotGC nogc;
+
+      // Now that the root base has been tenured, point the dependent
+      // string's chars (and base) at the tenured copy.
+      JSLinearString* tenuredRootBase = Forwarded(rootBase);
+      MOZ_ASSERT(offset < tenuredRootBase->length());
+
+      if (tenuredStr->hasTwoByteChars()) {
+        tenuredStr->asDependent().relocateNonInlineChars<const char16_t*>(
+            tenuredRootBase->twoByteChars(nogc), offset);
+      } else {
+        tenuredStr->asDependent().relocateNonInlineChars<const JS::Latin1Char*>(
+            tenuredRootBase->latin1Chars(nogc), offset);
+      }
+      tenuredStr->setBase(tenuredRootBase);
+    }
+  }
+}
+
+// Copy a nursery string's cell contents into its tenured allocation and
+// transfer ownership of any malloced chars. Returns the tenured bytes used.
+size_t js::gc::TenuringTracer::moveStringToTenured(JSString* dst, JSString* src,
+                                                   AllocKind dstKind) {
+  size_t size = Arena::thingSize(dstKind);
+
+  // At the moment, strings always have the same AllocKind between src and
+  // dst. This may change in the future.
+  MOZ_ASSERT(dst->asTenured().getAllocKind() == src->getAllocKind());
+
+  // Copy the Cell contents.
+  MOZ_ASSERT(OffsetToChunkEnd(src) >= ptrdiff_t(size));
+  js_memcpy(dst, src, size);
+
+  if (src->ownsMallocedChars()) {
+    // The chars buffer stays where it is; just move its accounting from the
+    // nursery's malloced-buffer set to the tenured cell.
+    void* chars = src->asLinear().nonInlineCharsRaw();
+    nursery().removeMallocedBufferDuringMinorGC(chars);
+    AddCellMemory(dst, dst->asLinear().allocSize(), MemoryUse::StringContents);
+  }
+
+  return size;
+}
+
+// Copy a nursery BigInt's cell contents into its tenured allocation, moving
+// or adopting its heap digits as needed. Returns the tenured bytes used
+// (including any newly malloced digits).
+size_t js::gc::TenuringTracer::moveBigIntToTenured(JS::BigInt* dst,
+                                                   JS::BigInt* src,
+                                                   AllocKind dstKind) {
+  size_t size = Arena::thingSize(dstKind);
+
+  // At the moment, BigInts always have the same AllocKind between src and
+  // dst. This may change in the future.
+  MOZ_ASSERT(dst->asTenured().getAllocKind() == src->getAllocKind());
+
+  // Copy the Cell contents.
+  MOZ_ASSERT(OffsetToChunkEnd(src) >= ptrdiff_t(size));
+  js_memcpy(dst, src, size);
+
+  MOZ_ASSERT(dst->zone() == src->nurseryZone());
+
+  if (src->hasHeapDigits()) {
+    size_t length = dst->digitLength();
+    if (!nursery().isInside(src->heapDigits_)) {
+      // Digits already malloced outside the nursery: adopt the buffer.
+      nursery().removeMallocedBufferDuringMinorGC(src->heapDigits_);
+    } else {
+      // Digits live in the nursery: copy them to a fresh malloc allocation.
+      Zone* zone = src->nurseryZone();
+      {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        dst->heapDigits_ = zone->pod_malloc<JS::BigInt::Digit>(length);
+        if (!dst->heapDigits_) {
+          oomUnsafe.crash(sizeof(JS::BigInt::Digit) * length,
+                          "Failed to allocate digits while tenuring.");
+        }
+      }
+
+      PodCopy(dst->heapDigits_, src->heapDigits_, length);
+      nursery().setDirectForwardingPointer(src->heapDigits_, dst->heapDigits_);
+
+      size += length * sizeof(JS::BigInt::Digit);
+    }
+
+    AddCellMemory(dst, length * sizeof(JS::BigInt::Digit),
+                  MemoryUse::BigIntDigits);
+  }
+
+  return size;
+}
+
+// Tracer run after tenuring, while the heap is still minor-collecting, to
+// sweep edges into the nursery: forwarded cells are updated to their tenured
+// location and edges to dead (unforwarded) nursery cells are cleared.
+MinorSweepingTracer::MinorSweepingTracer(JSRuntime* rt)
+    : GenericTracerImpl(rt, JS::TracerKind::MinorSweeping,
+                        JS::WeakMapTraceAction::TraceKeysAndValues) {
+  MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
+  MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());
+}
+
+template <typename T>
+inline void MinorSweepingTracer::onEdge(T** thingp, const char* name) {
+  T* thing = *thingp;
+  if (thing->isTenured()) {
+    return;
+  }
+
+  if (IsForwarded(thing)) {
+    *thingp = Forwarded(thing);
+    return;
+  }
+
+  // The nursery cell died during this minor GC; null out the edge.
+  *thingp = nullptr;
+}
diff --git a/js/src/gc/Tenuring.h b/js/src/gc/Tenuring.h
new file mode 100644
index 0000000000..34ef950e1d
--- /dev/null
+++ b/js/src/gc/Tenuring.h
@@ -0,0 +1,169 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Tenuring_h
+#define gc_Tenuring_h
+
+#include "mozilla/Maybe.h"
+
+#include "gc/AllocKind.h"
+#include "js/GCAPI.h"
+#include "js/TracingAPI.h"
+#include "util/Text.h"
+
+namespace js {
+
+class NativeObject;
+class Nursery;
+class PlainObject;
+
+namespace gc {
+
+class RelocationOverlay;
+class StringRelocationOverlay;
+
+template <typename Key>
+struct DeduplicationStringHasher {
+ using Lookup = Key;
+
+ static inline HashNumber hash(const Lookup& lookup) {
+ JS::AutoCheckCannotGC nogc;
+ HashNumber strHash;
+
+ // Include flags in the hash. A string relocation overlay stores either
+ // the nursery root base chars or the dependent string nursery base, but
+ // does not indicate which one. If strings with different string types
+ // were deduplicated, for example, a dependent string gets deduplicated
+ // into an extensible string, the base chain would be broken and the root
+ // base would be unreachable.
+
+ if (lookup->asLinear().hasLatin1Chars()) {
+ strHash = mozilla::HashString(lookup->asLinear().latin1Chars(nogc),
+ lookup->length());
+ } else {
+ MOZ_ASSERT(lookup->asLinear().hasTwoByteChars());
+ strHash = mozilla::HashString(lookup->asLinear().twoByteChars(nogc),
+ lookup->length());
+ }
+
+ return mozilla::HashGeneric(strHash, lookup->zone(), lookup->flags());
+ }
+
+ static MOZ_ALWAYS_INLINE bool match(const Key& key, const Lookup& lookup) {
+ if (!key->sameLengthAndFlags(*lookup) ||
+ key->asTenured().zone() != lookup->zone() ||
+ key->asTenured().getAllocKind() != lookup->getAllocKind()) {
+ return false;
+ }
+
+ JS::AutoCheckCannotGC nogc;
+
+ if (key->asLinear().hasLatin1Chars()) {
+ MOZ_ASSERT(lookup->asLinear().hasLatin1Chars());
+ return EqualChars(key->asLinear().latin1Chars(nogc),
+ lookup->asLinear().latin1Chars(nogc), lookup->length());
+ } else {
+ MOZ_ASSERT(key->asLinear().hasTwoByteChars());
+ MOZ_ASSERT(lookup->asLinear().hasTwoByteChars());
+ return EqualChars(key->asLinear().twoByteChars(nogc),
+ lookup->asLinear().twoByteChars(nogc),
+ lookup->length());
+ }
+ }
+};
+
+class TenuringTracer final : public JSTracer {
+ Nursery& nursery_;
+
+ // Amount of data moved to the tenured generation during collection.
+ size_t tenuredSize = 0;
+ // Number of cells moved to the tenured generation.
+ size_t tenuredCells = 0;
+
+ // These lists are threaded through the Nursery using the space from
+ // already moved things. The lists are used to fix up the moved things and
+ // to find things held live by intra-Nursery pointers.
+ gc::RelocationOverlay* objHead = nullptr;
+ gc::StringRelocationOverlay* stringHead = nullptr;
+
+ using StringDeDupSet =
+ HashSet<JSString*, DeduplicationStringHasher<JSString*>,
+ SystemAllocPolicy>;
+
+ // deDupSet is emplaced at the beginning of the nursery collection and reset
+ // at the end of the nursery collection. It can also be reset during nursery
+ // collection when out of memory to insert new entries.
+ mozilla::Maybe<StringDeDupSet> stringDeDupSet;
+
+#define DEFINE_ON_EDGE_METHOD(name, type, _1, _2) \
+ void on##name##Edge(type** thingp, const char* name) override;
+ JS_FOR_EACH_TRACEKIND(DEFINE_ON_EDGE_METHOD)
+#undef DEFINE_ON_EDGE_METHOD
+
+ public:
+ TenuringTracer(JSRuntime* rt, Nursery* nursery);
+
+ Nursery& nursery() { return nursery_; }
+
+ // Move all objects and everything they can reach to the tenured heap. Called
+ // after all roots have been traced.
+ void collectToObjectFixedPoint();
+
+ // Move all strings and all strings they can reach to the tenured heap, and
+ // additionally do any fixups for when strings are pointing into memory that
+ // was deduplicated. Called after collectToObjectFixedPoint().
+ void collectToStringFixedPoint();
+
+ size_t getTenuredSize() const;
+ size_t getTenuredCells() const;
+
+ void traverse(JS::Value* thingp);
+
+ // The store buffers need to be able to call these directly.
+ void traceObject(JSObject* src);
+ void traceObjectSlots(NativeObject* nobj, uint32_t start, uint32_t end);
+ void traceSlots(JS::Value* vp, uint32_t nslots);
+ void traceString(JSString* src);
+ void traceBigInt(JS::BigInt* src);
+
+ private:
+ // The dependent string chars needs to be relocated if the base which it's
+ // using chars from has been deduplicated.
+ template <typename CharT>
+ void relocateDependentStringChars(JSDependentString* tenuredDependentStr,
+ JSLinearString* baseOrRelocOverlay,
+ size_t* offset,
+ bool* rootBaseNotYetForwarded,
+ JSLinearString** rootBase);
+
+ inline void insertIntoObjectFixupList(gc::RelocationOverlay* entry);
+ inline void insertIntoStringFixupList(gc::StringRelocationOverlay* entry);
+
+ template <typename T>
+ inline T* allocTenured(JS::Zone* zone, gc::AllocKind kind);
+ JSString* allocTenuredString(JSString* src, JS::Zone* zone,
+ gc::AllocKind dstKind);
+
+ inline JSObject* movePlainObjectToTenured(PlainObject* src);
+ JSObject* moveToTenuredSlow(JSObject* src);
+ JSString* moveToTenured(JSString* src);
+ JS::BigInt* moveToTenured(JS::BigInt* src);
+
+ size_t moveElementsToTenured(NativeObject* dst, NativeObject* src,
+ gc::AllocKind dstKind);
+ size_t moveSlotsToTenured(NativeObject* dst, NativeObject* src);
+ size_t moveStringToTenured(JSString* dst, JSString* src,
+ gc::AllocKind dstKind);
+ size_t moveBigIntToTenured(JS::BigInt* dst, JS::BigInt* src,
+ gc::AllocKind dstKind);
+
+ void traceSlots(JS::Value* vp, JS::Value* end);
+};
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_Tenuring_h
diff --git a/js/src/gc/TraceKind.h b/js/src/gc/TraceKind.h
new file mode 100644
index 0000000000..fdcb162239
--- /dev/null
+++ b/js/src/gc/TraceKind.h
@@ -0,0 +1,59 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_TraceKind_h
+#define gc_TraceKind_h
+
+#include "js/TraceKind.h"
+
+namespace js {
+namespace gc {
+
+// Map from all trace kinds to the base GC type.
+template <JS::TraceKind kind>
+struct MapTraceKindToType {};
+
+#define DEFINE_TRACE_KIND_MAP(name, type, _, _1) \
+ template <> \
+ struct MapTraceKindToType<JS::TraceKind::name> { \
+ using Type = type; \
+ };
+JS_FOR_EACH_TRACEKIND(DEFINE_TRACE_KIND_MAP);
+#undef DEFINE_TRACE_KIND_MAP
+
+// Map from a possibly-derived type to the base GC type.
+template <typename T>
+struct BaseGCType {
+ using type =
+ typename MapTraceKindToType<JS::MapTypeToTraceKind<T>::kind>::Type;
+ static_assert(std::is_base_of_v<type, T>, "Failed to find base type");
+};
+
+template <typename T>
+struct TraceKindCanBeGray {};
+#define EXPAND_TRACEKIND_DEF(_, type, canBeGray, _1) \
+ template <> \
+ struct TraceKindCanBeGray<type> { \
+ static constexpr bool value = canBeGray; \
+ };
+JS_FOR_EACH_TRACEKIND(EXPAND_TRACEKIND_DEF)
+#undef EXPAND_TRACEKIND_DEF
+
+struct TraceKindCanBeGrayFunctor {
+ template <typename T>
+ bool operator()() {
+ return TraceKindCanBeGray<T>::value;
+ }
+};
+
+static inline bool TraceKindCanBeMarkedGray(JS::TraceKind kind) {
+ return DispatchTraceKindTyped(TraceKindCanBeGrayFunctor(), kind);
+}
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_TraceKind_h */
diff --git a/js/src/gc/TraceMethods-inl.h b/js/src/gc/TraceMethods-inl.h
new file mode 100644
index 0000000000..67d4142e0a
--- /dev/null
+++ b/js/src/gc/TraceMethods-inl.h
@@ -0,0 +1,384 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Trace methods for all GC things, defined in a separate header to allow
+ * inlining.
+ *
+ * This also includes eager inline marking versions. Both paths must end up
+ * traversing equivalent subgraphs.
+ */
+
+#ifndef gc_TraceMethods_inl_h
+#define gc_TraceMethods_inl_h
+
+#include "gc/GCMarker.h"
+#include "gc/Tracer.h"
+#include "jit/JitCode.h"
+#include "vm/BigIntType.h"
+#include "vm/GetterSetter.h"
+#include "vm/GlobalObject.h"
+#include "vm/JSScript.h"
+#include "vm/PropMap.h"
+#include "vm/Realm.h"
+#include "vm/Scope.h"
+#include "vm/Shape.h"
+#include "vm/StringType.h"
+#include "vm/SymbolType.h"
+#include "wasm/WasmJS.h"
+
+inline void js::BaseScript::traceChildren(JSTracer* trc) {
+ TraceNullableEdge(trc, &function_, "function");
+ TraceEdge(trc, &sourceObject_, "sourceObject");
+
+ warmUpData_.trace(trc);
+
+ if (data_) {
+ data_->trace(trc);
+ }
+}
+
+inline void js::Shape::traceChildren(JSTracer* trc) {
+ TraceCellHeaderEdge(trc, this, "base");
+ if (isNative()) {
+ asNative().traceChildren(trc);
+ }
+}
+
+inline void js::NativeShape::traceChildren(JSTracer* trc) {
+ TraceNullableEdge(trc, &propMap_, "propertymap");
+}
+
+template <uint32_t opts>
+void js::GCMarker::eagerlyMarkChildren(Shape* shape) {
+ MOZ_ASSERT(shape->isMarked(markColor()));
+
+ BaseShape* base = shape->base();
+ checkTraversedEdge(shape, base);
+ if (mark<opts>(base)) {
+ base->traceChildren(tracer());
+ }
+
+ if (shape->isNative()) {
+ if (PropMap* map = shape->asNative().propMap()) {
+ markAndTraverseEdge<opts>(shape, map);
+ }
+ }
+}
+
+inline void JSString::traceChildren(JSTracer* trc) {
+ if (hasBase()) {
+ traceBase(trc);
+ } else if (isRope()) {
+ asRope().traceChildren(trc);
+ }
+}
+template <uint32_t opts>
+void js::GCMarker::eagerlyMarkChildren(JSString* str) {
+ if (str->isLinear()) {
+ eagerlyMarkChildren<opts>(&str->asLinear());
+ } else {
+ eagerlyMarkChildren<opts>(&str->asRope());
+ }
+}
+
+inline void JSString::traceBase(JSTracer* trc) {
+ MOZ_ASSERT(hasBase());
+ js::TraceManuallyBarrieredEdge(trc, &d.s.u3.base, "base");
+}
+template <uint32_t opts>
+void js::GCMarker::eagerlyMarkChildren(JSLinearString* linearStr) {
+ gc::AssertShouldMarkInZone(this, linearStr);
+ MOZ_ASSERT(linearStr->isMarkedAny());
+ MOZ_ASSERT(linearStr->JSString::isLinear());
+
+ // Use iterative marking to avoid blowing out the stack.
+ while (linearStr->hasBase()) {
+ linearStr = linearStr->base();
+
+ // It's possible to observe a rope as the base of a linear string if we
+ // process barriers during rope flattening. See the assignment of base in
+ // JSRope::flattenInternal's finish_node section.
+ if (static_cast<JSString*>(linearStr)->isRope()) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
+ break;
+ }
+
+ MOZ_ASSERT(linearStr->JSString::isLinear());
+ MOZ_ASSERT(!linearStr->isPermanentAtom());
+ gc::AssertShouldMarkInZone(this, linearStr);
+ if (!mark<opts>(static_cast<JSString*>(linearStr))) {
+ break;
+ }
+ }
+}
+
+inline void JSRope::traceChildren(JSTracer* trc) {
+ js::TraceManuallyBarrieredEdge(trc, &d.s.u2.left, "left child");
+ js::TraceManuallyBarrieredEdge(trc, &d.s.u3.right, "right child");
+}
+template <uint32_t opts>
+void js::GCMarker::eagerlyMarkChildren(JSRope* rope) {
+ // This function tries to scan the whole rope tree using the marking stack
+ // as temporary storage. If that becomes full, the unscanned ropes are
+ // added to the delayed marking list. When the function returns, the
+ // marking stack is at the same depth as it was on entry. This way we avoid
+ // using tags when pushing ropes to the stack as ropes never leak to other
+ // users of the stack. This also assumes that a rope can only point to
+ // other ropes or linear strings, it cannot refer to GC things of other
+ // types.
+ size_t savedPos = stack.position();
+ MOZ_DIAGNOSTIC_ASSERT(rope->getTraceKind() == JS::TraceKind::String);
+ while (true) {
+ MOZ_DIAGNOSTIC_ASSERT(rope->getTraceKind() == JS::TraceKind::String);
+ MOZ_DIAGNOSTIC_ASSERT(rope->JSString::isRope());
+ gc::AssertShouldMarkInZone(this, rope);
+ MOZ_ASSERT(rope->isMarkedAny());
+ JSRope* next = nullptr;
+
+ JSString* right = rope->rightChild();
+ if (mark<opts>(right)) {
+ MOZ_ASSERT(!right->isPermanentAtom());
+ if (right->isLinear()) {
+ eagerlyMarkChildren<opts>(&right->asLinear());
+ } else {
+ next = &right->asRope();
+ }
+ }
+
+ JSString* left = rope->leftChild();
+ if (mark<opts>(left)) {
+ MOZ_ASSERT(!left->isPermanentAtom());
+ if (left->isLinear()) {
+ eagerlyMarkChildren<opts>(&left->asLinear());
+ } else {
+ // When both children are ropes, set aside the right one to
+ // scan it later.
+ if (next && !stack.pushTempRope(next)) {
+ delayMarkingChildrenOnOOM(next);
+ }
+ next = &left->asRope();
+ }
+ }
+ if (next) {
+ rope = next;
+ } else if (savedPos != stack.position()) {
+ MOZ_ASSERT(savedPos < stack.position());
+ rope = stack.popPtr().asTempRope();
+ } else {
+ break;
+ }
+ }
+ MOZ_ASSERT(savedPos == stack.position());
+}
+
+inline void JS::Symbol::traceChildren(JSTracer* trc) {
+ js::TraceNullableCellHeaderEdge(trc, this, "symbol description");
+}
+
+template <typename SlotInfo>
+void js::RuntimeScopeData<SlotInfo>::trace(JSTracer* trc) {
+ TraceBindingNames(trc, GetScopeDataTrailingNamesPointer(this), length);
+}
+
+inline void js::FunctionScope::RuntimeData::trace(JSTracer* trc) {
+ TraceNullableEdge(trc, &canonicalFunction, "scope canonical function");
+ TraceNullableBindingNames(trc, GetScopeDataTrailingNamesPointer(this),
+ length);
+}
+inline void js::ModuleScope::RuntimeData::trace(JSTracer* trc) {
+ TraceNullableEdge(trc, &module, "scope module");
+ TraceBindingNames(trc, GetScopeDataTrailingNamesPointer(this), length);
+}
+inline void js::WasmInstanceScope::RuntimeData::trace(JSTracer* trc) {
+ TraceNullableEdge(trc, &instance, "wasm instance");
+ TraceBindingNames(trc, GetScopeDataTrailingNamesPointer(this), length);
+}
+
+inline void js::Scope::traceChildren(JSTracer* trc) {
+ TraceNullableEdge(trc, &environmentShape_, "scope env shape");
+ TraceNullableEdge(trc, &enclosingScope_, "scope enclosing");
+ applyScopeDataTyped([trc](auto data) { data->trace(trc); });
+}
+
+template <uint32_t opts>
+void js::GCMarker::eagerlyMarkChildren(Scope* scope) {
+ do {
+ if (Shape* shape = scope->environmentShape()) {
+ markAndTraverseEdge<opts>(scope, shape);
+ }
+ mozilla::Span<AbstractBindingName<JSAtom>> names;
+ switch (scope->kind()) {
+ case ScopeKind::Function: {
+ FunctionScope::RuntimeData& data = scope->as<FunctionScope>().data();
+ if (data.canonicalFunction) {
+ markAndTraverseObjectEdge<opts>(scope, data.canonicalFunction);
+ }
+ names = GetScopeDataTrailingNames(&data);
+ break;
+ }
+
+ case ScopeKind::FunctionBodyVar: {
+ VarScope::RuntimeData& data = scope->as<VarScope>().data();
+ names = GetScopeDataTrailingNames(&data);
+ break;
+ }
+
+ case ScopeKind::Lexical:
+ case ScopeKind::SimpleCatch:
+ case ScopeKind::Catch:
+ case ScopeKind::NamedLambda:
+ case ScopeKind::StrictNamedLambda:
+ case ScopeKind::FunctionLexical: {
+ LexicalScope::RuntimeData& data = scope->as<LexicalScope>().data();
+ names = GetScopeDataTrailingNames(&data);
+ break;
+ }
+
+ case ScopeKind::ClassBody: {
+ ClassBodyScope::RuntimeData& data = scope->as<ClassBodyScope>().data();
+ names = GetScopeDataTrailingNames(&data);
+ break;
+ }
+
+ case ScopeKind::Global:
+ case ScopeKind::NonSyntactic: {
+ GlobalScope::RuntimeData& data = scope->as<GlobalScope>().data();
+ names = GetScopeDataTrailingNames(&data);
+ break;
+ }
+
+ case ScopeKind::Eval:
+ case ScopeKind::StrictEval: {
+ EvalScope::RuntimeData& data = scope->as<EvalScope>().data();
+ names = GetScopeDataTrailingNames(&data);
+ break;
+ }
+
+ case ScopeKind::Module: {
+ ModuleScope::RuntimeData& data = scope->as<ModuleScope>().data();
+ if (data.module) {
+ markAndTraverseObjectEdge<opts>(scope, data.module);
+ }
+ names = GetScopeDataTrailingNames(&data);
+ break;
+ }
+
+ case ScopeKind::With:
+ break;
+
+ case ScopeKind::WasmInstance: {
+ WasmInstanceScope::RuntimeData& data =
+ scope->as<WasmInstanceScope>().data();
+ markAndTraverseObjectEdge<opts>(scope, data.instance);
+ names = GetScopeDataTrailingNames(&data);
+ break;
+ }
+
+ case ScopeKind::WasmFunction: {
+ WasmFunctionScope::RuntimeData& data =
+ scope->as<WasmFunctionScope>().data();
+ names = GetScopeDataTrailingNames(&data);
+ break;
+ }
+ }
+ if (scope->kind_ == ScopeKind::Function) {
+ for (auto& binding : names) {
+ if (JSAtom* name = binding.name()) {
+ markAndTraverseStringEdge<opts>(scope, name);
+ }
+ }
+ } else {
+ for (auto& binding : names) {
+ markAndTraverseStringEdge<opts>(scope, binding.name());
+ }
+ }
+ scope = scope->enclosing();
+ } while (scope && mark<opts>(scope));
+}
+
+inline void js::BaseShape::traceChildren(JSTracer* trc) {
+ // Note: the realm's global can be nullptr if we GC while creating the global.
+ if (JSObject* global = realm()->unsafeUnbarrieredMaybeGlobal()) {
+ TraceManuallyBarrieredEdge(trc, &global, "baseshape_global");
+ }
+
+ if (proto_.isObject()) {
+ TraceEdge(trc, &proto_, "baseshape_proto");
+ }
+}
+
+inline void js::GetterSetter::traceChildren(JSTracer* trc) {
+ if (getter()) {
+ TraceCellHeaderEdge(trc, this, "gettersetter_getter");
+ }
+ if (setter()) {
+ TraceEdge(trc, &setter_, "gettersetter_setter");
+ }
+}
+
+inline void js::PropMap::traceChildren(JSTracer* trc) {
+ if (hasPrevious()) {
+ TraceEdge(trc, &asLinked()->data_.previous, "propmap_previous");
+ }
+
+ if (isShared()) {
+ SharedPropMap::TreeData& treeData = asShared()->treeDataRef();
+ if (SharedPropMap* parent = treeData.parent.maybeMap()) {
+ TraceManuallyBarrieredEdge(trc, &parent, "propmap_parent");
+ if (parent != treeData.parent.map()) {
+ treeData.setParent(parent, treeData.parent.index());
+ }
+ }
+ }
+
+ for (uint32_t i = 0; i < PropMap::Capacity; i++) {
+ if (hasKey(i)) {
+ TraceEdge(trc, &keys_[i], "propmap_key");
+ }
+ }
+
+ if (canHaveTable() && asLinked()->hasTable()) {
+ asLinked()->data_.table->trace(trc);
+ }
+}
+
+template <uint32_t opts>
+void js::GCMarker::eagerlyMarkChildren(PropMap* map) {
+ MOZ_ASSERT(map->isMarkedAny());
+ do {
+ for (uint32_t i = 0; i < PropMap::Capacity; i++) {
+ if (map->hasKey(i)) {
+ markAndTraverseEdge<opts>(map, map->getKey(i));
+ }
+ }
+
+ if (map->canHaveTable()) {
+ // Special case: if a map has a table then all its pointers must point to
+ // this map or an ancestor. Since these pointers will be traced by this
+ // loop they do not need to be traced here as well.
+ MOZ_ASSERT(map->asLinked()->canSkipMarkingTable());
+ }
+
+ if (map->isDictionary()) {
+ map = map->asDictionary()->previous();
+ } else {
+ // For shared maps follow the |parent| link and not the |previous| link.
+ // They're different when a map had a branch that wasn't at the end of the
+ // map, but in this case they must have the same |previous| map. This is
+ // asserted in SharedPropMap::addChild. In other words, marking all
+ // |parent| maps will also mark all |previous| maps.
+ map = map->asShared()->treeDataRef().parent.maybeMap();
+ }
+ } while (map && mark<opts>(map));
+}
+
+inline void JS::BigInt::traceChildren(JSTracer* trc) {}
+
+// JitCode::traceChildren is not defined inline due to its dependence on
+// MacroAssembler.
+
+#endif // gc_TraceMethods_inl_h
diff --git a/js/src/gc/Tracer.cpp b/js/src/gc/Tracer.cpp
new file mode 100644
index 0000000000..d150e4d2b7
--- /dev/null
+++ b/js/src/gc/Tracer.cpp
@@ -0,0 +1,297 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Tracer.h"
+
+#include "mozilla/DebugOnly.h"
+
+#include "NamespaceImports.h"
+
+#include "gc/PublicIterators.h"
+#include "jit/JitCode.h"
+#include "util/Memory.h"
+#include "util/Text.h"
+#include "vm/BigIntType.h"
+#include "vm/JSContext.h"
+#include "vm/JSFunction.h"
+#include "vm/JSScript.h"
+#include "vm/RegExpShared.h"
+#include "vm/Scope.h"
+#include "vm/Shape.h"
+#include "vm/StringType.h"
+#include "vm/SymbolType.h"
+
+#include "gc/TraceMethods-inl.h"
+#include "vm/Shape-inl.h"
+
+using namespace js;
+using namespace js::gc;
+using mozilla::DebugOnly;
+
+template void RuntimeScopeData<LexicalScope::SlotInfo>::trace(JSTracer* trc);
+template void RuntimeScopeData<ClassBodyScope::SlotInfo>::trace(JSTracer* trc);
+template void RuntimeScopeData<VarScope::SlotInfo>::trace(JSTracer* trc);
+template void RuntimeScopeData<GlobalScope::SlotInfo>::trace(JSTracer* trc);
+template void RuntimeScopeData<EvalScope::SlotInfo>::trace(JSTracer* trc);
+template void RuntimeScopeData<WasmFunctionScope::SlotInfo>::trace(
+ JSTracer* trc);
+
+void JS::TracingContext::getEdgeName(const char* name, char* buffer,
+ size_t bufferSize) {
+ MOZ_ASSERT(bufferSize > 0);
+ if (functor_) {
+ (*functor_)(this, buffer, bufferSize);
+ return;
+ }
+ if (index_ != InvalidIndex) {
+ snprintf(buffer, bufferSize, "%s[%zu]", name, index_);
+ return;
+ }
+ snprintf(buffer, bufferSize, "%s", name);
+}
+
+/*** Public Tracing API *****************************************************/
+
+JS_PUBLIC_API void JS::TraceChildren(JSTracer* trc, GCCellPtr thing) {
+ ApplyGCThingTyped(thing.asCell(), thing.kind(), [trc](auto t) {
+ MOZ_ASSERT_IF(t->runtimeFromAnyThread() != trc->runtime(),
+ t->isPermanentAndMayBeShared());
+ t->traceChildren(trc);
+ });
+}
+
+void js::gc::TraceIncomingCCWs(JSTracer* trc,
+ const JS::CompartmentSet& compartments) {
+ for (CompartmentsIter source(trc->runtime()); !source.done(); source.next()) {
+ if (compartments.has(source)) {
+ continue;
+ }
+ // Iterate over all compartments that |source| has wrappers for.
+ for (Compartment::WrappedObjectCompartmentEnum dest(source); !dest.empty();
+ dest.popFront()) {
+ if (!compartments.has(dest)) {
+ continue;
+ }
+ // Iterate over all wrappers from |source| to |dest| compartments.
+ for (Compartment::ObjectWrapperEnum e(source, dest); !e.empty();
+ e.popFront()) {
+ JSObject* obj = e.front().key();
+ MOZ_ASSERT(compartments.has(obj->compartment()));
+ mozilla::DebugOnly<JSObject*> prior = obj;
+ TraceManuallyBarrieredEdge(trc, &obj,
+ "cross-compartment wrapper target");
+ MOZ_ASSERT(obj == prior);
+ }
+ }
+ }
+}
+
+/*** Cycle Collector Helpers ************************************************/
+
+// This function is used by the Cycle Collector (CC) to trace through -- or in
+// CC parlance, traverse -- a Shape. The CC does not care about Shapes,
+// BaseShapes or PropMaps, only the JSObjects held live by them. Thus, we only
+// report non-Shape things.
+void gc::TraceCycleCollectorChildren(JS::CallbackTracer* trc, Shape* shape) {
+ shape->base()->traceChildren(trc);
+ // Don't trace the PropMap because the CC doesn't care about PropertyKey.
+}
+
+/*** Traced Edge Printer ****************************************************/
+
+static size_t CountDecimalDigits(size_t num) {
+ size_t numDigits = 0;
+ do {
+ num /= 10;
+ numDigits++;
+ } while (num > 0);
+
+ return numDigits;
+}
+
+static const char* StringKindHeader(JSString* str) {
+ MOZ_ASSERT(str->isLinear());
+
+ if (str->isAtom()) {
+ if (str->isPermanentAtom()) {
+ return "permanent atom: ";
+ }
+ return "atom: ";
+ }
+
+ if (str->isExtensible()) {
+ return "extensible: ";
+ }
+
+ if (str->isInline()) {
+ if (str->isFatInline()) {
+ return "fat inline: ";
+ }
+ return "inline: ";
+ }
+
+ if (str->isDependent()) {
+ return "dependent: ";
+ }
+
+ if (str->isExternal()) {
+ return "external: ";
+ }
+
+ return "linear: ";
+}
+
+void js::gc::GetTraceThingInfo(char* buf, size_t bufsize, void* thing,
+ JS::TraceKind kind, bool details) {
+ const char* name = nullptr; /* silence uninitialized warning */
+ size_t n;
+
+ if (bufsize == 0) {
+ return;
+ }
+
+ switch (kind) {
+ case JS::TraceKind::BaseShape:
+ name = "base_shape";
+ break;
+
+ case JS::TraceKind::GetterSetter:
+ name = "getter_setter";
+ break;
+
+ case JS::TraceKind::PropMap:
+ name = "prop_map";
+ break;
+
+ case JS::TraceKind::JitCode:
+ name = "jitcode";
+ break;
+
+ case JS::TraceKind::Null:
+ name = "null_pointer";
+ break;
+
+ case JS::TraceKind::Object: {
+ name = static_cast<JSObject*>(thing)->getClass()->name;
+ break;
+ }
+
+ case JS::TraceKind::RegExpShared:
+ name = "reg_exp_shared";
+ break;
+
+ case JS::TraceKind::Scope:
+ name = "scope";
+ break;
+
+ case JS::TraceKind::Script:
+ name = "script";
+ break;
+
+ case JS::TraceKind::Shape:
+ name = "shape";
+ break;
+
+ case JS::TraceKind::String:
+ name = ((JSString*)thing)->isDependent() ? "substring" : "string";
+ break;
+
+ case JS::TraceKind::Symbol:
+ name = "symbol";
+ break;
+
+ case JS::TraceKind::BigInt:
+ name = "BigInt";
+ break;
+
+ default:
+ name = "INVALID";
+ break;
+ }
+
+ n = strlen(name);
+ if (n > bufsize - 1) {
+ n = bufsize - 1;
+ }
+ js_memcpy(buf, name, n + 1);
+ buf += n;
+ bufsize -= n;
+ *buf = '\0';
+
+ if (details && bufsize > 2) {
+ switch (kind) {
+ case JS::TraceKind::Object: {
+ JSObject* obj = (JSObject*)thing;
+ if (obj->is<JSFunction>()) {
+ JSFunction* fun = &obj->as<JSFunction>();
+ if (fun->displayAtom()) {
+ *buf++ = ' ';
+ bufsize--;
+ PutEscapedString(buf, bufsize, fun->displayAtom(), 0);
+ }
+ } else {
+ snprintf(buf, bufsize, " <unknown object>");
+ }
+ break;
+ }
+
+ case JS::TraceKind::Script: {
+ auto* script = static_cast<js::BaseScript*>(thing);
+ snprintf(buf, bufsize, " %s:%u", script->filename(), script->lineno());
+ break;
+ }
+
+ case JS::TraceKind::String: {
+ *buf++ = ' ';
+ bufsize--;
+ JSString* str = (JSString*)thing;
+
+ if (str->isLinear()) {
+ const char* header = StringKindHeader(str);
+ bool willFit = str->length() + strlen("<length > ") + strlen(header) +
+ CountDecimalDigits(str->length()) <
+ bufsize;
+
+ n = snprintf(buf, bufsize, "<%slength %zu%s> ", header, str->length(),
+ willFit ? "" : " (truncated)");
+ buf += n;
+ bufsize -= n;
+
+ PutEscapedString(buf, bufsize, &str->asLinear(), 0);
+ } else {
+ snprintf(buf, bufsize, "<rope: length %zu>", str->length());
+ }
+ break;
+ }
+
+ case JS::TraceKind::Symbol: {
+ *buf++ = ' ';
+ bufsize--;
+ auto* sym = static_cast<JS::Symbol*>(thing);
+ if (JSAtom* desc = sym->description()) {
+ PutEscapedString(buf, bufsize, desc, 0);
+ } else {
+ snprintf(buf, bufsize, "<null>");
+ }
+ break;
+ }
+
+ case JS::TraceKind::Scope: {
+ auto* scope = static_cast<js::Scope*>(thing);
+ snprintf(buf, bufsize, " %s", js::ScopeKindString(scope->kind()));
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ buf[bufsize - 1] = '\0';
+}
+
+JS::CallbackTracer::CallbackTracer(JSContext* cx, JS::TracerKind kind,
+ JS::TraceOptions options)
+ : CallbackTracer(cx->runtime(), kind, options) {}
diff --git a/js/src/gc/Tracer.h b/js/src/gc/Tracer.h
new file mode 100644
index 0000000000..e1f10980de
--- /dev/null
+++ b/js/src/gc/Tracer.h
@@ -0,0 +1,401 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_Tracer_h
+#define js_Tracer_h
+
+#include "gc/Barrier.h"
+#include "gc/TraceKind.h"
+#include "js/HashTable.h"
+#include "js/TracingAPI.h"
+
+namespace JS {
+using CompartmentSet =
+ js::HashSet<Compartment*, js::DefaultHasher<Compartment*>,
+ js::SystemAllocPolicy>;
+} // namespace JS
+
+namespace js {
+
+class TaggedProto;
+
+// Internal Tracing API
+//
+// Tracing is an abstract visitation of each edge in a JS heap graph.[1] The
+// most common (and performance sensitive) use of this infrastructure is for GC
+// "marking" as part of the mark-and-sweep collector; however, this
+// infrastructure is much more general than that and is used for many other
+// purposes as well.
+//
+// One commonly misunderstood subtlety of the tracing architecture is the role
+// of graph vertices versus graph edges. Graph vertices are the heap
+// allocations -- GC things -- that are returned by Allocate. Graph edges are
+// pointers -- including tagged pointers like Value and jsid -- that link the
+// allocations into a complex heap. The tracing API deals *only* with edges.
+// Any action taken on the target of a graph edge is independent of the tracing
+// itself.
+//
+// Another common misunderstanding relates to the role of the JSTracer. The
+// JSTracer instance determines what tracing does when visiting an edge; it
+// does not itself participate in the tracing process, other than to be passed
+// through as opaque data. It works like a closure in that respect.
+//
+// Tracing implementations internal to SpiderMonkey should use these interfaces
+// instead of the public interfaces in js/TracingAPI.h. Unlike the public
+// tracing methods, these work on internal types and avoid an external call.
+//
+// Note that the implementations for these methods are, surprisingly, in
+// js/src/gc/Marking.cpp. This is so that the compiler can inline as much as
+// possible in the common, marking pathways. Conceptually, however, they remain
+// as part of the generic "tracing" architecture, rather than the more specific
+// marking implementation of tracing.
+//
+// 1 - In SpiderMonkey, we call this concept tracing rather than visiting
+// because "visiting" is already used by the compiler. Also, it's been
+// called "tracing" forever and changing it would be extremely difficult at
+// this point.
+
+class GCMarker;
+
+// Debugging functions to check tracing invariants.
+#ifdef DEBUG
+template <typename T>
+void CheckTracedThing(JSTracer* trc, T* thing);
+template <typename T>
+void CheckTracedThing(JSTracer* trc, const T& thing);
+#else
+template <typename T>
+inline void CheckTracedThing(JSTracer* trc, T* thing) {}
+template <typename T>
+inline void CheckTracedThing(JSTracer* trc, const T& thing) {}
+#endif
+
+namespace gc {
+
+// Our barrier templates are parameterized on the pointer types so that we can
+// share the definitions with Value and jsid. Thus, we need to strip the
+// pointer before sending the type to BaseGCType and re-add it on the other
+// side. As such:
+template <typename T>
+struct PtrBaseGCType {
+ using type = T;
+};
+template <typename T>
+struct PtrBaseGCType<T*> {
+ using type = typename BaseGCType<T>::type*;
+};
+
+// Cast a possibly-derived T** pointer to a base class pointer.
+template <typename T>
+typename PtrBaseGCType<T>::type* ConvertToBase(T* thingp) {
+ return reinterpret_cast<typename PtrBaseGCType<T>::type*>(thingp);
+}
+
+// Internal methods to trace edges.
+
+#define DEFINE_TRACE_FUNCTION(name, type, _1, _2) \
+ MOZ_ALWAYS_INLINE bool TraceEdgeInternal(JSTracer* trc, type** thingp, \
+ const char* name) { \
+ CheckTracedThing(trc, *thingp); \
+ trc->on##name##Edge(thingp, name); \
+ return *thingp; \
+ }
+JS_FOR_EACH_TRACEKIND(DEFINE_TRACE_FUNCTION)
+#undef DEFINE_TRACE_FUNCTION
+
+bool TraceEdgeInternal(JSTracer* trc, Value* thingp, const char* name);
+bool TraceEdgeInternal(JSTracer* trc, jsid* thingp, const char* name);
+bool TraceEdgeInternal(JSTracer* trc, TaggedProto* thingp, const char* name);
+
+template <typename T>
+void TraceRangeInternal(JSTracer* trc, size_t len, T* vec, const char* name);
+template <typename T>
+bool TraceWeakMapKeyInternal(JSTracer* trc, Zone* zone, T* thingp,
+ const char* name);
+
+#ifdef DEBUG
+void AssertRootMarkingPhase(JSTracer* trc);
+void AssertShouldMarkInZone(GCMarker* marker, gc::Cell* thing);
+#else
+inline void AssertRootMarkingPhase(JSTracer* trc) {}
+inline void AssertShouldMarkInZone(GCMarker* marker, gc::Cell* thing) {}
+#endif
+
+} // namespace gc
+
+// Trace through a strong edge in the live object graph on behalf of
+// tracing. The effect of tracing the edge depends on the JSTracer being
+// used. For pointer types, |*thingp| must not be null.
+//
+// Note that weak edges are handled separately. GC things with weak edges must
+// not trace those edges during marking tracing (which would keep the referent
+// alive) but instead arrange for the edge to be swept by calling
+// js::gc::IsAboutToBeFinalized or TraceWeakEdge during sweeping.
+//
+// GC things that are weakly held in containers can use WeakMap or a container
+// wrapped in the WeakCache<> template to perform the appropriate sweeping.
+
+template <typename T>
+inline void TraceEdge(JSTracer* trc, const WriteBarriered<T>* thingp,
+ const char* name) {
+ gc::TraceEdgeInternal(trc, gc::ConvertToBase(thingp->unbarrieredAddress()),
+ name);
+}
+
+template <typename T>
+inline void TraceEdge(JSTracer* trc, WeakHeapPtr<T>* thingp, const char* name) {
+ gc::TraceEdgeInternal(trc, gc::ConvertToBase(thingp->unbarrieredAddress()),
+ name);
+}
+
+// Trace the GC pointer stored in a cell's header word. The pointer is traced
+// via a local copy and written back only if tracing changed it (e.g. because
+// the referent was updated by the tracer), avoiding an unnecessary store on
+// the common unchanged path.
+template <class BC, class T>
+inline void TraceCellHeaderEdge(JSTracer* trc,
+                                gc::CellWithTenuredGCPointer<BC, T>* thingp,
+                                const char* name) {
+  T* thing = thingp->headerPtr();
+  gc::TraceEdgeInternal(trc, gc::ConvertToBase(&thing), name);
+  // Write back only on change; unbarrieredSetHeaderPtr skips barrier checks.
+  if (thing != thingp->headerPtr()) {
+    thingp->unbarrieredSetHeaderPtr(thing);
+  }
+}
+
+template <class T>
+inline void TraceCellHeaderEdge(JSTracer* trc,
+ gc::TenuredCellWithGCPointer<T>* thingp,
+ const char* name) {
+ T* thing = thingp->headerPtr();
+ gc::TraceEdgeInternal(trc, gc::ConvertToBase(&thing), name);
+ if (thing != thingp->headerPtr()) {
+ thingp->unbarrieredSetHeaderPtr(thing);
+ }
+}
+
+// Trace through a possibly-null edge in the live object graph on behalf of
+// tracing.
+
+template <typename T>
+inline void TraceNullableEdge(JSTracer* trc, const WriteBarriered<T>* thingp,
+ const char* name) {
+ if (InternalBarrierMethods<T>::isMarkable(thingp->get())) {
+ TraceEdge(trc, thingp, name);
+ }
+}
+
+template <typename T>
+inline void TraceNullableEdge(JSTracer* trc, WeakHeapPtr<T>* thingp,
+ const char* name) {
+ if (InternalBarrierMethods<T>::isMarkable(thingp->unbarrieredGet())) {
+ TraceEdge(trc, thingp, name);
+ }
+}
+
+// As TraceCellHeaderEdge, but tolerates a null header pointer: a null edge is
+// simply skipped rather than traced.
+template <class BC, class T>
+inline void TraceNullableCellHeaderEdge(
+    JSTracer* trc, gc::CellWithTenuredGCPointer<BC, T>* thingp,
+    const char* name) {
+  T* thing = thingp->headerPtr();
+  if (thing) {
+    gc::TraceEdgeInternal(trc, gc::ConvertToBase(&thing), name);
+    // Write back only if tracing updated the pointer.
+    if (thing != thingp->headerPtr()) {
+      thingp->unbarrieredSetHeaderPtr(thing);
+    }
+  }
+}
+
+// Trace through a "root" edge. These edges are the initial edges in the object
+// graph traversal. Root edges are asserted to only be traversed in the initial
+// phase of a GC.
+
+template <typename T>
+inline void TraceRoot(JSTracer* trc, T* thingp, const char* name) {
+ gc::AssertRootMarkingPhase(trc);
+ gc::TraceEdgeInternal(trc, gc::ConvertToBase(thingp), name);
+}
+
+template <typename T>
+inline void TraceRoot(JSTracer* trc, const HeapPtr<T>* thingp,
+ const char* name) {
+ TraceRoot(trc, thingp->unbarrieredAddress(), name);
+}
+
+// Identical to TraceRoot, except that this variant will not crash if |*thingp|
+// is null.
+
+template <typename T>
+inline void TraceNullableRoot(JSTracer* trc, T* thingp, const char* name) {
+ gc::AssertRootMarkingPhase(trc);
+ if (InternalBarrierMethods<T>::isMarkable(*thingp)) {
+ gc::TraceEdgeInternal(trc, gc::ConvertToBase(thingp), name);
+ }
+}
+
+template <typename T>
+inline void TraceNullableRoot(JSTracer* trc, WeakHeapPtr<T>* thingp,
+ const char* name) {
+ TraceNullableRoot(trc, thingp->unbarrieredAddress(), name);
+}
+
+// Like TraceEdge, but for edges that do not use one of the automatic barrier
+// classes and, thus, must be treated specially for moving GC. This method is
+// separate from TraceEdge to make accidental use of such edges more obvious.
+
+template <typename T>
+inline void TraceManuallyBarrieredEdge(JSTracer* trc, T* thingp,
+ const char* name) {
+ gc::TraceEdgeInternal(trc, gc::ConvertToBase(thingp), name);
+}
+
+// Trace through a weak edge. If *thingp is not marked at the end of marking,
+// it is replaced by nullptr, and this method will return false to indicate that
+// the edge no longer exists.
+template <typename T>
+inline bool TraceManuallyBarrieredWeakEdge(JSTracer* trc, T* thingp,
+ const char* name) {
+ return gc::TraceEdgeInternal(trc, gc::ConvertToBase(thingp), name);
+}
+
+// The result of tracing a weak edge, which can be either:
+//
+// - the target is dead (and the edge has been cleared), or
+// - the target is alive (and the edge may have been updated)
+//
+// This includes the initial and final values of the edge to allow cleanup if
+// the target is dead or access to the referent if it is alive.
+template <typename T>
+struct TraceWeakResult {
+  // Whether the weak edge's target survived marking.
+  const bool live_;
+  // Edge value before tracing (available for cleanup when the target died).
+  const T initial_;
+  // Edge value after tracing (may have been updated by the tracer).
+  const T final_;
+
+  bool isLive() const { return live_; }
+  bool isDead() const { return !live_; }
+
+  // Allow |if (TraceWeakEdge(...))| to test liveness directly.
+  MOZ_IMPLICIT operator bool() const { return isLive(); }
+
+  // Only valid when the target is dead: the value the edge used to hold.
+  T initialTarget() const {
+    MOZ_ASSERT(isDead());
+    return initial_;
+  }
+
+  // Only valid when the target is alive: the (possibly updated) edge value.
+  T finalTarget() const {
+    MOZ_ASSERT(isLive());
+    return final_;
+  }
+};
+
+// Trace a weak edge. Non-markable values (e.g. null or non-GC-thing tagged
+// values, per InternalBarrierMethods<T>::isMarkable) count as live without
+// tracing; otherwise TraceEdgeInternal reports whether the target survived
+// and may update |*addr| in place.
+template <typename T>
+inline TraceWeakResult<T> TraceWeakEdge(JSTracer* trc, BarrieredBase<T>* thingp,
+                                        const char* name) {
+  T* addr = thingp->unbarrieredAddress();
+  T initial = *addr;
+  bool live = !InternalBarrierMethods<T>::isMarkable(initial) ||
+              gc::TraceEdgeInternal(trc, gc::ConvertToBase(addr), name);
+  // Capture both the pre- and post-tracing values for the caller.
+  return TraceWeakResult<T>{live, initial, *addr};
+}
+
+// Trace all edges contained in the given array.
+
+template <typename T>
+void TraceRange(JSTracer* trc, size_t len, BarrieredBase<T>* vec,
+ const char* name) {
+ gc::TraceRangeInternal(trc, len,
+ gc::ConvertToBase(vec[0].unbarrieredAddress()), name);
+}
+
+// Trace all root edges in the given array.
+
+template <typename T>
+void TraceRootRange(JSTracer* trc, size_t len, T* vec, const char* name) {
+ gc::AssertRootMarkingPhase(trc);
+ gc::TraceRangeInternal(trc, len, gc::ConvertToBase(vec), name);
+}
+
+// As below but with manual barriers.
+template <typename T>
+void TraceManuallyBarrieredCrossCompartmentEdge(JSTracer* trc, JSObject* src,
+ T* dst, const char* name);
+
+// Trace an edge that crosses compartment boundaries. If the compartment of the
+// destination thing is not being GC'd, then the edge will not be traced.
+template <typename T>
+void TraceCrossCompartmentEdge(JSTracer* trc, JSObject* src,
+ const WriteBarriered<T>* dst, const char* name) {
+ TraceManuallyBarrieredCrossCompartmentEdge(
+ trc, src, gc::ConvertToBase(dst->unbarrieredAddress()), name);
+}
+
+// Trace an edge that's guaranteed to be same-zone but may cross a compartment
+// boundary. This should NOT be used for object => object edges, as those have
+// to be in the cross-compartment wrapper map.
+//
+// WARNING: because this turns off certain compartment checks, you most likely
+// don't want to use this! If you still think you need this function, talk to a
+// GC peer first.
+template <typename T>
+void TraceSameZoneCrossCompartmentEdge(JSTracer* trc,
+ const WriteBarriered<T>* dst,
+ const char* name);
+
+// Trace a weak map key. For debugger weak maps these may be cross compartment,
+// but the compartment must always be within the current sweep group.
+template <typename T>
+void TraceWeakMapKeyEdgeInternal(JSTracer* trc, Zone* weakMapZone, T** thingp,
+ const char* name);
+
+template <typename T>
+inline void TraceWeakMapKeyEdge(JSTracer* trc, Zone* weakMapZone,
+ const WriteBarriered<T>* thingp,
+ const char* name) {
+ TraceWeakMapKeyEdgeInternal(
+ trc, weakMapZone, gc::ConvertToBase(thingp->unbarrieredAddress()), name);
+}
+
+// Trace a root edge that uses the base GC thing type, instead of a more
+// specific type.
+void TraceGenericPointerRoot(JSTracer* trc, gc::Cell** thingp,
+ const char* name);
+
+// Trace a non-root edge that uses the base GC thing type, instead of a more
+// specific type.
+void TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, gc::Cell** thingp,
+ const char* name);
+
+void TraceGCCellPtrRoot(JSTracer* trc, JS::GCCellPtr* thingp, const char* name);
+
+void TraceManuallyBarrieredGCCellPtr(JSTracer* trc, JS::GCCellPtr* thingp,
+ const char* name);
+
+namespace gc {
+
+// Trace through a shape or group iteratively during cycle collection to avoid
+// deep or infinite recursion.
+void TraceCycleCollectorChildren(JS::CallbackTracer* trc, Shape* shape);
+
+/**
+ * Trace every value within |compartments| that is wrapped by a
+ * cross-compartment wrapper from a compartment that is not an element of
+ * |compartments|.
+ */
+void TraceIncomingCCWs(JSTracer* trc, const JS::CompartmentSet& compartments);
+
+/* Get information about a GC thing. Used when dumping the heap. */
+void GetTraceThingInfo(char* buf, size_t bufsize, void* thing,
+ JS::TraceKind kind, bool includeDetails);
+
+// Overloaded function to call the correct GenericTracer method based on the
+// argument type.
+#define DEFINE_DISPATCH_FUNCTION(name, type, _1, _2) \
+ inline void DispatchToOnEdge(JSTracer* trc, type** thingp, \
+ const char* name) { \
+ trc->on##name##Edge(thingp, name); \
+ }
+JS_FOR_EACH_TRACEKIND(DEFINE_DISPATCH_FUNCTION)
+#undef DEFINE_DISPATCH_FUNCTION
+
+} // namespace gc
+} // namespace js
+
+#endif /* js_Tracer_h */
diff --git a/js/src/gc/Verifier.cpp b/js/src/gc/Verifier.cpp
new file mode 100644
index 0000000000..299d73c51f
--- /dev/null
+++ b/js/src/gc/Verifier.cpp
@@ -0,0 +1,1135 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Maybe.h"
+#include "mozilla/Sprintf.h"
+
+#include <algorithm>
+#include <utility>
+
+#ifdef MOZ_VALGRIND
+# include <valgrind/memcheck.h>
+#endif
+
+#include "gc/GCInternals.h"
+#include "gc/GCLock.h"
+#include "gc/PublicIterators.h"
+#include "gc/WeakMap.h"
+#include "gc/Zone.h"
+#include "js/friend/DumpFunctions.h" // js::DumpObject
+#include "js/HashTable.h"
+#include "vm/JSContext.h"
+
+#include "gc/ArenaList-inl.h"
+#include "gc/GC-inl.h"
+#include "gc/Marking-inl.h"
+#include "gc/PrivateIterators-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::DebugOnly;
+
+#ifdef JS_GC_ZEAL
+
+/*
+ * Write barrier verification
+ *
+ * The next few functions are for write barrier verification.
+ *
+ * The VerifyBarriers function is a shorthand. It checks if a verification phase
+ * is currently running. If not, it starts one. Otherwise, it ends the current
+ * phase and starts a new one.
+ *
+ * The user can adjust the frequency of verifications, which causes
+ * VerifyBarriers to be a no-op all but one out of N calls. However, if the
+ * |always| parameter is true, it starts a new phase no matter what.
+ *
+ * Pre-Barrier Verifier:
+ * When StartVerifyBarriers is called, a snapshot is taken of all objects in
+ * the GC heap and saved in an explicit graph data structure. Later,
+ * EndVerifyBarriers traverses the heap again. Any pointer values that were in
+ * the snapshot and are no longer found must be marked; otherwise an assertion
+ * triggers. Note that we must not GC in between starting and finishing a
+ * verification phase.
+ */
+
+struct EdgeValue {
+ JS::GCCellPtr thing;
+ const char* label;
+};
+
+struct VerifyNode {
+ JS::GCCellPtr thing;
+ uint32_t count;
+ EdgeValue edges[1];
+};
+
+typedef HashMap<Cell*, VerifyNode*, DefaultHasher<Cell*>, SystemAllocPolicy>
+ NodeMap;
+
+/*
+ * The verifier data structures are simple. The entire graph is stored in a
+ * single block of memory. At the beginning is a VerifyNode for the root
+ * node. It is followed by a sequence of EdgeValues--the exact number is given
+ * in the node. After the edges come more nodes and their edges.
+ *
+ * The edgeptr and term fields are used to allocate out of the block of memory
+ * for the graph. If we run out of memory (i.e., if edgeptr goes beyond term),
+ * we just abandon the verification.
+ *
+ * The nodemap field is a hashtable that maps from the address of the GC thing
+ * to the VerifyNode that represents it.
+ */
+class js::VerifyPreTracer final : public JS::CallbackTracer {
+ JS::AutoDisableGenerationalGC noggc;
+
+ void onChild(JS::GCCellPtr thing, const char* name) override;
+
+ public:
+ /* The gcNumber when the verification began. */
+ uint64_t number;
+
+ /* This counts up to gcZealFrequency to decide whether to verify. */
+ int count;
+
+ /* This graph represents the initial GC "snapshot". */
+ VerifyNode* curnode;
+ VerifyNode* root;
+ char* edgeptr;
+ char* term;
+ NodeMap nodemap;
+
+ explicit VerifyPreTracer(JSRuntime* rt)
+ : JS::CallbackTracer(rt, JS::TracerKind::Callback,
+ JS::WeakEdgeTraceAction::Skip),
+ noggc(rt->mainContextFromOwnThread()),
+ number(rt->gc.gcNumber()),
+ count(0),
+ curnode(nullptr),
+ root(nullptr),
+ edgeptr(nullptr),
+ term(nullptr) {
+ // We don't care about weak edges here. Since they are not marked they
+ // cannot cause the problem that the pre-write barrier protects against.
+ }
+
+ ~VerifyPreTracer() { js_free(root); }
+};
+
+/*
+ * This function builds up the heap snapshot by adding edges to the current
+ * node.
+ */
+/*
+ * This function builds up the heap snapshot by adding edges to the current
+ * node. Edges are bump-allocated out of the single graph buffer; if the
+ * buffer is exhausted, edgeptr is clamped to term and the snapshot is
+ * silently abandoned (callers detect this via edgeptr == term).
+ */
+void VerifyPreTracer::onChild(JS::GCCellPtr thing, const char* name) {
+  MOZ_ASSERT(!IsInsideNursery(thing.asCell()));
+
+  // Skip things in other runtimes.
+  if (thing.asCell()->asTenured().runtimeFromAnyThread() != runtime()) {
+    return;
+  }
+
+  // Reserve space for one more edge before writing it.
+  edgeptr += sizeof(EdgeValue);
+  if (edgeptr >= term) {
+    edgeptr = term;
+    return;
+  }
+
+  // Append the edge to the node currently being traced.
+  VerifyNode* node = curnode;
+  uint32_t i = node->count;
+
+  node->edges[i].thing = thing;
+  node->edges[i].label = name;
+  node->count++;
+}
+
+// Allocate a VerifyNode for |thing| out of the graph buffer and register it
+// in the node map. Returns the new node, or nullptr if a node for |thing|
+// already exists or if the buffer/map is exhausted (the latter is signalled
+// by setting edgeptr == term, which callers must check).
+static VerifyNode* MakeNode(VerifyPreTracer* trc, JS::GCCellPtr thing) {
+  NodeMap::AddPtr p = trc->nodemap.lookupForAdd(thing.asCell());
+  if (!p) {
+    VerifyNode* node = (VerifyNode*)trc->edgeptr;
+    // A VerifyNode embeds one EdgeValue; reserve only the edge-less base
+    // size here. Edges are appended later by onChild.
+    trc->edgeptr += sizeof(VerifyNode) - sizeof(EdgeValue);
+    if (trc->edgeptr >= trc->term) {
+      trc->edgeptr = trc->term;
+      return nullptr;
+    }
+
+    node->thing = thing;
+    node->count = 0;
+    if (!trc->nodemap.add(p, thing.asCell(), node)) {
+      // Map insertion failed (OOM): abandon the snapshot.
+      trc->edgeptr = trc->term;
+      return nullptr;
+    }
+
+    return node;
+  }
+  return nullptr;
+}
+
+// Step to the node that follows |node| in the packed graph buffer. A
+// VerifyNode embeds a single EdgeValue, so its footprint is the struct size
+// minus that embedded slot, plus one EdgeValue per recorded edge. This single
+// formula covers both the zero-edge and the n-edge cases.
+static VerifyNode* NextNode(VerifyNode* node) {
+  size_t bytes =
+      sizeof(VerifyNode) - sizeof(EdgeValue) + node->count * sizeof(EdgeValue);
+  return reinterpret_cast<VerifyNode*>(reinterpret_cast<char*>(node) + bytes);
+}
+
+// Clear the mark bits of every arena in every zone produced by ZonesIterT
+// (AllZonesIter or GCZonesIter), so a fresh mark can be recorded.
+template <typename ZonesIterT>
+static void ClearMarkBits(GCRuntime* gc) {
+  // This does not clear the mark bits for permanent atoms, whose arenas are
+  // removed from the arena lists by GCRuntime::freezePermanentAtoms.
+
+  for (ZonesIterT zone(gc); !zone.done(); zone.next()) {
+    for (auto kind : AllAllocKinds()) {
+      for (ArenaIter arena(zone, kind); !arena.done(); arena.next()) {
+        arena->unmarkAll();
+      }
+    }
+  }
+}
+
+// Begin a pre-barrier verification phase: snapshot the whole heap into an
+// explicit graph, then put every zone into barrier-verification mode so that
+// endVerifyPreBarriers can check that each snapshot edge that changed was
+// marked by the pre-write barrier. Any OOM abandons the attempt silently.
+void gc::GCRuntime::startVerifyPreBarriers() {
+  // Only one verification phase at a time, and never during an incremental GC.
+  if (verifyPreData || isIncrementalGCInProgress()) {
+    return;
+  }
+
+  JSContext* cx = rt->mainContextFromOwnThread();
+
+  if (IsIncrementalGCUnsafe(rt) != GCAbortReason::None) {
+    return;
+  }
+
+  number++;
+
+  VerifyPreTracer* trc = js_new<VerifyPreTracer>(rt);
+  if (!trc) {
+    return;
+  }
+
+  AutoPrepareForTracing prep(cx);
+
+  ClearMarkBits<AllZonesIter>(this);
+
+  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::TRACE_HEAP);
+
+  // The entire snapshot graph lives in this one fixed-size allocation; if it
+  // overflows, verification is abandoned (see MakeNode/onChild).
+  const size_t size = 64 * 1024 * 1024;
+  trc->root = (VerifyNode*)js_malloc(size);
+  if (!trc->root) {
+    goto oom;
+  }
+  trc->edgeptr = (char*)trc->root;
+  trc->term = trc->edgeptr + size;
+
+  /* Create the root node. */
+  trc->curnode = MakeNode(trc, JS::GCCellPtr());
+
+  MOZ_ASSERT(incrementalState == State::NotActive);
+  incrementalState = State::MarkRoots;
+
+  /* Make all the roots be edges emanating from the root node. */
+  traceRuntime(trc, prep);
+
+  VerifyNode* node;
+  node = trc->curnode;
+  // edgeptr == term means the buffer overflowed while tracing roots.
+  if (trc->edgeptr == trc->term) {
+    goto oom;
+  }
+
+  /* For each edge, make a node for it if one doesn't already exist. */
+  while ((char*)node < trc->edgeptr) {
+    for (uint32_t i = 0; i < node->count; i++) {
+      EdgeValue& e = node->edges[i];
+      // MakeNode returns non-null only for things not seen before; tracing
+      // the child's children appends its edges to the buffer.
+      VerifyNode* child = MakeNode(trc, e.thing);
+      if (child) {
+        trc->curnode = child;
+        JS::TraceChildren(trc, e.thing);
+      }
+      if (trc->edgeptr == trc->term) {
+        goto oom;
+      }
+    }
+
+    node = NextNode(node);
+  }
+
+  // Snapshot complete: enter marking state and switch on the barriers.
+  verifyPreData = trc;
+  incrementalState = State::Mark;
+  marker().start();
+
+  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+    zone->changeGCState(Zone::NoGC, Zone::VerifyPreBarriers);
+    zone->setNeedsIncrementalBarrier(true);
+    zone->arenas.clearFreeLists();
+  }
+
+  return;
+
+oom:
+  incrementalState = State::NotActive;
+  js_delete(trc);
+  verifyPreData = nullptr;
+}
+
+// A tenured cell passes verification if it is marked (any color).
+static bool IsMarkedOrAllocated(TenuredCell* cell) {
+  return cell->isMarkedAny();
+}
+
+struct CheckEdgeTracer final : public JS::CallbackTracer {
+ VerifyNode* node;
+ explicit CheckEdgeTracer(JSRuntime* rt)
+ : JS::CallbackTracer(rt), node(nullptr) {}
+ void onChild(JS::GCCellPtr thing, const char* name) override;
+};
+
+static const uint32_t MAX_VERIFIER_EDGES = 1000;
+
+/*
+ * This function is called by EndVerifyBarriers for every heap edge. If the edge
+ * already existed in the original snapshot, we "cancel it out" by overwriting
+ * it with nullptr. EndVerifyBarriers later asserts that the remaining
+ * non-nullptr edges (i.e., the ones from the original snapshot that must have
+ * been modified) must point to marked objects.
+ */
+/*
+ * This function is called by EndVerifyBarriers for every heap edge. If the
+ * edge already existed in the original snapshot, we "cancel it out" by
+ * overwriting it with a null GCCellPtr. EndVerifyBarriers later asserts that
+ * the remaining non-null edges (i.e., the ones from the original snapshot
+ * that must have been modified) must point to marked objects.
+ */
+void CheckEdgeTracer::onChild(JS::GCCellPtr thing, const char* name) {
+  // Skip things in other runtimes.
+  if (thing.asCell()->asTenured().runtimeFromAnyThread() != runtime()) {
+    return;
+  }
+
+  /* Avoid n^2 behavior. */
+  if (node->count > MAX_VERIFIER_EDGES) {
+    return;
+  }
+
+  // Cancel out the first snapshot edge matching this one, if any.
+  EdgeValue* end = node->edges + node->count;
+  for (EdgeValue* edge = node->edges; edge != end; ++edge) {
+    if (edge->thing == thing) {
+      edge->thing = JS::GCCellPtr();
+      return;
+    }
+  }
+}
+
+// An edge passes verification if it was cancelled out (null), its target is
+// marked, or its target is exempt from marking (permanent atoms and
+// well-known symbols are never marked during graph traversal).
+static bool IsMarkedOrAllocated(const EdgeValue& edge) {
+  if (!edge.thing || IsMarkedOrAllocated(&edge.thing.asCell()->asTenured())) {
+    return true;
+  }
+
+  // Permanent atoms and well-known symbols aren't marked during graph
+  // traversal.
+  if (edge.thing.is<JSString>() &&
+      edge.thing.as<JSString>().isPermanentAtom()) {
+    return true;
+  }
+  if (edge.thing.is<JS::Symbol>() &&
+      edge.thing.as<JS::Symbol>().isWellKnownSymbol()) {
+    return true;
+  }
+
+  return false;
+}
+
+// Finish a pre-barrier verification phase: turn barriers back off, re-trace
+// the heap to cancel out unchanged snapshot edges, and crash on any snapshot
+// edge that was modified but whose old target was not marked by the barrier.
+// Verification is skipped if a new compartment appeared or incremental GC
+// became unsafe since the snapshot was taken.
+void gc::GCRuntime::endVerifyPreBarriers() {
+  VerifyPreTracer* trc = verifyPreData;
+
+  if (!trc) {
+    return;
+  }
+
+  MOZ_ASSERT(!JS::IsGenerationalGCEnabled(rt));
+
+  // Now that barrier marking has finished, prepare the heap to allow this
+  // method to trace cells and discover their outgoing edges.
+  AutoPrepareForTracing prep(rt->mainContextFromOwnThread());
+
+  bool compartmentCreated = false;
+
+  /* We need to disable barriers before tracing, which may invoke barriers. */
+  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
+    // A zone not in VerifyPreBarriers state was created after the snapshot;
+    // its contents are unknown, so the check below must be skipped.
+    if (zone->isVerifyingPreBarriers()) {
+      zone->changeGCState(Zone::VerifyPreBarriers, Zone::NoGC);
+    } else {
+      compartmentCreated = true;
+    }
+
+    MOZ_ASSERT(!zone->wasGCStarted());
+    MOZ_ASSERT(!zone->needsIncrementalBarrier());
+  }
+
+  verifyPreData = nullptr;
+  MOZ_ASSERT(incrementalState == State::Mark);
+  incrementalState = State::NotActive;
+
+  if (!compartmentCreated && IsIncrementalGCUnsafe(rt) == GCAbortReason::None) {
+    CheckEdgeTracer cetrc(rt);
+
+    /* Start after the roots. */
+    VerifyNode* node = NextNode(trc->root);
+    while ((char*)node < trc->edgeptr) {
+      // Cancel out snapshot edges that still exist (see
+      // CheckEdgeTracer::onChild); survivors must point at marked cells.
+      cetrc.node = node;
+      JS::TraceChildren(&cetrc, node->thing);
+
+      if (node->count <= MAX_VERIFIER_EDGES) {
+        for (uint32_t i = 0; i < node->count; i++) {
+          EdgeValue& edge = node->edges[i];
+          if (!IsMarkedOrAllocated(edge)) {
+            char msgbuf[1024];
+            SprintfLiteral(
+                msgbuf,
+                "[barrier verifier] Unmarked edge: %s %p '%s' edge to %s %p",
+                JS::GCTraceKindToAscii(node->thing.kind()),
+                node->thing.asCell(), edge.label,
+                JS::GCTraceKindToAscii(edge.thing.kind()), edge.thing.asCell());
+            MOZ_ReportAssertionFailure(msgbuf, __FILE__, __LINE__);
+            MOZ_CRASH();
+          }
+        }
+      }
+
+      node = NextNode(node);
+    }
+  }
+
+  // Tear down the marker state that startVerifyPreBarriers set up.
+  marker().reset();
+  marker().stop();
+  resetDelayedMarking();
+
+  js_delete(trc);
+}
+
+/*** Barrier Verifier Scheduling ***/
+
+// Toggle the pre-barrier verifier: finish the phase in progress if there is
+// one, otherwise begin a new one.
+void gc::GCRuntime::verifyPreBarriers() {
+  if (!verifyPreData) {
+    startVerifyPreBarriers();
+    return;
+  }
+  endVerifyPreBarriers();
+}
+
+// Entry point for barrier verification. Only the pre-barrier verifier is
+// handled here; any other verifier type is ignored.
+void gc::VerifyBarriers(JSRuntime* rt, VerifierType type) {
+  if (type != PreBarrierVerifier) {
+    return;
+  }
+  rt->gc.verifyPreBarriers();
+}
+
+// Possibly start or cycle a pre-barrier verification phase, respecting the
+// zeal frequency: all but one out of zealFrequency calls are no-ops unless
+// |always| is true. Does nothing when the VerifierPre zeal mode is off or GC
+// is suppressed on the main context.
+void gc::GCRuntime::maybeVerifyPreBarriers(bool always) {
+  if (!hasZealMode(ZealMode::VerifierPre)) {
+    return;
+  }
+
+  if (rt->mainContextFromOwnThread()->suppressGC) {
+    return;
+  }
+
+  if (verifyPreData) {
+    // A phase is running: wait until the frequency counter fills up (or
+    // |always| forces it), then end it and immediately start a new one.
+    if (++verifyPreData->count < zealFrequency && !always) {
+      return;
+    }
+
+    endVerifyPreBarriers();
+  }
+
+  startVerifyPreBarriers();
+}
+
+// Free-function wrapper over GCRuntime::maybeVerifyPreBarriers for callers
+// that only hold a JSContext.
+void js::gc::MaybeVerifyBarriers(JSContext* cx, bool always) {
+  cx->runtime()->gc.maybeVerifyPreBarriers(always);
+}
+
+// Release any in-progress verification state without running the end-of-phase
+// checks (used during teardown).
+void js::gc::GCRuntime::finishVerifier() {
+  if (verifyPreData) {
+    js_delete(verifyPreData.ref());
+    verifyPreData = nullptr;
+  }
+}
+
+// Hash policy for tables keyed on TenuredChunk pointers. Chunks are always
+// chunk-aligned, so the low ChunkShift bits are zero and are stripped before
+// hashing.
+struct GCChunkHasher {
+  typedef gc::TenuredChunk* Lookup;
+
+  /*
+   * Strip zeros for better distribution after multiplying by the golden
+   * ratio.
+   */
+  static HashNumber hash(gc::TenuredChunk* chunk) {
+    MOZ_ASSERT(!(uintptr_t(chunk) & gc::ChunkMask));
+    return HashNumber(uintptr_t(chunk) >> gc::ChunkShift);
+  }
+
+  static bool match(gc::TenuredChunk* k, gc::TenuredChunk* l) {
+    MOZ_ASSERT(!(uintptr_t(k) & gc::ChunkMask));
+    MOZ_ASSERT(!(uintptr_t(l) & gc::ChunkMask));
+    return k == l;
+  }
+};
+
+class js::gc::MarkingValidator {
+ public:
+ explicit MarkingValidator(GCRuntime* gc);
+ void nonIncrementalMark(AutoGCSession& session);
+ void validate();
+
+ private:
+ GCRuntime* gc;
+ bool initialized;
+
+ using BitmapMap = HashMap<TenuredChunk*, UniquePtr<MarkBitmap>, GCChunkHasher,
+ SystemAllocPolicy>;
+ BitmapMap map;
+};
+
+js::gc::MarkingValidator::MarkingValidator(GCRuntime* gc)
+ : gc(gc), initialized(false) {}
+
+void js::gc::MarkingValidator::nonIncrementalMark(AutoGCSession& session) {
+ /*
+ * Perform a non-incremental mark for all collecting zones and record
+ * the results for later comparison.
+ *
+ * Currently this does not validate gray marking.
+ */
+
+ JSRuntime* runtime = gc->rt;
+ GCMarker* gcmarker = &gc->marker();
+
+ MOZ_ASSERT(!gcmarker->isWeakMarking());
+
+ /* Wait for off-thread parsing which can allocate. */
+ WaitForAllHelperThreads();
+
+ gc->waitBackgroundAllocEnd();
+ gc->waitBackgroundSweepEnd();
+
+ /* Save existing mark bits. */
+ {
+ AutoLockGC lock(gc);
+ for (auto chunk = gc->allNonEmptyChunks(lock); !chunk.done();
+ chunk.next()) {
+ MarkBitmap* bitmap = &chunk->markBits;
+ auto entry = MakeUnique<MarkBitmap>();
+ if (!entry) {
+ return;
+ }
+
+ memcpy((void*)entry->bitmap, (void*)bitmap->bitmap,
+ sizeof(bitmap->bitmap));
+
+ if (!map.putNew(chunk, std::move(entry))) {
+ return;
+ }
+ }
+ }
+
+ /*
+ * Temporarily clear the weakmaps' mark flags for the compartments we are
+ * collecting.
+ */
+
+ WeakMapColors markedWeakMaps;
+
+ /*
+ * For saving, smush all of the keys into one big table and split them back
+ * up into per-zone tables when restoring.
+ */
+ gc::EphemeronEdgeTable savedEphemeronEdges(
+ SystemAllocPolicy(), runtime->randomHashCodeScrambler());
+ if (!savedEphemeronEdges.init()) {
+ return;
+ }
+
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ if (!WeakMapBase::saveZoneMarkedWeakMaps(zone, markedWeakMaps)) {
+ return;
+ }
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ for (auto r = zone->gcEphemeronEdges().mutableAll(); !r.empty();
+ r.popFront()) {
+ MOZ_ASSERT(r.front().key->asTenured().zone() == zone);
+ if (!savedEphemeronEdges.put(r.front().key, std::move(r.front().value))) {
+ oomUnsafe.crash("saving weak keys table for validator");
+ }
+ }
+
+ if (!zone->gcEphemeronEdges().clear()) {
+ oomUnsafe.crash("clearing weak keys table for validator");
+ }
+ }
+
+ /* Save and restore test mark queue state. */
+# ifdef DEBUG
+ size_t savedQueuePos = gc->queuePos;
+ mozilla::Maybe<MarkColor> savedQueueColor = gc->queueMarkColor;
+# endif
+
+ /*
+ * After this point, the function should run to completion, so we shouldn't
+ * do anything fallible.
+ */
+ initialized = true;
+
+ /* Re-do all the marking, but non-incrementally. */
+ js::gc::State state = gc->incrementalState;
+ gc->incrementalState = State::MarkRoots;
+
+ {
+ gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::PREPARE);
+
+ {
+ gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::UNMARK);
+
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ WeakMapBase::unmarkZone(zone);
+ }
+
+ MOZ_ASSERT(gcmarker->isDrained());
+ gcmarker->reset();
+
+ ClearMarkBits<GCZonesIter>(gc);
+ }
+ }
+
+ {
+ gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::MARK);
+
+ gc->traceRuntimeForMajorGC(gcmarker->tracer(), session);
+
+ gc->incrementalState = State::Mark;
+ gc->drainMarkStack();
+ }
+
+ gc->incrementalState = State::Sweep;
+ {
+ gcstats::AutoPhase ap1(gc->stats(), gcstats::PhaseKind::SWEEP);
+ gcstats::AutoPhase ap2(gc->stats(), gcstats::PhaseKind::MARK);
+
+ gc->markAllWeakReferences();
+
+ /* Update zone state for gray marking. */
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ zone->changeGCState(zone->initialMarkingState(), Zone::MarkBlackAndGray);
+ }
+
+ AutoSetMarkColor setColorGray(*gcmarker, MarkColor::Gray);
+
+ gc->markAllGrayReferences(gcstats::PhaseKind::MARK_GRAY);
+ gc->markAllWeakReferences();
+
+ /* Restore zone state. */
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ zone->changeGCState(Zone::MarkBlackAndGray, zone->initialMarkingState());
+ }
+ MOZ_ASSERT(gc->marker().isDrained());
+ }
+
+ /* Take a copy of the non-incremental mark state and restore the original. */
+ {
+ AutoLockGC lock(gc);
+ for (auto chunk = gc->allNonEmptyChunks(lock); !chunk.done();
+ chunk.next()) {
+ MarkBitmap* bitmap = &chunk->markBits;
+ auto ptr = map.lookup(chunk);
+ MOZ_RELEASE_ASSERT(ptr, "Chunk not found in map");
+ MarkBitmap* entry = ptr->value().get();
+ for (size_t i = 0; i < MarkBitmap::WordCount; i++) {
+ uintptr_t v = entry->bitmap[i];
+ entry->bitmap[i] = uintptr_t(bitmap->bitmap[i]);
+ bitmap->bitmap[i] = v;
+ }
+ }
+ }
+
+ for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
+ WeakMapBase::unmarkZone(zone);
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!zone->gcEphemeronEdges().clear()) {
+ oomUnsafe.crash("clearing weak keys table for validator");
+ }
+ }
+
+ WeakMapBase::restoreMarkedWeakMaps(markedWeakMaps);
+
+ for (auto r = savedEphemeronEdges.mutableAll(); !r.empty(); r.popFront()) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ Zone* zone = r.front().key->asTenured().zone();
+ if (!zone->gcEphemeronEdges().put(r.front().key,
+ std::move(r.front().value))) {
+ oomUnsafe.crash("restoring weak keys table for validator");
+ }
+ }
+
+# ifdef DEBUG
+ gc->queuePos = savedQueuePos;
+ gc->queueMarkColor = savedQueueColor;
+# endif
+
+ gc->incrementalState = state;
+}
+
+void js::gc::MarkingValidator::validate() {
+ /*
+ * Validates the incremental marking for a single compartment by comparing
+ * the mark bits to those previously recorded for a non-incremental mark.
+ */
+
+ if (!initialized) {
+ return;
+ }
+
+ MOZ_ASSERT(!gc->marker().isWeakMarking());
+
+ gc->waitBackgroundSweepEnd();
+
+ bool ok = true;
+ AutoLockGC lock(gc->rt);
+ for (auto chunk = gc->allNonEmptyChunks(lock); !chunk.done(); chunk.next()) {
+ BitmapMap::Ptr ptr = map.lookup(chunk);
+ if (!ptr) {
+ continue; /* Allocated after we did the non-incremental mark. */
+ }
+
+ MarkBitmap* bitmap = ptr->value().get();
+ MarkBitmap* incBitmap = &chunk->markBits;
+
+ for (size_t i = 0; i < ArenasPerChunk; i++) {
+ if (chunk->decommittedPages[chunk->pageIndex(i)]) {
+ continue;
+ }
+ Arena* arena = &chunk->arenas[i];
+ if (!arena->allocated()) {
+ continue;
+ }
+ if (!arena->zone->isGCSweeping()) {
+ continue;
+ }
+
+ AllocKind kind = arena->getAllocKind();
+ uintptr_t thing = arena->thingsStart();
+ uintptr_t end = arena->thingsEnd();
+ while (thing < end) {
+ auto* cell = reinterpret_cast<TenuredCell*>(thing);
+
+ /*
+ * If a non-incremental GC wouldn't have collected a cell, then
+ * an incremental GC won't collect it.
+ */
+ if (bitmap->isMarkedAny(cell)) {
+ if (!incBitmap->isMarkedAny(cell)) {
+ ok = false;
+ const char* color = TenuredCell::getColor(bitmap, cell).name();
+ fprintf(stderr,
+ "%p: cell not marked, but would be marked %s by "
+ "non-incremental marking\n",
+ cell, color);
+# ifdef DEBUG
+ cell->dump();
+ fprintf(stderr, "\n");
+# endif
+ }
+ }
+
+        /*
+         * If the cycle collector isn't allowed to collect an object
+         * after a non-incremental GC has run, then it isn't allowed to
+         * collect it after an incremental GC.
+         */
+ if (!bitmap->isMarkedGray(cell)) {
+ if (incBitmap->isMarkedGray(cell)) {
+ ok = false;
+ const char* color = TenuredCell::getColor(bitmap, cell).name();
+ fprintf(stderr,
+ "%p: cell marked gray, but would be marked %s by "
+ "non-incremental marking\n",
+ cell, color);
+# ifdef DEBUG
+ cell->dump();
+ fprintf(stderr, "\n");
+# endif
+ }
+ }
+
+ thing += Arena::thingSize(kind);
+ }
+ }
+ }
+
+ MOZ_RELEASE_ASSERT(ok, "Incremental marking verification failed");
+}
+
+void GCRuntime::computeNonIncrementalMarkingForValidation(
+ AutoGCSession& session) {
+ MOZ_ASSERT(!markingValidator);
+ if (isIncremental && hasZealMode(ZealMode::IncrementalMarkingValidator)) {
+ markingValidator = js_new<MarkingValidator>(this);
+ }
+ if (markingValidator) {
+ markingValidator->nonIncrementalMark(session);
+ }
+}
+
+void GCRuntime::validateIncrementalMarking() {
+ if (markingValidator) {
+ markingValidator->validate();
+ }
+}
+
+void GCRuntime::finishMarkingValidation() {
+ js_delete(markingValidator.ref());
+ markingValidator = nullptr;
+}
+
+#endif /* JS_GC_ZEAL */
+
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+
+class HeapCheckTracerBase : public JS::CallbackTracer {
+ public:
+ explicit HeapCheckTracerBase(JSRuntime* rt, JS::TraceOptions options);
+ bool traceHeap(AutoTraceSession& session);
+ virtual void checkCell(Cell* cell, const char* name) = 0;
+
+ protected:
+ void dumpCellInfo(Cell* cell);
+ void dumpCellPath(const char* name);
+
+ Cell* parentCell() {
+ return parentIndex == -1 ? nullptr : stack[parentIndex].thing.asCell();
+ }
+
+ size_t failures;
+
+ private:
+ void onChild(JS::GCCellPtr thing, const char* name) override;
+
+ struct WorkItem {
+ WorkItem(JS::GCCellPtr thing, const char* name, int parentIndex)
+ : thing(thing),
+ name(name),
+ parentIndex(parentIndex),
+ processed(false) {}
+
+ JS::GCCellPtr thing;
+ const char* name;
+ int parentIndex;
+ bool processed;
+ };
+
+ JSRuntime* rt;
+ bool oom;
+ HashSet<Cell*, DefaultHasher<Cell*>, SystemAllocPolicy> visited;
+ Vector<WorkItem, 0, SystemAllocPolicy> stack;
+ int parentIndex;
+};
+
+HeapCheckTracerBase::HeapCheckTracerBase(JSRuntime* rt,
+ JS::TraceOptions options)
+ : CallbackTracer(rt, JS::TracerKind::Callback, options),
+ failures(0),
+ rt(rt),
+ oom(false),
+ parentIndex(-1) {}
+
+void HeapCheckTracerBase::onChild(JS::GCCellPtr thing, const char* name) {
+ Cell* cell = thing.asCell();
+ checkCell(cell, name);
+
+ if (visited.lookup(cell)) {
+ return;
+ }
+
+ if (!visited.put(cell)) {
+ oom = true;
+ return;
+ }
+
+ // Don't trace into GC things owned by another runtime.
+ if (cell->runtimeFromAnyThread() != rt) {
+ return;
+ }
+
+ WorkItem item(thing, name, parentIndex);
+ if (!stack.append(item)) {
+ oom = true;
+ }
+}
+
+bool HeapCheckTracerBase::traceHeap(AutoTraceSession& session) {
+ // The analysis thinks that traceRuntime might GC by calling a GC callback.
+ JS::AutoSuppressGCAnalysis nogc;
+ if (!rt->isBeingDestroyed()) {
+ rt->gc.traceRuntime(this, session);
+ }
+
+ while (!stack.empty() && !oom) {
+ WorkItem item = stack.back();
+ if (item.processed) {
+ stack.popBack();
+ } else {
+ parentIndex = stack.length() - 1;
+ stack.back().processed = true;
+ TraceChildren(this, item.thing);
+ }
+ }
+
+ return !oom;
+}
+
+void HeapCheckTracerBase::dumpCellInfo(Cell* cell) {
+ auto kind = cell->getTraceKind();
+ JSObject* obj =
+ kind == JS::TraceKind::Object ? static_cast<JSObject*>(cell) : nullptr;
+
+ fprintf(stderr, "%s %s", cell->color().name(), GCTraceKindToAscii(kind));
+ if (obj) {
+ fprintf(stderr, " %s", obj->getClass()->name);
+ }
+ fprintf(stderr, " %p", cell);
+ if (obj) {
+ fprintf(stderr, " (compartment %p)", obj->compartment());
+ }
+}
+
+void HeapCheckTracerBase::dumpCellPath(const char* name) {
+ for (int index = parentIndex; index != -1; index = stack[index].parentIndex) {
+ const WorkItem& parent = stack[index];
+ Cell* cell = parent.thing.asCell();
+ fprintf(stderr, " from ");
+ dumpCellInfo(cell);
+ fprintf(stderr, " %s edge\n", name);
+ name = parent.name;
+ }
+ fprintf(stderr, " from root %s\n", name);
+}
+
+class CheckHeapTracer final : public HeapCheckTracerBase {
+ public:
+ enum GCType { Moving, NonMoving };
+
+ explicit CheckHeapTracer(JSRuntime* rt, GCType type);
+ void check(AutoTraceSession& session);
+
+ private:
+ void checkCell(Cell* cell, const char* name) override;
+ GCType gcType;
+};
+
+CheckHeapTracer::CheckHeapTracer(JSRuntime* rt, GCType type)
+ : HeapCheckTracerBase(rt, JS::WeakMapTraceAction::TraceKeysAndValues),
+ gcType(type) {}
+
+inline static bool IsValidGCThingPointer(Cell* cell) {
+ return (uintptr_t(cell) & CellAlignMask) == 0;
+}
+
+void CheckHeapTracer::checkCell(Cell* cell, const char* name) {
+  // Check pointer validity; for a moving GC also check the cell survived
+ if (!IsValidGCThingPointer(cell) ||
+ ((gcType == GCType::Moving) && !IsGCThingValidAfterMovingGC(cell)) ||
+ ((gcType == GCType::NonMoving) && cell->isForwarded())) {
+ failures++;
+ fprintf(stderr, "Bad pointer %p\n", cell);
+ dumpCellPath(name);
+ }
+}
+
+void CheckHeapTracer::check(AutoTraceSession& session) {
+ if (!traceHeap(session)) {
+ return;
+ }
+
+ if (failures) {
+ fprintf(stderr, "Heap check: %zu failure(s)\n", failures);
+ }
+ MOZ_RELEASE_ASSERT(failures == 0);
+}
+
+void js::gc::CheckHeapAfterGC(JSRuntime* rt) {
+ AutoTraceSession session(rt);
+ CheckHeapTracer::GCType gcType;
+
+ if (rt->gc.nursery().isEmpty()) {
+ gcType = CheckHeapTracer::GCType::Moving;
+ } else {
+ gcType = CheckHeapTracer::GCType::NonMoving;
+ }
+
+ CheckHeapTracer tracer(rt, gcType);
+ tracer.check(session);
+}
+
+class CheckGrayMarkingTracer final : public HeapCheckTracerBase {
+ public:
+ explicit CheckGrayMarkingTracer(JSRuntime* rt);
+ bool check(AutoTraceSession& session);
+
+ private:
+ void checkCell(Cell* cell, const char* name) override;
+};
+
+CheckGrayMarkingTracer::CheckGrayMarkingTracer(JSRuntime* rt)
+ : HeapCheckTracerBase(rt, JS::TraceOptions(JS::WeakMapTraceAction::Skip,
+ JS::WeakEdgeTraceAction::Skip)) {
+ // Weak gray->black edges are allowed.
+}
+
+void CheckGrayMarkingTracer::checkCell(Cell* cell, const char* name) {
+ Cell* parent = parentCell();
+ if (!parent) {
+ return;
+ }
+
+ if (parent->isMarkedBlack() && cell->isMarkedGray()) {
+ failures++;
+
+ fprintf(stderr, "Found black to gray edge to ");
+ dumpCellInfo(cell);
+ fprintf(stderr, "\n");
+ dumpCellPath(name);
+
+# ifdef DEBUG
+ if (parent->is<JSObject>()) {
+ fprintf(stderr, "\nSource: ");
+ DumpObject(parent->as<JSObject>(), stderr);
+ }
+ if (cell->is<JSObject>()) {
+ fprintf(stderr, "\nTarget: ");
+ DumpObject(cell->as<JSObject>(), stderr);
+ }
+# endif
+ }
+}
+
+bool CheckGrayMarkingTracer::check(AutoTraceSession& session) {
+ if (!traceHeap(session)) {
+ return true; // Ignore failure.
+ }
+
+ return failures == 0;
+}
+
+JS_PUBLIC_API bool js::CheckGrayMarkingState(JSRuntime* rt) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
+ MOZ_ASSERT(!rt->gc.isIncrementalGCInProgress());
+ if (!rt->gc.areGrayBitsValid()) {
+ return true;
+ }
+
+ gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::TRACE_HEAP);
+ AutoTraceSession session(rt);
+ CheckGrayMarkingTracer tracer(rt);
+
+ return tracer.check(session);
+}
+
+static JSObject* MaybeGetDelegate(Cell* cell) {
+ if (!cell->is<JSObject>()) {
+ return nullptr;
+ }
+
+ JSObject* object = cell->as<JSObject>();
+ return js::UncheckedUnwrapWithoutExpose(object);
+}
+
+bool js::gc::CheckWeakMapEntryMarking(const WeakMapBase* map, Cell* key,
+ Cell* value) {
+ bool ok = true;
+
+ Zone* zone = map->zone();
+ MOZ_ASSERT(CurrentThreadCanAccessZone(zone));
+ MOZ_ASSERT(zone->isGCMarking());
+
+ JSObject* object = map->memberOf;
+ MOZ_ASSERT_IF(object, object->zone() == zone);
+
+ // Debugger weak maps can have keys in different zones.
+ Zone* keyZone = key->zoneFromAnyThread();
+ MOZ_ASSERT_IF(!map->allowKeysInOtherZones(),
+ keyZone == zone || keyZone->isAtomsZone());
+
+ Zone* valueZone = value->zoneFromAnyThread();
+ MOZ_ASSERT(valueZone == zone || valueZone->isAtomsZone());
+
+ if (object && object->color() != map->mapColor) {
+ fprintf(stderr, "WeakMap object is marked differently to the map\n");
+ fprintf(stderr, "(map %p is %s, object %p is %s)\n", map,
+ map->mapColor.name(), object, object->color().name());
+ ok = false;
+ }
+
+ // Values belonging to other runtimes or in uncollected zones are treated as
+ // black.
+ JSRuntime* mapRuntime = zone->runtimeFromAnyThread();
+ auto effectiveColor = [=](Cell* cell, Zone* cellZone) -> CellColor {
+ if (cell->runtimeFromAnyThread() != mapRuntime) {
+ return CellColor::Black;
+ }
+ if (cellZone->isGCMarkingOrSweeping()) {
+ return cell->color();
+ }
+ return CellColor::Black;
+ };
+
+ CellColor valueColor = effectiveColor(value, valueZone);
+ CellColor keyColor = effectiveColor(key, keyZone);
+
+ if (valueColor < std::min(map->mapColor, keyColor)) {
+ fprintf(stderr, "WeakMap value is less marked than map and key\n");
+ fprintf(stderr, "(map %p is %s, key %p is %s, value %p is %s)\n", map,
+ map->mapColor.name(), key, keyColor.name(), value,
+ valueColor.name());
+# ifdef DEBUG
+ fprintf(stderr, "Key:\n");
+ key->dump();
+ if (auto delegate = MaybeGetDelegate(key); delegate) {
+ fprintf(stderr, "Delegate:\n");
+ delegate->dump();
+ }
+ fprintf(stderr, "Value:\n");
+ value->dump();
+# endif
+
+ ok = false;
+ }
+
+ JSObject* delegate = MaybeGetDelegate(key);
+ if (!delegate) {
+ return ok;
+ }
+
+ CellColor delegateColor = effectiveColor(delegate, delegate->zone());
+ if (keyColor < std::min(map->mapColor, delegateColor)) {
+ fprintf(stderr, "WeakMap key is less marked than map or delegate\n");
+ fprintf(stderr, "(map %p is %s, delegate %p is %s, key %p is %s)\n", map,
+ map->mapColor.name(), delegate, delegateColor.name(), key,
+ keyColor.name());
+ ok = false;
+ }
+
+ return ok;
+}
+
+#endif // defined(JS_GC_ZEAL) || defined(DEBUG)
+
+#ifdef DEBUG
+// Return whether an arbitrary pointer is within a cell with the given
+// traceKind. Only for assertions.
+bool GCRuntime::isPointerWithinTenuredCell(void* ptr, JS::TraceKind traceKind) {
+ AutoLockGC lock(this);
+ for (auto chunk = allNonEmptyChunks(lock); !chunk.done(); chunk.next()) {
+ MOZ_ASSERT(!chunk->isNurseryChunk());
+ if (ptr >= &chunk->arenas[0] && ptr < &chunk->arenas[ArenasPerChunk]) {
+ auto* arena = reinterpret_cast<Arena*>(uintptr_t(ptr) & ~ArenaMask);
+ if (!arena->allocated()) {
+ return false;
+ }
+
+ return MapAllocToTraceKind(arena->getAllocKind()) == traceKind;
+ }
+ }
+
+ return false;
+}
+#endif // DEBUG
diff --git a/js/src/gc/WeakMap-inl.h b/js/src/gc/WeakMap-inl.h
new file mode 100644
index 0000000000..1b5626211f
--- /dev/null
+++ b/js/src/gc/WeakMap-inl.h
@@ -0,0 +1,413 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_WeakMap_inl_h
+#define gc_WeakMap_inl_h
+
+#include "gc/WeakMap.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+
+#include <algorithm>
+#include <type_traits>
+
+#include "gc/GCLock.h"
+#include "gc/Marking.h"
+#include "gc/Zone.h"
+#include "js/TraceKind.h"
+#include "vm/JSContext.h"
+
+#include "gc/StableCellHasher-inl.h"
+
+namespace js {
+namespace gc {
+
+namespace detail {
+
+// Return the effective cell color given the current marking state.
+// This must be kept in sync with ShouldMark in Marking.cpp.
+template <typename T>
+static CellColor GetEffectiveColor(GCMarker* marker, const T& item) {
+ Cell* cell = ToMarkable(item);
+ if (!cell->isTenured()) {
+ return CellColor::Black;
+ }
+ const TenuredCell& t = cell->asTenured();
+ if (!t.zoneFromAnyThread()->shouldMarkInZone(marker->markColor())) {
+ return CellColor::Black;
+ }
+ MOZ_ASSERT(t.runtimeFromAnyThread() == marker->runtime());
+ return t.color();
+}
+
+// Only objects have delegates, so default to returning nullptr. Note that some
+// compilation units will only ever use the object version.
+static MOZ_MAYBE_UNUSED JSObject* GetDelegateInternal(gc::Cell* key) {
+ return nullptr;
+}
+
+static MOZ_MAYBE_UNUSED JSObject* GetDelegateInternal(JSObject* key) {
+ JSObject* delegate = UncheckedUnwrapWithoutExpose(key);
+ return (key == delegate) ? nullptr : delegate;
+}
+
+// Use a helper function to do overload resolution to handle cases like
+// Heap<ObjectSubclass*>: find everything that is convertible to JSObject* (and
+// avoid calling barriers).
+template <typename T>
+static inline JSObject* GetDelegate(const T& key) {
+ return GetDelegateInternal(key);
+}
+
+template <>
+inline JSObject* GetDelegate(gc::Cell* const&) = delete;
+
+} /* namespace detail */
+} /* namespace gc */
+
+// Weakmap entry -> value edges are only visible if the map is traced, which
+// only happens if the map zone is being collected. If the map and the value
+// were in different zones, then we could have a case where the map zone is not
+// collecting but the value zone is, and incorrectly free a value that is
+// reachable solely through weakmaps.
+template <class K, class V>
+void WeakMap<K, V>::assertMapIsSameZoneWithValue(const V& v) {
+#ifdef DEBUG
+ gc::Cell* cell = gc::ToMarkable(v);
+ if (cell) {
+ Zone* cellZone = cell->zoneFromAnyThread();
+ MOZ_ASSERT(zone() == cellZone || cellZone->isAtomsZone());
+ }
+#endif
+}
+
+template <class K, class V>
+WeakMap<K, V>::WeakMap(JSContext* cx, JSObject* memOf)
+ : WeakMap(cx->zone(), memOf) {}
+
+template <class K, class V>
+WeakMap<K, V>::WeakMap(JS::Zone* zone, JSObject* memOf)
+ : Base(zone), WeakMapBase(memOf, zone) {
+ using ElemType = typename K::ElementType;
+ using NonPtrType = std::remove_pointer_t<ElemType>;
+
+ // The object's TraceKind needs to be added to CC graph if this object is
+ // used as a WeakMap key, otherwise the key is considered to be pointed from
+ // somewhere unknown, and results in leaking the subgraph which contains the
+ // key. See the comments in NoteWeakMapsTracer::trace for more details.
+ static_assert(JS::IsCCTraceKind(NonPtrType::TraceKind),
+ "Object's TraceKind should be added to CC graph.");
+
+ zone->gcWeakMapList().insertFront(this);
+ if (zone->gcState() > Zone::Prepare) {
+ mapColor = CellColor::Black;
+ }
+}
+
+// If the entry is live, ensure its key and value are marked. Also make sure the
+// key is at least as marked as min(map, delegate), so it cannot get discarded
+// and then recreated by rewrapping the delegate.
+//
+// Optionally adds edges to the ephemeron edges table for any keys (or
+// delegates) where future changes to their mark color would require marking the
+// value (or the key).
+template <class K, class V>
+bool WeakMap<K, V>::markEntry(GCMarker* marker, K& key, V& value,
+ bool populateWeakKeysTable) {
+#ifdef DEBUG
+ MOZ_ASSERT(mapColor);
+ if (marker->isParallelMarking()) {
+ marker->runtime()->gc.assertCurrentThreadHasLockedGC();
+ }
+#endif
+
+ bool marked = false;
+ CellColor markColor = marker->markColor();
+ CellColor keyColor = gc::detail::GetEffectiveColor(marker, key);
+ JSObject* delegate = gc::detail::GetDelegate(key);
+ JSTracer* trc = marker->tracer();
+
+ if (delegate) {
+ CellColor delegateColor = gc::detail::GetEffectiveColor(marker, delegate);
+ // The key needs to stay alive while both the delegate and map are live.
+ CellColor proxyPreserveColor = std::min(delegateColor, mapColor);
+ if (keyColor < proxyPreserveColor) {
+ MOZ_ASSERT(markColor >= proxyPreserveColor);
+ if (markColor == proxyPreserveColor) {
+ TraceWeakMapKeyEdge(trc, zone(), &key,
+ "proxy-preserved WeakMap entry key");
+ MOZ_ASSERT(key->color() >= proxyPreserveColor);
+ marked = true;
+ keyColor = proxyPreserveColor;
+ }
+ }
+ }
+
+ gc::Cell* cellValue = gc::ToMarkable(value);
+ if (keyColor) {
+ if (cellValue) {
+ CellColor targetColor = std::min(mapColor, keyColor);
+ CellColor valueColor = gc::detail::GetEffectiveColor(marker, cellValue);
+ if (valueColor < targetColor) {
+ MOZ_ASSERT(markColor >= targetColor);
+ if (markColor == targetColor) {
+ TraceEdge(trc, &value, "WeakMap entry value");
+ MOZ_ASSERT(cellValue->color() >= targetColor);
+ marked = true;
+ }
+ }
+ }
+ }
+
+ if (populateWeakKeysTable) {
+ // Note that delegateColor >= keyColor because marking a key marks its
+ // delegate, so we only need to check whether keyColor < mapColor to tell
+ // this.
+
+ if (keyColor < mapColor) {
+ MOZ_ASSERT(trc->weakMapAction() == JS::WeakMapTraceAction::Expand);
+ // The final color of the key is not yet known. Record this weakmap and
+ // the lookup key in the list of weak keys. If the key has a delegate,
+ // then the lookup key is the delegate (because marking the key will end
+ // up marking the delegate and thereby mark the entry.)
+ gc::TenuredCell* tenuredValue = nullptr;
+ if (cellValue && cellValue->isTenured()) {
+ tenuredValue = &cellValue->asTenured();
+ }
+
+ if (!this->addImplicitEdges(key, delegate, tenuredValue)) {
+ marker->abortLinearWeakMarking();
+ }
+ }
+ }
+
+ return marked;
+}
+
+template <class K, class V>
+void WeakMap<K, V>::trace(JSTracer* trc) {
+ MOZ_ASSERT(isInList());
+
+ TraceNullableEdge(trc, &memberOf, "WeakMap owner");
+
+ if (trc->isMarkingTracer()) {
+ MOZ_ASSERT(trc->weakMapAction() == JS::WeakMapTraceAction::Expand);
+ auto marker = GCMarker::fromTracer(trc);
+
+ // Lock if we are marking in parallel to synchronize updates to:
+ // - the weak map's color
+ // - the ephemeron edges table
+ mozilla::Maybe<AutoLockGC> lock;
+ if (marker->isParallelMarking()) {
+ lock.emplace(marker->runtime());
+ }
+
+ // Don't downgrade the map color from black to gray. This can happen when a
+ // barrier pushes the map object onto the black mark stack when it's
+ // already present on the gray mark stack, which is marked later.
+ if (mapColor < marker->markColor()) {
+ mapColor = marker->markColor();
+ (void)markEntries(marker);
+ }
+ return;
+ }
+
+ if (trc->weakMapAction() == JS::WeakMapTraceAction::Skip) {
+ return;
+ }
+
+ // Trace keys only if weakMapAction() says to.
+ if (trc->weakMapAction() == JS::WeakMapTraceAction::TraceKeysAndValues) {
+ for (Enum e(*this); !e.empty(); e.popFront()) {
+ TraceWeakMapKeyEdge(trc, zone(), &e.front().mutableKey(),
+ "WeakMap entry key");
+ }
+ }
+
+ // Always trace all values (unless weakMapAction() is Skip).
+ for (Range r = Base::all(); !r.empty(); r.popFront()) {
+ TraceEdge(trc, &r.front().value(), "WeakMap entry value");
+ }
+}
+
+bool WeakMapBase::addImplicitEdges(gc::Cell* key, gc::Cell* delegate,
+ gc::TenuredCell* value) {
+ if (delegate) {
+ auto& edgeTable = delegate->zone()->gcEphemeronEdges(delegate);
+ auto* p = edgeTable.get(delegate);
+
+ gc::EphemeronEdgeVector newVector;
+ gc::EphemeronEdgeVector& edges = p ? p->value : newVector;
+
+ // Add a <weakmap, delegate> -> key edge: the key must be preserved for
+ // future lookups until either the weakmap or the delegate dies.
+ gc::EphemeronEdge keyEdge{mapColor, key};
+ if (!edges.append(keyEdge)) {
+ return false;
+ }
+
+ if (value) {
+ gc::EphemeronEdge valueEdge{mapColor, value};
+ if (!edges.append(valueEdge)) {
+ return false;
+ }
+ }
+
+ if (!p) {
+ return edgeTable.put(delegate, std::move(newVector));
+ }
+
+ return true;
+ }
+
+ // No delegate. Insert just the key -> value edge.
+
+ if (!value) {
+ return true;
+ }
+
+ auto& edgeTable = key->zone()->gcEphemeronEdges(key);
+ auto* p = edgeTable.get(key);
+ gc::EphemeronEdge valueEdge{mapColor, value};
+ if (p) {
+ return p->value.append(valueEdge);
+ } else {
+ gc::EphemeronEdgeVector edges;
+ MOZ_ALWAYS_TRUE(edges.append(valueEdge));
+ return edgeTable.put(key, std::move(edges));
+ }
+}
+
+template <class K, class V>
+bool WeakMap<K, V>::markEntries(GCMarker* marker) {
+ // This method is called whenever the map's mark color changes. Mark values
+ // (and keys with delegates) as required for the new color and populate the
+ // ephemeron edges if we're in incremental marking mode.
+
+#ifdef DEBUG
+ if (marker->isParallelMarking()) {
+ marker->runtime()->gc.assertCurrentThreadHasLockedGC();
+ }
+#endif
+
+ MOZ_ASSERT(mapColor);
+ bool markedAny = false;
+
+ // If we don't populate the weak keys table now then we do it when we enter
+ // weak marking mode.
+ bool populateWeakKeysTable =
+ marker->incrementalWeakMapMarkingEnabled || marker->isWeakMarking();
+
+ for (Enum e(*this); !e.empty(); e.popFront()) {
+ if (markEntry(marker, e.front().mutableKey(), e.front().value(),
+ populateWeakKeysTable)) {
+ markedAny = true;
+ }
+ }
+
+ return markedAny;
+}
+
+template <class K, class V>
+void WeakMap<K, V>::traceWeakEdges(JSTracer* trc) {
+ // Remove all entries whose keys remain unmarked.
+ for (Enum e(*this); !e.empty(); e.popFront()) {
+ if (!TraceWeakEdge(trc, &e.front().mutableKey(), "WeakMap key")) {
+ e.removeFront();
+ }
+ }
+
+#if DEBUG
+ // Once we've swept, all remaining edges should stay within the known-live
+ // part of the graph.
+ assertEntriesNotAboutToBeFinalized();
+#endif
+}
+
+// memberOf can be nullptr, which means that the map is not part of a JSObject.
+template <class K, class V>
+void WeakMap<K, V>::traceMappings(WeakMapTracer* tracer) {
+ for (Range r = Base::all(); !r.empty(); r.popFront()) {
+ gc::Cell* key = gc::ToMarkable(r.front().key());
+ gc::Cell* value = gc::ToMarkable(r.front().value());
+ if (key && value) {
+ tracer->trace(memberOf, JS::GCCellPtr(r.front().key().get()),
+ JS::GCCellPtr(r.front().value().get()));
+ }
+ }
+}
+
+template <class K, class V>
+bool WeakMap<K, V>::findSweepGroupEdges() {
+ // For weakmap keys with delegates in a different zone, add a zone edge to
+ // ensure that the delegate zone finishes marking before the key zone.
+ JS::AutoSuppressGCAnalysis nogc;
+ for (Range r = all(); !r.empty(); r.popFront()) {
+ const K& key = r.front().key();
+
+ // If the key type doesn't have delegates, then this will always return
+ // nullptr and the optimizer can remove the entire body of this function.
+ JSObject* delegate = gc::detail::GetDelegate(key);
+ if (!delegate) {
+ continue;
+ }
+
+ // Marking a WeakMap key's delegate will mark the key, so process the
+ // delegate zone no later than the key zone.
+ Zone* delegateZone = delegate->zone();
+ Zone* keyZone = key->zone();
+ if (delegateZone != keyZone && delegateZone->isGCMarking() &&
+ keyZone->isGCMarking()) {
+ if (!delegateZone->addSweepGroupEdgeTo(keyZone)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+template <class K, class V>
+size_t WeakMap<K, V>::sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return mallocSizeOf(this) + shallowSizeOfExcludingThis(mallocSizeOf);
+}
+
+#if DEBUG
+template <class K, class V>
+void WeakMap<K, V>::assertEntriesNotAboutToBeFinalized() {
+ for (Range r = Base::all(); !r.empty(); r.popFront()) {
+ UnbarrieredKey k = r.front().key();
+ MOZ_ASSERT(!gc::IsAboutToBeFinalizedUnbarriered(k));
+ JSObject* delegate = gc::detail::GetDelegate(k);
+ if (delegate) {
+ MOZ_ASSERT(!gc::IsAboutToBeFinalizedUnbarriered(delegate),
+ "weakmap marking depends on a key tracing its delegate");
+ }
+ MOZ_ASSERT(!gc::IsAboutToBeFinalized(r.front().value()));
+ }
+}
+#endif
+
+#ifdef JS_GC_ZEAL
+template <class K, class V>
+bool WeakMap<K, V>::checkMarking() const {
+ bool ok = true;
+ for (Range r = Base::all(); !r.empty(); r.popFront()) {
+ gc::Cell* key = gc::ToMarkable(r.front().key());
+ gc::Cell* value = gc::ToMarkable(r.front().value());
+ if (key && value) {
+ if (!gc::CheckWeakMapEntryMarking(this, key, value)) {
+ ok = false;
+ }
+ }
+ }
+ return ok;
+}
+#endif
+
+} /* namespace js */
+
+#endif /* gc_WeakMap_inl_h */
diff --git a/js/src/gc/WeakMap.cpp b/js/src/gc/WeakMap.cpp
new file mode 100644
index 0000000000..82684c7c9c
--- /dev/null
+++ b/js/src/gc/WeakMap.cpp
@@ -0,0 +1,175 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/WeakMap-inl.h"
+
+#include <string.h>
+
+#include "gc/PublicIterators.h"
+#include "vm/JSObject.h"
+
+#include "gc/Marking-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+WeakMapBase::WeakMapBase(JSObject* memOf, Zone* zone)
+ : memberOf(memOf), zone_(zone), mapColor(CellColor::White) {
+ MOZ_ASSERT_IF(memberOf, memberOf->compartment()->zone() == zone);
+}
+
+WeakMapBase::~WeakMapBase() {
+ MOZ_ASSERT(CurrentThreadIsGCFinalizing() ||
+ CurrentThreadCanAccessZone(zone_));
+}
+
+void WeakMapBase::unmarkZone(JS::Zone* zone) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!zone->gcEphemeronEdges().clear()) {
+ oomUnsafe.crash("clearing ephemeron edges table");
+ }
+ MOZ_ASSERT(zone->gcNurseryEphemeronEdges().count() == 0);
+
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ m->mapColor = CellColor::White;
+ }
+}
+
+void Zone::traceWeakMaps(JSTracer* trc) {
+ MOZ_ASSERT(trc->weakMapAction() != JS::WeakMapTraceAction::Skip);
+ for (WeakMapBase* m : gcWeakMapList()) {
+ m->trace(trc);
+ TraceNullableEdge(trc, &m->memberOf, "memberOf");
+ }
+}
+
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+bool WeakMapBase::checkMarkingForZone(JS::Zone* zone) {
+ // This is called at the end of marking.
+ MOZ_ASSERT(zone->isGCMarking());
+
+ bool ok = true;
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ if (m->mapColor && !m->checkMarking()) {
+ ok = false;
+ }
+ }
+
+ return ok;
+}
+#endif
+
+bool WeakMapBase::markZoneIteratively(JS::Zone* zone, GCMarker* marker) {
+ bool markedAny = false;
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ if (m->mapColor && m->markEntries(marker)) {
+ markedAny = true;
+ }
+ }
+ return markedAny;
+}
+
+bool WeakMapBase::findSweepGroupEdgesForZone(JS::Zone* zone) {
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ if (!m->findSweepGroupEdges()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void Zone::sweepWeakMaps(JSTracer* trc) {
+ for (WeakMapBase* m = gcWeakMapList().getFirst(); m;) {
+ WeakMapBase* next = m->getNext();
+ if (m->mapColor) {
+ m->traceWeakEdges(trc);
+ } else {
+ m->clearAndCompact();
+ m->removeFrom(gcWeakMapList());
+ }
+ m = next;
+ }
+
+#ifdef DEBUG
+ for (WeakMapBase* m : gcWeakMapList()) {
+ MOZ_ASSERT(m->isInList() && m->mapColor);
+ }
+#endif
+}
+
+void WeakMapBase::traceAllMappings(WeakMapTracer* tracer) {
+ JSRuntime* rt = tracer->runtime;
+ for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ // The WeakMapTracer callback is not allowed to GC.
+ JS::AutoSuppressGCAnalysis nogc;
+ m->traceMappings(tracer);
+ }
+ }
+}
+
+bool WeakMapBase::saveZoneMarkedWeakMaps(JS::Zone* zone,
+ WeakMapColors& markedWeakMaps) {
+ for (WeakMapBase* m : zone->gcWeakMapList()) {
+ if (m->mapColor && !markedWeakMaps.put(m, m->mapColor)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void WeakMapBase::restoreMarkedWeakMaps(WeakMapColors& markedWeakMaps) {
+ for (WeakMapColors::Range r = markedWeakMaps.all(); !r.empty();
+ r.popFront()) {
+ WeakMapBase* map = r.front().key();
+ MOZ_ASSERT(map->zone()->isGCMarking());
+ MOZ_ASSERT(map->mapColor == CellColor::White);
+ map->mapColor = r.front().value();
+ }
+}
+
+ObjectWeakMap::ObjectWeakMap(JSContext* cx) : map(cx, nullptr) {}
+
+JSObject* ObjectWeakMap::lookup(const JSObject* obj) {
+ if (ObjectValueWeakMap::Ptr p = map.lookup(const_cast<JSObject*>(obj))) {
+ return &p->value().toObject();
+ }
+ return nullptr;
+}
+
+bool ObjectWeakMap::add(JSContext* cx, JSObject* obj, JSObject* target) {
+ MOZ_ASSERT(obj && target);
+
+ Value targetVal(ObjectValue(*target));
+ if (!map.putNew(obj, targetVal)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+void ObjectWeakMap::remove(JSObject* key) {
+ MOZ_ASSERT(key);
+ map.remove(key);
+}
+
+void ObjectWeakMap::clear() { map.clear(); }
+
+void ObjectWeakMap::trace(JSTracer* trc) { map.trace(trc); }
+
+size_t ObjectWeakMap::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return map.shallowSizeOfExcludingThis(mallocSizeOf);
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void ObjectWeakMap::checkAfterMovingGC() {
+ for (ObjectValueWeakMap::Range r = map.all(); !r.empty(); r.popFront()) {
+ CheckGCThingAfterMovingGC(r.front().key().get());
+ CheckGCThingAfterMovingGC(&r.front().value().toObject());
+ }
+}
+#endif // JSGC_HASH_TABLE_CHECKS
diff --git a/js/src/gc/WeakMap.h b/js/src/gc/WeakMap.h
new file mode 100644
index 0000000000..4693bd838c
--- /dev/null
+++ b/js/src/gc/WeakMap.h
@@ -0,0 +1,354 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_WeakMap_h
+#define gc_WeakMap_h
+
+#include "mozilla/LinkedList.h"
+
+#include "gc/Barrier.h"
+#include "gc/Tracer.h"
+#include "gc/ZoneAllocator.h"
+#include "js/HashTable.h"
+#include "js/HeapAPI.h"
+
+namespace JS {
+class Zone;
+}
+
+namespace js {
+
+class GCMarker;
+class WeakMapBase;
+struct WeakMapTracer;
+
+extern void DumpWeakMapLog(JSRuntime* rt);
+
+namespace gc {
+
+struct WeakMarkable;
+
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+// Check whether a weak map entry is marked correctly.
+bool CheckWeakMapEntryMarking(const WeakMapBase* map, Cell* key, Cell* value);
+#endif
+
+} // namespace gc
+
+// A subclass template of js::HashMap whose keys and values may be
+// garbage-collected. When a key is collected, the table entry disappears,
+// dropping its reference to the value.
+//
+// More precisely:
+//
+// A WeakMap entry is live if and only if both the WeakMap and the entry's
+// key are live. An entry holds a strong reference to its value.
+//
+// You must call this table's 'trace' method when its owning object is reached
+// by the garbage collection tracer. Once a table is known to be live, the
+// implementation takes care of the special weak marking (ie, marking through
+// the implicit edges stored in the map) and of removing (sweeping) table
+// entries when collection is complete.
+
+// WeakMaps are marked with an incremental linear-time algorithm that handles
+// all orderings of map and key marking. The basic algorithm is:
+//
+// At first while marking, do nothing special when marking WeakMap keys (there
+// is no straightforward way to know whether a particular object is being used
+// as a key in some weakmap.) When a WeakMap is marked, scan through it to mark
+// all entries with live keys, and collect all unmarked keys into a "weak keys"
+// table.
+//
+// At some point, everything reachable has been marked. At this point, enter
+// "weak marking mode". In this mode, whenever any object is marked, look it up
+// in the weak keys table to see if it is the key for any WeakMap entry and if
+// so, mark the value. When entering weak marking mode, scan the weak key table
+// to find all keys that have been marked since we added them to the table, and
+// mark those entries.
+//
+// In addition, we want weakmap marking to work incrementally. So WeakMap
+// mutations are barriered to keep the weak keys table up to date: entries are
+// removed if their key is removed from the table, etc.
+//
+// You can break down various ways that WeakMap values get marked based on the
+// order that the map and key are marked. All of these assume the map and key
+// get marked at some point:
+//
+// key marked, then map marked:
+// - value was marked with map in `markEntries()`
+// map marked, key already in map, key marked before weak marking mode:
+// - key added to gcEphemeronEdges when map marked in `markEntries()`
+// - value marked during `enterWeakMarkingMode`
+// map marked, key already in map, key marked after weak marking mode:
+// - when key is marked, gcEphemeronEdges[key] triggers marking of value in
+// `markImplicitEdges()`
+// map marked, key inserted into map, key marked:
+// - value was live when inserted and must get marked at some point
+//
+
+using WeakMapColors = HashMap<WeakMapBase*, js::gc::CellColor,
+ DefaultHasher<WeakMapBase*>, SystemAllocPolicy>;
+
+// Common base class for all WeakMap specializations, used for calling
+// subclasses' GC-related methods.
+class WeakMapBase : public mozilla::LinkedListElement<WeakMapBase> {
+ friend class js::GCMarker;
+
+ public:
+ using CellColor = js::gc::CellColor;
+
+ WeakMapBase(JSObject* memOf, JS::Zone* zone);
+ virtual ~WeakMapBase();
+
+ JS::Zone* zone() const { return zone_; }
+
+ // Garbage collector entry points.
+
+ // Unmark all weak maps in a zone.
+ static void unmarkZone(JS::Zone* zone);
+
+ // Check all weak maps in a zone that have been marked as live in this garbage
+ // collection, and mark the values of all entries that have become strong
+ // references to them. Return true if we marked any new values, indicating
+ // that we need to make another pass. In other words, mark my marked maps'
+ // marked members' mid-collection.
+ static bool markZoneIteratively(JS::Zone* zone, GCMarker* marker);
+
+ // Add zone edges for weakmaps with key delegates in a different zone.
+ [[nodiscard]] static bool findSweepGroupEdgesForZone(JS::Zone* zone);
+
+ // Sweep the marked weak maps in a zone, updating moved keys.
+ static void sweepZoneAfterMinorGC(JS::Zone* zone);
+
+ // Trace all weak map bindings. Used by the cycle collector.
+ static void traceAllMappings(WeakMapTracer* tracer);
+
+ // Save information about which weak maps are marked for a zone.
+ static bool saveZoneMarkedWeakMaps(JS::Zone* zone,
+ WeakMapColors& markedWeakMaps);
+
+ // Restore information about which weak maps are marked for many zones.
+ static void restoreMarkedWeakMaps(WeakMapColors& markedWeakMaps);
+
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+ static bool checkMarkingForZone(JS::Zone* zone);
+#endif
+
+ protected:
+ // Instance member functions called by the above. Instantiations of WeakMap
+ // override these with definitions appropriate for their Key and Value types.
+ virtual void trace(JSTracer* tracer) = 0;
+ virtual bool findSweepGroupEdges() = 0;
+ virtual void traceWeakEdges(JSTracer* trc) = 0;
+ virtual void traceMappings(WeakMapTracer* tracer) = 0;
+ virtual void clearAndCompact() = 0;
+
+ // We have a key that, if it or its delegate is marked, may lead to a WeakMap
+ // value getting marked. Insert it or its delegate (if any) into the
+ // appropriate zone's gcEphemeronEdges or gcNurseryEphemeronEdges.
+ inline bool addImplicitEdges(gc::Cell* key, gc::Cell* delegate,
+ gc::TenuredCell* value);
+
+ virtual bool markEntries(GCMarker* marker) = 0;
+
+#ifdef JS_GC_ZEAL
+ virtual bool checkMarking() const = 0;
+ virtual bool allowKeysInOtherZones() const { return false; }
+ friend bool gc::CheckWeakMapEntryMarking(const WeakMapBase*, gc::Cell*,
+ gc::Cell*);
+#endif
+
+ // Object that this weak map is part of, if any.
+ HeapPtr<JSObject*> memberOf;
+
+ // Zone containing this weak map.
+ JS::Zone* zone_;
+
+ // Whether this object has been marked during garbage collection and which
+ // color it was marked.
+ gc::CellColor mapColor;
+
+ friend class JS::Zone;
+};
+
+template <class Key, class Value>
+class WeakMap
+ : private HashMap<Key, Value, StableCellHasher<Key>, ZoneAllocPolicy>,
+ public WeakMapBase {
+ public:
+ using Base = HashMap<Key, Value, StableCellHasher<Key>, ZoneAllocPolicy>;
+
+ using Lookup = typename Base::Lookup;
+ using Entry = typename Base::Entry;
+ using Range = typename Base::Range;
+ using Ptr = typename Base::Ptr;
+ using AddPtr = typename Base::AddPtr;
+
+ struct Enum : public Base::Enum {
+ explicit Enum(WeakMap& map) : Base::Enum(static_cast<Base&>(map)) {}
+ };
+
+ using Base::all;
+ using Base::clear;
+ using Base::count;
+ using Base::empty;
+ using Base::has;
+ using Base::shallowSizeOfExcludingThis;
+
+ // Resolve ambiguity with LinkedListElement<>::remove.
+ using Base::remove;
+
+ using UnbarrieredKey = typename RemoveBarrier<Key>::Type;
+
+ explicit WeakMap(JSContext* cx, JSObject* memOf = nullptr);
+ explicit WeakMap(JS::Zone* zone, JSObject* memOf = nullptr);
+
+ // Add a read barrier to prevent an incorrectly gray value from escaping the
+ // weak map. See the UnmarkGrayTracer::onChild comment in gc/Marking.cpp.
+ Ptr lookup(const Lookup& l) const {
+ Ptr p = Base::lookup(l);
+ if (p) {
+ exposeGCThingToActiveJS(p->value());
+ }
+ return p;
+ }
+
+ Ptr lookupUnbarriered(const Lookup& l) const { return Base::lookup(l); }
+
+ AddPtr lookupForAdd(const Lookup& l) {
+ AddPtr p = Base::lookupForAdd(l);
+ if (p) {
+ exposeGCThingToActiveJS(p->value());
+ }
+ return p;
+ }
+
+ template <typename KeyInput, typename ValueInput>
+ [[nodiscard]] bool add(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+ MOZ_ASSERT(k);
+ return Base::add(p, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ }
+
+ template <typename KeyInput, typename ValueInput>
+ [[nodiscard]] bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+ MOZ_ASSERT(k);
+ return Base::relookupOrAdd(p, std::forward<KeyInput>(k),
+ std::forward<ValueInput>(v));
+ }
+
+ template <typename KeyInput, typename ValueInput>
+ [[nodiscard]] bool put(KeyInput&& k, ValueInput&& v) {
+ MOZ_ASSERT(k);
+ return Base::put(std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ }
+
+ template <typename KeyInput, typename ValueInput>
+ [[nodiscard]] bool putNew(KeyInput&& k, ValueInput&& v) {
+ MOZ_ASSERT(k);
+ return Base::putNew(std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+ }
+
+  template <typename KeyInput, typename ValueInput>
+  void putNewInfallible(KeyInput&& k, ValueInput&& v) {
+    MOZ_ASSERT(k);
+    Base::putNewInfallible(std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+  }
+
+#ifdef DEBUG
+ template <typename KeyInput, typename ValueInput>
+ bool hasEntry(KeyInput&& key, ValueInput&& value) {
+ Ptr p = Base::lookup(std::forward<KeyInput>(key));
+ return p && p->value() == value;
+ }
+#endif
+
+ bool markEntry(GCMarker* marker, Key& key, Value& value,
+ bool populateWeakKeysTable);
+
+ void trace(JSTracer* trc) override;
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf);
+
+ protected:
+ inline void assertMapIsSameZoneWithValue(const Value& v);
+
+ bool markEntries(GCMarker* marker) override;
+
+ protected:
+ // Find sweep group edges for delegates, if the key type has delegates. (If
+ // not, the optimizer should make this a nop.)
+ bool findSweepGroupEdges() override;
+
+ /**
+ * If a wrapper is used as a key in a weakmap, the garbage collector should
+ * keep that object around longer than it otherwise would. We want to avoid
+ * collecting the wrapper (and removing the weakmap entry) as long as the
+ * wrapped object is alive (because the object can be rewrapped and looked up
+ * again). As long as the wrapper is used as a weakmap key, it will not be
+ * collected (and remain in the weakmap) until the wrapped object is
+ * collected.
+ */
+ private:
+ void exposeGCThingToActiveJS(const JS::Value& v) const {
+ JS::ExposeValueToActiveJS(v);
+ }
+ void exposeGCThingToActiveJS(JSObject* obj) const {
+ JS::ExposeObjectToActiveJS(obj);
+ }
+
+ void traceWeakEdges(JSTracer* trc) override;
+
+ void clearAndCompact() override {
+ Base::clear();
+ Base::compact();
+ }
+
+ // memberOf can be nullptr, which means that the map is not part of a
+ // JSObject.
+ void traceMappings(WeakMapTracer* tracer) override;
+
+ protected:
+#ifdef DEBUG
+  void assertEntriesNotAboutToBeFinalized();
+#endif
+
+#ifdef JS_GC_ZEAL
+ bool checkMarking() const override;
+#endif
+};
+
+using ObjectValueWeakMap = WeakMap<HeapPtr<JSObject*>, HeapPtr<Value>>;
+
+// Generic weak map for mapping objects to other objects.
+class ObjectWeakMap {
+ ObjectValueWeakMap map;
+
+ public:
+ explicit ObjectWeakMap(JSContext* cx);
+
+ JS::Zone* zone() const { return map.zone(); }
+
+ JSObject* lookup(const JSObject* obj);
+ bool add(JSContext* cx, JSObject* obj, JSObject* target);
+ void remove(JSObject* key);
+ void clear();
+
+ void trace(JSTracer* trc);
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ ObjectValueWeakMap& valueMap() { return map; }
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkAfterMovingGC();
+#endif
+};
+
+} /* namespace js */
+
+#endif /* gc_WeakMap_h */
diff --git a/js/src/gc/WeakMapPtr.cpp b/js/src/gc/WeakMapPtr.cpp
new file mode 100644
index 0000000000..f58fc52372
--- /dev/null
+++ b/js/src/gc/WeakMapPtr.cpp
@@ -0,0 +1,114 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/WeakMapPtr.h"
+
+#include "gc/WeakMap-inl.h"
+
+//
+// Machinery for the externally-linkable JS::WeakMapPtr, which wraps js::WeakMap
+// for a few public data types.
+//
+
+using namespace js;
+
+namespace WeakMapDetails {
+
+template <typename T>
+struct DataType {};
+
+template <>
+struct DataType<JSObject*> {
+ using BarrieredType = HeapPtr<JSObject*>;
+ using HasherType = StableCellHasher<BarrieredType>;
+ static JSObject* NullValue() { return nullptr; }
+};
+
+template <>
+struct DataType<JS::Value> {
+ using BarrieredType = HeapPtr<Value>;
+ static JS::Value NullValue() { return JS::UndefinedValue(); }
+};
+
+template <typename K, typename V>
+struct Utils {
+ using KeyType = typename DataType<K>::BarrieredType;
+ using ValueType = typename DataType<V>::BarrieredType;
+ typedef WeakMap<KeyType, ValueType> Type;
+ using PtrType = Type*;
+ static PtrType cast(void* ptr) { return static_cast<PtrType>(ptr); }
+};
+
+} // namespace WeakMapDetails
+
+template <typename K, typename V>
+void JS::WeakMapPtr<K, V>::destroy() {
+ MOZ_ASSERT(initialized());
+ js_delete(WeakMapDetails::Utils<K, V>::cast(ptr));
+ ptr = nullptr;
+}
+
+template <typename K, typename V>
+bool JS::WeakMapPtr<K, V>::init(JSContext* cx) {
+ MOZ_ASSERT(!initialized());
+ typename WeakMapDetails::Utils<K, V>::PtrType map =
+ cx->new_<typename WeakMapDetails::Utils<K, V>::Type>(cx);
+ if (!map) {
+ return false;
+ }
+ ptr = map;
+ return true;
+}
+
+template <typename K, typename V>
+void JS::WeakMapPtr<K, V>::trace(JSTracer* trc) {
+ MOZ_ASSERT(initialized());
+ return WeakMapDetails::Utils<K, V>::cast(ptr)->trace(trc);
+}
+
+template <typename K, typename V>
+V JS::WeakMapPtr<K, V>::lookup(const K& key) {
+ MOZ_ASSERT(initialized());
+ typename WeakMapDetails::Utils<K, V>::Type::Ptr result =
+ WeakMapDetails::Utils<K, V>::cast(ptr)->lookup(key);
+ if (!result) {
+ return WeakMapDetails::DataType<V>::NullValue();
+ }
+ return result->value();
+}
+
+template <typename K, typename V>
+bool JS::WeakMapPtr<K, V>::put(JSContext* cx, const K& key, const V& value) {
+ MOZ_ASSERT(initialized());
+ return WeakMapDetails::Utils<K, V>::cast(ptr)->put(key, value);
+}
+
+template <typename K, typename V>
+V JS::WeakMapPtr<K, V>::removeValue(const K& key) {
+ typedef typename WeakMapDetails::Utils<K, V>::Type Map;
+ using Ptr = typename Map::Ptr;
+
+ MOZ_ASSERT(initialized());
+
+ Map* map = WeakMapDetails::Utils<K, V>::cast(ptr);
+ if (Ptr result = map->lookup(key)) {
+ V value = result->value();
+ map->remove(result);
+ return value;
+ }
+ return WeakMapDetails::DataType<V>::NullValue();
+}
+
+//
+// Supported specializations of JS::WeakMap:
+//
+
+template class JS_PUBLIC_API JS::WeakMapPtr<JSObject*, JSObject*>;
+
+#ifdef DEBUG
+// Nobody's using this at the moment, but we want to make sure it compiles.
+template class JS_PUBLIC_API JS::WeakMapPtr<JSObject*, JS::Value>;
+#endif
diff --git a/js/src/gc/Zone.cpp b/js/src/gc/Zone.cpp
new file mode 100644
index 0000000000..982b12f17f
--- /dev/null
+++ b/js/src/gc/Zone.cpp
@@ -0,0 +1,979 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Zone.h"
+#include "js/shadow/Zone.h" // JS::shadow::Zone
+
+#include "mozilla/Sprintf.h"
+#include "mozilla/TimeStamp.h"
+
+#include <type_traits>
+
+#include "gc/FinalizationObservers.h"
+#include "gc/GCContext.h"
+#include "gc/PublicIterators.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Invalidation.h"
+#include "jit/JitZone.h"
+#include "vm/Runtime.h"
+#include "vm/Time.h"
+
+#include "debugger/DebugAPI-inl.h"
+#include "gc/GC-inl.h"
+#include "gc/Marking-inl.h"
+#include "gc/Nursery-inl.h"
+#include "gc/WeakMap-inl.h"
+#include "vm/JSScript-inl.h"
+#include "vm/Realm-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+Zone* const Zone::NotOnList = reinterpret_cast<Zone*>(1);
+
+ZoneAllocator::ZoneAllocator(JSRuntime* rt, Kind kind)
+ : JS::shadow::Zone(rt, rt->gc.marker().tracer(), kind),
+ jitHeapThreshold(jit::MaxCodeBytesPerProcess * 0.8) {}
+
+ZoneAllocator::~ZoneAllocator() {
+#ifdef DEBUG
+ mallocTracker.checkEmptyOnDestroy();
+ MOZ_ASSERT(gcHeapSize.bytes() == 0);
+ MOZ_ASSERT(mallocHeapSize.bytes() == 0);
+ MOZ_ASSERT(jitHeapSize.bytes() == 0);
+#endif
+}
+
+void ZoneAllocator::fixupAfterMovingGC() {
+#ifdef DEBUG
+ mallocTracker.fixupAfterMovingGC();
+#endif
+}
+
+void js::ZoneAllocator::updateSchedulingStateOnGCStart() {
+ gcHeapSize.updateOnGCStart();
+ mallocHeapSize.updateOnGCStart();
+ jitHeapSize.updateOnGCStart();
+ perZoneGCTime = mozilla::TimeDuration();
+}
+
+void js::ZoneAllocator::updateGCStartThresholds(GCRuntime& gc) {
+ bool isAtomsZone = JS::Zone::from(this)->isAtomsZone();
+ gcHeapThreshold.updateStartThreshold(
+ gcHeapSize.retainedBytes(), smoothedAllocationRate.ref(),
+ smoothedCollectionRate.ref(), gc.tunables, gc.schedulingState,
+ isAtomsZone);
+
+ mallocHeapThreshold.updateStartThreshold(mallocHeapSize.retainedBytes(),
+ gc.tunables, gc.schedulingState);
+}
+
+void js::ZoneAllocator::setGCSliceThresholds(GCRuntime& gc,
+ bool waitingOnBGTask) {
+ gcHeapThreshold.setSliceThreshold(this, gcHeapSize, gc.tunables,
+ waitingOnBGTask);
+ mallocHeapThreshold.setSliceThreshold(this, mallocHeapSize, gc.tunables,
+ waitingOnBGTask);
+ jitHeapThreshold.setSliceThreshold(this, jitHeapSize, gc.tunables,
+ waitingOnBGTask);
+}
+
+void js::ZoneAllocator::clearGCSliceThresholds() {
+ gcHeapThreshold.clearSliceThreshold();
+ mallocHeapThreshold.clearSliceThreshold();
+ jitHeapThreshold.clearSliceThreshold();
+}
+
+bool ZoneAllocator::addSharedMemory(void* mem, size_t nbytes, MemoryUse use) {
+ // nbytes can be zero here for SharedArrayBuffers.
+
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+
+ auto ptr = sharedMemoryUseCounts.lookupForAdd(mem);
+ MOZ_ASSERT_IF(ptr, ptr->value().use == use);
+
+ if (!ptr && !sharedMemoryUseCounts.add(ptr, mem, gc::SharedMemoryUse(use))) {
+ return false;
+ }
+
+ ptr->value().count++;
+
+ // Allocations can grow, so add any increase over the previous size and record
+ // the new size.
+ if (nbytes > ptr->value().nbytes) {
+ mallocHeapSize.addBytes(nbytes - ptr->value().nbytes);
+ ptr->value().nbytes = nbytes;
+ }
+
+ maybeTriggerGCOnMalloc();
+
+ return true;
+}
+
+void ZoneAllocator::removeSharedMemory(void* mem, size_t nbytes,
+ MemoryUse use) {
+ // nbytes can be zero here for SharedArrayBuffers.
+
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+ MOZ_ASSERT(CurrentThreadIsGCFinalizing());
+
+ auto ptr = sharedMemoryUseCounts.lookup(mem);
+
+ MOZ_ASSERT(ptr);
+ MOZ_ASSERT(ptr->value().count != 0);
+ MOZ_ASSERT(ptr->value().use == use);
+ MOZ_ASSERT(ptr->value().nbytes >= nbytes);
+
+ ptr->value().count--;
+ if (ptr->value().count == 0) {
+ mallocHeapSize.removeBytes(ptr->value().nbytes, true);
+ sharedMemoryUseCounts.remove(ptr);
+ }
+}
+
+template <TrackingKind kind>
+void js::TrackedAllocPolicy<kind>::decMemory(size_t nbytes) {
+ bool updateRetainedSize = false;
+ if constexpr (kind == TrackingKind::Cell) {
+ // Only subtract freed cell memory from retained size for cell associations
+ // during sweeping.
+ JS::GCContext* gcx = TlsGCContext.get();
+ updateRetainedSize = gcx->isFinalizing();
+ }
+
+ zone_->decNonGCMemory(this, nbytes, MemoryUse::TrackedAllocPolicy,
+ updateRetainedSize);
+}
+
+namespace js {
+template class TrackedAllocPolicy<TrackingKind::Zone>;
+template class TrackedAllocPolicy<TrackingKind::Cell>;
+} // namespace js
+
+JS::Zone::Zone(JSRuntime* rt, Kind kind)
+ : ZoneAllocator(rt, kind),
+ arenas(this),
+ data(nullptr),
+ tenuredBigInts(0),
+ markedStrings(0),
+ finalizedStrings(0),
+ suppressAllocationMetadataBuilder(false),
+ allocNurseryObjects_(true),
+ allocNurseryStrings_(true),
+ allocNurseryBigInts_(true),
+ pretenuring(this),
+ compartments_(),
+ crossZoneStringWrappers_(this),
+ gcEphemeronEdges_(SystemAllocPolicy(), rt->randomHashCodeScrambler()),
+ gcNurseryEphemeronEdges_(SystemAllocPolicy(),
+ rt->randomHashCodeScrambler()),
+ shapeZone_(this),
+ gcScheduled_(false),
+ gcScheduledSaved_(false),
+ gcPreserveCode_(false),
+ keepPropMapTables_(false),
+ wasCollected_(false),
+ listNext_(NotOnList),
+ keptObjects(this) {
+ /* Ensure that there are no vtables to mess us up here. */
+ MOZ_ASSERT(reinterpret_cast<JS::shadow::Zone*>(this) ==
+ static_cast<JS::shadow::Zone*>(this));
+ MOZ_ASSERT_IF(isAtomsZone(), rt->gc.zones().empty());
+
+ updateGCStartThresholds(rt->gc);
+ rt->gc.nursery().setAllocFlagsForZone(this);
+}
+
+Zone::~Zone() {
+ MOZ_ASSERT_IF(regExps_.ref(), regExps().empty());
+
+ DebugAPI::deleteDebugScriptMap(debugScriptMap);
+ js_delete(finalizationObservers_.ref().release());
+
+ MOZ_ASSERT(gcWeakMapList().isEmpty());
+
+ JSRuntime* rt = runtimeFromAnyThread();
+ if (this == rt->gc.systemZone) {
+ MOZ_ASSERT(isSystemZone());
+ rt->gc.systemZone = nullptr;
+ }
+
+ js_delete(jitZone_.ref());
+}
+
+bool Zone::init() {
+ regExps_.ref() = make_unique<RegExpZone>(this);
+ return regExps_.ref() && gcEphemeronEdges().init() &&
+ gcNurseryEphemeronEdges().init();
+}
+
+void Zone::setNeedsIncrementalBarrier(bool needs) {
+ needsIncrementalBarrier_ = needs;
+}
+
+void Zone::changeGCState(GCState prev, GCState next) {
+ MOZ_ASSERT(RuntimeHeapIsBusy());
+ MOZ_ASSERT(gcState() == prev);
+
+ // This can be called when barriers have been temporarily disabled by
+ // AutoDisableBarriers. In that case, don't update needsIncrementalBarrier_
+ // and barriers will be re-enabled by ~AutoDisableBarriers() if necessary.
+ bool barriersDisabled = isGCMarking() && !needsIncrementalBarrier();
+
+ gcState_ = next;
+
+ // Update the barriers state when we transition between marking and
+ // non-marking states, unless barriers have been disabled.
+ if (!barriersDisabled) {
+ needsIncrementalBarrier_ = isGCMarking();
+ }
+}
+
+template <class Pred>
+static void EraseIf(js::gc::EphemeronEdgeVector& entries, Pred pred) {
+ auto* begin = entries.begin();
+ auto* const end = entries.end();
+
+ auto* newEnd = begin;
+ for (auto* p = begin; p != end; p++) {
+ if (!pred(*p)) {
+ *newEnd++ = *p;
+ }
+ }
+
+ size_t removed = end - newEnd;
+ entries.shrinkBy(removed);
+}
+
+static void SweepEphemeronEdgesWhileMinorSweeping(
+ js::gc::EphemeronEdgeVector& entries) {
+ EraseIf(entries, [](js::gc::EphemeronEdge& edge) -> bool {
+ return IsAboutToBeFinalizedDuringMinorSweep(&edge.target);
+ });
+}
+
+void Zone::sweepAfterMinorGC(JSTracer* trc) {
+ sweepEphemeronTablesAfterMinorGC();
+ crossZoneStringWrappers().sweepAfterMinorGC(trc);
+
+ for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
+ comp->sweepAfterMinorGC(trc);
+ }
+}
+
+void Zone::sweepEphemeronTablesAfterMinorGC() {
+ for (auto r = gcNurseryEphemeronEdges().mutableAll(); !r.empty();
+ r.popFront()) {
+ // Sweep gcNurseryEphemeronEdges to move live (forwarded) keys to
+ // gcEphemeronEdges, scanning through all the entries for such keys to
+ // update them.
+ //
+ // Forwarded and dead keys may also appear in their delegates' entries,
+ // so sweep those too (see below.)
+
+ // The tricky case is when the key has a delegate that was already
+ // tenured. Then it will be in its compartment's gcEphemeronEdges, but we
+ // still need to update the key (which will be in the entries
+ // associated with it.)
+ gc::Cell* key = r.front().key;
+ MOZ_ASSERT(!key->isTenured());
+ if (!Nursery::getForwardedPointer(&key)) {
+ // Dead nursery cell => discard.
+ continue;
+ }
+
+    // The key has been moved. The value is an array of <color,cell> pairs;
+    // update all cells in that array.
+ EphemeronEdgeVector& entries = r.front().value;
+ SweepEphemeronEdgesWhileMinorSweeping(entries);
+
+ // Live (moved) nursery cell. Append entries to gcEphemeronEdges.
+ EphemeronEdgeTable& tenuredEdges = gcEphemeronEdges();
+ auto* entry = tenuredEdges.get(key);
+ if (!entry) {
+ if (!tenuredEdges.put(key, gc::EphemeronEdgeVector())) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ oomUnsafe.crash("Failed to tenure weak keys entry");
+ }
+ entry = tenuredEdges.get(key);
+ }
+
+ if (!entry->value.appendAll(entries)) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ oomUnsafe.crash("Failed to tenure weak keys entry");
+ }
+
+ // If the key has a delegate, then it will map to a WeakKeyEntryVector
+ // containing the key that needs to be updated.
+
+ JSObject* delegate = gc::detail::GetDelegate(key->as<JSObject>());
+ if (!delegate) {
+ continue;
+ }
+ MOZ_ASSERT(delegate->isTenured());
+
+ // If delegate was formerly nursery-allocated, we will sweep its entries
+ // when we visit its gcNurseryEphemeronEdges (if we haven't already). Note
+ // that we don't know the nursery address of the delegate, since the
+ // location it was stored in has already been updated.
+ //
+ // Otherwise, it will be in gcEphemeronEdges and we sweep it here.
+ auto* p = delegate->zone()->gcEphemeronEdges().get(delegate);
+ if (p) {
+ SweepEphemeronEdgesWhileMinorSweeping(p->value);
+ }
+ }
+
+ if (!gcNurseryEphemeronEdges().clear()) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ oomUnsafe.crash("OOM while clearing gcNurseryEphemeronEdges.");
+ }
+}
+
+void Zone::traceWeakCCWEdges(JSTracer* trc) {
+ crossZoneStringWrappers().traceWeak(trc);
+ for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
+ comp->traceCrossCompartmentObjectWrapperEdges(trc);
+ }
+}
+
+/* static */
+void Zone::fixupAllCrossCompartmentWrappersAfterMovingGC(JSTracer* trc) {
+ MOZ_ASSERT(trc->runtime()->gc.isHeapCompacting());
+
+ for (ZonesIter zone(trc->runtime(), WithAtoms); !zone.done(); zone.next()) {
+ // Trace the wrapper map to update keys (wrapped values) in other
+ // compartments that may have been moved.
+ zone->crossZoneStringWrappers().traceWeak(trc);
+
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+ comp->fixupCrossCompartmentObjectWrappersAfterMovingGC(trc);
+ }
+ }
+}
+
+void Zone::dropStringWrappersOnGC() {
+ MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
+ crossZoneStringWrappers().clear();
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+
+void Zone::checkAllCrossCompartmentWrappersAfterMovingGC() {
+ checkStringWrappersAfterMovingGC();
+ for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
+ comp->checkObjectWrappersAfterMovingGC();
+ }
+}
+
+void Zone::checkStringWrappersAfterMovingGC() {
+ for (StringWrapperMap::Enum e(crossZoneStringWrappers()); !e.empty();
+ e.popFront()) {
+ // Assert that the postbarriers have worked and that nothing is left in the
+ // wrapper map that points into the nursery, and that the hash table entries
+ // are discoverable.
+ auto key = e.front().key();
+ CheckGCThingAfterMovingGC(key.get());
+
+ auto ptr = crossZoneStringWrappers().lookup(key);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &e.front());
+ }
+}
+#endif
+
+void Zone::discardJitCode(JS::GCContext* gcx, const DiscardOptions& options) {
+ if (!isPreservingCode()) {
+ forceDiscardJitCode(gcx, options);
+ }
+}
+
+void Zone::forceDiscardJitCode(JS::GCContext* gcx,
+ const DiscardOptions& options) {
+ if (!jitZone()) {
+ return;
+ }
+
+ if (options.discardJitScripts && options.discardBaselineCode) {
+ lastDiscardedCodeTime_ = mozilla::TimeStamp::Now();
+ }
+
+ if (options.discardBaselineCode || options.discardJitScripts) {
+#ifdef DEBUG
+ // Assert no JitScripts are marked as active.
+ for (auto iter = cellIter<BaseScript>(); !iter.done(); iter.next()) {
+ BaseScript* base = iter.unbarrieredGet();
+ if (jit::JitScript* jitScript = base->maybeJitScript()) {
+ MOZ_ASSERT(!jitScript->active());
+ }
+ }
+#endif
+
+ // Mark JitScripts on the stack as active.
+ jit::MarkActiveJitScripts(this);
+ }
+
+ // Invalidate all Ion code in this zone.
+ jit::InvalidateAll(gcx, this);
+
+ for (auto base = cellIterUnsafe<BaseScript>(); !base.done(); base.next()) {
+ jit::JitScript* jitScript = base->maybeJitScript();
+ if (!jitScript) {
+ continue;
+ }
+
+ JSScript* script = base->asJSScript();
+ jit::FinishInvalidation(gcx, script);
+
+ // Discard baseline script if it's not marked as active.
+ if (options.discardBaselineCode) {
+ if (jitScript->hasBaselineScript() && !jitScript->active()) {
+ jit::FinishDiscardBaselineScript(gcx, script);
+ }
+ }
+
+#ifdef JS_CACHEIR_SPEW
+ maybeUpdateWarmUpCount(script);
+#endif
+
+ // Warm-up counter for scripts are reset on GC. After discarding code we
+ // need to let it warm back up to get information such as which
+ // opcodes are setting array holes or accessing getter properties.
+ script->resetWarmUpCounterForGC();
+
+ // Try to release the script's JitScript. This should happen after
+ // releasing JIT code because we can't do this when the script still has
+ // JIT code.
+ if (options.discardJitScripts) {
+ script->maybeReleaseJitScript(gcx);
+ jitScript = script->maybeJitScript();
+ if (!jitScript) {
+ // Try to discard the ScriptCounts too.
+ if (!script->realm()->collectCoverageForDebug() &&
+ !gcx->runtime()->profilingScripts) {
+ script->destroyScriptCounts();
+ }
+ continue;
+ }
+ }
+
+ // If we did not release the JitScript, we need to purge optimized IC
+ // stubs because the optimizedStubSpace will be purged below.
+ if (options.discardBaselineCode) {
+ jitScript->purgeOptimizedStubs(script);
+ }
+
+ if (options.resetNurseryAllocSites || options.resetPretenuredAllocSites) {
+ jitScript->resetAllocSites(options.resetNurseryAllocSites,
+ options.resetPretenuredAllocSites);
+ }
+
+ // Finally, reset the active flag.
+ jitScript->resetActive();
+ }
+
+ // Also clear references to jit code from RegExpShared cells at this point.
+  // This avoids holding onto ExecutablePools.
+ for (auto regExp = cellIterUnsafe<RegExpShared>(); !regExp.done();
+ regExp.next()) {
+ regExp->discardJitCode();
+ }
+
+ /*
+ * When scripts contains pointers to nursery things, the store buffer
+ * can contain entries that point into the optimized stub space. Since
+ * this method can be called outside the context of a GC, this situation
+ * could result in us trying to mark invalid store buffer entries.
+ *
+ * Defer freeing any allocated blocks until after the next minor GC.
+ */
+ if (options.discardBaselineCode) {
+ jitZone()->optimizedStubSpace()->freeAllAfterMinorGC(this);
+ jitZone()->purgeIonCacheIRStubInfo();
+ }
+
+ // Generate a profile marker
+ if (gcx->runtime()->geckoProfiler().enabled()) {
+ char discardingJitScript = options.discardJitScripts ? 'Y' : 'N';
+ char discardingBaseline = options.discardBaselineCode ? 'Y' : 'N';
+ char discardingIon = 'Y';
+
+ char discardingRegExp = 'Y';
+ char discardingNurserySites = options.resetNurseryAllocSites ? 'Y' : 'N';
+ char discardingPretenuredSites =
+ options.resetPretenuredAllocSites ? 'Y' : 'N';
+
+ char buf[100];
+ SprintfLiteral(buf,
+ "JitScript:%c Baseline:%c Ion:%c "
+ "RegExp:%c NurserySites:%c PretenuredSites:%c",
+ discardingJitScript, discardingBaseline, discardingIon,
+ discardingRegExp, discardingNurserySites,
+ discardingPretenuredSites);
+ gcx->runtime()->geckoProfiler().markEvent("DiscardJit", buf);
+ }
+}
+
// Reset the nursery and/or pretenured allocation-site state recorded for
// scripts in this zone, and invalidate the Ion code of any script whose
// sites were actually reset. At least one of the two categories must be
// requested.
void JS::Zone::resetAllocSitesAndInvalidate(bool resetNurserySites,
                                            bool resetPretenuredSites) {
  MOZ_ASSERT(resetNurserySites || resetPretenuredSites);

  // Without a JIT zone there are no JitScripts, so nothing to reset.
  if (!jitZone()) {
    return;
  }

  JSContext* cx = runtime_->mainContextFromOwnThread();
  for (auto base = cellIterUnsafe<BaseScript>(); !base.done(); base.next()) {
    jit::JitScript* jitScript = base->maybeJitScript();
    if (!jitScript) {
      continue;
    }

    // resetAllocSites presumably reports whether any site changed; scripts
    // whose sites were untouched are not invalidated.
    if (!jitScript->resetAllocSites(resetNurserySites, resetPretenuredSites)) {
      continue;
    }

    JSScript* script = base->asJSScript();
    // Stop any in-flight off-thread Ion compilation before invalidating.
    CancelOffThreadIonCompile(script);

    if (!script->hasIonScript()) {
      continue;
    }

    jit::Invalidate(cx, script,
                    /* resetUses = */ true,
                    /* cancelOffThread = */ true);
  }
}
+
// Called (via beforeClearDelegate) when a wrapper's delegate is about to be
// cleared while this zone's incremental barrier is active: tell the marker
// to sever the wrapper -> delegate weak relationship so marking bookkeeping
// stays consistent. Must not run while a major GC is collecting the heap.
void JS::Zone::beforeClearDelegateInternal(JSObject* wrapper,
                                           JSObject* delegate) {
  MOZ_ASSERT(js::gc::detail::GetDelegate(wrapper) == delegate);
  MOZ_ASSERT(needsIncrementalBarrier());
  MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(this));
  runtimeFromMainThread()->gc.marker().severWeakDelegate(wrapper, delegate);
}
+
+void JS::Zone::afterAddDelegateInternal(JSObject* wrapper) {
+ MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(this));
+ JSObject* delegate = js::gc::detail::GetDelegate(wrapper);
+ if (delegate) {
+ runtimeFromMainThread()->gc.marker().restoreWeakDelegate(wrapper, delegate);
+ }
+}
+
#ifdef JSGC_HASH_TABLE_CHECKS
// Debug-only: verify that every cell key in the unique-id table refers to a
// valid (correctly relocated) GC thing after a moving GC.
void JS::Zone::checkUniqueIdTableAfterMovingGC() {
  for (auto r = uniqueIds().all(); !r.empty(); r.popFront()) {
    js::gc::CheckGCThingAfterMovingGC(r.front().key());
  }
}
#endif
+
+js::jit::JitZone* Zone::createJitZone(JSContext* cx) {
+ MOZ_ASSERT(!jitZone_);
+ MOZ_ASSERT(cx->runtime()->hasJitRuntime());
+
+ UniquePtr<jit::JitZone> jitZone(cx->new_<js::jit::JitZone>());
+ if (!jitZone) {
+ return nullptr;
+ }
+
+ jitZone_ = jitZone.release();
+ return jitZone_;
+}
+
+bool Zone::hasMarkedRealms() {
+ for (RealmsInZoneIter realm(this); !realm.done(); realm.next()) {
+ if (realm->marked()) {
+ return true;
+ }
+ }
+ return false;
+}
+
// Notify the Debugger API, for every realm in this zone that has a live
// global, that the realm participated in the current major GC. Must be
// called while the runtime heap is collecting; no GC may occur here.
void Zone::notifyObservingDebuggers() {
  AutoAssertNoGC nogc;
  MOZ_ASSERT(JS::RuntimeHeapIsCollecting(),
             "This method should be called during GC.");

  JSRuntime* rt = runtimeFromMainThread();

  for (RealmsInZoneIter realms(this); !realms.done(); realms.next()) {
    GlobalObject* global = realms->unsafeUnbarrieredMaybeGlobal();
    if (!global) {
      // A realm without a global has no debuggers to notify.
      continue;
    }

    DebugAPI::notifyParticipatesInGC(global, rt->gc.majorGCCount());
  }
}
+
// A zone is "on a list" whenever its link field holds anything other than
// the NotOnList sentinel (nullptr means it is the tail of a list).
bool Zone::isOnList() const { return listNext_ != NotOnList; }

// Return the next zone in the intrusive list this zone is linked into.
Zone* Zone::nextZone() const {
  MOZ_ASSERT(isOnList());
  return listNext_;
}
+
// Update zone-owned tables whose entries may contain pointers that were
// relocated by a compacting GC.
void Zone::fixupAfterMovingGC() {
  ZoneAllocator::fixupAfterMovingGC();
  shapeZone().fixupPropMapShapeTableAfterMovingGC();
}
+
+void Zone::purgeAtomCache() {
+ atomCache().clearAndCompact();
+
+ // Also purge the dtoa caches so that subsequent lookups populate atom
+ // cache too.
+ for (RealmsInZoneIter r(this); !r.done(); r.next()) {
+ r->dtoaCache.purge();
+ }
+}
+
// Memory reporting: accumulate the malloc'd sizes of this zone's data
// structures into the caller-supplied counters. Each out-parameter is added
// to (not overwritten), so callers can total across zones.
void Zone::addSizeOfIncludingThis(
    mozilla::MallocSizeOf mallocSizeOf, JS::CodeSizes* code, size_t* regexpZone,
    size_t* jitZone, size_t* baselineStubsOptimized, size_t* uniqueIdMap,
    size_t* initialPropMapTable, size_t* shapeTables, size_t* atomsMarkBitmaps,
    size_t* compartmentObjects, size_t* crossCompartmentWrappersTables,
    size_t* compartmentsPrivateData, size_t* scriptCountsMapArg) {
  *regexpZone += regExps().sizeOfIncludingThis(mallocSizeOf);
  if (jitZone_) {
    jitZone_->addSizeOfIncludingThis(mallocSizeOf, code, jitZone,
                                     baselineStubsOptimized);
  }
  *uniqueIdMap += uniqueIds().shallowSizeOfExcludingThis(mallocSizeOf);
  shapeZone().addSizeOfExcludingThis(mallocSizeOf, initialPropMapTable,
                                     shapeTables);
  *atomsMarkBitmaps += markedAtoms().sizeOfExcludingThis(mallocSizeOf);
  *crossCompartmentWrappersTables +=
      crossZoneStringWrappers().sizeOfExcludingThis(mallocSizeOf);

  // Per-compartment structures are measured by each compartment in turn.
  for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
    comp->addSizeOfIncludingThis(mallocSizeOf, compartmentObjects,
                                 crossCompartmentWrappersTables,
                                 compartmentsPrivateData);
  }

  if (scriptCountsMap) {
    // Count both the table itself and each ScriptCounts payload it owns.
    *scriptCountsMapArg +=
        scriptCountsMap->shallowSizeOfIncludingThis(mallocSizeOf);
    for (auto r = scriptCountsMap->all(); !r.empty(); r.popFront()) {
      *scriptCountsMapArg +=
          r.front().value()->sizeOfIncludingThis(mallocSizeOf);
    }
  }
}
+
// OOM hook used by the MallocProvider machinery: forward the failed request
// to the runtime's handler when called on a thread that can access the
// runtime; off-thread callers simply fail the allocation (return null).
void* ZoneAllocator::onOutOfMemory(js::AllocFunction allocFunc,
                                   arena_id_t arena, size_t nbytes,
                                   void* reallocPtr) {
  if (!js::CurrentThreadCanAccessRuntime(runtime_)) {
    return nullptr;
  }
  // The analysis sees that JSRuntime::onOutOfMemory could report an error,
  // which with a JSErrorInterceptor could GC. But we're passing a null cx (to
  // a default parameter) so the error will not be reported.
  JS::AutoSuppressGCAnalysis suppress;
  return runtimeFromMainThread()->onOutOfMemory(allocFunc, arena, nbytes,
                                                reallocPtr);
}
+
// Report an over-large allocation request. A null context is passed because
// no JSContext is available at this level.
void ZoneAllocator::reportAllocationOverflow() const {
  js::ReportAllocationOverflow(static_cast<JSContext*>(nullptr));
}
+
// Construct an empty list.
ZoneList::ZoneList() : head(nullptr), tail(nullptr) {}

// Construct a singleton list. The zone must not already be linked into any
// list; its link field is nulled to mark it as this list's tail.
ZoneList::ZoneList(Zone* zone) : head(zone), tail(zone) {
  MOZ_RELEASE_ASSERT(!zone->isOnList());
  zone->listNext_ = nullptr;
}

// Lists must be emptied (spliced away or cleared) before destruction.
ZoneList::~ZoneList() { MOZ_ASSERT(isEmpty()); }
+
+void ZoneList::check() const {
+#ifdef DEBUG
+ MOZ_ASSERT((head == nullptr) == (tail == nullptr));
+ if (!head) {
+ return;
+ }
+
+ Zone* zone = head;
+ for (;;) {
+ MOZ_ASSERT(zone && zone->isOnList());
+ if (zone == tail) break;
+ zone = zone->listNext_;
+ }
+ MOZ_ASSERT(!zone->listNext_);
+#endif
+}
+
bool ZoneList::isEmpty() const { return head == nullptr; }

// Return, without removing, the first zone in the list.
Zone* ZoneList::front() const {
  MOZ_ASSERT(!isEmpty());
  MOZ_ASSERT(head->isOnList());
  return head;
}

// Add a single zone at the front/back by splicing in a temporary singleton.
void ZoneList::prepend(Zone* zone) { prependList(ZoneList(zone)); }

void ZoneList::append(Zone* zone) { appendList(ZoneList(zone)); }
+
+void ZoneList::prependList(ZoneList&& other) {
+ check();
+ other.check();
+
+ if (other.isEmpty()) {
+ return;
+ }
+
+ MOZ_ASSERT(tail != other.tail);
+
+ if (!isEmpty()) {
+ other.tail->listNext_ = head;
+ } else {
+ tail = other.tail;
+ }
+ head = other.head;
+
+ other.head = nullptr;
+ other.tail = nullptr;
+}
+
+void ZoneList::appendList(ZoneList&& other) {
+ check();
+ other.check();
+
+ if (other.isEmpty()) {
+ return;
+ }
+
+ MOZ_ASSERT(tail != other.tail);
+
+ if (!isEmpty()) {
+ tail->listNext_ = other.head;
+ } else {
+ head = other.head;
+ }
+ tail = other.tail;
+
+ other.head = nullptr;
+ other.tail = nullptr;
+}
+
+Zone* ZoneList::removeFront() {
+ MOZ_ASSERT(!isEmpty());
+ check();
+
+ Zone* front = head;
+ head = head->listNext_;
+ if (!head) {
+ tail = nullptr;
+ }
+
+ front->listNext_ = Zone::NotOnList;
+
+ return front;
+}
+
// Remove every zone; removeFront resets each zone's link field to NotOnList.
void ZoneList::clear() {
  while (!isEmpty()) {
    removeFront();
  }
}
+
// Public API: register an embedder weak cache with a zone so it is swept
// along with the zone's other weak caches during GC.
JS_PUBLIC_API void JS::shadow::RegisterWeakCache(
    JS::Zone* zone, detail::WeakCacheBase* cachep) {
  zone->registerWeakCache(cachep);
}
+
// Trace the roots owned by this zone during a major GC. Does nothing for a
// marking tracer if this zone is not itself being marked.
void Zone::traceRootsInMajorGC(JSTracer* trc) {
  if (trc->isMarkingTracer() && !isGCMarking()) {
    return;
  }

  // Trace zone script-table roots. See comment below for justification re:
  // calling this only during major (non-nursery) collections.
  traceScriptTableRoots(trc);

  if (FinalizationObservers* observers = finalizationObservers()) {
    observers->traceRoots(trc);
  }
}
+
// Trace the strongly-held entries of the zone's script side-tables. Only
// called during major collections; see the optimization comment below.
void Zone::traceScriptTableRoots(JSTracer* trc) {
  static_assert(std::is_convertible_v<BaseScript*, gc::TenuredCell*>,
                "BaseScript must not be nursery-allocated for script-table "
                "tracing to work");

  // Performance optimization: the script-table keys are JSScripts, which
  // cannot be in the nursery, so we can skip this tracing if we are only in a
  // minor collection. We static-assert this fact above.
  MOZ_ASSERT(!JS::RuntimeHeapIsMinorCollecting());

  // N.B.: the script-table keys are weak *except* in an exceptional case: when
  // the --dump-bytecode command line option or the PCCount JSFriend API is
  // used, then the scripts for all counts must remain alive. We only trace
  // when the `trc->runtime()->profilingScripts` flag is set. This flag is
  // cleared in JSRuntime::destroyRuntime() during shutdown to ensure that
  // scripts are collected before the runtime goes away completely.
  if (scriptCountsMap && trc->runtime()->profilingScripts) {
    for (ScriptCountsMap::Range r = scriptCountsMap->all(); !r.empty();
         r.popFront()) {
      BaseScript* script = r.front().key();
      MOZ_ASSERT(script->hasScriptCounts());
      TraceRoot(trc, &script, "profilingScripts");
    }
  }

  // Trace the debugger's DebugScript weak map.
  if (debugScriptMap) {
    DebugAPI::traceDebugScriptMap(trc, debugScriptMap);
  }
}
+
// Rekey the script side-tables after a moving GC.
void Zone::fixupScriptMapsAfterMovingGC(JSTracer* trc) {
  // Map entries are removed by BaseScript::finalize, but we need to update the
  // script pointers here in case they are moved by the GC.

  if (scriptCountsMap) {
    scriptCountsMap->traceWeak(trc);
  }

  if (scriptLCovMap) {
    scriptLCovMap->traceWeak(trc);
  }

#ifdef MOZ_VTUNE
  if (scriptVTuneIdMap) {
    scriptVTuneIdMap->traceWeak(trc);
  }
#endif

#ifdef JS_CACHEIR_SPEW
  if (scriptFinalWarmUpCountMap) {
    scriptFinalWarmUpCountMap->traceWeak(trc);
  }
#endif
}
+
#ifdef JSGC_HASH_TABLE_CHECKS
// Debug-only: verify that each script side-table was rekeyed correctly after
// a moving GC. For every entry: the key must be a valid script belonging to
// this zone, and looking the key up again must find the very same entry
// (i.e. the table's hashing is consistent after relocation).
//
// The previous version repeated the identical check loop for each of the
// four tables; a single generic lambda removes that duplication while
// performing exactly the same assertions in the same order.
void Zone::checkScriptMapsAfterMovingGC() {
  // Run the per-entry checks on one (possibly null) side-table.
  auto checkMap = [this](auto& map) {
    if (!map) {
      return;
    }
    for (auto r = map->all(); !r.empty(); r.popFront()) {
      BaseScript* script = r.front().key();
      MOZ_ASSERT(script->zone() == this);
      CheckGCThingAfterMovingGC(script);
      auto ptr = map->lookup(script);
      MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
    }
  };

  checkMap(scriptCountsMap);
  checkMap(scriptLCovMap);
# ifdef MOZ_VTUNE
  checkMap(scriptVTuneIdMap);
# endif  // MOZ_VTUNE
# ifdef JS_CACHEIR_SPEW
  checkMap(scriptFinalWarmUpCountMap);
# endif  // JS_CACHEIR_SPEW
}
#endif
+
// Release the ScriptCounts entries of the given realm that can be released
// now; scripts with Baseline code keep theirs until the code is discarded.
void Zone::clearScriptCounts(Realm* realm) {
  if (!scriptCountsMap) {
    return;
  }

  // Clear all hasScriptCounts_ flags of BaseScript, in order to release all
  // ScriptCounts entries of the given realm.
  for (auto i = scriptCountsMap->modIter(); !i.done(); i.next()) {
    BaseScript* script = i.get().key();
    if (script->realm() != realm) {
      continue;
    }
    // We can't destroy the ScriptCounts yet if the script has Baseline code,
    // because Baseline code bakes in pointers to the counters. The ScriptCounts
    // will be destroyed in Zone::discardJitCode when discarding the JitScript.
    if (script->hasBaselineScript()) {
      continue;
    }
    script->clearHasScriptCounts();
    i.remove();
  }
}
+
+void Zone::clearScriptLCov(Realm* realm) {
+ if (!scriptLCovMap) {
+ return;
+ }
+
+ for (auto i = scriptLCovMap->modIter(); !i.done(); i.next()) {
+ BaseScript* script = i.get().key();
+ if (script->realm() == realm) {
+ i.remove();
+ }
+ }
+}
+
// Drop roots that are deliberately not honored during a shutdown GC so that
// everything in the zone can be collected.
void Zone::clearRootsForShutdownGC() {
  // Finalization callbacks are not called if we're shutting down.
  if (finalizationObservers()) {
    finalizationObservers()->clearRecords();
  }

  clearKeptObjects();
}
+
// Finish (tear down) the roots of every realm in this zone.
void Zone::finishRoots() {
  for (RealmsInZoneIter r(this); !r.done(); r.next()) {
    r->finishRoots();
  }
}
+
// Trace the kept-alive set of WeakRef targets (see keepDuringJob below).
void Zone::traceKeptObjects(JSTracer* trc) { keptObjects.ref().trace(trc); }

// Add the target of a JS WeakRef to a set the GC keeps alive for the current
// job. See: https://tc39.es/proposal-weakrefs/#sec-keepduringjob
bool Zone::keepDuringJob(HandleObject target) {
  return keptObjects.ref().put(target);
}

// Empty the kept-alive set (ClearKeptObjects in the WeakRefs proposal).
void Zone::clearKeptObjects() { keptObjects.ref().clear(); }
+
+bool Zone::ensureFinalizationObservers() {
+ if (finalizationObservers_.ref()) {
+ return true;
+ }
+
+ finalizationObservers_ = js::MakeUnique<FinalizationObservers>(this);
+ return bool(finalizationObservers_.ref());
+}
diff --git a/js/src/gc/Zone.h b/js/src/gc/Zone.h
new file mode 100644
index 0000000000..ba3de7ec3e
--- /dev/null
+++ b/js/src/gc/Zone.h
@@ -0,0 +1,653 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Zone_h
+#define gc_Zone_h
+
+#include "mozilla/Array.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/LinkedList.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/TimeStamp.h"
+
+#include "jstypes.h"
+
+#include "ds/Bitmap.h"
+#include "gc/ArenaList.h"
+#include "gc/Barrier.h"
+#include "gc/FindSCCs.h"
+#include "gc/GCMarker.h"
+#include "gc/NurseryAwareHashMap.h"
+#include "gc/Pretenuring.h"
+#include "gc/Statistics.h"
+#include "gc/ZoneAllocator.h"
+#include "js/GCHashTable.h"
+#include "js/Vector.h"
+#include "vm/AtomsTable.h"
+#include "vm/JSObject.h"
+#include "vm/JSScript.h"
+#include "vm/ShapeZone.h"
+
+namespace js {
+
+class DebugScriptMap;
+class RegExpZone;
+class WeakRefObject;
+
+namespace jit {
+class JitZone;
+} // namespace jit
+
+namespace gc {
+
+class FinalizationObservers;
+class ZoneList;
+
+using ZoneComponentFinder = ComponentFinder<JS::Zone>;
+
// GC policy for the unique-id map: defines how a (cell, id) entry is traced
// weakly during GC (implementation elsewhere; see UniqueIdMap below).
struct UniqueIdGCPolicy {
  static bool traceWeak(JSTracer* trc, Cell** keyp, uint64_t* valuep);
};
+
+// Maps a Cell* to a unique, 64bit id.
+using UniqueIdMap = GCHashMap<Cell*, uint64_t, PointerHasher<Cell*>,
+ SystemAllocPolicy, UniqueIdGCPolicy>;
+
+template <typename T>
+class ZoneAllCellIter;
+
+template <typename T>
+class ZoneCellIter;
+
+} // namespace gc
+
+// If two different nursery strings are wrapped into the same zone, and have
+// the same contents, then deduplication may make them duplicates.
+// `DuplicatesPossible` will allow this and map both wrappers to the same (now
+// tenured) source string.
+using StringWrapperMap =
+ NurseryAwareHashMap<JSString*, JSString*, ZoneAllocPolicy,
+ DuplicatesPossible>;
+
// Small fixed-size cache of allocated external strings, looked up by their
// character data (see lookup). Owned per-zone and purged on GC (see
// Zone::externalStringCache_). Non-copyable.
class MOZ_NON_TEMPORARY_CLASS ExternalStringCache {
  static const size_t NumEntries = 4;
  mozilla::Array<JSString*, NumEntries> entries_;

  ExternalStringCache(const ExternalStringCache&) = delete;
  void operator=(const ExternalStringCache&) = delete;

 public:
  ExternalStringCache() { purge(); }
  // Reset every entry to null.
  void purge() { mozilla::PodArrayZero(entries_); }

  // Return a cached string matching chars/len, or null (defined elsewhere).
  MOZ_ALWAYS_INLINE JSString* lookup(const char16_t* chars, size_t len) const;
  MOZ_ALWAYS_INLINE void put(JSString* s);
};
+
// Two-entry cache for Function.prototype.toString, mapping a BaseScript to
// its string result. Owned per-zone and purged on GC (see
// Zone::functionToStringCache_). Non-copyable.
class MOZ_NON_TEMPORARY_CLASS FunctionToStringCache {
  struct Entry {
    BaseScript* script;
    JSString* string;

    // Overwrite this entry in place.
    void set(BaseScript* scriptArg, JSString* stringArg) {
      script = scriptArg;
      string = stringArg;
    }
  };
  static const size_t NumEntries = 2;
  mozilla::Array<Entry, NumEntries> entries_;

  FunctionToStringCache(const FunctionToStringCache&) = delete;
  void operator=(const FunctionToStringCache&) = delete;

 public:
  FunctionToStringCache() { purge(); }
  // Reset every entry to null.
  void purge() { mozilla::PodArrayZero(entries_); }

  MOZ_ALWAYS_INLINE JSString* lookup(BaseScript* script) const;
  MOZ_ALWAYS_INLINE void put(BaseScript* script, JSString* string);
};
+
+} // namespace js
+
+namespace JS {
+
+// [SMDOC] GC Zones
+//
+// A zone is a collection of compartments. Every compartment belongs to exactly
+// one zone. In Firefox, there is roughly one zone per tab along with a system
+// zone for everything else. Zones mainly serve as boundaries for garbage
+// collection. Unlike compartments, they have no special security properties.
+//
+// Every GC thing belongs to exactly one zone. GC things from the same zone but
+// different compartments can share an arena (4k page). GC things from different
+// zones cannot be stored in the same arena. The garbage collector is capable of
+// collecting one zone at a time; it cannot collect at the granularity of
+// compartments.
+//
+// GC things are tied to zones and compartments as follows:
+//
+// - JSObjects belong to a compartment and cannot be shared between
+// compartments. If an object needs to point to a JSObject in a different
+// compartment, regardless of zone, it must go through a cross-compartment
+// wrapper. Each compartment keeps track of its outgoing wrappers in a table.
+// JSObjects find their compartment via their ObjectGroup.
+//
+// - JSStrings do not belong to any particular compartment, but they do belong
+// to a zone. Thus, two different compartments in the same zone can point to a
+// JSString. When a string needs to be wrapped, we copy it if it's in a
+// different zone and do nothing if it's in the same zone. Thus, transferring
+// strings within a zone is very efficient.
+//
+// - Shapes and base shapes belong to a zone and are shared between compartments
+// in that zone where possible. Accessor shapes store getter and setter
+// JSObjects which belong to a single compartment, so these shapes and all
+// their descendants can't be shared with other compartments.
+//
+// - Scripts are also compartment-local and cannot be shared. A script points to
+// its compartment.
+//
+// - ObjectGroup and JitCode objects belong to a compartment and cannot be
+// shared. There is no mechanism to obtain the compartment from a JitCode
+// object.
+//
+// A zone remains alive as long as any GC things in the zone are alive. A
+// compartment remains alive as long as any JSObjects, scripts, shapes, or base
+// shapes within it are alive.
+//
+// We always guarantee that a zone has at least one live compartment by refusing
+// to delete the last compartment in a live zone.
class Zone : public js::ZoneAllocator, public js::gc::GraphNodeBase<JS::Zone> {
 public:
  js::gc::ArenaLists arenas;

  // Per-zone data for use by an embedder.
  js::MainThreadData<void*> data;

  js::MainThreadData<uint32_t> tenuredBigInts;

  // Number of marked/finalized JSStrings/JSFatInlineStrings during major GC.
  js::MainThreadOrGCTaskData<size_t> markedStrings;
  js::MainThreadOrGCTaskData<size_t> finalizedStrings;

  // When true, skip calling the metadata callback. We use this:
  // - to avoid invoking the callback recursively;
  // - to avoid observing lazy prototype setup (which confuses callbacks that
  //   want to use the types being set up!);
  // - to avoid attaching allocation stacks to allocation stack nodes, which
  //   is silly
  // And so on.
  js::MainThreadData<bool> suppressAllocationMetadataBuilder;

  // Flags permanently set when nursery allocation is disabled for this zone.
  js::MainThreadData<bool> nurseryStringsDisabled;
  js::MainThreadData<bool> nurseryBigIntsDisabled;

 private:
  // Flags dynamically updated based on more than one condition, including the
  // flags above.
  js::MainThreadOrIonCompileData<bool> allocNurseryObjects_;
  js::MainThreadOrIonCompileData<bool> allocNurseryStrings_;
  js::MainThreadOrIonCompileData<bool> allocNurseryBigInts_;

  // Minimum Heap value which results in tenured allocation.
  js::MainThreadData<js::gc::Heap> minObjectHeapToTenure_;
  js::MainThreadData<js::gc::Heap> minStringHeapToTenure_;
  js::MainThreadData<js::gc::Heap> minBigintHeapToTenure_;

 public:
  // Script side-tables. These used to be held by Realm, but are now placed
  // here in order to allow JSScript to access them during finalize (see bug
  // 1568245; this change in 1575350). The tables are initialized lazily by
  // JSScript.
  js::UniquePtr<js::ScriptCountsMap> scriptCountsMap;
  js::UniquePtr<js::ScriptLCovMap> scriptLCovMap;
  js::MainThreadData<js::DebugScriptMap*> debugScriptMap;
#ifdef MOZ_VTUNE
  js::UniquePtr<js::ScriptVTuneIdMap> scriptVTuneIdMap;
#endif
#ifdef JS_CACHEIR_SPEW
  js::UniquePtr<js::ScriptFinalWarmUpCountMap> scriptFinalWarmUpCountMap;
#endif

  js::MainThreadData<js::StringStats> previousGCStringStats;
  js::MainThreadData<js::StringStats> stringStats;

#ifdef DEBUG
  js::MainThreadData<unsigned> gcSweepGroupIndex;
#endif

  js::gc::PretenuringZone pretenuring;

 private:
  // Side map for storing unique ids for cells, independent of address.
  js::MainThreadOrGCTaskData<js::gc::UniqueIdMap> uniqueIds_;

  // Number of allocations since the most recent minor GC for this thread.
  uint32_t tenuredAllocsSinceMinorGC_ = 0;

  // Live weakmaps in this zone.
  js::MainThreadOrGCTaskData<mozilla::LinkedList<js::WeakMapBase>>
      gcWeakMapList_;

  // The set of compartments in this zone.
  using CompartmentVector =
      js::Vector<JS::Compartment*, 1, js::SystemAllocPolicy>;
  js::MainThreadOrGCTaskData<CompartmentVector> compartments_;

  // All cross-zone string wrappers in the zone.
  js::MainThreadOrGCTaskData<js::StringWrapperMap> crossZoneStringWrappers_;

  // List of non-ephemeron weak containers to sweep during
  // beginSweepingSweepGroup.
  js::MainThreadOrGCTaskData<mozilla::LinkedList<detail::WeakCacheBase>>
      weakCaches_;

  // Mapping from not yet marked keys to a vector of all values that the key
  // maps to in any live weak map. Separate tables for nursery and tenured
  // keys.
  js::MainThreadOrGCTaskData<js::gc::EphemeronEdgeTable> gcEphemeronEdges_;
  js::MainThreadOrGCTaskData<js::gc::EphemeronEdgeTable>
      gcNurseryEphemeronEdges_;

  js::MainThreadData<js::UniquePtr<js::RegExpZone>> regExps_;

  // Bitmap of atoms marked by this zone.
  js::MainThreadOrGCTaskData<js::SparseBitmap> markedAtoms_;

  // Set of atoms recently used by this Zone. Purged on GC.
  js::MainThreadOrGCTaskData<js::AtomSet> atomCache_;

  // Cache storing allocated external strings. Purged on GC.
  js::MainThreadOrGCTaskData<js::ExternalStringCache> externalStringCache_;

  // Cache for Function.prototype.toString. Purged on GC.
  js::MainThreadOrGCTaskData<js::FunctionToStringCache> functionToStringCache_;

  // Cache for Function.prototype.bind mapping an atom `name` to atom
  // `"bound " + name`. Purged on GC.
  using BoundPrefixCache =
      js::HashMap<JSAtom*, JSAtom*, js::PointerHasher<JSAtom*>,
                  js::SystemAllocPolicy>;
  js::MainThreadData<BoundPrefixCache> boundPrefixCache_;

  // Information about Shapes and BaseShapes.
  js::MainThreadData<js::ShapeZone> shapeZone_;

  // Information about finalization registries, created on demand.
  js::MainThreadOrGCTaskData<js::UniquePtr<js::gc::FinalizationObservers>>
      finalizationObservers_;

  js::MainThreadOrGCTaskData<js::jit::JitZone*> jitZone_;

  // Last time at which JIT code was discarded for this zone. This is only set
  // when JitScripts and Baseline code are discarded as well.
  js::MainThreadData<mozilla::TimeStamp> lastDiscardedCodeTime_;

  js::MainThreadData<bool> gcScheduled_;
  js::MainThreadData<bool> gcScheduledSaved_;
  js::MainThreadData<bool> gcPreserveCode_;
  js::MainThreadData<bool> keepPropMapTables_;
  js::MainThreadData<bool> wasCollected_;

  // Allow zones to be linked into a list
  js::MainThreadOrGCTaskData<Zone*> listNext_;
  static Zone* const NotOnList;
  friend class js::gc::ZoneList;

  using KeptAliveSet =
      JS::GCHashSet<js::HeapPtr<JSObject*>,
                    js::StableCellHasher<js::HeapPtr<JSObject*>>,
                    js::ZoneAllocPolicy>;
  friend class js::WeakRefObject;
  js::MainThreadOrGCTaskData<KeptAliveSet> keptObjects;

 public:
  // Upcast from the ZoneAllocator base back to the derived Zone.
  static JS::Zone* from(ZoneAllocator* zoneAlloc) {
    return static_cast<Zone*>(zoneAlloc);
  }

  explicit Zone(JSRuntime* rt, Kind kind = NormalZone);
  ~Zone();

  [[nodiscard]] bool init();

  void destroy(JS::GCContext* gcx);

  [[nodiscard]] bool findSweepGroupEdges(Zone* atomsZone);

  // Options controlling discardJitCode() below.
  struct DiscardOptions {
    DiscardOptions() {}
    bool discardBaselineCode = true;
    bool discardJitScripts = false;
    bool resetNurseryAllocSites = false;
    bool resetPretenuredAllocSites = false;
  };

  void discardJitCode(JS::GCContext* gcx,
                      const DiscardOptions& options = DiscardOptions());

  // Discard JIT code regardless of isPreservingCode().
  void forceDiscardJitCode(JS::GCContext* gcx,
                           const DiscardOptions& options = DiscardOptions());

  void resetAllocSitesAndInvalidate(bool resetNurserySites,
                                    bool resetPretenuredSites);

  void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                              JS::CodeSizes* code, size_t* regexpZone,
                              size_t* jitZone, size_t* baselineStubsOptimized,
                              size_t* uniqueIdMap, size_t* initialPropMapTable,
                              size_t* shapeTables, size_t* atomsMarkBitmaps,
                              size_t* compartmentObjects,
                              size_t* crossCompartmentWrappersTables,
                              size_t* compartmentsPrivateData,
                              size_t* scriptCountsMapArg);

  // Iterate over all cells in the zone. See the definition of ZoneCellIter
  // in gc/GC-inl.h for the possible arguments and documentation.
  template <typename T, typename... Args>
  js::gc::ZoneCellIter<T> cellIter(Args&&... args) {
    return js::gc::ZoneCellIter<T>(const_cast<Zone*>(this),
                                   std::forward<Args>(args)...);
  }

  // As above, but can return about-to-be-finalised things.
  template <typename T, typename... Args>
  js::gc::ZoneAllCellIter<T> cellIterUnsafe(Args&&... args) {
    return js::gc::ZoneAllCellIter<T>(const_cast<Zone*>(this),
                                      std::forward<Args>(args)...);
  }

  bool hasMarkedRealms();

  void scheduleGC() {
    MOZ_ASSERT(!RuntimeHeapIsBusy());
    gcScheduled_ = true;
  }
  void unscheduleGC() { gcScheduled_ = false; }
  bool isGCScheduled() { return gcScheduled_; }

  void setPreservingCode(bool preserving) { gcPreserveCode_ = preserving; }
  bool isPreservingCode() const { return gcPreserveCode_; }

  mozilla::TimeStamp lastDiscardedCodeTime() const {
    return lastDiscardedCodeTime_;
  }

  void changeGCState(GCState prev, GCState next);

  bool isCollecting() const {
    MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
    return isCollectingFromAnyThread();
  }

  inline bool isCollectingFromAnyThread() const {
    return needsIncrementalBarrier() || wasGCStarted();
  }

  GCState initialMarkingState() const;

  bool shouldMarkInZone(js::gc::MarkColor color) const {
    // Check whether the zone is in one or both of the MarkBlackOnly and
    // MarkBlackAndGray states, depending on the mark color. Also check for
    // VerifyPreBarriers when the mark color is black (we don't do any gray
    // marking when verifying pre-barriers).
    if (color == js::gc::MarkColor::Black) {
      return isGCMarkingOrVerifyingPreBarriers();
    }

    return isGCMarkingBlackAndGray();
  }

  // Was this zone collected in the last GC.
  bool wasCollected() const { return wasCollected_; }
  void setWasCollected(bool v) { wasCollected_ = v; }

  void setNeedsIncrementalBarrier(bool needs);
  const BarrierState* addressOfNeedsIncrementalBarrier() const {
    return &needsIncrementalBarrier_;
  }

  static constexpr size_t offsetOfNeedsIncrementalBarrier() {
    return offsetof(Zone, needsIncrementalBarrier_);
  }

  // Return the JitZone, creating it on first use; null on creation failure.
  js::jit::JitZone* getJitZone(JSContext* cx) {
    return jitZone_ ? jitZone_ : createJitZone(cx);
  }
  js::jit::JitZone* jitZone() { return jitZone_; }

  void prepareForCompacting();

  void traceRootsInMajorGC(JSTracer* trc);

  void sweepAfterMinorGC(JSTracer* trc);
  void sweepUniqueIds();
  void sweepCompartments(JS::GCContext* gcx, bool keepAtleastOne, bool lastGC);

  // Remove dead weak maps from gcWeakMapList_ and remove entries from the
  // remaining weak maps whose keys are dead.
  void sweepWeakMaps(JSTracer* trc);

  // Trace all weak maps in this zone. Used to update edges after a moving GC.
  void traceWeakMaps(JSTracer* trc);

  js::gc::UniqueIdMap& uniqueIds() { return uniqueIds_.ref(); }

  void notifyObservingDebuggers();

  // Record one tenured allocation since the last minor GC.
  void noteTenuredAlloc() { tenuredAllocsSinceMinorGC_++; }

  uint32_t* addressOfTenuredAllocCount() { return &tenuredAllocsSinceMinorGC_; }

  // Return the tenured-allocation count and reset it to zero.
  uint32_t getAndResetTenuredAllocsSinceMinorGC() {
    uint32_t res = tenuredAllocsSinceMinorGC_;
    tenuredAllocsSinceMinorGC_ = 0;
    return res;
  }

  mozilla::LinkedList<js::WeakMapBase>& gcWeakMapList() {
    return gcWeakMapList_.ref();
  }

  CompartmentVector& compartments() { return compartments_.ref(); }

  js::StringWrapperMap& crossZoneStringWrappers() {
    return crossZoneStringWrappers_.ref();
  }
  const js::StringWrapperMap& crossZoneStringWrappers() const {
    return crossZoneStringWrappers_.ref();
  }

  void dropStringWrappersOnGC();

  void traceWeakCCWEdges(JSTracer* trc);
  static void fixupAllCrossCompartmentWrappersAfterMovingGC(JSTracer* trc);

  void fixupAfterMovingGC();
  void fixupScriptMapsAfterMovingGC(JSTracer* trc);

  void setNurseryAllocFlags(bool allocObjects, bool allocStrings,
                            bool allocBigInts);

  // Whether cells of the given trace kind may currently be allocated in the
  // nursery for this zone.
  bool allocKindInNursery(JS::TraceKind kind) const {
    switch (kind) {
      case JS::TraceKind::Object:
        return allocNurseryObjects_;
      case JS::TraceKind::String:
        return allocNurseryStrings_;
      case JS::TraceKind::BigInt:
        return allocNurseryBigInts_;
      default:
        MOZ_CRASH("Unsupported kind for nursery allocation");
    }
  }
  bool allocNurseryObjects() const { return allocNurseryObjects_; }
  bool allocNurseryStrings() const { return allocNurseryStrings_; }
  bool allocNurseryBigInts() const { return allocNurseryBigInts_; }

  js::gc::Heap minHeapToTenure(JS::TraceKind kind) const {
    switch (kind) {
      case JS::TraceKind::Object:
        return minObjectHeapToTenure_;
      case JS::TraceKind::String:
        return minStringHeapToTenure_;
      case JS::TraceKind::BigInt:
        return minBigintHeapToTenure_;
      default:
        MOZ_CRASH("Unsupported kind for nursery allocation");
    }
  }

  mozilla::LinkedList<detail::WeakCacheBase>& weakCaches() {
    return weakCaches_.ref();
  }
  void registerWeakCache(detail::WeakCacheBase* cachep) {
    weakCaches().insertBack(cachep);
  }

  // Barrier hooks: forward to the *Internal methods only while the
  // incremental barrier is active.
  void beforeClearDelegate(JSObject* wrapper, JSObject* delegate) {
    if (needsIncrementalBarrier()) {
      beforeClearDelegateInternal(wrapper, delegate);
    }
  }

  void afterAddDelegate(JSObject* wrapper) {
    if (needsIncrementalBarrier()) {
      afterAddDelegateInternal(wrapper);
    }
  }

  void beforeClearDelegateInternal(JSObject* wrapper, JSObject* delegate);
  void afterAddDelegateInternal(JSObject* wrapper);
  js::gc::EphemeronEdgeTable& gcEphemeronEdges() {
    return gcEphemeronEdges_.ref();
  }
  js::gc::EphemeronEdgeTable& gcNurseryEphemeronEdges() {
    return gcNurseryEphemeronEdges_.ref();
  }

  // Select the ephemeron-edge table appropriate to where |cell| lives.
  js::gc::EphemeronEdgeTable& gcEphemeronEdges(const js::gc::Cell* cell) {
    return cell->isTenured() ? gcEphemeronEdges() : gcNurseryEphemeronEdges();
  }

  // Perform all pending weakmap entry marking for this zone after
  // transitioning to weak marking mode.
  js::gc::IncrementalProgress enterWeakMarkingMode(js::GCMarker* marker,
                                                   js::SliceBudget& budget);

  // A set of edges from this zone to other zones used during GC to calculate
  // sweep groups.
  NodeSet& gcSweepGroupEdges() {
    return gcGraphEdges;  // Defined in GraphNodeBase base class.
  }
  bool hasSweepGroupEdgeTo(Zone* otherZone) const {
    return gcGraphEdges.has(otherZone);
  }
  [[nodiscard]] bool addSweepGroupEdgeTo(Zone* otherZone) {
    MOZ_ASSERT(otherZone->isGCMarking());
    return gcSweepGroupEdges().put(otherZone);
  }
  void clearSweepGroupEdges() { gcSweepGroupEdges().clear(); }

  js::RegExpZone& regExps() { return *regExps_.ref(); }

  js::SparseBitmap& markedAtoms() { return markedAtoms_.ref(); }

  js::AtomSet& atomCache() { return atomCache_.ref(); }

  void purgeAtomCache();

  js::ExternalStringCache& externalStringCache() {
    return externalStringCache_.ref();
  };

  js::FunctionToStringCache& functionToStringCache() {
    return functionToStringCache_.ref();
  }

  BoundPrefixCache& boundPrefixCache() { return boundPrefixCache_.ref(); }

  js::ShapeZone& shapeZone() { return shapeZone_.ref(); }

  bool keepPropMapTables() const { return keepPropMapTables_; }
  void setKeepPropMapTables(bool b) { keepPropMapTables_ = b; }

  void clearRootsForShutdownGC();
  void finishRoots();

  void traceScriptTableRoots(JSTracer* trc);

  void clearScriptCounts(Realm* realm);
  void clearScriptLCov(Realm* realm);

  // Add the target of JS WeakRef to a kept-alive set maintained by GC.
  // See: https://tc39.es/proposal-weakrefs/#sec-keepduringjob
  bool keepDuringJob(HandleObject target);

  void traceKeptObjects(JSTracer* trc);

  // Clear the kept-alive set.
  // See: https://tc39.es/proposal-weakrefs/#sec-clear-kept-objects
  void clearKeptObjects();

  js::gc::AllocSite* unknownAllocSite(JS::TraceKind kind) {
    return &pretenuring.unknownAllocSite(kind);
  }
  js::gc::AllocSite* optimizedAllocSite() {
    return &pretenuring.optimizedAllocSite;
  }
  uint32_t nurseryAllocCount(JS::TraceKind kind) const {
    return pretenuring.nurseryAllocCount(kind);
  }

#ifdef JSGC_HASH_TABLE_CHECKS
  void checkAllCrossCompartmentWrappersAfterMovingGC();
  void checkStringWrappersAfterMovingGC();

  // Assert that the UniqueId table has been redirected successfully.
  void checkUniqueIdTableAfterMovingGC();

  void checkScriptMapsAfterMovingGC();
#endif

#ifdef DEBUG
  // For testing purposes, return the index of the sweep group which this zone
  // was swept in in the last GC.
  unsigned lastSweepGroupIndex() { return gcSweepGroupIndex; }
#endif

 private:
  js::jit::JitZone* createJitZone(JSContext* cx);

  bool isQueuedForBackgroundSweep() { return isOnList(); }

  void sweepEphemeronTablesAfterMinorGC();

  js::gc::FinalizationObservers* finalizationObservers() {
    return finalizationObservers_.ref().get();
  }
  bool ensureFinalizationObservers();

  bool isOnList() const;
  Zone* nextZone() const;

  friend bool js::CurrentThreadCanAccessZone(Zone* zone);
  friend class js::gc::GCRuntime;
};
+
+} // namespace JS
+
+namespace js {
+namespace gc {
+const char* StateName(JS::Zone::GCState state);
+} // namespace gc
+} // namespace js
+
+#endif // gc_Zone_h
diff --git a/js/src/gc/ZoneAllocator.h b/js/src/gc/ZoneAllocator.h
new file mode 100644
index 0000000000..de2dd7da28
--- /dev/null
+++ b/js/src/gc/ZoneAllocator.h
@@ -0,0 +1,354 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Public header for allocating memory associated with GC things.
+ */
+
+#ifndef gc_ZoneAllocator_h
+#define gc_ZoneAllocator_h
+
+#include "mozilla/Maybe.h"
+
+#include "jsfriendapi.h"
+#include "jstypes.h"
+#include "gc/Cell.h"
+#include "gc/Scheduling.h"
+#include "js/GCAPI.h"
+#include "js/shadow/Zone.h" // JS::shadow::Zone
+#include "vm/MallocProvider.h"
+
+namespace JS {
+class JS_PUBLIC_API Zone;
+} // namespace JS
+
+namespace js {
+
+class ZoneAllocator;
+
+#ifdef DEBUG
+bool CurrentThreadIsGCFinalizing();
+#endif
+
+namespace gc {
+void MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
+ const HeapSize& heap,
+ const HeapThreshold& threshold,
+ JS::GCReason reason);
+}
+
+// Base class of JS::Zone that provides malloc memory allocation and accounting.
+class ZoneAllocator : public JS::shadow::Zone,
+ public js::MallocProvider<JS::Zone> {
+ protected:
+ explicit ZoneAllocator(JSRuntime* rt, Kind kind);
+ ~ZoneAllocator();
+ void fixupAfterMovingGC();
+
+ public:
+ static ZoneAllocator* from(JS::Zone* zone) {
+ // This is a safe upcast, but the compiler hasn't seen the definition yet.
+ return reinterpret_cast<ZoneAllocator*>(zone);
+ }
+
+ [[nodiscard]] void* onOutOfMemory(js::AllocFunction allocFunc,
+ arena_id_t arena, size_t nbytes,
+ void* reallocPtr = nullptr);
+ void reportAllocationOverflow() const;
+
+ void updateSchedulingStateOnGCStart();
+ void updateGCStartThresholds(gc::GCRuntime& gc);
+ void setGCSliceThresholds(gc::GCRuntime& gc, bool waitingOnBGTask);
+ void clearGCSliceThresholds();
+
+ // Memory accounting APIs for malloc memory owned by GC cells.
+
+ void addCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use) {
+ MOZ_ASSERT(cell);
+ MOZ_ASSERT(nbytes);
+
+ mallocHeapSize.addBytes(nbytes);
+
+#ifdef DEBUG
+ mallocTracker.trackGCMemory(cell, nbytes, use);
+#endif
+
+ maybeTriggerGCOnMalloc();
+ }
+
+ void removeCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use,
+ bool updateRetainedSize = false) {
+ MOZ_ASSERT(cell);
+ MOZ_ASSERT(nbytes);
+ MOZ_ASSERT_IF(CurrentThreadIsGCFinalizing(), updateRetainedSize);
+
+ mallocHeapSize.removeBytes(nbytes, updateRetainedSize);
+
+#ifdef DEBUG
+ mallocTracker.untrackGCMemory(cell, nbytes, use);
+#endif
+ }
+
+ void swapCellMemory(js::gc::Cell* a, js::gc::Cell* b, js::MemoryUse use) {
+#ifdef DEBUG
+ mallocTracker.swapGCMemory(a, b, use);
+#endif
+ }
+
+ void registerNonGCMemory(void* mem, MemoryUse use) {
+#ifdef DEBUG
+ return mallocTracker.registerNonGCMemory(mem, use);
+#endif
+ }
+ void unregisterNonGCMemory(void* mem, MemoryUse use) {
+#ifdef DEBUG
+ return mallocTracker.unregisterNonGCMemory(mem, use);
+#endif
+ }
+ void moveOtherMemory(void* dst, void* src, MemoryUse use) {
+#ifdef DEBUG
+ return mallocTracker.moveNonGCMemory(dst, src, use);
+#endif
+ }
+
+ void incNonGCMemory(void* mem, size_t nbytes, MemoryUse use) {
+ MOZ_ASSERT(nbytes);
+ mallocHeapSize.addBytes(nbytes);
+
+#ifdef DEBUG
+ mallocTracker.incNonGCMemory(mem, nbytes, use);
+#endif
+
+ maybeTriggerGCOnMalloc();
+ }
+ void decNonGCMemory(void* mem, size_t nbytes, MemoryUse use,
+ bool updateRetainedSize) {
+ MOZ_ASSERT(nbytes);
+
+ mallocHeapSize.removeBytes(nbytes, updateRetainedSize);
+
+#ifdef DEBUG
+ mallocTracker.decNonGCMemory(mem, nbytes, use);
+#endif
+ }
+
+ // Account for allocations that may be referenced by more than one GC thing.
+ bool addSharedMemory(void* mem, size_t nbytes, MemoryUse use);
+ void removeSharedMemory(void* mem, size_t nbytes, MemoryUse use);
+
+ void incJitMemory(size_t nbytes) {
+ MOZ_ASSERT(nbytes);
+ jitHeapSize.addBytes(nbytes);
+ maybeTriggerZoneGC(jitHeapSize, jitHeapThreshold,
+ JS::GCReason::TOO_MUCH_JIT_CODE);
+ }
+ void decJitMemory(size_t nbytes) {
+ MOZ_ASSERT(nbytes);
+ jitHeapSize.removeBytes(nbytes, true);
+ }
+
+ // Check malloc allocation threshold and trigger a zone GC if necessary.
+ void maybeTriggerGCOnMalloc() {
+ maybeTriggerZoneGC(mallocHeapSize, mallocHeapThreshold,
+ JS::GCReason::TOO_MUCH_MALLOC);
+ }
+
+ private:
+ void maybeTriggerZoneGC(const js::gc::HeapSize& heap,
+ const js::gc::HeapThreshold& threshold,
+ JS::GCReason reason) {
+ if (heap.bytes() >= threshold.startBytes()) {
+ gc::MaybeMallocTriggerZoneGC(runtimeFromAnyThread(), this, heap,
+ threshold, reason);
+ }
+ }
+
+ void updateCollectionRate(mozilla::TimeDuration mainThreadGCTime,
+ size_t initialBytesForAllZones);
+
+ void updateAllocationRate(mozilla::TimeDuration mutatorTime);
+
+ public:
+ // The size of allocated GC arenas in this zone.
+ gc::PerZoneGCHeapSize gcHeapSize;
+
+ // Threshold used to trigger GC based on GC heap size.
+ gc::GCHeapThreshold gcHeapThreshold;
+
+ // Amount of malloc data owned by tenured GC things in this zone, including
+ // external allocations supplied by JS::AddAssociatedMemory.
+ gc::HeapSize mallocHeapSize;
+
+ // Threshold used to trigger GC based on malloc allocations.
+ gc::MallocHeapThreshold mallocHeapThreshold;
+
+  // Amount of executable JIT code owned by GC things in this zone.
+ gc::HeapSize jitHeapSize;
+
+ // Threshold used to trigger GC based on JIT allocations.
+ gc::JitHeapThreshold jitHeapThreshold;
+
+ // Use counts for memory that can be referenced by more than one GC thing.
+ // Memory recorded here is also recorded in mallocHeapSize. This structure
+ // is used to avoid over-counting in mallocHeapSize.
+ gc::SharedMemoryMap sharedMemoryUseCounts;
+
+ // Collection rate estimate for this zone in MB/s, and state used to calculate
+ // it. Updated every time this zone is collected.
+ MainThreadData<mozilla::Maybe<double>> smoothedCollectionRate;
+ MainThreadOrGCTaskData<mozilla::TimeDuration> perZoneGCTime;
+
+ // Allocation rate estimate in MB/s of mutator time, and state used to
+ // calculate it.
+ MainThreadData<mozilla::Maybe<double>> smoothedAllocationRate;
+ MainThreadData<size_t> prevGCHeapSize;
+
+ private:
+#ifdef DEBUG
+ // In debug builds, malloc allocations can be tracked to make debugging easier
+ // (possible?) if allocation and free sizes don't balance.
+ gc::MemoryTracker mallocTracker;
+#endif
+
+ friend class gc::GCRuntime;
+};
+
+// Whether memory is associated with a single cell or whether it is associated
+// with the zone as a whole (for memory used by the system).
+enum class TrackingKind { Cell, Zone };
+
+/*
+ * Allocation policy that performs memory tracking for malloced memory. This
+ * should be used for all containers associated with a GC thing or a zone.
+ *
+ * Since it doesn't hold a JSContext (those may not live long enough), it can't
+ * report out-of-memory conditions itself; the caller must check for OOM and
+ * take the appropriate action.
+ *
+ * FIXME bug 647103 - replace these *AllocPolicy names.
+ */
+template <TrackingKind kind>
+class TrackedAllocPolicy : public MallocProvider<TrackedAllocPolicy<kind>> {
+ ZoneAllocator* zone_;
+
+#ifdef DEBUG
+ friend class js::gc::MemoryTracker; // Can clear |zone_| on merge.
+#endif
+
+ public:
+ MOZ_IMPLICIT TrackedAllocPolicy(ZoneAllocator* z) : zone_(z) {
+ zone()->registerNonGCMemory(this, MemoryUse::TrackedAllocPolicy);
+ }
+ MOZ_IMPLICIT TrackedAllocPolicy(JS::Zone* z)
+ : TrackedAllocPolicy(ZoneAllocator::from(z)) {}
+ TrackedAllocPolicy(TrackedAllocPolicy& other)
+ : TrackedAllocPolicy(other.zone_) {}
+ TrackedAllocPolicy(TrackedAllocPolicy&& other) : zone_(other.zone_) {
+ zone()->moveOtherMemory(this, &other, MemoryUse::TrackedAllocPolicy);
+ other.zone_ = nullptr;
+ }
+ ~TrackedAllocPolicy() {
+ if (zone_) {
+ zone_->unregisterNonGCMemory(this, MemoryUse::TrackedAllocPolicy);
+ }
+ }
+
+ TrackedAllocPolicy& operator=(const TrackedAllocPolicy& other) {
+ zone()->unregisterNonGCMemory(this, MemoryUse::TrackedAllocPolicy);
+ zone_ = other.zone();
+ zone()->registerNonGCMemory(this, MemoryUse::TrackedAllocPolicy);
+ return *this;
+ }
+ TrackedAllocPolicy& operator=(TrackedAllocPolicy&& other) {
+ MOZ_ASSERT(this != &other);
+ zone()->unregisterNonGCMemory(this, MemoryUse::TrackedAllocPolicy);
+ zone_ = other.zone();
+ zone()->moveOtherMemory(this, &other, MemoryUse::TrackedAllocPolicy);
+ other.zone_ = nullptr;
+ return *this;
+ }
+
+ // Public methods required to fulfill the AllocPolicy interface.
+
+ template <typename T>
+ void free_(T* p, size_t numElems) {
+ if (p) {
+ decMemory(numElems * sizeof(T));
+ js_free(p);
+ }
+ }
+
+ [[nodiscard]] bool checkSimulatedOOM() const {
+ return !js::oom::ShouldFailWithOOM();
+ }
+
+ void reportAllocOverflow() const { reportAllocationOverflow(); }
+
+ // Internal methods called by the MallocProvider implementation.
+
+ [[nodiscard]] void* onOutOfMemory(js::AllocFunction allocFunc,
+ arena_id_t arena, size_t nbytes,
+ void* reallocPtr = nullptr) {
+ return zone()->onOutOfMemory(allocFunc, arena, nbytes, reallocPtr);
+ }
+ void reportAllocationOverflow() const { zone()->reportAllocationOverflow(); }
+ void updateMallocCounter(size_t nbytes) {
+ zone()->incNonGCMemory(this, nbytes, MemoryUse::TrackedAllocPolicy);
+ }
+
+ private:
+ ZoneAllocator* zone() const {
+ MOZ_ASSERT(zone_);
+ return zone_;
+ }
+ void decMemory(size_t nbytes);
+};
+
+using ZoneAllocPolicy = TrackedAllocPolicy<TrackingKind::Zone>;
+using CellAllocPolicy = TrackedAllocPolicy<TrackingKind::Cell>;
+
+// Functions for memory accounting on the zone.
+
+// Associate malloc memory with a GC thing. This call should be matched by a
+// following call to RemoveCellMemory with the same size and use. The total
+// amount of malloc memory associated with a zone is used to trigger GC.
+//
+// You should use InitReservedSlot / InitObjectPrivate in preference to this
+// where possible.
+
+inline void AddCellMemory(gc::TenuredCell* cell, size_t nbytes, MemoryUse use) {
+ if (nbytes) {
+ ZoneAllocator::from(cell->zone())->addCellMemory(cell, nbytes, use);
+ }
+}
+inline void AddCellMemory(gc::Cell* cell, size_t nbytes, MemoryUse use) {
+ if (cell->isTenured()) {
+ AddCellMemory(&cell->asTenured(), nbytes, use);
+ }
+}
+
+// Remove association between malloc memory and a GC thing. This call should
+// follow a call to AddCellMemory with the same size and use.
+
+inline void RemoveCellMemory(gc::TenuredCell* cell, size_t nbytes,
+ MemoryUse use) {
+ MOZ_ASSERT(!CurrentThreadIsGCFinalizing(),
+ "Use GCContext methods to remove associated memory in finalizers");
+
+ if (nbytes) {
+ auto zoneBase = ZoneAllocator::from(cell->zoneFromAnyThread());
+ zoneBase->removeCellMemory(cell, nbytes, use, false);
+ }
+}
+inline void RemoveCellMemory(gc::Cell* cell, size_t nbytes, MemoryUse use) {
+ if (cell->isTenured()) {
+ RemoveCellMemory(&cell->asTenured(), nbytes, use);
+ }
+}
+
+} // namespace js
+
+#endif // gc_ZoneAllocator_h
diff --git a/js/src/gc/moz.build b/js/src/gc/moz.build
new file mode 100644
index 0000000000..489b5a96ae
--- /dev/null
+++ b/js/src/gc/moz.build
@@ -0,0 +1,61 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+FINAL_LIBRARY = "js"
+
+# Includes should be relative to parent path
+LOCAL_INCLUDES += ["!..", ".."]
+
+include("../js-config.mozbuild")
+include("../js-cxxflags.mozbuild")
+
+# Generate GC statistics phase data.
+GeneratedFile(
+ "StatsPhasesGenerated.h",
+ script="GenerateStatsPhases.py",
+ entry_point="generateHeader",
+)
+GeneratedFile(
+ "StatsPhasesGenerated.inc",
+ script="GenerateStatsPhases.py",
+ entry_point="generateCpp",
+)
+
+UNIFIED_SOURCES += [
+ "Allocator.cpp",
+ "AtomMarking.cpp",
+ "Barrier.cpp",
+ "Compacting.cpp",
+ "FinalizationObservers.cpp",
+ "GC.cpp",
+ "GCAPI.cpp",
+ "GCParallelTask.cpp",
+ "Heap.cpp",
+ "MallocedBlockCache.cpp",
+ "Marking.cpp",
+ "Memory.cpp",
+ "Nursery.cpp",
+ "ParallelMarking.cpp",
+ "Pretenuring.cpp",
+ "PublicIterators.cpp",
+ "RootMarking.cpp",
+ "Scheduling.cpp",
+ "Statistics.cpp",
+ "Sweeping.cpp",
+ "Tenuring.cpp",
+ "Tracer.cpp",
+ "Verifier.cpp",
+ "WeakMap.cpp",
+ "WeakMapPtr.cpp",
+ "Zone.cpp",
+]
+
+# StoreBuffer.cpp cannot be built in unified mode because its template
+# instantiations may or may not be needed depending on what it gets bundled
+# with.
+SOURCES += [
+ "StoreBuffer.cpp",
+]